374 files changed, 4406 insertions(+), 1713 deletions(-)
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml b/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml index 7b553d559c83..98c6fcf7bf26 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml +++ b/Documentation/devicetree/bindings/i2c/i2c-mpc.yaml @@ -46,6 +46,13 @@ properties: description: | I2C bus timeout in microseconds + fsl,i2c-erratum-a004447: + $ref: /schemas/types.yaml#/definitions/flag + description: | + Indicates the presence of QorIQ erratum A-004447, which + says that the standard i2c recovery scheme mechanism does + not work and an alternate implementation is needed. + required: - compatible - reg diff --git a/Documentation/driver-api/usb/usb.rst b/Documentation/driver-api/usb/usb.rst index 820e867af45a..2c94ff2f4385 100644 --- a/Documentation/driver-api/usb/usb.rst +++ b/Documentation/driver-api/usb/usb.rst @@ -123,6 +123,8 @@ are in ``drivers/usb/common/common.c``. In addition, some functions useful for creating debugging output are defined in ``drivers/usb/common/debug.c``. +.. _usb_header: + Host-Side Data Types and Macros =============================== diff --git a/Documentation/userspace-api/seccomp_filter.rst b/Documentation/userspace-api/seccomp_filter.rst index bd9165241b6c..6efb41cc8072 100644 --- a/Documentation/userspace-api/seccomp_filter.rst +++ b/Documentation/userspace-api/seccomp_filter.rst @@ -250,14 +250,14 @@ Users can read via ``ioctl(SECCOMP_IOCTL_NOTIF_RECV)`` (or ``poll()``) on a seccomp notification fd to receive a ``struct seccomp_notif``, which contains five members: the input length of the structure, a unique-per-filter ``id``, the ``pid`` of the task which triggered this request (which may be 0 if the -task is in a pid ns not visible from the listener's pid namespace), a ``flags`` -member which for now only has ``SECCOMP_NOTIF_FLAG_SIGNALED``, representing -whether or not the notification is a result of a non-fatal signal, and the -``data`` passed to seccomp. Userspace can then make a decision based on this -information about what to do, and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a -response, indicating what should be returned to userspace. The ``id`` member of -``struct seccomp_notif_resp`` should be the same ``id`` as in ``struct -seccomp_notif``. +task is in a pid ns not visible from the listener's pid namespace). The +notification also contains the ``data`` passed to seccomp, and a filters flag. +The structure should be zeroed out prior to calling the ioctl. + +Userspace can then make a decision based on this information about what to do, +and ``ioctl(SECCOMP_IOCTL_NOTIF_SEND)`` a response, indicating what should be +returned to userspace. The ``id`` member of ``struct seccomp_notif_resp`` should +be the same ``id`` as in ``struct seccomp_notif``. It is worth noting that ``struct seccomp_data`` contains the values of register arguments to the syscall, but does not contain pointers to memory. The task's diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst index 5feb3706a7ae..af1b37441e0a 100644 --- a/Documentation/virt/kvm/vcpu-requests.rst +++ b/Documentation/virt/kvm/vcpu-requests.rst @@ -118,10 +118,12 @@ KVM_REQ_MMU_RELOAD necessary to inform each VCPU to completely refresh the tables. This request is used for that. -KVM_REQ_PENDING_TIMER +KVM_REQ_UNBLOCK - This request may be made from a timer handler run on the host on behalf - of a VCPU. It informs the VCPU thread to inject a timer interrupt. + This request informs the vCPU to exit kvm_vcpu_block. 
It is used for + example from timer handlers that run on the host on behalf of a vCPU, + or in order to update the interrupt routing and ensure that assigned + devices will wake up the vCPU. KVM_REQ_UNHALT diff --git a/MAINTAINERS b/MAINTAINERS index bfb3d0931cba..b706dd20ff2b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3877,6 +3877,7 @@ L: linux-btrfs@vger.kernel.org S: Maintained W: http://btrfs.wiki.kernel.org/ Q: http://patchwork.kernel.org/project/linux-btrfs/list/ +C: irc://irc.libera.chat/btrfs T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git F: Documentation/filesystems/btrfs.rst F: fs/btrfs/ @@ -6945,6 +6946,7 @@ F: net/core/failover.c FANOTIFY M: Jan Kara <jack@suse.cz> R: Amir Goldstein <amir73il@gmail.com> +R: Matthew Bobrowski <repnop@google.com> L: linux-fsdevel@vger.kernel.org S: Maintained F: fs/notify/fanotify/ @@ -14117,6 +14119,7 @@ F: drivers/pci/controller/pci-v3-semi.c PCI ENDPOINT SUBSYSTEM M: Kishon Vijay Abraham I <kishon@ti.com> M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> +R: Krzysztof WilczyÅ„ski <kw@linux.com> L: linux-pci@vger.kernel.org S: Supported F: Documentation/PCI/endpoint/* @@ -14165,6 +14168,7 @@ F: drivers/pci/controller/pci-xgene-msi.c PCI NATIVE HOST BRIDGE AND ENDPOINT DRIVERS M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> R: Rob Herring <robh@kernel.org> +R: Krzysztof WilczyÅ„ski <kw@linux.com> L: linux-pci@vger.kernel.org S: Supported Q: http://patchwork.ozlabs.org/project/linux-pci/list/ @@ -14324,10 +14328,12 @@ PER-CPU MEMORY ALLOCATOR M: Dennis Zhou <dennis@kernel.org> M: Tejun Heo <tj@kernel.org> M: Christoph Lameter <cl@linux.com> +L: linux-mm@kvack.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git F: arch/*/include/asm/percpu.h F: include/linux/percpu*.h +F: lib/percpu*.c F: mm/percpu*.c PER-TASK DELAY ACCOUNTING @@ -17676,7 +17682,6 @@ R: Mika Westerberg <mika.westerberg@linux.intel.com> L: linux-i2c@vger.kernel.org S: Maintained F: drivers/i2c/busses/i2c-designware-* -F: include/linux/platform_data/i2c-designware.h SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER M: Jaehoon Chung <jh80.chung@samsung.com> @@ -20012,6 +20017,7 @@ F: arch/x86/xen/*swiotlb* F: drivers/xen/*swiotlb* XFS FILESYSTEM +C: irc://irc.oftc.net/xfs M: Darrick J. Wong <djwong@kernel.org> M: linux-xfs@vger.kernel.org L: linux-xfs@vger.kernel.org @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 13 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc4 NAME = Frozen Wasteland # *DOCUMENTATION* @@ -928,6 +928,11 @@ CC_FLAGS_LTO += -fvisibility=hidden # Limit inlining across translation units to reduce binary size KBUILD_LDFLAGS += -mllvm -import-instr-limit=5 + +# Check for frame size exceeding threshold during prolog/epilog insertion. +ifneq ($(CONFIG_FRAME_WARN),0) +KBUILD_LDFLAGS += -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN) +endif endif ifdef CONFIG_LTO diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 2175ec0004ed..451e11e5fd23 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -74,7 +74,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx, * This insanity brought to you by speculative system register reads, * out-of-order memory accesses, sequence locks and Thomas Gleixner. 
* - * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html + * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/ */ #define arch_counter_enforce_ordering(val) do { \ u64 tmp, _val = (val); \ diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index cf8df032b9c3..5e9b33cbac51 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -63,6 +63,7 @@ #define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18 #define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19 #define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20 +#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21 #ifndef __ASSEMBLY__ @@ -201,6 +202,8 @@ extern void __kvm_timer_set_cntvoff(u64 cntvoff); extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); +extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu); + extern u64 __vgic_v3_get_gic_config(void); extern u64 __vgic_v3_read_vmcr(void); extern void __vgic_v3_write_vmcr(u32 vmcr); diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index f612c090f2e4..01b9857757f2 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -463,4 +463,9 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu) vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC; } +static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature) +{ + return test_bit(feature, vcpu->arch.features); +} + #endif /* __ARM64_KVM_EMULATE_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 1cb39c0803a4..e720148232a0 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -720,11 +720,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) return ret; } - if (run->immediate_exit) - return -EINTR; - vcpu_load(vcpu); + if (run->immediate_exit) { + ret = -EINTR; + goto out; + } + kvm_sigset_activate(vcpu); ret = 1; @@ -897,6 +899,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_sigset_deactivate(vcpu); +out: + /* + * In the unlikely event that we are returning to userspace + * with pending exceptions or PC adjustment, commit these + * adjustments in order to give userspace a consistent view of + * the vcpu state. Note that this relies on __kvm_adjust_pc() + * being preempt-safe on VHE. + */ + if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION | + KVM_ARM64_INCREMENT_PC))) + kvm_call_hyp(__kvm_adjust_pc, vcpu); + vcpu_put(vcpu); return ret; } diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c index 73629094f903..11541b94b328 100644 --- a/arch/arm64/kvm/hyp/exception.c +++ b/arch/arm64/kvm/hyp/exception.c @@ -296,7 +296,7 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) *vcpu_pc(vcpu) = vect_offset; } -void kvm_inject_exception(struct kvm_vcpu *vcpu) +static void kvm_inject_exception(struct kvm_vcpu *vcpu) { if (vcpu_el1_is_32bit(vcpu)) { switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) { @@ -329,3 +329,19 @@ void kvm_inject_exception(struct kvm_vcpu *vcpu) } } } + +/* + * Adjust the guest PC (and potentially exception state) depending on + * flags provided by the emulation code. 
+ */ +void __kvm_adjust_pc(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) { + kvm_inject_exception(vcpu); + vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION | + KVM_ARM64_EXCEPT_MASK); + } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) { + kvm_skip_instr(vcpu); + vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC; + } +} diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h index 61716359035d..4fdfeabefeb4 100644 --- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h +++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h @@ -13,8 +13,6 @@ #include <asm/kvm_emulate.h> #include <asm/kvm_host.h> -void kvm_inject_exception(struct kvm_vcpu *vcpu); - static inline void kvm_skip_instr(struct kvm_vcpu *vcpu) { if (vcpu_mode_is_32bit(vcpu)) { @@ -44,22 +42,6 @@ static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu) } /* - * Adjust the guest PC on entry, depending on flags provided by EL1 - * for the purpose of emulation (MMIO, sysreg) or exception injection. - */ -static inline void __adjust_pc(struct kvm_vcpu *vcpu) -{ - if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) { - kvm_inject_exception(vcpu); - vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION | - KVM_ARM64_EXCEPT_MASK); - } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) { - kvm_skip_instr(vcpu); - vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC; - } -} - -/* * Skip an instruction while host sysregs are live. * Assumes host is always 64-bit. */ diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index f36420a80474..1632f001f4ed 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -28,6 +28,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt) cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu)); } +static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt) +{ + DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1); + + __kvm_adjust_pc(kern_hyp_va(vcpu)); +} + static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt) { __kvm_flush_vm_context(); @@ -170,6 +177,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *); static const hcall_t host_hcall[] = { HANDLE_FUNC(__kvm_vcpu_run), + HANDLE_FUNC(__kvm_adjust_pc), HANDLE_FUNC(__kvm_flush_vm_context), HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa), HANDLE_FUNC(__kvm_tlb_flush_vmid), diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index e342f7f4f4fb..4b60c0056c04 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -23,8 +23,8 @@ extern unsigned long hyp_nr_cpus; struct host_kvm host_kvm; -struct hyp_pool host_s2_mem; -struct hyp_pool host_s2_dev; +static struct hyp_pool host_s2_mem; +static struct hyp_pool host_s2_dev; /* * Copies of the host's CPU features registers holding sanitized values. 
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c index 7488f53b0aa2..a3d3a275344e 100644 --- a/arch/arm64/kvm/hyp/nvhe/setup.c +++ b/arch/arm64/kvm/hyp/nvhe/setup.c @@ -17,7 +17,6 @@ #include <nvhe/trap_handler.h> struct hyp_pool hpool; -struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops; unsigned long hyp_nr_cpus; #define hyp_percpu_size ((unsigned long)__per_cpu_end - \ @@ -27,6 +26,7 @@ static void *vmemmap_base; static void *hyp_pgt_base; static void *host_s2_mem_pgt_base; static void *host_s2_dev_pgt_base; +static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops; static int divide_memory_pool(void *virt, unsigned long size) { diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index e9f6ea704d07..f7af9688c1f7 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -4,7 +4,6 @@ * Author: Marc Zyngier <marc.zyngier@arm.com> */ -#include <hyp/adjust_pc.h> #include <hyp/switch.h> #include <hyp/sysreg-sr.h> @@ -201,7 +200,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) */ __debug_save_host_buffers_nvhe(vcpu); - __adjust_pc(vcpu); + __kvm_adjust_pc(vcpu); /* * We must restore the 32-bit state before the sysregs, thanks diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 7b8f7db5c1ed..b3229924d243 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -4,7 +4,6 @@ * Author: Marc Zyngier <marc.zyngier@arm.com> */ -#include <hyp/adjust_pc.h> #include <hyp/switch.h> #include <linux/arm-smccc.h> @@ -132,7 +131,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) __load_guest_stage2(vcpu->arch.hw_mmu); __activate_traps(vcpu); - __adjust_pc(vcpu); + __kvm_adjust_pc(vcpu); sysreg_restore_guest_state_vhe(guest_ctxt); __debug_switch_to_guest(vcpu); diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index c5d1f3c87dbd..c10207fed2f3 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1156,13 +1156,13 @@ out_unlock: bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) { if (!kvm->arch.mmu.pgt) - return 0; + return false; __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT, (range->end - range->start) << PAGE_SHIFT, range->may_block); - return 0; + return false; } bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) @@ -1170,7 +1170,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) kvm_pfn_t pfn = pte_pfn(range->pte); if (!kvm->arch.mmu.pgt) - return 0; + return false; WARN_ON(range->end - range->start != 1); @@ -1190,7 +1190,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) PAGE_SIZE, __pfn_to_phys(pfn), KVM_PGTABLE_PROT_R, NULL); - return 0; + return false; } bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) @@ -1200,7 +1200,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) pte_t pte; if (!kvm->arch.mmu.pgt) - return 0; + return false; WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE); @@ -1213,7 +1213,7 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { if (!kvm->arch.mmu.pgt) - return 0; + return false; return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT); diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 956cdc240148..d37ebee085cf 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -166,6 +166,25 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu 
*vcpu) return 0; } +static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu) +{ + struct kvm_vcpu *tmp; + bool is32bit; + int i; + + is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT); + if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit) + return false; + + /* Check that the vcpus are either all 32bit or all 64bit */ + kvm_for_each_vcpu(i, tmp, vcpu->kvm) { + if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit) + return false; + } + + return true; +} + /** * kvm_reset_vcpu - sets core registers and sys_regs to reset value * @vcpu: The VCPU pointer @@ -217,13 +236,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) } } + if (!vcpu_allowed_register_width(vcpu)) { + ret = -EINVAL; + goto out; + } + switch (vcpu->arch.target) { default: if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { - if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) { - ret = -EINVAL; - goto out; - } pstate = VCPU_RESET_PSTATE_SVC; } else { pstate = VCPU_RESET_PSTATE_EL1; diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 76ea2800c33e..1a7968ad078c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -399,14 +399,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { - u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; + u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm]; if (p->is_write) reg_to_dbg(vcpu, p, rd, dbg_reg); else dbg_to_reg(vcpu, p, rd, dbg_reg); - trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); + trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); return true; } @@ -414,7 +414,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu, static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; @@ -424,7 +424,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm]; if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; @@ -434,21 +434,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, static void reset_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { - vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val; + vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val; } static bool trap_bcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { - u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; + u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm]; if (p->is_write) reg_to_dbg(vcpu, p, rd, dbg_reg); else dbg_to_reg(vcpu, p, rd, dbg_reg); - trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); + trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); return true; } @@ -456,7 +456,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu, static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return 
-EFAULT; @@ -467,7 +467,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm]; if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; @@ -477,22 +477,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, static void reset_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { - vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val; + vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val; } static bool trap_wvr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { - u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; + u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]; if (p->is_write) reg_to_dbg(vcpu, p, rd, dbg_reg); else dbg_to_reg(vcpu, p, rd, dbg_reg); - trace_trap_reg(__func__, rd->reg, p->is_write, - vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]); + trace_trap_reg(__func__, rd->CRm, p->is_write, + vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]); return true; } @@ -500,7 +500,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu, static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; @@ -510,7 +510,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]; if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; @@ -520,21 +520,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, static void reset_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { - vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val; + vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val; } static bool trap_wcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *rd) { - u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; + u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm]; if (p->is_write) reg_to_dbg(vcpu, p, rd, dbg_reg); else dbg_to_reg(vcpu, p, rd, dbg_reg); - trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); + trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg); return true; } @@ -542,7 +542,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu, static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm]; if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; @@ -552,7 +552,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, const struct kvm_one_reg *reg, void __user *uaddr) { - __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; + __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm]; if (copy_to_user(uaddr, r, 
KVM_REG_SIZE(reg->id)) != 0) return -EFAULT; @@ -562,7 +562,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, static void reset_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { - vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val; + vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val; } static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 6dd9369e3ea0..89b66ef43a0f 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -515,7 +515,8 @@ static void __init map_mem(pgd_t *pgdp) */ BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end)); - if (rodata_full || crash_mem_map || debug_pagealloc_enabled()) + if (rodata_full || crash_mem_map || debug_pagealloc_enabled() || + IS_ENABLED(CONFIG_KFENCE)) flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; /* diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c index b184baa4e56a..f175bce2987f 100644 --- a/arch/mips/alchemy/board-xxs1500.c +++ b/arch/mips/alchemy/board-xxs1500.c @@ -18,6 +18,7 @@ #include <asm/reboot.h> #include <asm/setup.h> #include <asm/mach-au1x00/au1000.h> +#include <asm/mach-au1x00/gpio-au1000.h> #include <prom.h> const char *get_system_type(void) diff --git a/arch/mips/include/asm/mips-boards/launch.h b/arch/mips/include/asm/mips-boards/launch.h index f93aa5ee2e2e..3481ed4c117b 100644 --- a/arch/mips/include/asm/mips-boards/launch.h +++ b/arch/mips/include/asm/mips-boards/launch.h @@ -3,6 +3,9 @@ * */ +#ifndef _ASM_MIPS_BOARDS_LAUNCH_H +#define _ASM_MIPS_BOARDS_LAUNCH_H + #ifndef _ASSEMBLER_ struct cpulaunch { @@ -34,3 +37,5 @@ struct cpulaunch { /* Polling period in count cycles for secondary CPU's */ #define LAUNCHPERIOD 10000 + +#endif /* _ASM_MIPS_BOARDS_LAUNCH_H */ diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c index de03838b343b..a9b72eacfc0b 100644 --- a/arch/mips/lib/mips-atomic.c +++ b/arch/mips/lib/mips-atomic.c @@ -37,7 +37,7 @@ */ notrace void arch_local_irq_disable(void) { - preempt_disable(); + preempt_disable_notrace(); __asm__ __volatile__( " .set push \n" @@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void) : /* no inputs */ : "memory"); - preempt_enable(); + preempt_enable_notrace(); } EXPORT_SYMBOL(arch_local_irq_disable); @@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void) { unsigned long flags; - preempt_disable(); + preempt_disable_notrace(); __asm__ __volatile__( " .set push \n" @@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void) : /* no inputs */ : "memory"); - preempt_enable(); + preempt_enable_notrace(); return flags; } @@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags) { unsigned long __tmp1; - preempt_disable(); + preempt_disable_notrace(); __asm__ __volatile__( " .set push \n" @@ -106,7 +106,7 @@ notrace void arch_local_irq_restore(unsigned long flags) : "0" (flags) : "memory"); - preempt_enable(); + preempt_enable_notrace(); } EXPORT_SYMBOL(arch_local_irq_restore); diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index 0c5de07da097..0135376c5de5 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c @@ -8,6 +8,7 @@ #include <linux/io.h> #include <linux/clk.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/sizes.h> #include <linux/of_fdt.h> @@ -25,6 +26,7 @@ __iomem void *rt_sysc_membase; __iomem void *rt_memc_membase; +EXPORT_SYMBOL_GPL(rt_sysc_membase); __iomem void *plat_of_remap_node(const char 
*node) { diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi index c2717f31925a..ccda0a91abf0 100644 --- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi @@ -122,7 +122,15 @@ }; /include/ "pq3-i2c-0.dtsi" + i2c@3000 { + fsl,i2c-erratum-a004447; + }; + /include/ "pq3-i2c-1.dtsi" + i2c@3100 { + fsl,i2c-erratum-a004447; + }; + /include/ "pq3-duart-0.dtsi" /include/ "pq3-espi-0.dtsi" spi0: spi@7000 { diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi index 872e4485dc3f..ddc018d42252 100644 --- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi +++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi @@ -371,7 +371,23 @@ }; /include/ "qoriq-i2c-0.dtsi" + i2c@118000 { + fsl,i2c-erratum-a004447; + }; + + i2c@118100 { + fsl,i2c-erratum-a004447; + }; + /include/ "qoriq-i2c-1.dtsi" + i2c@119000 { + fsl,i2c-erratum-a004447; + }; + + i2c@119100 { + fsl,i2c-erratum-a004447; + }; + /include/ "qoriq-duart-0.dtsi" /include/ "qoriq-duart-1.dtsi" /include/ "qoriq-gpio-0.dtsi" diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 1e83359f286b..7f2e90db2050 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -51,6 +51,7 @@ /* PPC-specific vcpu->requests bit members */ #define KVM_REQ_WATCHDOG KVM_ARCH_REQ(0) #define KVM_REQ_EPR_EXIT KVM_ARCH_REQ(1) +#define KVM_REQ_PENDING_TIMER KVM_ARCH_REQ(2) #include <linux/mmu_notifier.h> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 28a80d240b76..7360350e66ff 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3936,7 +3936,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) break; } cur = ktime_get(); - } while (single_task_running() && ktime_before(cur, stop)); + } while (kvm_vcpu_can_poll(cur, stop)); spin_lock(&vc->lock); vc->vcore_state = VCORE_INACTIVE; diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index a8ad8eb76120..c5914e70a0fd 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -34,6 +34,7 @@ config RISCV select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT select ARCH_SUPPORTS_HUGETLBFS if MMU + select ARCH_USE_MEMTEST select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_HUGE_PMD_SHARE if 64BIT diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h index 1e954101906a..e4e291d40759 100644 --- a/arch/riscv/include/asm/kexec.h +++ b/arch/riscv/include/asm/kexec.h @@ -42,8 +42,8 @@ struct kimage_arch { unsigned long fdt_addr; }; -const extern unsigned char riscv_kexec_relocate[]; -const extern unsigned int riscv_kexec_relocate_size; +extern const unsigned char riscv_kexec_relocate[]; +extern const unsigned int riscv_kexec_relocate_size; typedef void (*riscv_kexec_method)(unsigned long first_ind_entry, unsigned long jump_addr, diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c index cc048143fba5..9e99e1db156b 100644 --- a/arch/riscv/kernel/machine_kexec.c +++ b/arch/riscv/kernel/machine_kexec.c @@ -14,8 +14,9 @@ #include <asm/set_memory.h> /* For set_memory_x() */ #include <linux/compiler.h> /* For unreachable() */ #include <linux/cpu.h> /* For cpu_down() */ +#include <linux/reboot.h> -/** +/* * kexec_image_info - Print received image details */ static void @@ -39,7 +40,7 @@ kexec_image_info(const struct kimage 
*image) } } -/** +/* * machine_kexec_prepare - Initialize kexec * * This function is called from do_kexec_load, when the user has @@ -100,7 +101,7 @@ machine_kexec_prepare(struct kimage *image) } -/** +/* * machine_kexec_cleanup - Cleanup any leftovers from * machine_kexec_prepare * @@ -135,7 +136,7 @@ void machine_shutdown(void) #endif } -/** +/* * machine_crash_shutdown - Prepare to kexec after a kernel crash * * This function is called by crash_kexec just before machine_kexec @@ -151,7 +152,7 @@ machine_crash_shutdown(struct pt_regs *regs) pr_info("Starting crashdump kernel...\n"); } -/** +/* * machine_kexec - Jump to the loaded kimage * * This function is called by kernel_kexec which is called by the diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index 10b965c34536..15cc65ac7ca6 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -84,6 +84,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return 0; } +#ifdef CONFIG_MMU void *alloc_insn_page(void) { return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END, @@ -91,6 +92,7 @@ void *alloc_insn_page(void) VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, __builtin_return_address(0)); } +#endif /* install breakpoint in text */ void __kprobes arch_arm_kprobe(struct kprobe *p) diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c index 2b3e0cb90d78..bde85fc53357 100644 --- a/arch/riscv/kernel/stacktrace.c +++ b/arch/riscv/kernel/stacktrace.c @@ -27,10 +27,10 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, fp = frame_pointer(regs); sp = user_stack_pointer(regs); pc = instruction_pointer(regs); - } else if (task == NULL || task == current) { - fp = (unsigned long)__builtin_frame_address(0); - sp = sp_in_global; - pc = (unsigned long)walk_stackframe; + } else if (task == current) { + fp = (unsigned long)__builtin_frame_address(1); + sp = (unsigned long)__builtin_frame_address(0); + pc = (unsigned long)__builtin_return_address(0); } else { /* task blocked in __switch_to */ fp = task->thread.s[0]; @@ -106,15 +106,15 @@ static bool print_trace_address(void *arg, unsigned long pc) return true; } -void dump_backtrace(struct pt_regs *regs, struct task_struct *task, +noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task, const char *loglvl) { - pr_cont("%sCall Trace:\n", loglvl); walk_stackframe(task, regs, print_trace_address, (void *)loglvl); } void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { + pr_cont("%sCall Trace:\n", loglvl); dump_backtrace(NULL, task, loglvl); } @@ -139,7 +139,7 @@ unsigned long get_wchan(struct task_struct *task) #ifdef CONFIG_STACKTRACE -void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, +noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) { walk_stackframe(task, regs, consume_entry, cookie); diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 323641097f63..e7bef91cee04 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -99,6 +99,7 @@ KVM_X86_OP_NULL(post_block) KVM_X86_OP_NULL(vcpu_blocking) KVM_X86_OP_NULL(vcpu_unblocking) KVM_X86_OP_NULL(update_pi_irte) +KVM_X86_OP_NULL(start_assignment) KVM_X86_OP_NULL(apicv_post_state_restore) KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt) KVM_X86_OP_NULL(set_hv_timer) diff --git a/arch/x86/include/asm/kvm_host.h 
b/arch/x86/include/asm/kvm_host.h index 55efbacfc244..9c7ced0e3171 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1352,6 +1352,7 @@ struct kvm_x86_ops { int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); + void (*start_assignment)(struct kvm *kvm); void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu); bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 8a0ccdb56076..5e5de05a8fbf 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -5111,7 +5111,7 @@ done: return rc; } -int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) +int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type) { int rc = X86EMUL_CONTINUE; int mode = ctxt->mode; @@ -5322,7 +5322,8 @@ done_prefixes: ctxt->execute = opcode.u.execute; - if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD))) + if (unlikely(emulation_type & EMULTYPE_TRAP_UD) && + likely(!(ctxt->d & EmulateOnUD))) return EMULATION_FAILED; if (unlikely(ctxt->d & diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index f98370a39936..f00830e5202f 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1172,6 +1172,7 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm) { struct kvm_hv *hv = to_kvm_hv(kvm); u64 gfn; + int idx; if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN || hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET || @@ -1190,9 +1191,16 @@ void kvm_hv_invalidate_tsc_page(struct kvm *kvm) gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; hv->tsc_ref.tsc_sequence = 0; + + /* + * Take the srcu lock as memslots will be accessed to check the gfn + * cache generation against the memslots generation. + */ + idx = srcu_read_lock(&kvm->srcu); if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN; + srcu_read_unlock(&kvm->srcu, idx); out_unlock: mutex_unlock(&hv->hv_lock); diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index f016838faedd..3e870bf9ca4d 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -314,7 +314,6 @@ struct x86_emulate_ctxt { int interruptibility; bool perm_ok; /* do not check permissions if true */ - bool ud; /* inject an #UD if host doesn't support insn */ bool tf; /* TF value before instruction (after for syscall/sysret) */ bool have_exception; @@ -491,7 +490,7 @@ enum x86_intercept { #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64 #endif -int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len); +int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type); bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt); #define EMULATION_FAILED -1 #define EMULATION_OK 0 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index c0ebef560bd1..8120e8614b92 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1598,11 +1598,19 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu) guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline; + if (lapic_timer_advance_dynamic) { + adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta); + /* + * If the timer fired early, reread the TSC to account for the + * overhead of the above adjustment to avoid waiting longer + * than is necessary. 
+ */ + if (guest_tsc < tsc_deadline) + guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); + } + if (guest_tsc < tsc_deadline) __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc); - - if (lapic_timer_advance_dynamic) - adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta); } void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu) @@ -1661,7 +1669,7 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn) } atomic_inc(&apic->lapic_timer.pending); - kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); + kvm_make_request(KVM_REQ_UNBLOCK, vcpu); if (from_timer_fn) kvm_vcpu_kick(vcpu); } diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 95eeb5ac6a8a..237317b1eddd 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1192,9 +1192,9 @@ bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) } /* - * Remove write access from all the SPTEs mapping GFNs [start, end). If - * skip_4k is set, SPTEs that map 4k pages, will not be write-protected. - * Returns true if an SPTE has been changed and the TLBs need to be flushed. + * Remove write access from all SPTEs at or above min_level that map GFNs + * [start, end). Returns true if an SPTE has been changed and the TLBs need to + * be flushed. */ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t start, gfn_t end, int min_level) diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 712b4e0de481..0e62e6a2438c 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -28,10 +28,8 @@ #include "svm.h" /* enable / disable AVIC */ -int avic; -#ifdef CONFIG_X86_LOCAL_APIC -module_param(avic, int, S_IRUGO); -#endif +bool avic; +module_param(avic, bool, S_IRUGO); #define SVM_AVIC_DOORBELL 0xc001011b diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 05eca131eaf2..e088086f3de6 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1010,9 +1010,7 @@ static __init int svm_hardware_setup(void) } if (avic) { - if (!npt_enabled || - !boot_cpu_has(X86_FEATURE_AVIC) || - !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) { + if (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)) { avic = false; } else { pr_info("AVIC enabled\n"); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 2c9ece618b29..2908c6ab5bb4 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -480,7 +480,7 @@ extern struct kvm_x86_nested_ops svm_nested_ops; #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL -extern int avic; +extern bool avic; static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data) { diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 8dee8a5fbc17..aa0e7872fcc9 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -90,8 +90,7 @@ static inline bool cpu_has_vmx_preemption_timer(void) static inline bool cpu_has_vmx_posted_intr(void) { - return IS_ENABLED(CONFIG_X86_LOCAL_APIC) && - vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR; + return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR; } static inline bool cpu_has_load_ia32_efer(void) diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index 459748680daf..5f81ef092bd4 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -238,6 +238,20 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu) /* + * Bail out of the block loop if the VM has an assigned + * device, but the blocking vCPU didn't reconfigure the + * 
PI.NV to the wakeup vector, i.e. the assigned device + * came along after the initial check in pi_pre_block(). + */ +void vmx_pi_start_assignment(struct kvm *kvm) +{ + if (!irq_remapping_cap(IRQ_POSTING_CAP)) + return; + + kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK); +} + +/* * pi_update_irte - set IRTE for Posted-Interrupts * * @kvm: kvm diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h index 0bdc41391c5b..7f7b2326caf5 100644 --- a/arch/x86/kvm/vmx/posted_intr.h +++ b/arch/x86/kvm/vmx/posted_intr.h @@ -95,5 +95,6 @@ void __init pi_init_cpu(int cpu); bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu); int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq, bool set); +void vmx_pi_start_assignment(struct kvm *kvm); #endif /* __KVM_X86_VMX_POSTED_INTR_H */ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 4bceb5ca3a89..50b42d7a8a11 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4843,7 +4843,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_run *kvm_run = vcpu->run; u32 intr_info, ex_no, error_code; - unsigned long cr2, rip, dr6; + unsigned long cr2, dr6; u32 vect_info; vect_info = vmx->idt_vectoring_info; @@ -4933,8 +4933,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) vmx->vcpu.arch.event_exit_inst_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN); kvm_run->exit_reason = KVM_EXIT_DEBUG; - rip = kvm_rip_read(vcpu); - kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; + kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); kvm_run->debug.arch.exception = ex_no; break; case AC_VECTOR: @@ -7721,6 +7720,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .nested_ops = &vmx_nested_ops, .update_pi_irte = pi_update_irte, + .start_assignment = vmx_pi_start_assignment, #ifdef CONFIG_X86_64 .set_hv_timer = vmx_set_hv_timer, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bbc4e04e67ad..b594275d49b5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3105,6 +3105,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu) st->preempted & KVM_VCPU_FLUSH_TLB); if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB) kvm_vcpu_flush_tlb_guest(vcpu); + } else { + st->preempted = 0; } vcpu->arch.st.preempted = 0; @@ -7226,6 +7228,11 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu) BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK); BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK); + ctxt->interruptibility = 0; + ctxt->have_exception = false; + ctxt->exception.vector = -1; + ctxt->perm_ok = false; + init_decode_cache(ctxt); vcpu->arch.emulate_regs_need_sync_from_vcpu = false; } @@ -7561,14 +7568,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, kvm_vcpu_check_breakpoint(vcpu, &r)) return r; - ctxt->interruptibility = 0; - ctxt->have_exception = false; - ctxt->exception.vector = -1; - ctxt->perm_ok = false; - - ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; - - r = x86_decode_insn(ctxt, insn, insn_len); + r = x86_decode_insn(ctxt, insn, insn_len, emulation_type); trace_kvm_emulate_insn_start(vcpu); ++vcpu->stat.insn_emulation; @@ -8360,6 +8360,9 @@ static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) vcpu->stat.directed_yield_attempted++; + if (single_task_running()) + goto no_yield; + rcu_read_lock(); map = rcu_dereference(vcpu->kvm->arch.apic_map); @@ -9496,7 +9499,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu) if (r <= 0) break; - 
kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu); + kvm_clear_request(KVM_REQ_UNBLOCK, vcpu); if (kvm_cpu_has_pending_timer(vcpu)) kvm_inject_pending_timer_irqs(vcpu); @@ -10115,8 +10118,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, kvm_update_dr7(vcpu); if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) - vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + - get_segment_base(vcpu, VCPU_SREG_CS); + vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); /* * Trigger an rflags update that will inject or remove the trace @@ -11499,7 +11501,8 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) void kvm_arch_start_assignment(struct kvm *kvm) { - atomic_inc(&kvm->arch.assigned_device_count); + if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1) + static_call_cond(kvm_x86_start_assignment)(kvm); } EXPORT_SYMBOL_GPL(kvm_arch_start_assignment); diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 0ec5b3f69112..6e02448d15d9 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -226,6 +226,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "AMDI0010", APD_ADDR(wt_i2c_desc) }, { "AMD0020", APD_ADDR(cz_uart_desc) }, { "AMDI0020", APD_ADDR(cz_uart_desc) }, + { "AMDI0022", APD_ADDR(cz_uart_desc) }, { "AMD0030", }, { "AMD0040", APD_ADDR(fch_misc_desc)}, { "HYGO0010", APD_ADDR(wt_i2c_desc) }, diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index 624a26794d55..e5ba9795ec69 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) } break; + case ACPI_TYPE_LOCAL_ADDRESS_HANDLER: + + ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, + "***** Address handler %p\n", object)); + + acpi_os_delete_mutex(object->address_space.context_mutex); + break; + default: break; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index f973bbe90e5e..e21611c9a170 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -134,7 +134,7 @@ int acpi_power_init(void); void acpi_power_resources_list_free(struct list_head *list); int acpi_extract_power_resources(union acpi_object *package, unsigned int start, struct list_head *list); -int acpi_add_power_resource(acpi_handle handle); +struct acpi_device *acpi_add_power_resource(acpi_handle handle); void acpi_power_add_remove_device(struct acpi_device *adev, bool add); int acpi_power_wakeup_list_init(struct list_head *list, int *system_level); int acpi_device_sleep_wake(struct acpi_device *dev, @@ -142,7 +142,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev, int acpi_power_get_inferred_state(struct acpi_device *device, int *state); int acpi_power_on_resources(struct acpi_device *device, int state); int acpi_power_transition(struct acpi_device *device, int state); -void acpi_turn_off_unused_power_resources(void); +void acpi_turn_off_unused_power_resources(bool init); /* -------------------------------------------------------------------------- Device Power Management diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 56102eaaa2da..97c9a94a1a30 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -52,6 +52,7 @@ struct acpi_power_resource { u32 system_level; u32 order; unsigned int ref_count; + unsigned int users; bool wakeup_enabled; struct mutex resource_lock; struct list_head dependents; @@ -147,6 +148,7 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start, for (i = start; i < 
package->package.count; i++) { union acpi_object *element = &package->package.elements[i]; + struct acpi_device *rdev; acpi_handle rhandle; if (element->type != ACPI_TYPE_LOCAL_REFERENCE) { @@ -163,13 +165,16 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start, if (acpi_power_resource_is_dup(package, start, i)) continue; - err = acpi_add_power_resource(rhandle); - if (err) + rdev = acpi_add_power_resource(rhandle); + if (!rdev) { + err = -ENODEV; break; - + } err = acpi_power_resources_list_add(rhandle, list); if (err) break; + + to_power_resource(rdev)->users++; } if (err) acpi_power_resources_list_free(list); @@ -907,7 +912,7 @@ static void acpi_power_add_resource_to_list(struct acpi_power_resource *resource mutex_unlock(&power_resource_list_lock); } -int acpi_add_power_resource(acpi_handle handle) +struct acpi_device *acpi_add_power_resource(acpi_handle handle) { struct acpi_power_resource *resource; struct acpi_device *device = NULL; @@ -918,11 +923,11 @@ int acpi_add_power_resource(acpi_handle handle) acpi_bus_get_device(handle, &device); if (device) - return 0; + return device; resource = kzalloc(sizeof(*resource), GFP_KERNEL); if (!resource) - return -ENOMEM; + return NULL; device = &resource->device; acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER); @@ -959,11 +964,11 @@ int acpi_add_power_resource(acpi_handle handle) acpi_power_add_resource_to_list(resource); acpi_device_add_finalize(device); - return 0; + return device; err: acpi_release_power_resource(&device->dev); - return result; + return NULL; } #ifdef CONFIG_ACPI_SLEEP @@ -997,7 +1002,38 @@ void acpi_resume_power_resources(void) } #endif -void acpi_turn_off_unused_power_resources(void) +static void acpi_power_turn_off_if_unused(struct acpi_power_resource *resource, + bool init) +{ + if (resource->ref_count > 0) + return; + + if (init) { + if (resource->users > 0) + return; + } else { + int result, state; + + result = acpi_power_get_state(resource->device.handle, &state); + if (result || state == ACPI_POWER_RESOURCE_STATE_OFF) + return; + } + + dev_info(&resource->device.dev, "Turning OFF\n"); + __acpi_power_off(resource); +} + +/** + * acpi_turn_off_unused_power_resources - Turn off power resources not in use. + * @init: Control switch. + * + * If @ainit is set, unconditionally turn off all of the ACPI power resources + * without any users. + * + * Otherwise, turn off all ACPI power resources without active references (that + * is, the ones that should be "off" at the moment) that are "on". 
+ */ +void acpi_turn_off_unused_power_resources(bool init) { struct acpi_power_resource *resource; @@ -1006,10 +1042,7 @@ void acpi_turn_off_unused_power_resources(void) list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) { mutex_lock(&resource->resource_lock); - if (!resource->ref_count) { - dev_info(&resource->device.dev, "Turning OFF\n"); - __acpi_power_off(resource); - } + acpi_power_turn_off_if_unused(resource, init); mutex_unlock(&resource->resource_lock); } diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 453eff8ec8c3..e10d38ac7cf2 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -2360,7 +2360,7 @@ int __init acpi_scan_init(void) } } - acpi_turn_off_unused_power_resources(); + acpi_turn_off_unused_power_resources(true); acpi_scan_initialized = true; diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 09fd13757b65..df386571da98 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -504,7 +504,7 @@ static void acpi_pm_start(u32 acpi_state) */ static void acpi_pm_end(void) { - acpi_turn_off_unused_power_resources(); + acpi_turn_off_unused_power_resources(false); acpi_scan_lock_release(); /* * This is necessary in case acpi_pm_finish() is not called during a diff --git a/drivers/base/core.c b/drivers/base/core.c index 628e33939aca..54ba506e5a89 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -194,6 +194,17 @@ int device_links_read_lock_held(void) { return srcu_read_lock_held(&device_links_srcu); } + +static void device_link_synchronize_removal(void) +{ + synchronize_srcu(&device_links_srcu); +} + +static void device_link_remove_from_lists(struct device_link *link) +{ + list_del_rcu(&link->s_node); + list_del_rcu(&link->c_node); +} #else /* !CONFIG_SRCU */ static DECLARE_RWSEM(device_links_lock); @@ -224,6 +235,16 @@ int device_links_read_lock_held(void) return lockdep_is_held(&device_links_lock); } #endif + +static inline void device_link_synchronize_removal(void) +{ +} + +static void device_link_remove_from_lists(struct device_link *link) +{ + list_del(&link->s_node); + list_del(&link->c_node); +} #endif /* !CONFIG_SRCU */ static bool device_is_ancestor(struct device *dev, struct device *target) @@ -445,8 +466,13 @@ static struct attribute *devlink_attrs[] = { }; ATTRIBUTE_GROUPS(devlink); -static void device_link_free(struct device_link *link) +static void device_link_release_fn(struct work_struct *work) { + struct device_link *link = container_of(work, struct device_link, rm_work); + + /* Ensure that all references to the link object have been dropped. */ + device_link_synchronize_removal(); + while (refcount_dec_not_one(&link->rpm_active)) pm_runtime_put(link->supplier); @@ -455,24 +481,19 @@ static void device_link_free(struct device_link *link) kfree(link); } -#ifdef CONFIG_SRCU -static void __device_link_free_srcu(struct rcu_head *rhead) -{ - device_link_free(container_of(rhead, struct device_link, rcu_head)); -} - static void devlink_dev_release(struct device *dev) { struct device_link *link = to_devlink(dev); - call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu); -} -#else -static void devlink_dev_release(struct device *dev) -{ - device_link_free(to_devlink(dev)); + INIT_WORK(&link->rm_work, device_link_release_fn); + /* + * It may take a while to complete this work because of the SRCU + * synchronization in device_link_release_fn() and if the consumer or + * supplier devices get deleted when it runs, so put it into the "long" + * workqueue. 
+ */ + queue_work(system_long_wq, &link->rm_work); } -#endif static struct class devlink_class = { .name = "devlink", @@ -846,7 +867,6 @@ out: } EXPORT_SYMBOL_GPL(device_link_add); -#ifdef CONFIG_SRCU static void __device_link_del(struct kref *kref) { struct device_link *link = container_of(kref, struct device_link, kref); @@ -856,25 +876,9 @@ static void __device_link_del(struct kref *kref) pm_runtime_drop_link(link); - list_del_rcu(&link->s_node); - list_del_rcu(&link->c_node); - device_unregister(&link->link_dev); -} -#else /* !CONFIG_SRCU */ -static void __device_link_del(struct kref *kref) -{ - struct device_link *link = container_of(kref, struct device_link, kref); - - dev_info(link->consumer, "Dropping the link to %s\n", - dev_name(link->supplier)); - - pm_runtime_drop_link(link); - - list_del(&link->s_node); - list_del(&link->c_node); + device_link_remove_from_lists(link); device_unregister(&link->link_dev); } -#endif /* !CONFIG_SRCU */ static void device_link_put_kref(struct device_link *link) { diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index b88c63fbf7fb..7f6ba2c975ed 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -388,6 +388,8 @@ static const struct usb_device_id blacklist_table[] = { /* Realtek 8822CE Bluetooth devices */ { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Realtek 8852AE Bluetooth devices */ { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK | diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index e15d484b6a5a..ea7ca74fc173 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE)) return 0; - n = 0; - len = CPER_REC_LEN - 1; + len = CPER_REC_LEN; dmi_memdev_name(mem->mem_dev_handle, &bank, &device); if (bank && device) n = snprintf(msg, len, "DIMM location: %s %s ", bank, device); @@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg) "DIMM location: not present. 
DMI handle: 0x%.4x ", mem->mem_dev_handle); - msg[n] = '\0'; return n; } diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c index bb042ab7c2be..e901f8564ca0 100644 --- a/drivers/firmware/efi/fdtparams.c +++ b/drivers/firmware/efi/fdtparams.c @@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm) BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name)); BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params)); + if (!fdt) + return 0; + for (i = 0; i < ARRAY_SIZE(dt_params); i++) { node = fdt_path_offset(fdt, dt_params[i].path); if (node < 0) diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c index 4e81c6077188..dd95f330fe6e 100644 --- a/drivers/firmware/efi/libstub/file.c +++ b/drivers/firmware/efi/libstub/file.c @@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len, return 0; /* Skip any leading slashes */ - while (cmdline[i] == L'/' || cmdline[i] == L'\\') + while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\')) i++; while (--result_len > 0 && i < cmdline_len) { diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c index 5737cb0fcd44..0a9aba5f9cef 100644 --- a/drivers/firmware/efi/memattr.c +++ b/drivers/firmware/efi/memattr.c @@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out) return false; } - if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) { - pr_warn("Entry attributes invalid: RO and XP bits both cleared\n"); - return false; - } - if (PAGE_SIZE > EFI_PAGE_SIZE && (!PAGE_ALIGNED(in->phys_addr) || !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index fad3b91f74f5..d39cff4a1fe3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -156,16 +156,16 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; break; case 1: - sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0, + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; break; case 2: - sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0, - mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL; + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; break; case 3: - sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0, - mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL; + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; break; } @@ -450,7 +450,7 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd, engine_id, queue_id); uint32_t i = 0, reg; #undef HQD_N_REGS -#define HQD_N_REGS (19+6+7+10) +#define HQD_N_REGS (19+6+7+12) *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); if (*dump == NULL) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 0350205c4897..6819fe5612d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev, { struct amdgpu_ctx *ctx; struct amdgpu_ctx_mgr *mgr; - unsigned long ras_counter; if (!fpriv) return -EINVAL; @@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev, if (atomic_read(&ctx->guilty)) out->state.flags |= 
AMDGPU_CTX_QUERY2_FLAGS_GUILTY; - /*query ue count*/ - ras_counter = amdgpu_ras_query_error_count(adev, false); - /*ras counter is monotonic increasing*/ - if (ras_counter != ctx->ras_counter_ue) { - out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE; - ctx->ras_counter_ue = ras_counter; - } - - /*query ce count*/ - ras_counter = amdgpu_ras_query_error_count(adev, true); - if (ras_counter != ctx->ras_counter_ce) { - out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE; - ctx->ras_counter_ce = ras_counter; - } - mutex_unlock(&mgr->lock); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 66ddfe4f58c2..57ec108b5972 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3118,7 +3118,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) */ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev) { - if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display) + if (amdgpu_sriov_vf(adev) || + adev->enable_virtual_display || + (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) return false; return amdgpu_device_asic_has_dc_support(adev->asic_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 8f4a8f8d8146..39b6c6bfab45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr, int amdgpu_fru_get_product_info(struct amdgpu_device *adev) { unsigned char buff[34]; - int addrptr = 0, size = 0; + int addrptr, size; + int len; if (!is_fru_eeprom_supported(adev)) return 0; @@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) /* If algo exists, it means that the i2c_adapter's initialized */ if (!adev->pm.smu_i2c.algo) { DRM_WARN("Cannot access FRU, EEPROM accessor not initialized"); - return 0; + return -ENODEV; } /* There's a lot of repetition here. This is due to the FRU having @@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) size = amdgpu_fru_read_eeprom(adev, addrptr, buff); if (size < 1) { DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size); - return size; + return -EINVAL; } /* Increment the addrptr by the size of the field, and 1 due to the @@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) size = amdgpu_fru_read_eeprom(adev, addrptr, buff); if (size < 1) { DRM_ERROR("Failed to read FRU product name, ret:%d", size); - return size; + return -EINVAL; } + len = size; /* Product name should only be 32 characters. Any more, * and something could be wrong. Cap it at 32 to be safe */ - if (size > 32) { + if (len >= sizeof(adev->product_name)) { DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake"); - size = 32; + len = sizeof(adev->product_name) - 1; } /* Start at 2 due to buff using fields 0 and 1 for the address */ - memcpy(adev->product_name, &buff[2], size); - adev->product_name[size] = '\0'; + memcpy(adev->product_name, &buff[2], len); + adev->product_name[len] = '\0'; addrptr += size + 1; size = amdgpu_fru_read_eeprom(adev, addrptr, buff); if (size < 1) { DRM_ERROR("Failed to read FRU product number, ret:%d", size); - return size; + return -EINVAL; } + len = size; /* Product number should only be 16 characters. Any more, * and something could be wrong. 
Cap it at 16 to be safe */ - if (size > 16) { + if (len >= sizeof(adev->product_number)) { DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake"); - size = 16; + len = sizeof(adev->product_number) - 1; } - memcpy(adev->product_number, &buff[2], size); - adev->product_number[size] = '\0'; + memcpy(adev->product_number, &buff[2], len); + adev->product_number[len] = '\0'; addrptr += size + 1; size = amdgpu_fru_read_eeprom(adev, addrptr, buff); if (size < 1) { DRM_ERROR("Failed to read FRU product version, ret:%d", size); - return size; + return -EINVAL; } addrptr += size + 1; @@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) if (size < 1) { DRM_ERROR("Failed to read FRU serial number, ret:%d", size); - return size; + return -EINVAL; } + len = size; /* Serial number should only be 16 characters. Any more, * and something could be wrong. Cap it at 16 to be safe */ - if (size > 16) { + if (len >= sizeof(adev->serial)) { DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake"); - size = 16; + len = sizeof(adev->serial) - 1; } - memcpy(adev->serial, &buff[2], size); - adev->serial[size] = '\0'; + memcpy(adev->serial, &buff[2], len); + adev->serial[len] = '\0'; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 46a5328e00e0..60aa99a39a74 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -76,6 +76,7 @@ struct psp_ring uint64_t ring_mem_mc_addr; void *ring_mem_handle; uint32_t ring_size; + uint32_t ring_wptr; }; /* More registers may will be supported */ diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index de5abceced0d..85967a5570cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -172,6 +172,8 @@ static int jpeg_v2_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->vcn.idle_work); + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS)) jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 938ef4ce5b76..46096ad7f0d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -187,14 +187,14 @@ static int jpeg_v2_5_hw_init(void *handle) static int jpeg_v2_5_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring; int i; + cancel_delayed_work_sync(&adev->vcn.idle_work); + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { if (adev->jpeg.harvest_config & (1 << i)) continue; - ring = &adev->jpeg.inst[i].ring_dec; if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS)) jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index 94be35357f7d..bd77794315bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -159,9 +159,9 @@ static int jpeg_v3_0_hw_init(void *handle) static int jpeg_v3_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring; - ring = &adev->jpeg.inst->ring_dec; + cancel_delayed_work_sync(&adev->vcn.idle_work); + if (adev->jpeg.cur_state != AMD_PG_STATE_GATE && 
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS)) jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 589410c32d09..02bba1f3c42e 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -720,7 +720,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp) struct amdgpu_device *adev = psp->adev; if (amdgpu_sriov_vf(adev)) - data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); + data = psp->km_ring.ring_wptr; else data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); @@ -734,6 +734,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value) if (amdgpu_sriov_vf(adev)) { WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); + psp->km_ring.ring_wptr = value; } else WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index f2e725f72d2f..908664a5774b 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -379,7 +379,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp) struct amdgpu_device *adev = psp->adev; if (amdgpu_sriov_vf(adev)) - data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102); + data = psp->km_ring.ring_wptr; else data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67); return data; @@ -394,6 +394,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value) /* send interrupt to PSP for SRIOV ring write pointer update */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD); + psp->km_ring.ring_wptr = value; } else WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value); } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 2bab9c77952f..cf3803f8f075 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); + amdgpu_bo_unpin(bo); amdgpu_bo_unreserve(bo); amdgpu_bo_unref(&bo); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 0c1beefa3e49..27b1ced145d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -231,9 +231,13 @@ static int vcn_v1_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->vcn.idle_work); + if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || - RREG32_SOC15(VCN, 0, mmUVD_STATUS)) + (adev->vcn.cur_state != AMD_PG_STATE_GATE && + RREG32_SOC15(VCN, 0, mmUVD_STATUS))) { vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 116b9643d5ba..8af567c546db 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -262,6 +262,8 @@ static int vcn_v2_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_delayed_work_sync(&adev->vcn.idle_work); + if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || (adev->vcn.cur_state != AMD_PG_STATE_GATE && RREG32_SOC15(VCN, 0, mmUVD_STATUS))) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 948813d7caa0..888b17d84691 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -321,6 +321,8 @@ static int vcn_v2_5_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; + cancel_delayed_work_sync(&adev->vcn.idle_work); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 14470da52113..3b23de996db2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -372,15 +372,14 @@ done: static int vcn_v3_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring; int i; + cancel_delayed_work_sync(&adev->vcn.idle_work); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; - ring = &adev->vcn.inst[i].ring_dec; - if (!amdgpu_sriov_vf(adev)) { if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) || (adev->vcn.cur_state != AMD_PG_STATE_GATE && diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 389eff96fcf6..652cc1a0e450 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -925,7 +925,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); } - adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); + if (!adev->dm.dc->ctx->dmub_srv) + adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); if (!adev->dm.dc->ctx->dmub_srv) { DRM_ERROR("Couldn't allocate DC DMUB server!\n"); return -ENOMEM; @@ -1954,7 +1955,6 @@ static int dm_suspend(void *handle) amdgpu_dm_irq_suspend(adev); - dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); return 0; @@ -5500,7 +5500,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, struct drm_display_mode saved_mode; struct drm_display_mode *freesync_mode = NULL; bool native_mode_found = false; - bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false; + bool recalculate_timing = false; + bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; int mode_refresh; int preferred_refresh = 0; #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -5563,7 +5564,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ DRM_DEBUG_DRIVER("No preferred mode found\n"); } else { - recalculate_timing |= amdgpu_freesync_vid_mode && + recalculate_timing = amdgpu_freesync_vid_mode && is_freesync_video_mode(&mode, aconnector); if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); @@ -5571,11 +5572,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, mode = *freesync_mode; } else { decide_crtc_timing_for_drm_display_mode( - &mode, preferred_mode, - dm_state ? 
(dm_state->scaling != RMX_OFF) : false); - } + &mode, preferred_mode, scale); - preferred_refresh = drm_mode_vrefresh(preferred_mode); + preferred_refresh = drm_mode_vrefresh(preferred_mode); + } } if (recalculate_timing) @@ -5587,7 +5587,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, * If scaling is enabled and refresh rate didn't change * we copy the vic and polarities of the old timings */ - if (!recalculate_timing || mode_refresh != preferred_refresh) + if (!scale || mode_refresh != preferred_refresh) fill_stream_properties_from_drm_display_mode( stream, &mode, &aconnector->base, con_state, NULL, requested_bpc); @@ -9854,7 +9854,7 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state, if (cursor_scale_w != primary_scale_w || cursor_scale_h != primary_scale_h) { - DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n"); + drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n"); return -EINVAL; } @@ -9891,7 +9891,7 @@ static int validate_overlay(struct drm_atomic_state *state) int i; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; - struct drm_plane_state *primary_state, *overlay_state = NULL; + struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL; /* Check if primary plane is contained inside overlay */ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { @@ -9921,6 +9921,14 @@ static int validate_overlay(struct drm_atomic_state *state) if (!primary_state->crtc) return 0; + /* check if cursor plane is enabled */ + cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor); + if (IS_ERR(cursor_state)) + return PTR_ERR(cursor_state); + + if (drm_atomic_plane_disabling(plane->state, cursor_state)) + return 0; + /* Perform the bounds check to ensure the overlay plane covers the primary */ if (primary_state->crtc_x < overlay_state->crtc_x || primary_state->crtc_y < overlay_state->crtc_y || diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 527e56c353cb..8357aa3c41d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -3236,7 +3236,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc, voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; - if (voltage_supported && dummy_pstate_supported) { + if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) { context->bw_ctx.bw.dcn.clk.p_state_change_support = false; goto restore_dml_state; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index ac13042672ea..0eaf86b5e698 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2925,6 +2925,8 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu, static int navi10_enable_mgpu_fan_boost(struct smu_context *smu) { + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; struct amdgpu_device *adev = smu->adev; uint32_t param = 0; @@ -2932,6 +2934,13 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu) if (adev->asic_type == CHIP_NAVI12) return 0; + /* + * Skip the MGpuFanBoost setting for those ASICs + * which do not support 
it + */ + if (!smc_pptable->MGpuFanBoostLimitRpm) + return 0; + /* Workaround for WS SKU */ if (adev->pdev->device == 0x7312 && adev->pdev->revision == 0) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index d2fd44b903ca..b124a5e40dd6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -3027,6 +3027,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu) { + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; + + /* + * Skip the MGpuFanBoost setting for those ASICs + * which do not support it + */ + if (!smc_pptable->MGpuFanBoostLimitRpm) + return 0; + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetMGpuFanBoostLimitRpm, 0, diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 93f4d059fc89..1e1cb245fca7 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -20,7 +20,6 @@ config DRM_I915 select INPUT if ACPI select ACPI_VIDEO if ACPI select ACPI_BUTTON if ACPI - select IO_MAPPING select SYNC_FILE select IOSF_MBI select CRC32 diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 02a003fd48fb..50cae0198a3d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -128,49 +128,13 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable) return drm_dp_dpcd_write(&intel_dp->aux, DP_PHY_REPEATER_MODE, &val, 1) == 1; } -/** - * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode - * @intel_dp: Intel DP struct - * - * Read the LTTPR common and DPRX capabilities and switch to non-transparent - * link training mode if any is detected and read the PHY capabilities for all - * detected LTTPRs. In case of an LTTPR detection error or if the number of - * LTTPRs is more than is supported (8), fall back to the no-LTTPR, - * transparent mode link training mode. - * - * Returns: - * >0 if LTTPRs were detected and the non-transparent LT mode was set. The - * DPRX capabilities are read out. - * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a - * detection failure and the transparent LT mode was set. The DPRX - * capabilities are read out. - * <0 Reading out the DPRX capabilities failed. - */ -int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) +static int intel_dp_init_lttpr(struct intel_dp *intel_dp) { int lttpr_count; - bool ret; int i; - ret = intel_dp_read_lttpr_common_caps(intel_dp); - - /* The DPTX shall read the DPRX caps after LTTPR detection. */ - if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) { - intel_dp_reset_lttpr_common_caps(intel_dp); - return -EIO; - } - - if (!ret) - return 0; - - /* - * The 0xF0000-0xF02FF range is only valid if the DPCD revision is - * at least 1.4. 
- */ - if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) { - intel_dp_reset_lttpr_common_caps(intel_dp); + if (!intel_dp_read_lttpr_common_caps(intel_dp)) return 0; - } lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps); /* @@ -211,6 +175,37 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) return lttpr_count; } + +/** + * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode + * @intel_dp: Intel DP struct + * + * Read the LTTPR common and DPRX capabilities and switch to non-transparent + * link training mode if any is detected and read the PHY capabilities for all + * detected LTTPRs. In case of an LTTPR detection error or if the number of + * LTTPRs is more than is supported (8), fall back to the no-LTTPR, + * transparent mode link training mode. + * + * Returns: + * >0 if LTTPRs were detected and the non-transparent LT mode was set. The + * DPRX capabilities are read out. + * 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a + * detection failure and the transparent LT mode was set. The DPRX + * capabilities are read out. + * <0 Reading out the DPRX capabilities failed. + */ +int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp) +{ + int lttpr_count = intel_dp_init_lttpr(intel_dp); + + /* The DPTX shall read the DPRX caps after LTTPR detection. */ + if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) { + intel_dp_reset_lttpr_common_caps(intel_dp); + return -EIO; + } + + return lttpr_count; +} EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps); static u8 dp_voltage_max(u8 preemph) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index f6fe5cb01438..8598a1c78a4c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -367,10 +367,11 @@ retry: goto err_unpin; /* Finally, remap it using the new GTT offset */ - ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start + - (vma->ggtt_view.partial.offset << PAGE_SHIFT), - (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, - min_t(u64, vma->size, area->vm_end - area->vm_start)); + ret = remap_io_mapping(area, + area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT), + (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, + min_t(u64, vma->size, area->vm_end - area->vm_start), + &ggtt->iomap); if (ret) goto err_fence; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9ec9277539ec..69e43bf91a15 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1905,6 +1905,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); /* i915_mm.c */ +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap); int remap_io_sg(struct vm_area_struct *vma, unsigned long addr, unsigned long size, struct scatterlist *sgl, resource_size_t iobase); diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c index 9a777b0ff59b..666808cb3a32 100644 --- a/drivers/gpu/drm/i915/i915_mm.c +++ b/drivers/gpu/drm/i915/i915_mm.c @@ -37,6 +37,17 @@ struct remap_pfn { resource_size_t iobase; }; +static int remap_pfn(pte_t *pte, unsigned long addr, void *data) +{ + struct remap_pfn *r = data; + + /* Special PTE are not associated with any struct page */ + set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot))); + r->pfn++; + + return 0; +} + #define use_dma(io) ((io) != -1) 
static inline unsigned long sgt_pfn(const struct remap_pfn *r) @@ -66,7 +77,40 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data) return 0; } +/** + * remap_io_mapping - remap an IO mapping to userspace + * @vma: user vma to map to + * @addr: target user address to start at + * @pfn: physical address of kernel memory + * @size: size of map area + * @iomap: the source io_mapping + * + * Note: this is only safe if the mm semaphore is held when called. + */ +int remap_io_mapping(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, unsigned long size, + struct io_mapping *iomap) +{ + struct remap_pfn r; + int err; + #define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP) + GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS); + + /* We rely on prevalidation of the io-mapping to skip track_pfn(). */ + r.mm = vma->vm_mm; + r.pfn = pfn; + r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) | + (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK)); + + err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r); + if (unlikely(err)) { + zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT); + return err; + } + + return 0; +} /** * remap_io_sg - remap an IO mapping to userspace diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index ee8e753d98ce..eae0abd614cb 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -1592,8 +1592,8 @@ static int live_breadcrumbs_smoketest(void *arg) for (n = 0; n < smoke[0].ncontexts; n++) { smoke[0].contexts[n] = live_context(i915, file); - if (!smoke[0].contexts[n]) { - ret = -ENOMEM; + if (IS_ERR(smoke[0].contexts[n])) { + ret = PTR_ERR(smoke[0].contexts[n]); goto out_contexts; } } diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 453d8b4c5763..07fcd12dca16 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -485,11 +485,12 @@ static int meson_probe_remote(struct platform_device *pdev, static void meson_drv_shutdown(struct platform_device *pdev) { struct meson_drm *priv = dev_get_drvdata(&pdev->dev); - struct drm_device *drm = priv->drm; - DRM_DEBUG_DRIVER("\n"); - drm_kms_helper_poll_fini(drm); - drm_atomic_helper_shutdown(drm); + if (!priv) + return; + + drm_kms_helper_poll_fini(priv->drm); + drm_atomic_helper_shutdown(priv->drm); } static int meson_drv_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 87df251c1fcf..0cb868065348 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -25,7 +25,7 @@ #include "trace.h" /* XXX move to include/uapi/drm/drm_fourcc.h? */ -#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22) +#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22) struct reset_control; diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 79bff8b48271..bfae8a02f55b 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -510,7 +510,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, * dGPU sector layout. 
*/ if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU) - base |= BIT(39); + base |= BIT_ULL(39); #endif tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH); diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 7b88261f57bb..0ea320c1092b 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -3125,21 +3125,21 @@ static int tegra_sor_init(struct host1x_client *client) if (err < 0) { dev_err(sor->dev, "failed to acquire SOR reset: %d\n", err); - return err; + goto rpm_put; } err = reset_control_assert(sor->rst); if (err < 0) { dev_err(sor->dev, "failed to assert SOR reset: %d\n", err); - return err; + goto rpm_put; } } err = clk_prepare_enable(sor->clk); if (err < 0) { dev_err(sor->dev, "failed to enable clock: %d\n", err); - return err; + goto rpm_put; } usleep_range(1000, 3000); @@ -3150,7 +3150,7 @@ static int tegra_sor_init(struct host1x_client *client) dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err); clk_disable_unprepare(sor->clk); - return err; + goto rpm_put; } reset_control_release(sor->rst); @@ -3171,6 +3171,12 @@ static int tegra_sor_init(struct host1x_client *client) } return 0; + +rpm_put: + if (sor->rst) + pm_runtime_put(sor->dev); + + return err; } static int tegra_sor_exit(struct host1x_client *client) @@ -3739,12 +3745,8 @@ static int tegra_sor_probe(struct platform_device *pdev) if (!sor->aux) return -EPROBE_DEFER; - if (get_device(&sor->aux->ddc.dev)) { - if (try_module_get(sor->aux->ddc.owner)) - sor->output.ddc = &sor->aux->ddc; - else - put_device(&sor->aux->ddc.dev); - } + if (get_device(sor->aux->dev)) + sor->output.ddc = &sor->aux->ddc; } if (!sor->aux) { @@ -3772,12 +3774,13 @@ static int tegra_sor_probe(struct platform_device *pdev) err = tegra_sor_parse_dt(sor); if (err < 0) - return err; + goto put_aux; err = tegra_output_probe(&sor->output); - if (err < 0) - return dev_err_probe(&pdev->dev, err, - "failed to probe output\n"); + if (err < 0) { + dev_err_probe(&pdev->dev, err, "failed to probe output\n"); + goto put_aux; + } if (sor->ops && sor->ops->probe) { err = sor->ops->probe(sor); @@ -3916,17 +3919,10 @@ static int tegra_sor_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sor); pm_runtime_enable(&pdev->dev); - INIT_LIST_HEAD(&sor->client.list); + host1x_client_init(&sor->client); sor->client.ops = &sor_client_ops; sor->client.dev = &pdev->dev; - err = host1x_client_register(&sor->client); - if (err < 0) { - dev_err(&pdev->dev, "failed to register host1x client: %d\n", - err); - goto rpm_disable; - } - /* * On Tegra210 and earlier, provide our own implementation for the * pad output clock. 
@@ -3938,13 +3934,13 @@ static int tegra_sor_probe(struct platform_device *pdev) sor->index); if (!name) { err = -ENOMEM; - goto unregister; + goto uninit; } err = host1x_client_resume(&sor->client); if (err < 0) { dev_err(sor->dev, "failed to resume: %d\n", err); - goto unregister; + goto uninit; } sor->clk_pad = tegra_clk_sor_pad_register(sor, name); @@ -3955,17 +3951,30 @@ static int tegra_sor_probe(struct platform_device *pdev) err = PTR_ERR(sor->clk_pad); dev_err(sor->dev, "failed to register SOR pad clock: %d\n", err); - goto unregister; + goto uninit; + } + + err = __host1x_client_register(&sor->client); + if (err < 0) { + dev_err(&pdev->dev, "failed to register host1x client: %d\n", + err); + goto uninit; } return 0; -unregister: - host1x_client_unregister(&sor->client); -rpm_disable: +uninit: + host1x_client_exit(&sor->client); pm_runtime_disable(&pdev->dev); remove: + if (sor->aux) + sor->output.ddc = NULL; + tegra_output_remove(&sor->output); +put_aux: + if (sor->aux) + put_device(sor->aux->dev); + return err; } @@ -3983,6 +3992,11 @@ static int tegra_sor_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); + if (sor->aux) { + put_device(sor->aux->dev); + sor->output.ddc = NULL; + } + tegra_output_remove(&sor->output); return 0; diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index 510e3e001dab..a1dcf7d55c90 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -145,7 +145,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, list_for_each_entry(bo, &man->lru[j], lru) { uint32_t num_pages; - if (!bo->ttm || + if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) || bo->ttm->page_flags & TTM_PAGE_FLAG_SG || bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) continue; diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c index 46f69c532b6b..218e3718fd68 100644 --- a/drivers/gpu/host1x/bus.c +++ b/drivers/gpu/host1x/bus.c @@ -736,6 +736,29 @@ void host1x_driver_unregister(struct host1x_driver *driver) EXPORT_SYMBOL(host1x_driver_unregister); /** + * __host1x_client_init() - initialize a host1x client + * @client: host1x client + * @key: lock class key for the client-specific mutex + */ +void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key) +{ + INIT_LIST_HEAD(&client->list); + __mutex_init(&client->lock, "host1x client lock", key); + client->usecount = 0; +} +EXPORT_SYMBOL(__host1x_client_init); + +/** + * host1x_client_exit() - uninitialize a host1x client + * @client: host1x client + */ +void host1x_client_exit(struct host1x_client *client) +{ + mutex_destroy(&client->lock); +} +EXPORT_SYMBOL(host1x_client_exit); + +/** * __host1x_client_register() - register a host1x client * @client: host1x client * @key: lock class key for the client-specific mutex @@ -747,16 +770,11 @@ EXPORT_SYMBOL(host1x_driver_unregister); * device and call host1x_device_init(), which will in turn call each client's * &host1x_client_ops.init implementation. 
*/ -int __host1x_client_register(struct host1x_client *client, - struct lock_class_key *key) +int __host1x_client_register(struct host1x_client *client) { struct host1x *host1x; int err; - INIT_LIST_HEAD(&client->list); - __mutex_init(&client->lock, "host1x client lock", key); - client->usecount = 0; - mutex_lock(&devices_lock); list_for_each_entry(host1x, &devices, list) { diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 4bf263c2d61a..160554903ef9 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -93,11 +93,11 @@ menu "Special HID drivers" depends on HID config HID_A4TECH - tristate "A4 tech mice" + tristate "A4TECH mice" depends on HID default !EXPERT help - Support for A4 tech X5 and WOP-35 / Trust 450L mice. + Support for some A4TECH mice with two scroll wheels. config HID_ACCUTOUCH tristate "Accutouch touch device" @@ -922,6 +922,21 @@ config HID_SAMSUNG help Support for Samsung InfraRed remote control or keyboards. +config HID_SEMITEK + tristate "Semitek USB keyboards" + depends on HID + help + Support for Semitek USB keyboards that are not fully compliant + with the HID standard. + + There are many variants, including: + - GK61, GK64, GK68, GK84, GK96, etc. + - SK61, SK64, SK68, SK84, SK96, etc. + - Dierya DK61/DK66 + - Tronsmart TK09R + - Woo-dy + - X-Bows Nature/Knight + config HID_SONY tristate "Sony PS2/3/4 accessories" depends on USB_HID diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index 193431ec4db8..1ea1a7c0b20f 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -106,6 +106,7 @@ obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \ obj-$(CONFIG_HID_RMI) += hid-rmi.o obj-$(CONFIG_HID_SAITEK) += hid-saitek.o obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o +obj-$(CONFIG_HID_SEMITEK) += hid-semitek.o obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o obj-$(CONFIG_HID_SONY) += hid-sony.o obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c index 2ab38b715347..3589d9945da1 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c @@ -88,6 +88,7 @@ static void amd_sfh_work(struct work_struct *work) sensor_index = req_node->sensor_idx; report_id = req_node->report_id; node_type = req_node->report_type; + kfree(req_node); if (node_type == HID_FEATURE_REPORT) { report_size = get_feature_report(sensor_index, report_id, @@ -142,7 +143,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata) int rc, i; dev = &privdata->pdev->dev; - cl_data = kzalloc(sizeof(*cl_data), GFP_KERNEL); + cl_data = devm_kzalloc(dev, sizeof(*cl_data), GFP_KERNEL); if (!cl_data) return -ENOMEM; @@ -175,12 +176,12 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata) rc = -EINVAL; goto cleanup; } - cl_data->feature_report[i] = kzalloc(feature_report_size, GFP_KERNEL); + cl_data->feature_report[i] = devm_kzalloc(dev, feature_report_size, GFP_KERNEL); if (!cl_data->feature_report[i]) { rc = -ENOMEM; goto cleanup; } - cl_data->input_report[i] = kzalloc(input_report_size, GFP_KERNEL); + cl_data->input_report[i] = devm_kzalloc(dev, input_report_size, GFP_KERNEL); if (!cl_data->input_report[i]) { rc = -ENOMEM; goto cleanup; @@ -189,7 +190,8 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata) info.sensor_idx = cl_idx; info.dma_address = cl_data->sensor_dma_addr[i]; - cl_data->report_descr[i] = kzalloc(cl_data->report_descr_sz[i], GFP_KERNEL); + cl_data->report_descr[i] = + devm_kzalloc(dev, cl_data->report_descr_sz[i], 
GFP_KERNEL); if (!cl_data->report_descr[i]) { rc = -ENOMEM; goto cleanup; @@ -214,11 +216,11 @@ cleanup: cl_data->sensor_virt_addr[i], cl_data->sensor_dma_addr[i]); } - kfree(cl_data->feature_report[i]); - kfree(cl_data->input_report[i]); - kfree(cl_data->report_descr[i]); + devm_kfree(dev, cl_data->feature_report[i]); + devm_kfree(dev, cl_data->input_report[i]); + devm_kfree(dev, cl_data->report_descr[i]); } - kfree(cl_data); + devm_kfree(dev, cl_data); return rc; } @@ -241,6 +243,5 @@ int amd_sfh_hid_client_deinit(struct amd_mp2_dev *privdata) cl_data->sensor_dma_addr[i]); } } - kfree(cl_data); return 0; } diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c index 4f989483aa03..5ad1e7acd294 100644 --- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c +++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c @@ -162,9 +162,6 @@ void amdtp_hid_remove(struct amdtp_cl_data *cli_data) int i; for (i = 0; i < cli_data->num_hid_devices; ++i) { - kfree(cli_data->feature_report[i]); - kfree(cli_data->input_report[i]); - kfree(cli_data->report_descr[i]); if (cli_data->hid_sensor_hubs[i]) { kfree(cli_data->hid_sensor_hubs[i]->driver_data); hid_destroy_device(cli_data->hid_sensor_hubs[i]); diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c index 3a8c4a5971f7..2cbc32dda7f7 100644 --- a/drivers/hid/hid-a4tech.c +++ b/drivers/hid/hid-a4tech.c @@ -147,6 +147,8 @@ static const struct hid_device_id a4_devices[] = { .driver_data = A4_2WHEEL_MOUSE_HACK_B8 }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649), .driver_data = A4_2WHEEL_MOUSE_HACK_B8 }, + { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95), + .driver_data = A4_2WHEEL_MOUSE_HACK_B8 }, { } }; MODULE_DEVICE_TABLE(hid, a4_devices); diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 2ab22b925941..fca8fc78a78a 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -79,10 +79,9 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad"); #define QUIRK_T100_KEYBOARD BIT(6) #define QUIRK_T100CHI BIT(7) #define QUIRK_G752_KEYBOARD BIT(8) -#define QUIRK_T101HA_DOCK BIT(9) -#define QUIRK_T90CHI BIT(10) -#define QUIRK_MEDION_E1239T BIT(11) -#define QUIRK_ROG_NKEY_KEYBOARD BIT(12) +#define QUIRK_T90CHI BIT(9) +#define QUIRK_MEDION_E1239T BIT(10) +#define QUIRK_ROG_NKEY_KEYBOARD BIT(11) #define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \ QUIRK_NO_INIT_REPORTS | \ @@ -335,7 +334,7 @@ static int asus_raw_event(struct hid_device *hdev, if (drvdata->quirks & QUIRK_MEDION_E1239T) return asus_e1239t_event(drvdata, data, size); - if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) { + if (drvdata->quirks & QUIRK_USE_KBD_BACKLIGHT) { /* * Skip these report ID, the device emits a continuous stream associated * with the AURA mode it is in which looks like an 'echo'. @@ -355,6 +354,16 @@ static int asus_raw_event(struct hid_device *hdev, return -1; } } + if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD) { + /* + * G713 and G733 send these codes on some keypresses, depending on + * the key pressed it can trigger a shutdown event if not caught. 
+ */ + if(data[0] == 0x02 && data[1] == 0x30) { + return -1; + } + } + } return 0; @@ -1072,11 +1081,6 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id) return ret; } - /* use hid-multitouch for T101HA touchpad */ - if (id->driver_data & QUIRK_T101HA_DOCK && - hdev->collection->usage == HID_GD_MOUSE) - return -ENODEV; - ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "Asus hw start failed: %d\n", ret); @@ -1230,8 +1234,6 @@ static const struct hid_device_id asus_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD), QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES }, - { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, - USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD), QUIRK_T101HA_DOCK }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) }, { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) }, { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) }, @@ -1239,6 +1241,12 @@ static const struct hid_device_id asus_devices[] = { USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD), QUIRK_T100CHI }, { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE_MEDION_E1239T), QUIRK_MEDION_E1239T }, + /* + * Note bind to the HID_GROUP_GENERIC group, so that we only bind to the keyboard + * part, while letting hid-multitouch.c handle the touchpad. + */ + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) }, { } }; MODULE_DEVICE_TABLE(hid, asus_devices); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 0ae9f6df59d1..0de2788b9814 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2005,6 +2005,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask) case BUS_I2C: bus = "I2C"; break; + case BUS_VIRTUAL: + bus = "VIRTUAL"; + break; default: bus = "<UNKNOWN>"; } @@ -2588,7 +2591,6 @@ int hid_check_keys_pressed(struct hid_device *hid) return 0; } - EXPORT_SYMBOL_GPL(hid_check_keys_pressed); static int __init hid_init(void) diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 59f8d716d78f..a311fb87b02a 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -930,6 +930,9 @@ static const char *keys[KEY_MAX + 1] = { [KEY_APPSELECT] = "AppSelect", [KEY_SCREENSAVER] = "ScreenSaver", [KEY_VOICECOMMAND] = "VoiceCommand", + [KEY_ASSISTANT] = "Assistant", + [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext", + [KEY_EMOJI_PICKER] = "EmojiPicker", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", [KEY_BRIGHTNESS_MAX] = "BrightnessMax", [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c index a5751607ce24..f43a8406cb9a 100644 --- a/drivers/hid/hid-ft260.c +++ b/drivers/hid/hid-ft260.c @@ -201,7 +201,7 @@ struct ft260_i2c_write_request_report { u8 address; /* 7-bit I2C address */ u8 flag; /* I2C transaction condition */ u8 length; /* data payload length */ - u8 data[60]; /* data payload */ + u8 data[FT260_WR_DATA_MAX]; /* data payload */ } __packed; struct ft260_i2c_read_request_report { @@ -249,7 +249,10 @@ static int ft260_hid_feature_report_get(struct hid_device *hdev, ret = hid_hw_raw_request(hdev, report_id, buf, len, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); - memcpy(data, buf, len); + if (likely(ret == len)) + memcpy(data, buf, len); + else if (ret >= 0) + ret = -EIO; kfree(buf); return ret; } @@ -298,7 +301,7 @@ static int ft260_xfer_status(struct ft260_device *dev) ret = ft260_hid_feature_report_get(hdev, FT260_I2C_STATUS, (u8 *)&report, 
sizeof(report)); - if (ret < 0) { + if (unlikely(ret < 0)) { hid_err(hdev, "failed to retrieve status: %d\n", ret); return ret; } @@ -429,6 +432,9 @@ static int ft260_smbus_write(struct ft260_device *dev, u8 addr, u8 cmd, struct ft260_i2c_write_request_report *rep = (struct ft260_i2c_write_request_report *)dev->write_buf; + if (data_len >= sizeof(rep->data)) + return -EINVAL; + rep->address = addr; rep->data[0] = cmd; rep->length = data_len + 1; @@ -721,10 +727,9 @@ static int ft260_get_system_config(struct hid_device *hdev, ret = ft260_hid_feature_report_get(hdev, FT260_SYSTEM_SETTINGS, (u8 *)cfg, len); - if (ret != len) { + if (ret < 0) { hid_err(hdev, "failed to retrieve system status\n"); - if (ret >= 0) - return -EIO; + return ret; } return 0; } @@ -777,8 +782,8 @@ static int ft260_byte_show(struct hid_device *hdev, int id, u8 *cfg, int len, int ret; ret = ft260_hid_feature_report_get(hdev, id, cfg, len); - if (ret != len && ret >= 0) - return -EIO; + if (ret < 0) + return ret; return scnprintf(buf, PAGE_SIZE, "%hi\n", *field); } @@ -789,8 +794,8 @@ static int ft260_word_show(struct hid_device *hdev, int id, u8 *cfg, int len, int ret; ret = ft260_hid_feature_report_get(hdev, id, cfg, len); - if (ret != len && ret >= 0) - return -EIO; + if (ret < 0) + return ret; return scnprintf(buf, PAGE_SIZE, "%hi\n", le16_to_cpu(*field)); } @@ -941,10 +946,8 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id) ret = ft260_hid_feature_report_get(hdev, FT260_CHIP_VERSION, (u8 *)&version, sizeof(version)); - if (ret != sizeof(version)) { + if (ret < 0) { hid_err(hdev, "failed to retrieve chip version\n"); - if (ret >= 0) - ret = -EIO; goto err_hid_close; } diff --git a/drivers/hid/hid-gt683r.c b/drivers/hid/hid-gt683r.c index 898871c8c768..29ccb0accfba 100644 --- a/drivers/hid/hid-gt683r.c +++ b/drivers/hid/hid-gt683r.c @@ -54,6 +54,7 @@ static const struct hid_device_id gt683r_led_id[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, { } }; +MODULE_DEVICE_TABLE(hid, gt683r_led_id); static void gt683r_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 84b8da3e7d09..b84a0a11e05b 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -26,6 +26,7 @@ #define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006 #define USB_DEVICE_ID_A4TECH_X5_005D 0x000a #define USB_DEVICE_ID_A4TECH_RP_649 0x001a +#define USB_DEVICE_ID_A4TECH_NB_95 0x022b #define USB_VENDOR_ID_AASHIMA 0x06d6 #define USB_DEVICE_ID_AASHIMA_GAMEPAD 0x0025 @@ -299,8 +300,6 @@ #define USB_VENDOR_ID_CORSAIR 0x1b1c #define USB_DEVICE_ID_CORSAIR_K90 0x1b02 - -#define USB_VENDOR_ID_CORSAIR 0x1b1c #define USB_DEVICE_ID_CORSAIR_K70R 0x1b09 #define USB_DEVICE_ID_CORSAIR_K95RGB 0x1b11 #define USB_DEVICE_ID_CORSAIR_M65RGB 0x1b12 @@ -751,6 +750,7 @@ #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5 +#define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E 0x602e @@ -1051,6 +1051,7 @@ #define USB_DEVICE_ID_SAITEK_X52 0x075c #define USB_DEVICE_ID_SAITEK_X52_2 0x0255 #define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762 +#define USB_DEVICE_ID_SAITEK_X65 0x0b6a #define USB_VENDOR_ID_SAMSUNG 0x0419 #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 @@ -1060,6 +1061,9 @@ #define 
USB_DEVICE_ID_SEMICO_USB_KEYKOARD 0x0023 #define USB_DEVICE_ID_SEMICO_USB_KEYKOARD2 0x0027 +#define USB_VENDOR_ID_SEMITEK 0x1ea7 +#define USB_DEVICE_ID_SEMITEK_KEYBOARD 0x0907 + #define USB_VENDOR_ID_SENNHEISER 0x1395 #define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c @@ -1161,6 +1165,7 @@ #define USB_DEVICE_ID_SYNAPTICS_DELL_K12A 0x2819 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012 0x2968 #define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 +#define USB_DEVICE_ID_SYNAPTICS_DELL_K15A 0x6e21 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1002 0x73f4 #define USB_DEVICE_ID_SYNAPTICS_ACER_ONE_S1003 0x73f5 #define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 18f5e28d475c..abbfa91e73e4 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -964,6 +964,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel case 0x0cd: map_key_clear(KEY_PLAYPAUSE); break; case 0x0cf: map_key_clear(KEY_VOICECOMMAND); break; + + case 0x0d9: map_key_clear(KEY_EMOJI_PICKER); break; + case 0x0e0: map_abs_clear(ABS_VOLUME); break; case 0x0e2: map_key_clear(KEY_MUTE); break; case 0x0e5: map_key_clear(KEY_BASSBOOST); break; diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index d598094dadd0..fee4e54a3ce0 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -1263,6 +1263,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage, int status; long flags = (long) data[2]; + *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; if (flags & 0x80) switch (flags & 0x07) { diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 2bb473d8c424..8bcaee4ccae0 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -693,7 +693,7 @@ static int magicmouse_probe(struct hid_device *hdev, if (id->vendor == USB_VENDOR_ID_APPLE && id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && hdev->type != HID_TYPE_USBMOUSE) - return 0; + return -ENODEV; msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL); if (msc == NULL) { @@ -779,7 +779,10 @@ err_stop_hw: static void magicmouse_remove(struct hid_device *hdev) { struct magicmouse_sc *msc = hid_get_drvdata(hdev); - cancel_delayed_work_sync(&msc->work); + + if (msc) + cancel_delayed_work_sync(&msc->work); + hid_hw_stop(hdev); } diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 9d9f3e1bd5f4..2e4fb76c45f3 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -70,6 +70,7 @@ MODULE_LICENSE("GPL"); #define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18) #define MT_QUIRK_SEPARATE_APP_REPORT BIT(19) #define MT_QUIRK_FORCE_MULTI_INPUT BIT(20) +#define MT_QUIRK_DISABLE_WAKEUP BIT(21) #define MT_INPUTMODE_TOUCHSCREEN 0x02 #define MT_INPUTMODE_TOUCHPAD 0x03 @@ -191,6 +192,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app); #define MT_CLS_EXPORT_ALL_INPUTS 0x0013 /* reserved 0x0014 */ #define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015 +#define MT_CLS_WIN_8_DISABLE_WAKEUP 0x0016 /* vendor specific classes */ #define MT_CLS_3M 0x0101 @@ -283,6 +285,15 @@ static const struct mt_class mt_classes[] = { MT_QUIRK_WIN8_PTP_BUTTONS | MT_QUIRK_FORCE_MULTI_INPUT, .export_all_inputs = true }, + { .name = MT_CLS_WIN_8_DISABLE_WAKEUP, + .quirks = MT_QUIRK_ALWAYS_VALID | + MT_QUIRK_IGNORE_DUPLICATES | + MT_QUIRK_HOVERING | + MT_QUIRK_CONTACT_CNT_ACCURATE | + MT_QUIRK_STICKY_FINGERS | + MT_QUIRK_WIN8_PTP_BUTTONS | + 
MT_QUIRK_DISABLE_WAKEUP, + .export_all_inputs = true }, /* * vendor specific classes @@ -604,9 +615,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td, if (!(HID_MAIN_ITEM_VARIABLE & field->flags)) continue; - for (n = 0; n < field->report_count; n++) { - if (field->usage[n].hid == HID_DG_CONTACTID) - rdata->is_mt_collection = true; + if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) { + for (n = 0; n < field->report_count; n++) { + if (field->usage[n].hid == HID_DG_CONTACTID) { + rdata->is_mt_collection = true; + break; + } + } } } @@ -759,7 +774,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, return 1; case HID_DG_CONFIDENCE: if ((cls->name == MT_CLS_WIN_8 || - cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) && + cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT || + cls->name == MT_CLS_WIN_8_DISABLE_WAKEUP) && (field->application == HID_DG_TOUCHPAD || field->application == HID_DG_TOUCHSCREEN)) app->quirks |= MT_QUIRK_CONFIDENCE; @@ -1576,13 +1592,13 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) /* we do not set suffix = "Touchscreen" */ hi->input->name = hdev->name; break; - case HID_DG_STYLUS: - /* force BTN_STYLUS to allow tablet matching in udev */ - __set_bit(BTN_STYLUS, hi->input->keybit); - break; case HID_VD_ASUS_CUSTOM_MEDIA_KEYS: suffix = "Custom Media Keys"; break; + case HID_DG_STYLUS: + /* force BTN_STYLUS to allow tablet matching in udev */ + __set_bit(BTN_STYLUS, hi->input->keybit); + fallthrough; case HID_DG_PEN: suffix = "Stylus"; break; @@ -1749,8 +1765,14 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) #ifdef CONFIG_PM static int mt_suspend(struct hid_device *hdev, pm_message_t state) { + struct mt_device *td = hid_get_drvdata(hdev); + /* High latency is desirable for power savings during S3/S0ix */ - mt_set_modes(hdev, HID_LATENCY_HIGH, true, true); + if (td->mtclass.quirks & MT_QUIRK_DISABLE_WAKEUP) + mt_set_modes(hdev, HID_LATENCY_HIGH, false, false); + else + mt_set_modes(hdev, HID_LATENCY_HIGH, true, true); + return 0; } @@ -1809,6 +1831,12 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_ANTON, USB_DEVICE_ID_ANTON_TOUCH_PAD) }, + /* Asus T101HA */ + { .driver_data = MT_CLS_WIN_8_DISABLE_WAKEUP, + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_ASUSTEK, + USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) }, + /* Asus T304UA */ { .driver_data = MT_CLS_ASUS, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 3dd6f15f2a67..51b39bda9a9d 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -110,6 +110,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_602E), HID_QUIRK_ALWAYS_POLL }, @@ 
-158,6 +159,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X65), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET }, @@ -176,6 +178,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K12A), HID_QUIRK_NO_INIT_REPORTS }, + { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DELL_K15A), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD), HID_QUIRK_BADPAD }, { HID_USB_DEVICE(USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_TPV, USB_DEVICE_ID_TPV_OPTICAL_TOUCHSCREEN_8882), HID_QUIRK_NOGET }, @@ -211,6 +214,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, + { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_NB_95) }, #endif #if IS_ENABLED(CONFIG_HID_ACCUTOUCH) { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, diff --git a/drivers/hid/hid-semitek.c b/drivers/hid/hid-semitek.c new file mode 100644 index 000000000000..ba6607d5e051 --- /dev/null +++ b/drivers/hid/hid-semitek.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * HID driver for Semitek keyboards + * + * Copyright (c) 2021 Benjamin Moody + */ + +#include <linux/device.h> +#include <linux/hid.h> +#include <linux/module.h> + +#include "hid-ids.h" + +static __u8 *semitek_report_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + /* In the report descriptor for interface 2, fix the incorrect + description of report ID 0x04 (the report contains a + bitmask, not an array of keycodes.) 
*/ + if (*rsize == 0xcb && rdesc[0x83] == 0x81 && rdesc[0x84] == 0x00) { + hid_info(hdev, "fixing up Semitek report descriptor\n"); + rdesc[0x84] = 0x02; + } + return rdesc; +} + +static const struct hid_device_id semitek_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_SEMITEK, USB_DEVICE_ID_SEMITEK_KEYBOARD) }, + { } +}; +MODULE_DEVICE_TABLE(hid, semitek_devices); + +static struct hid_driver semitek_driver = { + .name = "semitek", + .id_table = semitek_devices, + .report_fixup = semitek_report_fixup, +}; +module_hid_driver(semitek_driver); + +MODULE_LICENSE("GPL"); diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c index 2e6662173a79..32c2306e240d 100644 --- a/drivers/hid/hid-sensor-custom.c +++ b/drivers/hid/hid-sensor-custom.c @@ -387,7 +387,7 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr, struct hid_sensor_custom *sensor_inst = dev_get_drvdata(dev); int index, field_index, usage; char name[HID_CUSTOM_NAME_LENGTH]; - int value; + int value, ret; if (sscanf(attr->attr.name, "feature-%x-%x-%s", &index, &usage, name) == 3) { @@ -403,8 +403,10 @@ static ssize_t store_value(struct device *dev, struct device_attribute *attr, report_id = sensor_inst->fields[field_index].attribute. report_id; - sensor_hub_set_feature(sensor_inst->hsdev, report_id, - index, sizeof(value), &value); + ret = sensor_hub_set_feature(sensor_inst->hsdev, report_id, + index, sizeof(value), &value); + if (ret) + return ret; } else return -EINVAL; diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 95cf88f3bafb..6abd3e2a9094 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -209,16 +209,21 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, buffer_size = buffer_size / sizeof(__s32); if (buffer_size) { for (i = 0; i < buffer_size; ++i) { - hid_set_field(report->field[field_index], i, - (__force __s32)cpu_to_le32(*buf32)); + ret = hid_set_field(report->field[field_index], i, + (__force __s32)cpu_to_le32(*buf32)); + if (ret) + goto done_proc; + ++buf32; } } if (remaining_bytes) { value = 0; memcpy(&value, (u8 *)buf32, remaining_bytes); - hid_set_field(report->field[field_index], i, - (__force __s32)cpu_to_le32(value)); + ret = hid_set_field(report->field[field_index], i, + (__force __s32)cpu_to_le32(value)); + if (ret) + goto done_proc; } hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT); hid_hw_wait(hsdev->hdev); diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c index 2e452c6e8ef4..f643b1cb112d 100644 --- a/drivers/hid/hid-thrustmaster.c +++ b/drivers/hid/hid-thrustmaster.c @@ -312,7 +312,7 @@ static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_i } tm_wheel->change_request = kzalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); - if (!tm_wheel->model_request) { + if (!tm_wheel->change_request) { ret = -ENOMEM; goto error5; } diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 9993133989a5..46474612e73c 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -45,6 +45,7 @@ #define I2C_HID_QUIRK_BOGUS_IRQ BIT(4) #define I2C_HID_QUIRK_RESET_ON_RESUME BIT(5) #define I2C_HID_QUIRK_BAD_INPUT_SIZE BIT(6) +#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET BIT(7) /* flags */ @@ -178,6 +179,11 @@ static const struct i2c_hid_quirks { I2C_HID_QUIRK_RESET_ON_RESUME }, { USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720, I2C_HID_QUIRK_BAD_INPUT_SIZE }, + /* + * 
Sending the wakeup after reset actually break ELAN touchscreen controller + */ + { USB_VENDOR_ID_ELAN, HID_ANY_ID, + I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET }, { 0, 0 } }; @@ -461,7 +467,8 @@ static int i2c_hid_hwreset(struct i2c_client *client) } /* At least some SIS devices need this after reset */ - ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); + if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET)) + ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); out_unlock: mutex_unlock(&ihid->reset_lock); @@ -990,8 +997,8 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops, hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID); hid->product = le16_to_cpu(ihid->hdesc.wProductID); - snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX", - client->name, hid->vendor, hid->product); + snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", + client->name, (u16)hid->vendor, (u16)hid->product); strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys)); ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product); diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h index 21b87e4003af..07e3cbc86bef 100644 --- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h +++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h @@ -28,6 +28,8 @@ #define EHL_Ax_DEVICE_ID 0x4BB3 #define TGL_LP_DEVICE_ID 0xA0FC #define TGL_H_DEVICE_ID 0x43FC +#define ADL_S_DEVICE_ID 0x7AF8 +#define ADL_P_DEVICE_ID 0x51FC #define REVISION_ID_CHT_A0 0x6 #define REVISION_ID_CHT_Ax_SI 0x0 diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index 06081cf9b85a..a6d5173ac003 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -39,6 +39,8 @@ static const struct pci_device_id ish_pci_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_LP_DEVICE_ID)}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, TGL_H_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_S_DEVICE_ID)}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)}, {0, } }; MODULE_DEVICE_TABLE(pci, ish_pci_tbl); diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c index 7b27ec392232..5571e74abe91 100644 --- a/drivers/hid/surface-hid/surface_hid_core.c +++ b/drivers/hid/surface-hid/surface_hid_core.c @@ -168,9 +168,9 @@ int surface_hid_device_add(struct surface_hid_device *shid) shid->hid->dev.parent = shid->dev; shid->hid->bus = BUS_HOST; - shid->hid->vendor = cpu_to_le16(shid->attrs.vendor); - shid->hid->product = cpu_to_le16(shid->attrs.product); - shid->hid->version = cpu_to_le16(shid->hid_desc.hid_version); + shid->hid->vendor = get_unaligned_le16(&shid->attrs.vendor); + shid->hid->product = get_unaligned_le16(&shid->attrs.product); + shid->hid->version = get_unaligned_le16(&shid->hid_desc.hid_version); shid->hid->country = shid->hid_desc.country_code; snprintf(shid->hid->name, sizeof(shid->hid->name), "Microsoft Surface %04X:%04X", diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 86257ce6d619..4e9077363c96 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -374,7 +374,7 @@ static int hid_submit_ctrl(struct hid_device *hid) raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report; dir = usbhid->ctrl[usbhid->ctrltail].dir; - len = ((report->size - 1) >> 3) + 1 + (report->id > 0); + len = hid_report_len(report); if (dir == USB_DIR_OUT) { usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0); 
usbhid->urbctrl->transfer_buffer_length = len; diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c index ea126c50acc3..3b4ee21cd811 100644 --- a/drivers/hid/usbhid/hid-pidff.c +++ b/drivers/hid/usbhid/hid-pidff.c @@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid) if (pidff->pool[PID_DEVICE_MANAGED_POOL].value && pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) { + error = -EPERM; hid_notice(hid, "device does not support device managed pool\n"); goto fail; diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 2970892bed82..f2221ca0aa7b 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = { static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr, int index) { - if (disallow_fan_support && index >= 8) + if (disallow_fan_support && index >= 20) return 0; if (disallow_fan_type_call && - (index == 9 || index == 12 || index == 15)) + (index == 21 || index == 25 || index == 28)) return 0; if (index >= 0 && index <= 1 && !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1)) diff --git a/drivers/hwmon/pmbus/fsp-3y.c b/drivers/hwmon/pmbus/fsp-3y.c index e24842475254..aec294cc72d1 100644 --- a/drivers/hwmon/pmbus/fsp-3y.c +++ b/drivers/hwmon/pmbus/fsp-3y.c @@ -37,6 +37,8 @@ struct fsp3y_data { struct pmbus_driver_info info; int chip; int page; + + bool vout_linear_11; }; #define to_fsp3y_data(x) container_of(x, struct fsp3y_data, info) @@ -108,11 +110,9 @@ static int fsp3y_read_byte_data(struct i2c_client *client, int page, int reg) int rv; /* - * YH5151-E outputs vout in linear11. The conversion is done when - * reading. Here, we have to inject pmbus_core with the correct - * exponent (it is -6). + * Inject an exponent for non-compliant YH5151-E. */ - if (data->chip == yh5151e && reg == PMBUS_VOUT_MODE) + if (data->vout_linear_11 && reg == PMBUS_VOUT_MODE) return 0x1A; rv = set_page(client, page); @@ -161,10 +161,9 @@ static int fsp3y_read_word_data(struct i2c_client *client, int page, int phase, return rv; /* - * YH-5151E is non-compliant and outputs output voltages in linear11 - * instead of linear16. + * Handle YH-5151E non-compliant linear11 vout voltage. */ - if (data->chip == yh5151e && reg == PMBUS_READ_VOUT) + if (data->vout_linear_11 && reg == PMBUS_READ_VOUT) rv = sign_extend32(rv, 10) & 0xffff; return rv; @@ -256,6 +255,25 @@ static int fsp3y_probe(struct i2c_client *client) data->info = fsp3y_info[data->chip]; + /* + * YH-5151E sometimes reports vout in linear11 and sometimes in + * linear16. This depends on the exact individual piece of hardware. One + * YH-5151E can use linear16 and another might use linear11 instead. + * + * The format can be recognized by reading VOUT_MODE - if it doesn't + * report a valid exponent, then vout uses linear11. Otherwise, the + * device is compliant and uses linear16. 
+ */ + data->vout_linear_11 = false; + if (data->chip == yh5151e) { + rv = i2c_smbus_read_byte_data(client, PMBUS_VOUT_MODE); + if (rv < 0) + return rv; + + if (rv == 0xFF) + data->vout_linear_11 = true; + } + return pmbus_do_probe(client, &data->info); } diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c index 40597a9e799f..1a8caff1ac5f 100644 --- a/drivers/hwmon/pmbus/isl68137.c +++ b/drivers/hwmon/pmbus/isl68137.c @@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client) info->read_word_data = raa_dmpvr2_read_word_data; break; case raa_dmpvr2_2rail_nontc: - info->func[0] &= ~PMBUS_HAVE_TEMP; - info->func[1] &= ~PMBUS_HAVE_TEMP; + info->func[0] &= ~PMBUS_HAVE_TEMP3; + info->func[1] &= ~PMBUS_HAVE_TEMP3; fallthrough; case raa_dmpvr2_2rail: info->pages = 2; diff --git a/drivers/hwmon/pmbus/q54sj108a2.c b/drivers/hwmon/pmbus/q54sj108a2.c index b6e8b20466f1..fa298b4265a1 100644 --- a/drivers/hwmon/pmbus/q54sj108a2.c +++ b/drivers/hwmon/pmbus/q54sj108a2.c @@ -299,7 +299,7 @@ static int q54sj108a2_probe(struct i2c_client *client) dev_err(&client->dev, "Failed to read Manufacturer ID\n"); return ret; } - if (ret != 5 || strncmp(buf, "DELTA", 5)) { + if (ret != 6 || strncmp(buf, "DELTA", 5)) { buf[ret] = '\0'; dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf); return -ENODEV; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 281a65d9b44b..10acece9d7b9 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -647,7 +647,7 @@ config I2C_HIGHLANDER config I2C_HISI tristate "HiSilicon I2C controller" - depends on ARM64 || COMPILE_TEST + depends on (ARM64 && ACPI) || COMPILE_TEST help Say Y here if you want to have Hisilicon I2C controller support available on the Kunpeng Server. diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c index 4d12e3da12f0..55a9e93fbfeb 100644 --- a/drivers/i2c/busses/i2c-ali1563.c +++ b/drivers/i2c/busses/i2c-ali1563.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * i2c-ali1563.c - i2c driver for the ALi 1563 Southbridge * * Copyright (C) 2004 Patrick Mochel diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index c1bbc4caeb5c..66aafa7d1123 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -144,7 +144,7 @@ enum cdns_i2c_mode { }; /** - * enum cdns_i2c_slave_mode - Slave state when I2C is operating in slave mode + * enum cdns_i2c_slave_state - Slave state when I2C is operating in slave mode * * @CDNS_I2C_SLAVE_STATE_IDLE: I2C slave idle * @CDNS_I2C_SLAVE_STATE_SEND: I2C slave sending data to master diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 13be1d678c39..9b08bb5df38d 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -165,7 +165,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) } /** - * i2c_dw_init() - Initialize the designware I2C master hardware + * i2c_dw_init_master() - Initialize the designware I2C master hardware * @dev: device private data * * This functions configures and enables the I2C master. 
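The fsp-3y hunk above decides between linear11 and linear16 vout by probing VOUT_MODE at probe time and sign-extends the 11-bit mantissa on reads. As a minimal sketch of the PMBus LINEAR11 format that comment describes (an illustrative helper with made-up names, not code from the driver), assuming the usual layout of a signed 5-bit exponent over a signed 11-bit mantissa:

#include <linux/bitops.h>	/* sign_extend32() */
#include <linux/types.h>

/*
 * Illustrative LINEAR11 decode: bits [15:11] hold a signed exponent,
 * bits [10:0] a signed mantissa, value = mantissa * 2^exponent.
 * Result is returned in micro-units to stay in integer arithmetic.
 */
static long example_linear11_to_micro(u16 word)
{
	int exp = sign_extend32(word >> 11, 4);
	long val = 1000000L * sign_extend32(word, 10);

	return exp >= 0 ? val << exp : val >> -exp;
}
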
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 843b31a0f752..321b2770feab 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -148,7 +148,7 @@ struct i2c_algo_pch_data { /** * struct adapter_info - This structure holds the adapter information for the - PCH i2c controller + * PCH i2c controller * @pch_data: stores a list of i2c_algo_pch_data * @pch_i2c_suspended: specifies whether the system is suspended or not * perhaps with more lines and words. @@ -358,6 +358,7 @@ static void pch_i2c_repstart(struct i2c_algo_pch_data *adap) /** * pch_i2c_writebytes() - write data to I2C bus in normal mode * @i2c_adap: Pointer to the struct i2c_adapter. + * @msgs: Pointer to the i2c message structure. * @last: specifies whether last message or not. * In the case of compound mode it will be 1 for last message, * otherwise 0. diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 99d446763530..f9e1c2ceaac0 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -395,11 +395,9 @@ static int i801_check_post(struct i801_priv *priv, int status) dev_err(&priv->pci_dev->dev, "Transaction timeout\n"); /* try to stop the current command */ dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n"); - outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL, - SMBHSTCNT(priv)); + outb_p(SMBHSTCNT_KILL, SMBHSTCNT(priv)); usleep_range(1000, 2000); - outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL), - SMBHSTCNT(priv)); + outb_p(0, SMBHSTCNT(priv)); /* Check if it worked */ status = inb_p(SMBHSTSTS(priv)); diff --git a/drivers/i2c/busses/i2c-icy.c b/drivers/i2c/busses/i2c-icy.c index c8c422e9dda4..5dae7cab7260 100644 --- a/drivers/i2c/busses/i2c-icy.c +++ b/drivers/i2c/busses/i2c-icy.c @@ -123,7 +123,6 @@ static int icy_probe(struct zorro_dev *z, { struct icy_i2c *i2c; struct i2c_algo_pcf_data *algo_data; - struct fwnode_handle *new_fwnode; struct i2c_board_info ltc2990_info = { .type = "ltc2990", .swnode = &icy_ltc2990_node, diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 30d9e89a3db2..dcca9c2396db 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -19,6 +19,7 @@ #include <linux/clk.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/fsl_devices.h> #include <linux/i2c.h> #include <linux/interrupt.h> @@ -45,6 +46,7 @@ #define CCR_MTX 0x10 #define CCR_TXAK 0x08 #define CCR_RSTA 0x04 +#define CCR_RSVD 0x02 #define CSR_MCF 0x80 #define CSR_MAAS 0x40 @@ -97,7 +99,7 @@ struct mpc_i2c { u32 block; int rc; int expect_rxack; - + bool has_errata_A004447; }; struct mpc_i2c_divider { @@ -136,6 +138,75 @@ static void mpc_i2c_fixup(struct mpc_i2c *i2c) } } +static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask) +{ + void __iomem *addr = i2c->base + MPC_I2C_SR; + u8 val; + + return readb_poll_timeout(addr, val, val & mask, 0, 100); +} + +/* + * Workaround for Erratum A004447. From the P2040CE Rev Q + * + * 1. Set up the frequency divider and sampling rate. + * 2. I2CCR - a0h + * 3. Poll for I2CSR[MBB] to get set. + * 4. If I2CSR[MAL] is set (an indication that SDA is stuck low), then go to + * step 5. If MAL is not set, then go to step 13. + * 5. I2CCR - 00h + * 6. I2CCR - 22h + * 7. I2CCR - a2h + * 8. Poll for I2CSR[MBB] to get set. + * 9. Issue read to I2CDR. + * 10. Poll for I2CSR[MIF] to be set. + * 11. I2CCR - 82h + * 12. Workaround complete. Skip the next steps. + * 13. Issue read to I2CDR. + * 14. Poll for I2CSR[MIF] to be set. 
+ * 15. I2CCR - 80h + */ +static void mpc_i2c_fixup_A004447(struct mpc_i2c *i2c) +{ + int ret; + u32 val; + + writeccr(i2c, CCR_MEN | CCR_MSTA); + ret = i2c_mpc_wait_sr(i2c, CSR_MBB); + if (ret) { + dev_err(i2c->dev, "timeout waiting for CSR_MBB\n"); + return; + } + + val = readb(i2c->base + MPC_I2C_SR); + + if (val & CSR_MAL) { + writeccr(i2c, 0x00); + writeccr(i2c, CCR_MSTA | CCR_RSVD); + writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSVD); + ret = i2c_mpc_wait_sr(i2c, CSR_MBB); + if (ret) { + dev_err(i2c->dev, "timeout waiting for CSR_MBB\n"); + return; + } + val = readb(i2c->base + MPC_I2C_DR); + ret = i2c_mpc_wait_sr(i2c, CSR_MIF); + if (ret) { + dev_err(i2c->dev, "timeout waiting for CSR_MIF\n"); + return; + } + writeccr(i2c, CCR_MEN | CCR_RSVD); + } else { + val = readb(i2c->base + MPC_I2C_DR); + ret = i2c_mpc_wait_sr(i2c, CSR_MIF); + if (ret) { + dev_err(i2c->dev, "timeout waiting for CSR_MIF\n"); + return; + } + writeccr(i2c, CCR_MEN); + } +} + #if defined(CONFIG_PPC_MPC52xx) || defined(CONFIG_PPC_MPC512x) static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] = { {20, 0x20}, {22, 0x21}, {24, 0x22}, {26, 0x23}, @@ -670,7 +741,10 @@ static int fsl_i2c_bus_recovery(struct i2c_adapter *adap) { struct mpc_i2c *i2c = i2c_get_adapdata(adap); - mpc_i2c_fixup(i2c); + if (i2c->has_errata_A004447) + mpc_i2c_fixup_A004447(i2c); + else + mpc_i2c_fixup(i2c); return 0; } @@ -767,6 +841,9 @@ static int fsl_i2c_probe(struct platform_device *op) } dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ); + if (of_property_read_bool(op->dev.of_node, "fsl,i2c-erratum-a004447")) + i2c->has_errata_A004447 = true; + i2c->adap = mpc_ops; scnprintf(i2c->adap.name, sizeof(i2c->adap.name), "MPC adapter (%s)", of_node_full_name(op->dev.of_node)); diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 5ddfa4e56ee2..4e9fb6b44436 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -479,6 +479,11 @@ static void mtk_i2c_clock_disable(struct mtk_i2c *i2c) static void mtk_i2c_init_hw(struct mtk_i2c *i2c) { u16 control_reg; + u16 intr_stat_reg; + + mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_START); + intr_stat_reg = mtk_i2c_readw(i2c, OFFSET_INTR_STAT); + mtk_i2c_writew(i2c, intr_stat_reg, OFFSET_INTR_STAT); if (i2c->dev_comp->apdma_sync) { writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST); diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index dc77e1c4e80f..a2d12a5b1c34 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -159,7 +159,7 @@ struct i2c_nmk_client { * @clk_freq: clock frequency for the operation mode * @tft: Tx FIFO Threshold in bytes * @rft: Rx FIFO Threshold in bytes - * @timeout Slave response timeout (ms) + * @timeout: Slave response timeout (ms) * @sm: speed mode * @stop: stop condition. * @xfer_complete: acknowledge completion for a I2C message. 
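The i2c-mpc hunks above add a byte-wide status poll helper built on readb_poll_timeout() from <linux/iopoll.h> and select the A-004447 recovery path from the new "fsl,i2c-erratum-a004447" device-tree flag. A rough sketch of that poll-then-branch pattern, with illustrative names rather than the driver's own:

#include <linux/iopoll.h>
#include <linux/of.h>

/*
 * Spin on a byte-wide status register until a bit in @mask is set,
 * giving up after 100 us; readb_poll_timeout() returns 0 on success
 * and -ETIMEDOUT otherwise.
 */
static int example_wait_sr(void __iomem *sr, u8 mask)
{
	u8 val;

	return readb_poll_timeout(sr, val, val & mask, 0, 100);
}

/* Probe-time check mirroring how the erratum flag is picked up. */
static bool example_has_a004447(struct device_node *np)
{
	return of_property_read_bool(np, "fsl,i2c-erratum-a004447");
}
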
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 273222e38056..a0af027db04c 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -250,7 +250,7 @@ static irqreturn_t ocores_isr(int irq, void *dev_id) } /** - * Process timeout event + * ocores_process_timeout() - Process timeout event * @i2c: ocores I2C device instance */ static void ocores_process_timeout(struct ocores_i2c *i2c) @@ -264,7 +264,7 @@ static void ocores_process_timeout(struct ocores_i2c *i2c) } /** - * Wait until something change in a given register + * ocores_wait() - Wait until something change in a given register * @i2c: ocores I2C device instance * @reg: register to query * @mask: bitmask to apply on register value @@ -296,7 +296,7 @@ static int ocores_wait(struct ocores_i2c *i2c, } /** - * Wait until is possible to process some data + * ocores_poll_wait() - Wait until is possible to process some data * @i2c: ocores I2C device instance * * Used when the device is in polling mode (interrupts disabled). @@ -334,7 +334,7 @@ static int ocores_poll_wait(struct ocores_i2c *i2c) } /** - * It handles an IRQ-less transfer + * ocores_process_polling() - It handles an IRQ-less transfer * @i2c: ocores I2C device instance * * Even if IRQ are disabled, the I2C OpenCore IP behavior is exactly the same diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 8c4ec7f13f5a..50f21cdbe90d 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -138,7 +138,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) /** * i2c_pnx_start - start a device * @slave_addr: slave address - * @adap: pointer to adapter structure + * @alg_data: pointer to local driver data structure * * Generate a START signal in the desired mode. */ @@ -194,7 +194,7 @@ static int i2c_pnx_start(unsigned char slave_addr, /** * i2c_pnx_stop - stop a device - * @adap: pointer to I2C adapter structure + * @alg_data: pointer to local driver data structure * * Generate a STOP signal to terminate the master transaction. 
*/ @@ -223,7 +223,7 @@ static void i2c_pnx_stop(struct i2c_pnx_algo_data *alg_data) /** * i2c_pnx_master_xmit - transmit data to slave - * @adap: pointer to I2C adapter structure + * @alg_data: pointer to local driver data structure * * Sends one byte of data to the slave */ @@ -293,7 +293,7 @@ static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data) /** * i2c_pnx_master_rcv - receive data from slave - * @adap: pointer to I2C adapter structure + * @alg_data: pointer to local driver data structure * * Reads one byte data from the slave */ diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c index 214b4c913a13..07b710a774df 100644 --- a/drivers/i2c/busses/i2c-qcom-geni.c +++ b/drivers/i2c/busses/i2c-qcom-geni.c @@ -100,7 +100,7 @@ static const struct geni_i2c_err_log gi2c_log[] = { [GP_IRQ0] = {-EIO, "Unknown I2C err GP_IRQ0"}, [NACK] = {-ENXIO, "NACK: slv unresponsive, check its power/reset-ln"}, [GP_IRQ2] = {-EIO, "Unknown I2C err GP IRQ2"}, - [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unepxected start/stop"}, + [BUS_PROTO] = {-EPROTO, "Bus proto err, noisy/unexpected start/stop"}, [ARB_LOST] = {-EAGAIN, "Bus arbitration lost, clock line undriveable"}, [GP_IRQ5] = {-EIO, "Unknown I2C err GP IRQ5"}, [GENI_OVERRUN] = {-EIO, "Cmd overrun, check GENI cmd-state machine"}, diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index ab928613afba..4d82761e1585 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -480,7 +480,10 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat) * forces us to send a new START * when we change direction */ + dev_dbg(i2c->dev, + "missing START before write->read\n"); s3c24xx_i2c_stop(i2c, -EINVAL); + break; } goto retry_write; diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 3ae6ca21a02c..2d2e630fd438 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -807,7 +807,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = { static const struct of_device_id sh_mobile_i2c_dt_ids[] = { { .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config }, { .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config }, - { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config }, + { .compatible = "renesas,iic-r8a774c0", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a7791", .data = &v2_freq_calc_dt_config }, { .compatible = "renesas,iic-r8a7792", .data = &v2_freq_calc_dt_config }, diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c index faa81a95551f..88482316d22a 100644 --- a/drivers/i2c/busses/i2c-st.c +++ b/drivers/i2c/busses/i2c-st.c @@ -524,7 +524,7 @@ static void st_i2c_handle_write(struct st_i2c_dev *i2c_dev) } /** - * st_i2c_handle_write() - Handle FIFO enmpty interrupt in case of read + * st_i2c_handle_read() - Handle FIFO empty interrupt in case of read * @i2c_dev: Controller's private data */ static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev) @@ -558,7 +558,7 @@ static void st_i2c_handle_read(struct st_i2c_dev *i2c_dev) } /** - * st_i2c_isr() - Interrupt routine + * st_i2c_isr_thread() - Interrupt routine * @irq: interrupt number * @data: Controller's private data */ diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c index 4933fc8ce3fd..eebce7ecef25 100644 --- 
a/drivers/i2c/busses/i2c-stm32f4.c +++ b/drivers/i2c/busses/i2c-stm32f4.c @@ -313,7 +313,7 @@ static int stm32f4_i2c_wait_free_bus(struct stm32f4_i2c_dev *i2c_dev) } /** - * stm32f4_i2c_write_ byte() - Write a byte in the data register + * stm32f4_i2c_write_byte() - Write a byte in the data register * @i2c_dev: Controller's private data * @byte: Data to write in the register */ diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c index 6dc88902c189..1c78657631f4 100644 --- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c +++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c @@ -34,7 +34,7 @@ struct i2c_arbitrator_data { }; -/** +/* * i2c_arbitrator_select - claim the I2C bus * * Use the GPIO-based signalling protocol; return -EBUSY if we fail. @@ -77,7 +77,7 @@ static int i2c_arbitrator_select(struct i2c_mux_core *muxc, u32 chan) return -EBUSY; } -/** +/* * i2c_arbitrator_deselect - release the I2C bus * * Release the I2C bus using the GPIO-based signalling protocol. diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 9d3952b4674f..a27db78ea13e 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -771,6 +771,13 @@ static int ad7124_of_parse_channel_config(struct iio_dev *indio_dev, if (ret) goto err; + if (channel >= indio_dev->num_channels) { + dev_err(indio_dev->dev.parent, + "Channel index >= number of channels\n"); + ret = -EINVAL; + goto err; + } + ret = of_property_read_u32_array(child, "diff-channels", ain, 2); if (ret) @@ -850,6 +857,11 @@ static int ad7124_setup(struct ad7124_state *st) return ret; } +static void ad7124_reg_disable(void *r) +{ + regulator_disable(r); +} + static int ad7124_probe(struct spi_device *spi) { const struct ad7124_chip_info *info; @@ -895,17 +907,20 @@ static int ad7124_probe(struct spi_device *spi) ret = regulator_enable(st->vref[i]); if (ret) return ret; + + ret = devm_add_action_or_reset(&spi->dev, ad7124_reg_disable, + st->vref[i]); + if (ret) + return ret; } st->mclk = devm_clk_get(&spi->dev, "mclk"); - if (IS_ERR(st->mclk)) { - ret = PTR_ERR(st->mclk); - goto error_regulator_disable; - } + if (IS_ERR(st->mclk)) + return PTR_ERR(st->mclk); ret = clk_prepare_enable(st->mclk); if (ret < 0) - goto error_regulator_disable; + return ret; ret = ad7124_soft_reset(st); if (ret < 0) @@ -935,11 +950,6 @@ error_remove_trigger: ad_sd_cleanup_buffer_and_trigger(indio_dev); error_clk_disable_unprepare: clk_disable_unprepare(st->mclk); -error_regulator_disable: - for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) { - if (!IS_ERR_OR_NULL(st->vref[i])) - regulator_disable(st->vref[i]); - } return ret; } @@ -948,17 +958,11 @@ static int ad7124_remove(struct spi_device *spi) { struct iio_dev *indio_dev = spi_get_drvdata(spi); struct ad7124_state *st = iio_priv(indio_dev); - int i; iio_device_unregister(indio_dev); ad_sd_cleanup_buffer_and_trigger(indio_dev); clk_disable_unprepare(st->mclk); - for (i = ARRAY_SIZE(st->vref) - 1; i >= 0; i--) { - if (!IS_ERR_OR_NULL(st->vref[i])) - regulator_disable(st->vref[i]); - } - return 0; } diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c index 2ed580521d81..1141cc13a124 100644 --- a/drivers/iio/adc/ad7192.c +++ b/drivers/iio/adc/ad7192.c @@ -912,7 +912,7 @@ static int ad7192_probe(struct spi_device *spi) { struct ad7192_state *st; struct iio_dev *indio_dev; - int ret, voltage_uv = 0; + int ret; if (!spi->irq) { dev_err(&spi->dev, "no IRQ?\n"); @@ -949,15 +949,12 @@ static int ad7192_probe(struct spi_device *spi) goto error_disable_avdd; } - 
voltage_uv = regulator_get_voltage(st->avdd); - - if (voltage_uv > 0) { - st->int_vref_mv = voltage_uv / 1000; - } else { - ret = voltage_uv; + ret = regulator_get_voltage(st->avdd); + if (ret < 0) { dev_err(&spi->dev, "Device tree error, reference voltage undefined\n"); goto error_disable_avdd; } + st->int_vref_mv = ret / 1000; spi_set_drvdata(spi, indio_dev); st->chip_info = of_device_get_match_data(&spi->dev); @@ -1014,7 +1011,9 @@ static int ad7192_probe(struct spi_device *spi) return 0; error_disable_clk: - clk_disable_unprepare(st->mclk); + if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 || + st->clock_sel == AD7192_CLK_EXT_MCLK2) + clk_disable_unprepare(st->mclk); error_remove_trigger: ad_sd_cleanup_buffer_and_trigger(indio_dev); error_disable_dvdd: @@ -1031,7 +1030,9 @@ static int ad7192_remove(struct spi_device *spi) struct ad7192_state *st = iio_priv(indio_dev); iio_device_unregister(indio_dev); - clk_disable_unprepare(st->mclk); + if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 || + st->clock_sel == AD7192_CLK_EXT_MCLK2) + clk_disable_unprepare(st->mclk); ad_sd_cleanup_buffer_and_trigger(indio_dev); regulator_disable(st->dvdd); diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c index c945f1349623..60f21fed6dcb 100644 --- a/drivers/iio/adc/ad7768-1.c +++ b/drivers/iio/adc/ad7768-1.c @@ -167,6 +167,10 @@ struct ad7768_state { * transfer buffers to live in their own cache lines. */ union { + struct { + __be32 chan; + s64 timestamp; + } scan; __be32 d32; u8 d8[2]; } data ____cacheline_aligned; @@ -469,11 +473,11 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p) mutex_lock(&st->lock); - ret = spi_read(st->spi, &st->data.d32, 3); + ret = spi_read(st->spi, &st->data.scan.chan, 3); if (ret < 0) goto err_unlock; - iio_push_to_buffers_with_timestamp(indio_dev, &st->data.d32, + iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan, iio_get_time_ns(indio_dev)); iio_trigger_notify_done(indio_dev->trig); diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c index 5e980a06258e..440ef4c7be07 100644 --- a/drivers/iio/adc/ad7793.c +++ b/drivers/iio/adc/ad7793.c @@ -279,6 +279,7 @@ static int ad7793_setup(struct iio_dev *indio_dev, id &= AD7793_ID_MASK; if (id != st->chip_info->id) { + ret = -ENODEV; dev_err(&st->sd.spi->dev, "device ID query failed\n"); goto out; } diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c index 9a649745cd0a..069b561ee768 100644 --- a/drivers/iio/adc/ad7923.c +++ b/drivers/iio/adc/ad7923.c @@ -59,8 +59,10 @@ struct ad7923_state { /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. 
+ * Ensure rx_buf can be directly used in iio_push_to_buffers_with_timetamp + * Length = 8 channels + 4 extra for 8 byte timestamp */ - __be16 rx_buf[4] ____cacheline_aligned; + __be16 rx_buf[12] ____cacheline_aligned; __be16 tx_buf[4]; }; diff --git a/drivers/iio/dac/ad5770r.c b/drivers/iio/dac/ad5770r.c index 7ab2ccf90863..8107f7bbbe3c 100644 --- a/drivers/iio/dac/ad5770r.c +++ b/drivers/iio/dac/ad5770r.c @@ -524,23 +524,29 @@ static int ad5770r_channel_config(struct ad5770r_state *st) device_for_each_child_node(&st->spi->dev, child) { ret = fwnode_property_read_u32(child, "num", &num); if (ret) - return ret; - if (num >= AD5770R_MAX_CHANNELS) - return -EINVAL; + goto err_child_out; + if (num >= AD5770R_MAX_CHANNELS) { + ret = -EINVAL; + goto err_child_out; + } ret = fwnode_property_read_u32_array(child, "adi,range-microamp", tmp, 2); if (ret) - return ret; + goto err_child_out; min = tmp[0] / 1000; max = tmp[1] / 1000; ret = ad5770r_store_output_range(st, min, max, num); if (ret) - return ret; + goto err_child_out; } + return 0; + +err_child_out: + fwnode_handle_put(child); return ret; } diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c index 1a20c6b88e7d..645461c70454 100644 --- a/drivers/iio/gyro/fxas21002c_core.c +++ b/drivers/iio/gyro/fxas21002c_core.c @@ -399,6 +399,7 @@ static int fxas21002c_temp_get(struct fxas21002c_data *data, int *val) ret = regmap_field_read(data->regmap_fields[F_TEMP], &temp); if (ret < 0) { dev_err(dev, "failed to read temp: %d\n", ret); + fxas21002c_pm_put(data); goto data_unlock; } @@ -432,6 +433,7 @@ static int fxas21002c_axis_get(struct fxas21002c_data *data, &axis_be, sizeof(axis_be)); if (ret < 0) { dev_err(dev, "failed to read axis: %d: %d\n", index, ret); + fxas21002c_pm_put(data); goto data_unlock; } diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c index d1591a28b743..8f385f9c2dd3 100644 --- a/drivers/interconnect/qcom/bcm-voter.c +++ b/drivers/interconnect/qcom/bcm-voter.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */ #include <asm/div64.h> @@ -205,6 +205,7 @@ struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name) } mutex_unlock(&bcm_voter_lock); + of_node_put(node); return voter; } EXPORT_SYMBOL_GPL(of_bcm_voter_get); @@ -362,6 +363,7 @@ static const struct of_device_id bcm_voter_of_match[] = { { .compatible = "qcom,bcm-voter" }, { } }; +MODULE_DEVICE_TABLE(of, bcm_voter_of_match); static struct platform_driver qcom_icc_bcm_voter_driver = { .probe = qcom_icc_bcm_voter_probe, diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 80e8e1916dd1..3ac42bbdefc6 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -884,7 +884,7 @@ static inline u64 build_inv_address(u64 address, size_t size) * The msb-bit must be clear on the address. Just set all the * lower bits. 
*/ - address |= 1ull << (msb_diff - 1); + address |= (1ull << msb_diff) - 1; } /* Clear bits 11:0 */ @@ -1714,6 +1714,8 @@ static void amd_iommu_probe_finalize(struct device *dev) domain = iommu_get_domain_for_dev(dev); if (domain->type == IOMMU_DOMAIN_DMA) iommu_setup_dma_ops(dev, IOVA_START_PFN << PAGE_SHIFT, 0); + else + set_dma_ops(dev, NULL); } static void amd_iommu_release_device(struct device *dev) diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 1757ac1e1623..84057cb9596c 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1142,7 +1142,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); if (err) - goto err_unmap; + goto err_sysfs; } drhd->iommu = iommu; @@ -1150,6 +1150,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) return 0; +err_sysfs: + iommu_device_sysfs_remove(&iommu->iommu); err_unmap: unmap_iommu(iommu); error_free_seq_id: diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 708f430af1c4..be35284a2016 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2525,9 +2525,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu, struct device *dev, u32 pasid) { - int flags = PASID_FLAG_SUPERVISOR_MODE; struct dma_pte *pgd = domain->pgd; int agaw, level; + int flags = 0; /* * Skip top levels of page tables for iommu which has @@ -2543,7 +2543,10 @@ static int domain_setup_first_level(struct intel_iommu *iommu, if (level != 4 && level != 5) return -EINVAL; - flags |= (level == 5) ? PASID_FLAG_FL5LP : 0; + if (pasid != PASID_RID2PASID) + flags |= PASID_FLAG_SUPERVISOR_MODE; + if (level == 5) + flags |= PASID_FLAG_FL5LP; if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED) flags |= PASID_FLAG_PAGE_SNOOP; @@ -4606,6 +4609,8 @@ static int auxiliary_link_device(struct dmar_domain *domain, if (!sinfo) { sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC); + if (!sinfo) + return -ENOMEM; sinfo->domain = domain; sinfo->pdev = dev; list_add(&sinfo->link_phys, &info->subdevices); diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 72646bafc52f..72dc84821dad 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -699,7 +699,8 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, * Since it is a second level only translation setup, we should * set SRE bit as well (addresses are expected to be GPAs). 
*/ - pasid_set_sre(pte); + if (pasid != PASID_RID2PASID) + pasid_set_sre(pte); pasid_set_present(pte); pasid_flush_caches(iommu, pte, pasid, did); diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 7c02481a81b4..c6e5ee4d9cef 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -1136,6 +1136,7 @@ static struct virtio_device_id id_table[] = { { VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID }, { 0 }, }; +MODULE_DEVICE_TABLE(virtio, id_table); static struct virtio_driver virtio_iommu_drv = { .driver.name = KBUILD_MODNAME, diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index b8e4d31124ea..751ec5ea1dbb 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -859,7 +859,8 @@ static uint32_t __minimum_chunk_size(struct origin *o) if (o) list_for_each_entry(snap, &o->snapshots, list) - chunk_size = min(chunk_size, snap->store->chunk_size); + chunk_size = min_not_zero(chunk_size, + snap->store->chunk_size); return (uint32_t) chunk_size; } diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c index 29385dc470d5..db61a1f43ae9 100644 --- a/drivers/md/dm-verity-verify-sig.c +++ b/drivers/md/dm-verity-verify-sig.c @@ -15,7 +15,7 @@ #define DM_VERITY_VERIFY_ERR(s) DM_VERITY_ROOT_HASH_VERIFICATION " " s static bool require_signatures; -module_param(require_signatures, bool, false); +module_param(require_signatures, bool, 0444); MODULE_PARM_DESC(require_signatures, "Verify the roothash of dm-verity hash tree"); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 841e1c1aa5e6..7d4ff8a5c55e 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5311,8 +5311,6 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) unsigned int chunk_sectors; unsigned int bio_sectors = bio_sectors(bio); - WARN_ON_ONCE(bio->bi_bdev->bd_partno); - chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); return chunk_sectors >= ((sector & (chunk_sectors - 1)) + bio_sectors); diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 64d33e368509..67c5b452dd35 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c @@ -101,8 +101,9 @@ printk(KERN_INFO a); \ } while (0) #define v2printk(a...) do { \ - if (verbose > 1) \ + if (verbose > 1) { \ printk(KERN_INFO a); \ + } \ touch_nmi_watchdog(); \ } while (0) #define eprintk(a...) 
do { \ diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c index a98f6b895af7..aab3ebfa9fc4 100644 --- a/drivers/misc/mei/interrupt.c +++ b/drivers/misc/mei/interrupt.c @@ -277,6 +277,9 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb, return ret; } + pm_runtime_mark_last_busy(dev->dev); + pm_request_autosuspend(dev->dev); + list_move_tail(&cb->list, &cl->rd_pending); return 0; diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index a44d49d63968..494675aeaaad 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -71,7 +71,8 @@ config NVME_FC config NVME_TCP tristate "NVM Express over Fabrics TCP host driver" depends on INET - depends on BLK_DEV_NVME + depends on BLOCK + select NVME_CORE select NVME_FABRICS select CRYPTO select CRYPTO_CRC32C diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 762125f2905f..66973bb56305 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3485,8 +3485,10 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, cdev_init(cdev, fops); cdev->owner = owner; ret = cdev_device_add(cdev, cdev_device); - if (ret) + if (ret) { + put_device(cdev_device); ida_simple_remove(&nvme_ns_chr_minor_ida, minor); + } return ret; } diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index a2bb7fc63a73..34a84d2086c7 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -336,6 +336,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, cmd->connect.recfmt); break; + case NVME_SC_HOST_PATH_ERROR: + dev_err(ctrl->device, + "Connect command failed: host path error\n"); + break; + default: dev_err(ctrl->device, "Connect command failed, error wo/DNR bit: %d\n", diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 256e87721a01..f183f9fa03d0 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3107,6 +3107,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (ctrl->ctrl.icdoff) { dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", ctrl->ctrl.icdoff); + ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto out_disconnect_admin_queue; } @@ -3114,6 +3115,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) { dev_err(ctrl->ctrl.device, "Mandatory sgls are not supported!\n"); + ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; goto out_disconnect_admin_queue; } @@ -3280,11 +3282,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) return; - if (portptr->port_state == FC_OBJSTATE_ONLINE) + if (portptr->port_state == FC_OBJSTATE_ONLINE) { dev_info(ctrl->ctrl.device, "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n", ctrl->cnum, status); - else if (time_after_eq(jiffies, rport->dev_loss_end)) + if (status > 0 && (status & NVME_SC_DNR)) + recon = false; + } else if (time_after_eq(jiffies, rport->dev_loss_end)) recon = false; if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { @@ -3298,12 +3302,17 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); } else { - if (portptr->port_state == FC_OBJSTATE_ONLINE) - dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: Max reconnect attempts (%d) " - "reached.\n", - ctrl->cnum, ctrl->ctrl.nr_reconnects); - else + if (portptr->port_state == FC_OBJSTATE_ONLINE) { + if (status > 0 && (status & NVME_SC_DNR)) + dev_warn(ctrl->ctrl.device, + "NVME-FC{%d}: 
reconnect failure\n", + ctrl->cnum); + else + dev_warn(ctrl->ctrl.device, + "NVME-FC{%d}: Max reconnect attempts " + "(%d) reached.\n", + ctrl->cnum, ctrl->ctrl.nr_reconnects); + } else dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: dev_loss_tmo (%d) expired " "while waiting for remoteport connectivity.\n", diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 37943dc4c2c1..4697a94c0945 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1320,16 +1320,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, int count) { struct nvme_sgl_desc *sg = &c->common.dptr.sgl; - struct scatterlist *sgl = req->data_sgl.sg_table.sgl; struct ib_sge *sge = &req->sge[1]; + struct scatterlist *sgl; u32 len = 0; int i; - for (i = 0; i < count; i++, sgl++, sge++) { + for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) { sge->addr = sg_dma_address(sgl); sge->length = sg_dma_len(sgl); sge->lkey = queue->device->pd->local_dma_lkey; len += sge->length; + sge++; } sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 1853db38b682..b20b8d0a1144 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -388,10 +388,10 @@ static void nvmet_keep_alive_timer(struct work_struct *work) { struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work), struct nvmet_ctrl, ka_work); - bool cmd_seen = ctrl->cmd_seen; + bool reset_tbkas = ctrl->reset_tbkas; - ctrl->cmd_seen = false; - if (cmd_seen) { + ctrl->reset_tbkas = false; + if (reset_tbkas) { pr_debug("ctrl %d reschedule traffic based keep-alive timer\n", ctrl->cntlid); schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); @@ -804,6 +804,13 @@ void nvmet_sq_destroy(struct nvmet_sq *sq) percpu_ref_exit(&sq->ref); if (ctrl) { + /* + * The teardown flow may take some time, and the host may not + * send us keep-alive during this period, hence reset the + * traffic based keep-alive timer so we don't trigger a + * controller teardown as a result of a keep-alive expiration. 
+ */ + ctrl->reset_tbkas = true; nvmet_ctrl_put(ctrl); sq->ctrl = NULL; /* allows reusing the queue later */ } @@ -952,7 +959,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, } if (sq->ctrl) - sq->ctrl->cmd_seen = true; + sq->ctrl->reset_tbkas = true; return true; @@ -998,19 +1005,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req) return req->transfer_len - req->metadata_len; } -static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req) +static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev, + struct nvmet_req *req) { - req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt, + req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt, nvmet_data_transfer_len(req)); if (!req->sg) goto out_err; if (req->metadata_len) { - req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev, + req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->metadata_sg_cnt, req->metadata_len); if (!req->metadata_sg) goto out_free_sg; } + + req->p2p_dev = p2p_dev; + return 0; out_free_sg: pci_p2pmem_free_sgl(req->p2p_dev, req->sg); @@ -1018,25 +1029,19 @@ out_err: return -ENOMEM; } -static bool nvmet_req_find_p2p_dev(struct nvmet_req *req) +static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req) { - if (!IS_ENABLED(CONFIG_PCI_P2PDMA)) - return false; - - if (req->sq->ctrl && req->sq->qid && req->ns) { - req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, - req->ns->nsid); - if (req->p2p_dev) - return true; - } - - req->p2p_dev = NULL; - return false; + if (!IS_ENABLED(CONFIG_PCI_P2PDMA) || + !req->sq->ctrl || !req->sq->qid || !req->ns) + return NULL; + return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid); } int nvmet_req_alloc_sgls(struct nvmet_req *req) { - if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req)) + struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req); + + if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req)) return 0; req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL, @@ -1065,6 +1070,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req) pci_p2pmem_free_sgl(req->p2p_dev, req->sg); if (req->metadata_sg) pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg); + req->p2p_dev = NULL; } else { sgl_free(req->sg); if (req->metadata_sg) diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index cb30cb942e1d..a5c4a1865026 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -263,7 +263,8 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = { static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { - clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); + if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) + return; nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); blk_cleanup_queue(ctrl->ctrl.admin_q); blk_cleanup_queue(ctrl->ctrl.fabrics_q); @@ -299,6 +300,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); } + ctrl->ctrl.queue_count = 1; } static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) @@ -405,6 +407,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) return 0; out_cleanup_queue: + clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); blk_cleanup_queue(ctrl->ctrl.admin_q); out_cleanup_fabrics_q: blk_cleanup_queue(ctrl->ctrl.fabrics_q); @@ -462,8 +465,10 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) nvme_loop_shutdown_ctrl(ctrl); if 
(!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { - /* state change failure should never happen */ - WARN_ON_ONCE(1); + if (ctrl->ctrl.state != NVME_CTRL_DELETING && + ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO) + /* state change failure for non-deleted ctrl? */ + WARN_ON_ONCE(1); return; } diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index d69a409515d6..53aea9a8056e 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -167,7 +167,7 @@ struct nvmet_ctrl { struct nvmet_subsys *subsys; struct nvmet_sq **sqs; - bool cmd_seen; + bool reset_tbkas; struct mutex lock; u64 cap; diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index f9f34f6caf5e..d8aceef83284 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -550,7 +550,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req) * nvmet_req_init is completed. */ if (queue->rcv_state == NVMET_TCP_RECV_PDU && - len && len < cmd->req.port->inline_data_size && + len && len <= cmd->req.port->inline_data_size && nvme_is_write(cmd->req.cmd)) return; } diff --git a/drivers/pci/of.c b/drivers/pci/of.c index da5b414d585a..85dcb7097da4 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -103,6 +103,13 @@ struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus) #endif } +bool pci_host_of_has_msi_map(struct device *dev) +{ + if (dev && dev->of_node) + return of_get_property(dev->of_node, "msi-map", NULL); + return false; +} + static inline int __of_pci_pci_compare(struct device_node *node, unsigned int data) { diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 3a62d09b8869..275204646c68 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -925,7 +925,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); pci_set_bus_msi_domain(bus); - if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev)) + if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev) && + !pci_host_of_has_msi_map(parent)) bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI; if (!parent) diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 1b9e1442e6a5..fd42a5fffaed 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c @@ -642,12 +642,18 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block) blk_queue_segment_boundary(q, PAGE_SIZE - 1); } +static int dasd_diag_pe_handler(struct dasd_device *device, + __u8 tbvpm, __u8 fcsecpm) +{ + return dasd_generic_verify_path(device, tbvpm); +} + static struct dasd_discipline dasd_diag_discipline = { .owner = THIS_MODULE, .name = "DIAG", .ebcname = "DIAG", .check_device = dasd_diag_check_device, - .verify_path = dasd_generic_verify_path, + .pe_handler = dasd_diag_pe_handler, .fill_geometry = dasd_diag_fill_geometry, .setup_blk_queue = dasd_diag_setup_blk_queue, .start_IO = dasd_start_diag, diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 4789410885e4..3ad319aee51e 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -794,13 +794,19 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block) blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } +static int dasd_fba_pe_handler(struct dasd_device *device, + __u8 tbvpm, __u8 fcsecpm) +{ + return dasd_generic_verify_path(device, tbvpm); +} + static struct dasd_discipline dasd_fba_discipline = { .owner = THIS_MODULE, .name = "FBA ", .ebcname = "FBA ", .check_device = 
dasd_fba_check_characteristics, .do_analysis = dasd_fba_do_analysis, - .verify_path = dasd_generic_verify_path, + .pe_handler = dasd_fba_pe_handler, .setup_blk_queue = dasd_fba_setup_blk_queue, .fill_geometry = dasd_fba_fill_geometry, .start_IO = dasd_start_IO, diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 1c59b0e86a9f..155428bfed8a 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -297,7 +297,6 @@ struct dasd_discipline { * e.g. verify that new path is compatible with the current * configuration. */ - int (*verify_path)(struct dasd_device *, __u8); int (*pe_handler)(struct dasd_device *, __u8, __u8); /* diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index b9febc581b1f..8d1b2771c1aa 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -638,6 +638,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 1); int ret; + /* this is an error in the caller */ + if (cp->initialized) + return -EBUSY; + /* * We only support prefetching the channel program. We assume all channel * programs executed by supported guests likewise support prefetching. diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c index 8c625b530035..9b61e9b131ad 100644 --- a/drivers/s390/cio/vfio_ccw_drv.c +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -86,6 +86,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) struct vfio_ccw_private *private; struct irb *irb; bool is_final; + bool cp_is_finished = false; private = container_of(work, struct vfio_ccw_private, io_work); irb = &private->irb; @@ -94,14 +95,21 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work) (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); if (scsw_is_solicited(&irb->scsw)) { cp_update_scsw(&private->cp, &irb->scsw); - if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) + if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) { cp_free(&private->cp); + cp_is_finished = true; + } } mutex_lock(&private->io_mutex); memcpy(private->io_region->irb_area, irb, sizeof(*irb)); mutex_unlock(&private->io_mutex); - if (private->mdev && is_final) + /* + * Reset to IDLE only if processing of a channel program + * has finished. Do not overwrite a possible processing + * state if the final interrupt was for HSCH or CSCH. + */ + if (private->mdev && cp_is_finished) private->state = VFIO_CCW_STATE_IDLE; if (private->io_trigger) diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c index 23e61aa638e4..e435a9cd92da 100644 --- a/drivers/s390/cio/vfio_ccw_fsm.c +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -318,6 +318,7 @@ static void fsm_io_request(struct vfio_ccw_private *private, } err_out: + private->state = VFIO_CCW_STATE_IDLE; trace_vfio_ccw_fsm_io_request(scsw->cmd.fctl, schid, io_region->ret_code, errstr); } diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c index 491a64c61fff..c57d2a7f0919 100644 --- a/drivers/s390/cio/vfio_ccw_ops.c +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -279,8 +279,6 @@ static ssize_t vfio_ccw_mdev_write_io_region(struct vfio_ccw_private *private, } vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ); - if (region->ret_code != 0) - private->state = VFIO_CCW_STATE_IDLE; ret = (region->ret_code != 0) ? 
region->ret_code : count; out_unlock: diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y index 924d55a8acbf..65182ad9cdf8 100644 --- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y @@ -58,7 +58,6 @@ #include "aicasm_symbol.h" #include "aicasm_insformat.h" -int yylineno; char *yyfilename; char stock_prefix[] = "aic_"; char *prefix = stock_prefix; diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h index 7bf7fd5953ac..ed3bdd43c297 100644 --- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h @@ -108,7 +108,7 @@ struct macro_arg { regex_t arg_regex; char *replacement_text; }; -STAILQ_HEAD(macro_arg_list, macro_arg) args; +STAILQ_HEAD(macro_arg_list, macro_arg); struct macro_info { struct macro_arg_list args; diff --git a/drivers/scsi/aic7xxx/scsi_message.h b/drivers/scsi/aic7xxx/scsi_message.h index a7515c3039ed..53343a6d8ae1 100644 --- a/drivers/scsi/aic7xxx/scsi_message.h +++ b/drivers/scsi/aic7xxx/scsi_message.h @@ -3,6 +3,17 @@ * $FreeBSD: src/sys/cam/scsi/scsi_message.h,v 1.2 2000/05/01 20:21:29 peter Exp $ */ +/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */ +#define MSG_SAVEDATAPOINTER 0x02 /* O/O */ +#define MSG_RESTOREPOINTERS 0x03 /* O/O */ +#define MSG_DISCONNECT 0x04 /* O/O */ +#define MSG_MESSAGE_REJECT 0x07 /* M/M */ +#define MSG_NOOP 0x08 /* M/M */ + +/* Messages (2 byte) */ +#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */ +#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */ + /* Identify message */ /* M/M */ #define MSG_IDENTIFYFLAG 0x80 #define MSG_IDENTIFY_DISCFLAG 0x40 diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 1a0dc18d6915..ed300a279a38 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1220,6 +1220,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) was a result from the ABTS request rather than the CLEANUP request */ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); + rc = FAILED; goto done; } diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 499c770d405c..e95408314078 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -4811,14 +4811,14 @@ hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba) { int i; - free_irq(pci_irq_vector(pdev, 1), hisi_hba); - free_irq(pci_irq_vector(pdev, 2), hisi_hba); - free_irq(pci_irq_vector(pdev, 11), hisi_hba); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 1), hisi_hba); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 2), hisi_hba); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 11), hisi_hba); for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; int nr = hisi_sas_intr_conv ? 
16 : 16 + i; - free_irq(pci_irq_vector(pdev, nr), cq); + devm_free_irq(&pdev->dev, pci_irq_vector(pdev, nr), cq); } pci_free_irq_vectors(pdev); } diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index 19cf418928fa..e3d03d744713 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -25,7 +25,7 @@ static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy static void sas_resume_port(struct asd_sas_phy *phy) { - struct domain_device *dev; + struct domain_device *dev, *n; struct asd_sas_port *port = phy->port; struct sas_ha_struct *sas_ha = phy->ha; struct sas_internal *si = to_sas_internal(sas_ha->core.shost->transportt); @@ -44,7 +44,7 @@ static void sas_resume_port(struct asd_sas_phy *phy) * 1/ presume every device came back * 2/ force the next revalidation to check all expander phys */ - list_for_each_entry(dev, &port->dev_list, dev_list_node) { + list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) { int i, rc; rc = sas_notify_lldd_dev_found(dev); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index b2008fb1dd38..12a6848ade43 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1563,10 +1563,12 @@ void qlt_stop_phase2(struct qla_tgt *tgt) return; } + mutex_lock(&tgt->ha->optrom_mutex); mutex_lock(&vha->vha_tgt.tgt_mutex); tgt->tgt_stop = 0; tgt->tgt_stopped = 1; mutex_unlock(&vha->vha_tgt.tgt_mutex); + mutex_unlock(&tgt->ha->optrom_mutex); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n", tgt); diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 8a79605d9652..b9969fce6b4d 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -585,7 +585,13 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter, case BTSTAT_SUCCESS: case BTSTAT_LINKED_COMMAND_COMPLETED: case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG: - /* If everything went fine, let's move on.. */ + /* + * Commands like INQUIRY may transfer less data than + * requested by the initiator via bufflen. Set residual + * count to make upper layer aware of the actual amount + * of data returned. 
+ */ + scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); cmd->result = (DID_OK << 16); break; diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index 2827085a323b..0ef79d60e88e 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -1150,8 +1150,16 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl) ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode", bp_mode, nports); - if (ret) - return ret; + if (ret) { + u32 version; + + ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &version); + + if (version <= 0x01030000) + memset(bp_mode, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS); + else + return ret; + } memset(hstart, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS); of_property_read_u8_array(np, "qcom,ports-hstart", hstart, nports); diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c index 741147a4f0fe..ecc5c9da9027 100644 --- a/drivers/staging/emxx_udc/emxx_udc.c +++ b/drivers/staging/emxx_udc/emxx_udc.c @@ -2064,7 +2064,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, int status) { - struct nbu2ss_req *req; + struct nbu2ss_req *req, *n; /* Endpoint Disable */ _nbu2ss_epn_exit(udc, ep); @@ -2076,7 +2076,7 @@ static int _nbu2ss_nuke(struct nbu2ss_udc *udc, return 0; /* called with irqs blocked */ - list_for_each_entry(req, &ep->queue, queue) { + list_for_each_entry_safe(req, n, &ep->queue, queue) { _nbu2ss_ep_done(ep, req, status); } diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c index dfd71e99e872..eab534dc4bcc 100644 --- a/drivers/staging/iio/cdc/ad7746.c +++ b/drivers/staging/iio/cdc/ad7746.c @@ -700,7 +700,6 @@ static int ad7746_probe(struct i2c_client *client, indio_dev->num_channels = ARRAY_SIZE(ad7746_channels); else indio_dev->num_channels = ARRAY_SIZE(ad7746_channels) - 2; - indio_dev->num_channels = ARRAY_SIZE(ad7746_channels); indio_dev->modes = INDIO_DIRECT_MODE; if (pdata) { diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index d6fdd1c61f90..a526f9678c34 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -204,11 +204,11 @@ static struct se_dev_plug *iblock_plug_device(struct se_device *se_dev) struct iblock_dev_plug *ib_dev_plug; /* - * Each se_device has a per cpu work this can be run from. Wwe + * Each se_device has a per cpu work this can be run from. We * shouldn't have multiple threads on the same cpu calling this * at the same time. 
*/ - ib_dev_plug = &ib_dev->ibd_plug[smp_processor_id()]; + ib_dev_plug = &ib_dev->ibd_plug[raw_smp_processor_id()]; if (test_and_set_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags)) return NULL; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 8fbfe75c5744..05d7ffd59df6 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1416,7 +1416,7 @@ void __target_init_cmd( cmd->orig_fe_lun = unpacked_lun; if (!(cmd->se_cmd_flags & SCF_USE_CPUID)) - cmd->cpuid = smp_processor_id(); + cmd->cpuid = raw_smp_processor_id(); cmd->state_active = false; } diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 198d25ae482a..4bba10e7755a 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -516,8 +516,10 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev, dpi = dbi * udev->data_pages_per_blk; /* Count the number of already allocated pages */ xas_set(&xas, dpi); + rcu_read_lock(); for (cnt = 0; xas_next(&xas) && cnt < page_cnt;) cnt++; + rcu_read_unlock(); for (i = cnt; i < page_cnt; i++) { /* try to get new page from the mm */ @@ -699,11 +701,10 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev, struct scatterlist *sg, unsigned int sg_nents, struct iovec **iov, size_t data_len) { - XA_STATE(xas, &udev->data_pages, 0); /* start value of dbi + 1 must not be a valid dbi */ int dbi = -2; size_t page_remaining, cp_len; - int page_cnt, page_inx; + int page_cnt, page_inx, dpi; struct sg_mapping_iter sg_iter; unsigned int sg_flags; struct page *page; @@ -726,9 +727,10 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev, if (page_cnt > udev->data_pages_per_blk) page_cnt = udev->data_pages_per_blk; - xas_set(&xas, dbi * udev->data_pages_per_blk); - for (page_inx = 0; page_inx < page_cnt && data_len; page_inx++) { - page = xas_next(&xas); + dpi = dbi * udev->data_pages_per_blk; + for (page_inx = 0; page_inx < page_cnt && data_len; + page_inx++, dpi++) { + page = xa_load(&udev->data_pages, dpi); if (direction == TCMU_DATA_AREA_TO_SG) flush_dcache_page(page); diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c index d1248ba943a4..62c0aa5d0783 100644 --- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c +++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c @@ -237,6 +237,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, if (ACPI_FAILURE(status)) trip_cnt = 0; else { + int i; + int34x_thermal_zone->aux_trips = kcalloc(trip_cnt, sizeof(*int34x_thermal_zone->aux_trips), @@ -247,6 +249,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, } trip_mask = BIT(trip_cnt) - 1; int34x_thermal_zone->aux_trip_nr = trip_cnt; + for (i = 0; i < trip_cnt; ++i) + int34x_thermal_zone->aux_trips[i] = THERMAL_TEMP_INVALID; } trip_cnt = int340x_thermal_read_trips(int34x_thermal_zone); diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c index 295742e83960..4d8edc61a78b 100644 --- a/drivers/thermal/intel/x86_pkg_temp_thermal.c +++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c @@ -166,7 +166,7 @@ static int sys_get_trip_temp(struct thermal_zone_device *tzd, if (thres_reg_value) *temp = zonedev->tj_max - thres_reg_value * 1000; else - *temp = 0; + *temp = THERMAL_TEMP_INVALID; pr_debug("sys_get_trip_temp %d\n", *temp); return 0; 
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c index b460b56e981c..232fd0b33325 100644 --- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c +++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c @@ -441,7 +441,7 @@ static int adc_tm5_get_dt_channel_data(struct adc_tm5_chip *adc_tm, if (args.args_count != 1 || args.args[0] >= ADC5_MAX_CHANNEL) { dev_err(dev, "%s: invalid ADC channel number %d\n", name, chan); - return ret; + return -EINVAL; } channel->adc_channel = args.args[0]; diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index ebe7cb70bfb6..ea0603b59309 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -770,7 +770,7 @@ static int ti_bandgap_tshut_init(struct ti_bandgap *bgp, } /** - * ti_bandgap_alert_init() - setup and initialize talert handling + * ti_bandgap_talert_init() - setup and initialize talert handling * @bgp: pointer to struct ti_bandgap * @pdev: pointer to device struct platform_device * diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c index 7288aaf01ae6..5631319f7b20 100644 --- a/drivers/thunderbolt/dma_port.c +++ b/drivers/thunderbolt/dma_port.c @@ -366,15 +366,15 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address, void *buf, size_t size) { unsigned int retries = DMA_PORT_RETRIES; - unsigned int offset; - - offset = address & 3; - address = address & ~3; do { - u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4); + unsigned int offset; + size_t nbytes; int ret; + offset = address & 3; + nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4); + ret = dma_port_flash_read_block(dma, address, dma->buf, ALIGN(nbytes, 4)); if (ret) { @@ -386,6 +386,7 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address, return ret; } + nbytes -= offset; memcpy(buf, dma->buf + offset, nbytes); size -= nbytes; diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 680bc738dd66..671d72af8ba1 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -68,15 +68,15 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size, unsigned int retries = USB4_DATA_RETRIES; unsigned int offset; - offset = address & 3; - address = address & ~3; - do { - size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4); unsigned int dwaddress, dwords; u8 data[USB4_DATA_DWORDS * 4]; + size_t nbytes; int ret; + offset = address & 3; + nbytes = min_t(size_t, size + offset, USB4_DATA_DWORDS * 4); + dwaddress = address / 4; dwords = ALIGN(nbytes, 4) / 4; @@ -87,6 +87,7 @@ static int usb4_do_read_data(u16 address, void *buf, size_t size, return ret; } + nbytes -= offset; memcpy(buf, data + offset, nbytes); size -= nbytes; diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index 52bb21205bb6..6473361525d1 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h @@ -7,6 +7,7 @@ * Copyright (C) 2001 Russell King. 
*/ +#include <linux/bits.h> #include <linux/serial_8250.h> #include <linux/serial_reg.h> #include <linux/dmaengine.h> @@ -70,24 +71,25 @@ struct serial8250_config { unsigned int flags; }; -#define UART_CAP_FIFO (1 << 8) /* UART has FIFO */ -#define UART_CAP_EFR (1 << 9) /* UART has EFR */ -#define UART_CAP_SLEEP (1 << 10) /* UART has IER sleep */ -#define UART_CAP_AFE (1 << 11) /* MCR-based hw flow control */ -#define UART_CAP_UUE (1 << 12) /* UART needs IER bit 6 set (Xscale) */ -#define UART_CAP_RTOIE (1 << 13) /* UART needs IER bit 4 set (Xscale, Tegra) */ -#define UART_CAP_HFIFO (1 << 14) /* UART has a "hidden" FIFO */ -#define UART_CAP_RPM (1 << 15) /* Runtime PM is active while idle */ -#define UART_CAP_IRDA (1 << 16) /* UART supports IrDA line discipline */ -#define UART_CAP_MINI (1 << 17) /* Mini UART on BCM283X family lacks: +#define UART_CAP_FIFO BIT(8) /* UART has FIFO */ +#define UART_CAP_EFR BIT(9) /* UART has EFR */ +#define UART_CAP_SLEEP BIT(10) /* UART has IER sleep */ +#define UART_CAP_AFE BIT(11) /* MCR-based hw flow control */ +#define UART_CAP_UUE BIT(12) /* UART needs IER bit 6 set (Xscale) */ +#define UART_CAP_RTOIE BIT(13) /* UART needs IER bit 4 set (Xscale, Tegra) */ +#define UART_CAP_HFIFO BIT(14) /* UART has a "hidden" FIFO */ +#define UART_CAP_RPM BIT(15) /* Runtime PM is active while idle */ +#define UART_CAP_IRDA BIT(16) /* UART supports IrDA line discipline */ +#define UART_CAP_MINI BIT(17) /* Mini UART on BCM283X family lacks: * STOP PARITY EPAR SPAR WLEN5 WLEN6 */ -#define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */ -#define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */ -#define UART_BUG_NOMSR (1 << 2) /* UART has buggy MSR status bits (Au1x00) */ -#define UART_BUG_THRE (1 << 3) /* UART has buggy THRE reassertion */ -#define UART_BUG_PARITY (1 << 4) /* UART mishandles parity if FIFO enabled */ +#define UART_BUG_QUOT BIT(0) /* UART has buggy quot LSB */ +#define UART_BUG_TXEN BIT(1) /* UART has buggy TX IIR status */ +#define UART_BUG_NOMSR BIT(2) /* UART has buggy MSR status bits (Au1x00) */ +#define UART_BUG_THRE BIT(3) /* UART has buggy THRE reassertion */ +#define UART_BUG_PARITY BIT(4) /* UART mishandles parity if FIFO enabled */ +#define UART_BUG_TXRACE BIT(5) /* UART Tx fails to set remote DR */ #ifdef CONFIG_SERIAL_8250_SHARE_IRQ diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c index 61550f24a2d3..d035d08cb987 100644 --- a/drivers/tty/serial/8250/8250_aspeed_vuart.c +++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c @@ -437,6 +437,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev) port.port.status = UPSTAT_SYNC_FIFO; port.port.dev = &pdev->dev; port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE); + port.bugs |= UART_BUG_TXRACE; rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group); if (rc < 0) diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 9e204f9b799a..a3a0154da567 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -714,6 +714,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = { { "APMC0D08", 0}, { "AMD0020", 0 }, { "AMDI0020", 0 }, + { "AMDI0022", 0 }, { "BRCM2032", 0 }, { "HISI0031", 0 }, { }, diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 689d8227f95f..780cc99732b6 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -56,6 +56,8 @@ struct serial_private { int 
line[]; }; +#define PCI_DEVICE_ID_HPE_PCI_SERIAL 0x37e + static const struct pci_device_id pci_use_msi[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9900, 0xA000, 0x1000) }, @@ -63,6 +65,8 @@ static const struct pci_device_id pci_use_msi[] = { 0xA000, 0x1000) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922, 0xA000, 0x1000) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL, + PCI_ANY_ID, PCI_ANY_ID) }, { } }; @@ -1998,6 +2002,16 @@ static struct pci_serial_quirk pci_serial_quirks[] = { .setup = pci_hp_diva_setup, }, /* + * HPE PCI serial device + */ + { + .vendor = PCI_VENDOR_ID_HP_3PAR, + .device = PCI_DEVICE_ID_HPE_PCI_SERIAL, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_hp_diva_setup, + }, + /* * Intel */ { @@ -3944,21 +3958,26 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ; uart.port.uartclk = board->base_baud * 16; - if (pci_match_id(pci_use_msi, dev)) { - dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n"); - pci_set_master(dev); - rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES); + if (board->flags & FL_NOIRQ) { + uart.port.irq = 0; } else { - dev_dbg(&dev->dev, "Using legacy interrupts\n"); - rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY); - } - if (rc < 0) { - kfree(priv); - priv = ERR_PTR(rc); - goto err_deinit; + if (pci_match_id(pci_use_msi, dev)) { + dev_dbg(&dev->dev, "Using MSI(-X) interrupts\n"); + pci_set_master(dev); + rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_ALL_TYPES); + } else { + dev_dbg(&dev->dev, "Using legacy interrupts\n"); + rc = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY); + } + if (rc < 0) { + kfree(priv); + priv = ERR_PTR(rc); + goto err_deinit; + } + + uart.port.irq = pci_irq_vector(dev, 0); } - uart.port.irq = pci_irq_vector(dev, 0); uart.port.dev = &dev->dev; for (i = 0; i < nr_ports; i++) { @@ -4973,6 +4992,10 @@ static const struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_1_115200 }, + /* HPE PCI serial device */ + { PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b1_1_115200 }, { PCI_VENDOR_ID_DCI, PCI_DEVICE_ID_DCI_PCCOM2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index d45dab1ab316..fc5ab2032282 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1809,6 +1809,18 @@ void serial8250_tx_chars(struct uart_8250_port *up) count = up->tx_loadsz; do { serial_out(up, UART_TX, xmit->buf[xmit->tail]); + if (up->bugs & UART_BUG_TXRACE) { + /* + * The Aspeed BMC virtual UARTs have a bug where data + * may get stuck in the BMC's Tx FIFO from bursts of + * writes on the APB interface. + * + * Delay back-to-back writes by a read cycle to avoid + * stalling the VUART. Read a register that won't have + * side-effects and discard the result. 
+ */ + serial_in(up, UART_SCR); + } xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; if (uart_circ_empty(xmit)) diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c index d60abffab70e..6689d8add8f7 100644 --- a/drivers/tty/serial/rp2.c +++ b/drivers/tty/serial/rp2.c @@ -195,7 +195,6 @@ struct rp2_card { void __iomem *bar0; void __iomem *bar1; spinlock_t card_lock; - struct completion fw_loaded; }; #define RP_ID(prod) PCI_VDEVICE(RP, (prod)) @@ -662,17 +661,10 @@ static void rp2_remove_ports(struct rp2_card *card) card->initialized_ports = 0; } -static void rp2_fw_cb(const struct firmware *fw, void *context) +static int rp2_load_firmware(struct rp2_card *card, const struct firmware *fw) { - struct rp2_card *card = context; resource_size_t phys_base; - int i, rc = -ENOENT; - - if (!fw) { - dev_err(&card->pdev->dev, "cannot find '%s' firmware image\n", - RP2_FW_NAME); - goto no_fw; - } + int i, rc = 0; phys_base = pci_resource_start(card->pdev, 1); @@ -718,23 +710,13 @@ static void rp2_fw_cb(const struct firmware *fw, void *context) card->initialized_ports++; } - release_firmware(fw); -no_fw: - /* - * rp2_fw_cb() is called from a workqueue long after rp2_probe() - * has already returned success. So if something failed here, - * we'll just leave the now-dormant device in place until somebody - * unbinds it. - */ - if (rc) - dev_warn(&card->pdev->dev, "driver initialization failed\n"); - - complete(&card->fw_loaded); + return rc; } static int rp2_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + const struct firmware *fw; struct rp2_card *card; struct rp2_uart_port *ports; void __iomem * const *bars; @@ -745,7 +727,6 @@ static int rp2_probe(struct pci_dev *pdev, return -ENOMEM; pci_set_drvdata(pdev, card); spin_lock_init(&card->card_lock); - init_completion(&card->fw_loaded); rc = pcim_enable_device(pdev); if (rc) @@ -778,21 +759,23 @@ static int rp2_probe(struct pci_dev *pdev, return -ENOMEM; card->ports = ports; - rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt, - IRQF_SHARED, DRV_NAME, card); - if (rc) + rc = request_firmware(&fw, RP2_FW_NAME, &pdev->dev); + if (rc < 0) { + dev_err(&pdev->dev, "cannot find '%s' firmware image\n", + RP2_FW_NAME); return rc; + } - /* - * Only catastrophic errors (e.g. ENOMEM) are reported here. - * If the FW image is missing, we'll find out in rp2_fw_cb() - * and print an error message. 
- */ - rc = request_firmware_nowait(THIS_MODULE, 1, RP2_FW_NAME, &pdev->dev, - GFP_KERNEL, card, rp2_fw_cb); + rc = rp2_load_firmware(card, fw); + + release_firmware(fw); + if (rc < 0) + return rc; + + rc = devm_request_irq(&pdev->dev, pdev->irq, rp2_uart_interrupt, + IRQF_SHARED, DRV_NAME, card); if (rc) return rc; - dev_dbg(&pdev->dev, "waiting for firmware blob...\n"); return 0; } @@ -801,7 +784,6 @@ static void rp2_remove(struct pci_dev *pdev) { struct rp2_card *card = pci_get_drvdata(pdev); - wait_for_completion(&card->fw_loaded); rp2_remove_ports(card); } diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index bbae072a125d..222032792d6c 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -338,7 +338,7 @@ static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits) do { lsr = tegra_uart_read(tup, UART_LSR); - if ((lsr | UART_LSR_TEMT) && !(lsr & UART_LSR_DR)) + if ((lsr & UART_LSR_TEMT) && !(lsr & UART_LSR_DR)) break; udelay(1); } while (--tmout); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 87f7127b57e6..18ff85a83f80 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -863,9 +863,11 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port, goto check_and_exit; } - retval = security_locked_down(LOCKDOWN_TIOCSSERIAL); - if (retval && (change_irq || change_port)) - goto exit; + if (change_irq || change_port) { + retval = security_locked_down(LOCKDOWN_TIOCSSERIAL); + if (retval) + goto exit; + } /* * Ask the low level driver to verify the settings. diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index ef37fdf37612..4baf1316ea72 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1023,10 +1023,10 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig) { unsigned int bits; + if (rx_trig >= port->fifosize) + rx_trig = port->fifosize - 1; if (rx_trig < 1) rx_trig = 1; - if (rx_trig >= port->fifosize) - rx_trig = port->fifosize; /* HSCIF can be set to an arbitrary level. */ if (sci_getreg(port, HSRTRGR)->size) { diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index 9b1bd417cec0..a8b7b50abf64 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -3268,8 +3268,10 @@ static int __cdns3_gadget_init(struct cdns *cdns) pm_runtime_get_sync(cdns->dev); ret = cdns3_gadget_start(cdns); - if (ret) + if (ret) { + pm_runtime_put_sync(cdns->dev); return ret; + } /* * Because interrupt line can be shared with other components in diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c index 56707b6b0f57..c083985e387b 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.c +++ b/drivers/usb/cdns3/cdnsp-gadget.c @@ -422,17 +422,17 @@ unmap: int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq) { struct cdnsp_device *pdev = pep->pdev; - int ret; + int ret_stop = 0; + int ret_rem; trace_cdnsp_request_dequeue(preq); - if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) { - ret = cdnsp_cmd_stop_ep(pdev, pep); - if (ret) - return ret; - } + if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) + ret_stop = cdnsp_cmd_stop_ep(pdev, pep); + + ret_rem = cdnsp_remove_request(pdev, preq, pep); - return cdnsp_remove_request(pdev, preq, pep); + return ret_rem ? 
ret_rem : ret_stop; } static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev) diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index c16d900cdaee..393f216b9161 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -2061,6 +2061,7 @@ static int udc_start(struct ci_hdrc *ci) ci->gadget.name = ci->platdata->name; ci->gadget.otg_caps = otg_caps; ci->gadget.sg_supported = 1; + ci->gadget.irq = ci->irq; if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA) ci->gadget.quirk_avoids_skb_reserve = 1; diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 533236366a03..2218941d35a3 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -1218,7 +1218,12 @@ static int do_proc_bulk(struct usb_dev_state *ps, ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb)); if (ret) return ret; - tbuf = kmalloc(len1, GFP_KERNEL); + + /* + * len1 can be almost arbitrarily large. Don't WARN if it's + * too big, just fail the request. + */ + tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN); if (!tbuf) { ret = -ENOMEM; goto done; @@ -1696,7 +1701,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb if (num_sgs) { as->urb->sg = kmalloc_array(num_sgs, sizeof(struct scatterlist), - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (!as->urb->sg) { ret = -ENOMEM; goto error; @@ -1731,7 +1736,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb (uurb_start - as->usbm->vm_start); } else { as->urb->transfer_buffer = kmalloc(uurb->buffer_length, - GFP_KERNEL); + GFP_KERNEL | __GFP_NOWARN); if (!as->urb->transfer_buffer) { ret = -ENOMEM; goto error; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 49ca5da5e279..612825a39f82 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1244,6 +1244,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep, req->start_sg = sg_next(s); req->num_queued_sgs++; + req->num_pending_sgs--; /* * The number of pending SG entries may not correspond to the @@ -1251,7 +1252,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep, * don't include unused SG entries. 
*/ if (length == 0) { - req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs; + req->num_pending_sgs = 0; break; } @@ -2873,15 +2874,15 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep, struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue]; struct scatterlist *sg = req->sg; struct scatterlist *s; - unsigned int pending = req->num_pending_sgs; + unsigned int num_queued = req->num_queued_sgs; unsigned int i; int ret = 0; - for_each_sg(sg, s, pending, i) { + for_each_sg(sg, s, num_queued, i) { trb = &dep->trb_pool[dep->trb_dequeue]; req->sg = sg_next(s); - req->num_pending_sgs--; + req->num_queued_sgs--; ret = dwc3_gadget_ep_reclaim_completed_trb(dep, req, trb, event, status, true); @@ -2904,7 +2905,7 @@ static int dwc3_gadget_ep_reclaim_trb_linear(struct dwc3_ep *dep, static bool dwc3_gadget_ep_request_completed(struct dwc3_request *req) { - return req->num_pending_sgs == 0; + return req->num_pending_sgs == 0 && req->num_queued_sgs == 0; } static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, @@ -2913,7 +2914,7 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, { int ret; - if (req->num_pending_sgs) + if (req->request.num_mapped_sgs) ret = dwc3_gadget_ep_reclaim_trb_sg(dep, req, event, status); else diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 0c418ce50ba0..f1b35a39d1ba 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -1488,7 +1488,7 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep, struct renesas_usb3_request *usb3_req) { struct renesas_usb3 *usb3 = usb3_ep_to_usb3(usb3_ep); - struct renesas_usb3_request *usb3_req_first = usb3_get_request(usb3_ep); + struct renesas_usb3_request *usb3_req_first; unsigned long flags; int ret = -EAGAIN; u32 enable_bits = 0; @@ -1496,7 +1496,8 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep, spin_lock_irqsave(&usb3->lock, flags); if (usb3_ep->halt || usb3_ep->started) goto out; - if (usb3_req != usb3_req_first) + usb3_req_first = __usb3_get_request(usb3_ep); + if (!usb3_req_first || usb3_req != usb3_req_first) goto out; if (usb3_pn_change(usb3, usb3_ep->num) < 0) diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index a8e4189277da..6acd2329e08d 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -828,14 +828,10 @@ static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep) list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { - /* - * Doesn't matter what we pass for status, since the core will - * just overwrite it (because the URB has been unlinked). - */ ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb); if (td->cancel_status == TD_CLEARED) - xhci_td_cleanup(ep->xhci, td, ring, 0); + xhci_td_cleanup(ep->xhci, td, ring, td->status); if (ep->xhci->xhc_state & XHCI_STATE_DYING) return; @@ -937,14 +933,18 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) continue; } /* - * If ring stopped on the TD we need to cancel, then we have to + * If a ring stopped on the TD we need to cancel then we have to * move the xHC endpoint ring dequeue pointer past this TD. + * Rings halted due to STALL may show hw_deq is past the stalled + * TD, but still require a set TR Deq command to flush xHC cache. 
*/ hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index, td->urb->stream_id); hw_deq &= ~0xf; - if (trb_in_td(xhci, td->start_seg, td->first_trb, + if (td->cancel_status == TD_HALTED) { + cached_td = td; + } else if (trb_in_td(xhci, td->start_seg, td->first_trb, td->last_trb, hw_deq, false)) { switch (td->cancel_status) { case TD_CLEARED: /* TD is already no-op */ diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c index a3dfc77578ea..26baba3ab7d7 100644 --- a/drivers/usb/misc/trancevibrator.c +++ b/drivers/usb/misc/trancevibrator.c @@ -61,9 +61,9 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr, /* Set speed */ retval = usb_control_msg(tv->udev, usb_sndctrlpipe(tv->udev, 0), 0x01, /* vendor request: set speed */ - USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER, tv->speed, /* speed value */ - 0, NULL, 0, USB_CTRL_GET_TIMEOUT); + 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (retval) { tv->speed = old; dev_dbg(&tv->udev->dev, "retval = %d\n", retval); diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index b5d661644263..748139d26263 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -736,6 +736,7 @@ static int uss720_probe(struct usb_interface *intf, parport_announce_port(pp); usb_set_intfdata(intf, pp); + usb_put_dev(usbdev); return 0; probe_abort: diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 6f2659e59b2e..369ef140df78 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1034,6 +1034,9 @@ static const struct usb_device_id id_table_combined[] = { /* Sienna devices */ { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) }, { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) }, + /* IDS GmbH devices */ + { USB_DEVICE(IDS_VID, IDS_SI31A_PID) }, + { USB_DEVICE(IDS_VID, IDS_CM31A_PID) }, /* U-Blox devices */ { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) }, { USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 3d47c6d72256..d854e04a4286 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1568,6 +1568,13 @@ #define UNJO_ISODEBUG_V1_PID 0x150D /* + * IDS GmbH + */ +#define IDS_VID 0x2CAF +#define IDS_SI31A_PID 0x13A2 +#define IDS_CM31A_PID 0x13A3 + +/* * U-Blox products (http://www.u-blox.com). 
*/ #define UBLOX_VID 0x1546 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 3e79a543d3e7..7608584ef4fe 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1240,6 +1240,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ .driver_info = NCTRL(0) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */ + .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ + .driver_info = NCTRL(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ .driver_info = NCTRL(0) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index fd773d252691..940050c31482 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -113,6 +113,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, + { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530GC_PRODUCT_ID) }, { USB_DEVICE(SMART_VENDOR_ID, SMART_PRODUCT_ID) }, { USB_DEVICE(AT_VENDOR_ID, AT_VTKIT3_PRODUCT_ID) }, { } /* Terminating entry */ diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h index 0f681ddbfd28..6097ee8fccb2 100644 --- a/drivers/usb/serial/pl2303.h +++ b/drivers/usb/serial/pl2303.h @@ -158,6 +158,7 @@ /* ADLINK ND-6530 RS232,RS485 and RS422 adapter */ #define ADLINK_VENDOR_ID 0x0b63 #define ADLINK_ND6530_PRODUCT_ID 0x6530 +#define ADLINK_ND6530GC_PRODUCT_ID 0x653a /* SMART USB Serial Adapter */ #define SMART_VENDOR_ID 0x0b8c diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index caa46ac23db9..310db5abea9d 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -37,6 +37,7 @@ /* Vendor and product ids */ #define TI_VENDOR_ID 0x0451 #define IBM_VENDOR_ID 0x04b3 +#define STARTECH_VENDOR_ID 0x14b0 #define TI_3410_PRODUCT_ID 0x3410 #define IBM_4543_PRODUCT_ID 0x4543 #define IBM_454B_PRODUCT_ID 0x454b @@ -370,6 +371,7 @@ static const struct usb_device_id ti_id_table_3410[] = { { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) }, + { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) }, { } /* terminator */ }; @@ -408,6 +410,7 @@ static const struct usb_device_id ti_id_table_combined[] = { { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) }, { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) }, + { USB_DEVICE(STARTECH_VENDOR_ID, TI_3410_PRODUCT_ID) }, { } /* terminator */ }; diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c index 9da22ae3006c..8514bec7e1b8 100644 --- a/drivers/usb/typec/mux.c +++ b/drivers/usb/typec/mux.c @@ -191,6 +191,7 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id, bool match; int nval; u16 *val; + int ret; int i; /* @@ -218,10 +219,10 @@ static void *typec_mux_match(struct fwnode_handle *fwnode, const char *id, if (!val) return ERR_PTR(-ENOMEM); - nval = fwnode_property_read_u16_array(fwnode, "svid", val, nval); - if 
(nval < 0) { + ret = fwnode_property_read_u16_array(fwnode, "svid", val, nval); + if (ret < 0) { kfree(val); - return ERR_PTR(nval); + return ERR_PTR(ret); } for (i = 0; i < nval; i++) { diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 64133e586c64..9ce8c9af4da5 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -1550,6 +1550,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, if (PD_VDO_SVDM_VER(p[0]) < svdm_version) typec_partner_set_svdm_version(port->partner, PD_VDO_SVDM_VER(p[0])); + + tcpm_ams_start(port, DISCOVER_IDENTITY); /* 6.4.4.3.1: Only respond as UFP (device) */ if (port->data_role == TYPEC_DEVICE && port->nr_snk_vdo) { @@ -1568,14 +1570,19 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev, } break; case CMD_DISCOVER_SVID: + tcpm_ams_start(port, DISCOVER_SVIDS); break; case CMD_DISCOVER_MODES: + tcpm_ams_start(port, DISCOVER_MODES); break; case CMD_ENTER_MODE: + tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE); break; case CMD_EXIT_MODE: + tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE); break; case CMD_ATTENTION: + tcpm_ams_start(port, ATTENTION); /* Attention command does not have response */ *adev_action = ADEV_ATTENTION; return 0; @@ -2287,6 +2294,12 @@ static void tcpm_pd_data_request(struct tcpm_port *port, bool frs_enable; int ret; + if (tcpm_vdm_ams(port) && type != PD_DATA_VENDOR_DEF) { + port->vdm_state = VDM_STATE_ERR_BUSY; + tcpm_ams_finish(port); + mod_vdm_delayed_work(port, 0); + } + switch (type) { case PD_DATA_SOURCE_CAP: for (i = 0; i < cnt; i++) @@ -2417,7 +2430,10 @@ static void tcpm_pd_data_request(struct tcpm_port *port, NONE_AMS); break; case PD_DATA_VENDOR_DEF: - tcpm_handle_vdm_request(port, msg->payload, cnt); + if (tcpm_vdm_ams(port) || port->nr_snk_vdo) + tcpm_handle_vdm_request(port, msg->payload, cnt); + else if (port->negotiated_rev > PD_REV20) + tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); break; case PD_DATA_BIST: port->bist_request = le32_to_cpu(msg->payload[0]); @@ -2459,6 +2475,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, enum pd_ctrl_msg_type type = pd_header_type_le(msg->header); enum tcpm_state next_state; + /* + * Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in + * VDM AMS if waiting for VDM responses and will be handled later. 
+ */ + if (tcpm_vdm_ams(port) && type != PD_CTRL_NOT_SUPP && type != PD_CTRL_GOOD_CRC) { + port->vdm_state = VDM_STATE_ERR_BUSY; + tcpm_ams_finish(port); + mod_vdm_delayed_work(port, 0); + } + switch (type) { case PD_CTRL_GOOD_CRC: case PD_CTRL_PING: @@ -2717,7 +2743,14 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port, enum pd_ext_msg_type type = pd_header_type_le(msg->header); unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header); - if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) { + /* stopping VDM state machine if interrupted by other Messages */ + if (tcpm_vdm_ams(port)) { + port->vdm_state = VDM_STATE_ERR_BUSY; + tcpm_ams_finish(port); + mod_vdm_delayed_work(port, 0); + } + + if (!(le16_to_cpu(msg->ext_msg.header) & PD_EXT_HDR_CHUNKED)) { tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS); tcpm_log(port, "Unchunked extended messages unsupported"); return; @@ -2811,7 +2844,7 @@ static void tcpm_pd_rx_handler(struct kthread_work *work) "Data role mismatch, initiating error recovery"); tcpm_set_state(port, ERROR_RECOVERY, 0); } else { - if (msg->header & PD_HEADER_EXT_HDR) + if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR) tcpm_pd_ext_msg_request(port, msg); else if (cnt) tcpm_pd_data_request(port, msg); diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 1d8b7df59ff4..b433169ef6fa 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -717,8 +717,8 @@ static void ucsi_handle_connector_change(struct work_struct *work) ucsi_send_command(con->ucsi, command, NULL, 0); /* 3. ACK connector change */ - clear_bit(EVENT_PENDING, &ucsi->flags); ret = ucsi_acknowledge_connector_change(ucsi); + clear_bit(EVENT_PENDING, &ucsi->flags); if (ret) { dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret); goto out_unlock; diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig index 53ce78d7d07b..5e2e1b9a9fd3 100644 --- a/drivers/vfio/pci/Kconfig +++ b/drivers/vfio/pci/Kconfig @@ -2,6 +2,7 @@ config VFIO_PCI tristate "VFIO support for PCI devices" depends on VFIO && PCI && EVENTFD + depends on MMU select VFIO_VIRQFD select IRQ_BYPASS_MANAGER help diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index d57f037f65b8..70e28efbc51f 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev) if (len == 0xFF) { len = vfio_ext_cap_len(vdev, ecap, epos); if (len < 0) - return ret; + return len; } } diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index 361e5b57e369..470fcf7dac56 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -291,7 +291,7 @@ err_irq: vfio_platform_regions_cleanup(vdev); err_reg: mutex_unlock(&driver_lock); - module_put(THIS_MODULE); + module_put(vdev->parent_module); return ret; } diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index a0747c35a778..a3e925a41b0d 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -2795,7 +2795,7 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu, return 0; } - size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges)); + size = struct_size(cap_iovas, iova_ranges, iovas); cap_iovas = kzalloc(size, GFP_KERNEL); if (!cap_iovas) diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c 
index b292887a2481..a591d291b231 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c @@ -52,6 +52,13 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; get_page(page); + + if (vmf->vma->vm_file) + page->mapping = vmf->vma->vm_file->f_mapping; + else + printk(KERN_ERR "no mapping available\n"); + + BUG_ON(!page->mapping); page->index = vmf->pgoff; vmf->page = page; @@ -144,6 +151,17 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = { .page_mkwrite = fb_deferred_io_mkwrite, }; +static int fb_deferred_io_set_page_dirty(struct page *page) +{ + if (!PageDirty(page)) + SetPageDirty(page); + return 0; +} + +static const struct address_space_operations fb_deferred_io_aops = { + .set_page_dirty = fb_deferred_io_set_page_dirty, +}; + int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma) { vma->vm_ops = &fb_deferred_io_vm_ops; @@ -194,12 +212,29 @@ void fb_deferred_io_init(struct fb_info *info) } EXPORT_SYMBOL_GPL(fb_deferred_io_init); +void fb_deferred_io_open(struct fb_info *info, + struct inode *inode, + struct file *file) +{ + file->f_mapping->a_ops = &fb_deferred_io_aops; +} +EXPORT_SYMBOL_GPL(fb_deferred_io_open); + void fb_deferred_io_cleanup(struct fb_info *info) { struct fb_deferred_io *fbdefio = info->fbdefio; + struct page *page; + int i; BUG_ON(!fbdefio); cancel_delayed_work_sync(&info->deferred_work); + + /* clear out the mapping that we setup */ + for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) { + page = fb_deferred_io_page(info, i); + page->mapping = NULL; + } + mutex_destroy(&fbdefio->lock); } EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 072780b0e570..98f193078c05 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1415,6 +1415,10 @@ __releases(&info->lock) if (res) module_put(info->fbops->owner); } +#ifdef CONFIG_FB_DEFERRED_IO + if (info->fbdefio) + fb_deferred_io_open(info, inode, file); +#endif out: unlock_fb_info(info); if (res) diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c index cc8e62ae93f6..bd3d07aa4f0e 100644 --- a/drivers/video/fbdev/hgafb.c +++ b/drivers/video/fbdev/hgafb.c @@ -558,7 +558,7 @@ static int hgafb_probe(struct platform_device *pdev) int ret; ret = hga_card_detect(); - if (!ret) + if (ret) return ret; printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n", diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 9fbe5a5ec9bd..78719f2f567e 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -1919,7 +1919,9 @@ static void afs_rename_edit_dir(struct afs_operation *op) new_inode = d_inode(new_dentry); if (new_inode) { spin_lock(&new_inode->i_lock); - if (new_inode->i_nlink > 0) + if (S_ISDIR(new_inode->i_mode)) + clear_nlink(new_inode); + else if (new_inode->i_nlink > 0) drop_nlink(new_inode); spin_unlock(&new_inode->i_lock); } diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index d17ac301032e..1346d698463a 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -457,7 +457,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, bytes_left = compressed_len; for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { int submit = 0; - int len; + int len = 0; page = compressed_pages[pg_index]; page->mapping = inode->vfs_inode.i_mapping; @@ -465,10 +465,17 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, submit = 
btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio, 0); - if (pg_index == 0 && use_append) - len = bio_add_zone_append_page(bio, page, PAGE_SIZE, 0); - else - len = bio_add_page(bio, page, PAGE_SIZE, 0); + /* + * Page can only be added to bio if the current bio fits in + * stripe. + */ + if (!submit) { + if (pg_index == 0 && use_append) + len = bio_add_zone_append_page(bio, page, + PAGE_SIZE, 0); + else + len = bio_add_page(bio, page, PAGE_SIZE, 0); + } page->mapping = NULL; if (submit || len < PAGE_SIZE) { diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f1d15b68994a..3d5c35e4cb76 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1868,7 +1868,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, trace_run_delayed_ref_head(fs_info, head, 0); btrfs_delayed_ref_unlock(head); btrfs_put_delayed_ref_head(head); - return 0; + return ret; } static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head( diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 294602f139ef..441cee7fbb62 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -788,7 +788,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, u64 end_byte = bytenr + len; u64 csum_end; struct extent_buffer *leaf; - int ret; + int ret = 0; const u32 csum_size = fs_info->csum_size; u32 blocksize_bits = fs_info->sectorsize_bits; @@ -806,6 +806,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, ret = btrfs_search_slot(trans, root, &key, path, -1, 1); if (ret > 0) { + ret = 0; if (path->slots[0] == 0) break; path->slots[0]--; @@ -862,7 +863,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, ret = btrfs_del_items(trans, root, path, path->slots[0], del_nr); if (ret) - goto out; + break; if (key.offset == bytenr) break; } else if (key.offset < bytenr && csum_end > end_byte) { @@ -906,8 +907,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, ret = btrfs_split_item(trans, root, path, &key, offset); if (ret && ret != -EAGAIN) { btrfs_abort_transaction(trans, ret); - goto out; + break; } + ret = 0; key.offset = end_byte - 1; } else { @@ -917,12 +919,41 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, } btrfs_release_path(path); } - ret = 0; -out: btrfs_free_path(path); return ret; } +static int find_next_csum_offset(struct btrfs_root *root, + struct btrfs_path *path, + u64 *next_offset) +{ + const u32 nritems = btrfs_header_nritems(path->nodes[0]); + struct btrfs_key found_key; + int slot = path->slots[0] + 1; + int ret; + + if (nritems == 0 || slot >= nritems) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) { + return ret; + } else if (ret > 0) { + *next_offset = (u64)-1; + return 0; + } + slot = path->slots[0]; + } + + btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); + + if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || + found_key.type != BTRFS_EXTENT_CSUM_KEY) + *next_offset = (u64)-1; + else + *next_offset = found_key.offset; + + return 0; +} + int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_ordered_sum *sums) @@ -938,7 +969,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, u64 total_bytes = 0; u64 csum_offset; u64 bytenr; - u32 nritems; u32 ins_size; int index = 0; int found_next; @@ -981,26 +1011,10 @@ again: goto insert; } } else { - int slot = path->slots[0] + 1; - /* we didn't find a csum item, insert one */ - nritems = btrfs_header_nritems(path->nodes[0]); - if (!nritems || (path->slots[0] >= nritems - 1)) { - ret = btrfs_next_leaf(root, path); - if (ret < 0) 
{ - goto out; - } else if (ret > 0) { - found_next = 1; - goto insert; - } - slot = path->slots[0]; - } - btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); - if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || - found_key.type != BTRFS_EXTENT_CSUM_KEY) { - found_next = 1; - goto insert; - } - next_offset = found_key.offset; + /* We didn't find a csum item, insert one. */ + ret = find_next_csum_offset(root, path, &next_offset); + if (ret < 0) + goto out; found_next = 1; goto insert; } @@ -1056,8 +1070,48 @@ extend_csum: tmp = sums->len - total_bytes; tmp >>= fs_info->sectorsize_bits; WARN_ON(tmp < 1); + extend_nr = max_t(int, 1, tmp); + + /* + * A log tree can already have checksum items with a subset of + * the checksums we are trying to log. This can happen after + * doing a sequence of partial writes into prealloc extents and + * fsyncs in between, with a full fsync logging a larger subrange + * of an extent for which a previous fast fsync logged a smaller + * subrange. And this happens in particular due to merging file + * extent items when we complete an ordered extent for a range + * covered by a prealloc extent - this is done at + * btrfs_mark_extent_written(). + * + * So if we try to extend the previous checksum item, which has + * a range that ends at the start of the range we want to insert, + * make sure we don't extend beyond the start offset of the next + * checksum item. If we are at the last item in the leaf, then + * forget the optimization of extending and add a new checksum + * item - it is not worth the complexity of releasing the path, + * getting the first key for the next leaf, repeat the btree + * search, etc, because log trees are temporary anyway and it + * would only save a few bytes of leaf space. + */ + if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { + if (path->slots[0] + 1 >= + btrfs_header_nritems(path->nodes[0])) { + ret = find_next_csum_offset(root, path, &next_offset); + if (ret < 0) + goto out; + found_next = 1; + goto insert; + } + + ret = find_next_csum_offset(root, path, &next_offset); + if (ret < 0) + goto out; + + tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits; + if (tmp <= INT_MAX) + extend_nr = min_t(int, extend_nr, tmp); + } - extend_nr = max_t(int, 1, (int)tmp); diff = (csum_offset + extend_nr) * csum_size; diff = min(diff, MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 33f14573f2ec..46f392943f4d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3000,6 +3000,18 @@ out: if (ret || truncated) { u64 unwritten_start = start; + /* + * If we failed to finish this ordered extent for any reason we + * need to make sure BTRFS_ORDERED_IOERR is set on the ordered + * extent, and mark the inode with the error if it wasn't + * already set. Any error during writeback would have already + * set the mapping error, so we need to set it if we're the ones + * marking this ordered extent as failed. 
+ */ + if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR, + &ordered_extent->flags)) + mapping_set_error(ordered_extent->inode->i_mapping, -EIO); + if (truncated) unwritten_start += logical_len; clear_extent_uptodate(io_tree, unwritten_start, end, NULL); @@ -9076,6 +9088,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, int ret2; bool root_log_pinned = false; bool dest_log_pinned = false; + bool need_abort = false; /* we only allow rename subvolume link between subvolumes */ if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) @@ -9135,6 +9148,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, old_idx); if (ret) goto out_fail; + need_abort = true; } /* And now for the dest. */ @@ -9150,8 +9164,11 @@ static int btrfs_rename_exchange(struct inode *old_dir, new_ino, btrfs_ino(BTRFS_I(old_dir)), new_idx); - if (ret) + if (ret) { + if (need_abort) + btrfs_abort_transaction(trans, ret); goto out_fail; + } } /* Update inode version and ctime/mtime. */ diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c index d434dc78dadf..9178da07cc9c 100644 --- a/fs/btrfs/reflink.c +++ b/fs/btrfs/reflink.c @@ -203,10 +203,7 @@ static int clone_copy_inline_extent(struct inode *dst, * inline extent's data to the page. */ ASSERT(key.offset > 0); - ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, - inline_data, size, datal, - comp_type); - goto out; + goto copy_to_page; } } else if (i_size_read(dst) <= datal) { struct btrfs_file_extent_item *ei; @@ -222,13 +219,10 @@ static int clone_copy_inline_extent(struct inode *dst, BTRFS_FILE_EXTENT_INLINE) goto copy_inline_extent; - ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, - inline_data, size, datal, comp_type); - goto out; + goto copy_to_page; } copy_inline_extent: - ret = 0; /* * We have no extent items, or we have an extent at offset 0 which may * or may not be inlined. All these cases are dealt the same way. @@ -240,11 +234,13 @@ copy_inline_extent: * clone. Deal with all these cases by copying the inline extent * data into the respective page at the destination inode. */ - ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, - inline_data, size, datal, comp_type); - goto out; + goto copy_to_page; } + /* + * Release path before starting a new transaction so we don't hold locks + * that would confuse lockdep. + */ btrfs_release_path(path); /* * If we end up here it means were copy the inline extent into a leaf @@ -282,11 +278,6 @@ copy_inline_extent: out: if (!ret && !trans) { /* - * Release path before starting a new transaction so we don't - * hold locks that would confuse lockdep. - */ - btrfs_release_path(path); - /* * No transaction here means we copied the inline extent into a * page of the destination inode. * @@ -306,6 +297,21 @@ out: *trans_out = trans; return ret; + +copy_to_page: + /* + * Release our path because we don't need it anymore and also because + * copy_inline_to_page() needs to reserve data and metadata, which may + * need to flush delalloc when we are low on available space and + * therefore cause a deadlock if writeback of an inline extent needs to + * write to the same leaf or an ordered extent completion needs to write + * to the same leaf. 
+ */ + btrfs_release_path(path); + + ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, + inline_data, size, datal, comp_type); + goto out; } /** diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 326be57f2828..362d14db1e38 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1574,7 +1574,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, if (ret) goto out; - btrfs_update_inode(trans, root, BTRFS_I(inode)); + ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); + if (ret) + goto out; } ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen; @@ -1749,7 +1751,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, if (nlink != inode->i_nlink) { set_nlink(inode, nlink); - btrfs_update_inode(trans, root, BTRFS_I(inode)); + ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); + if (ret) + goto out; } BTRFS_I(inode)->index_cnt = (u64)-1; @@ -1787,6 +1791,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, break; if (ret == 1) { + ret = 0; if (path->slots[0] == 0) break; path->slots[0]--; @@ -1799,17 +1804,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, ret = btrfs_del_item(trans, root, path); if (ret) - goto out; + break; btrfs_release_path(path); inode = read_one_inode(root, key.offset); - if (!inode) - return -EIO; + if (!inode) { + ret = -EIO; + break; + } ret = fixup_inode_link_count(trans, root, inode); iput(inode); if (ret) - goto out; + break; /* * fixup on a directory may create new entries, @@ -1818,8 +1825,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, */ key.offset = (u64)-1; } - ret = 0; -out: btrfs_release_path(path); return ret; } diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h index 4a97fe12006b..37fc7d6ac457 100644 --- a/fs/cifs/cifs_ioctl.h +++ b/fs/cifs/cifs_ioctl.h @@ -72,15 +72,28 @@ struct smb3_key_debug_info { } __packed; /* - * Dump full key (32 byte encrypt/decrypt keys instead of 16 bytes) - * is needed if GCM256 (stronger encryption) negotiated + * Dump variable-sized keys */ struct smb3_full_key_debug_info { - __u64 Suid; + /* INPUT: size of userspace buffer */ + __u32 in_size; + + /* + * INPUT: 0 for current user, otherwise session to dump + * OUTPUT: session id that was dumped + */ + __u64 session_id; __u16 cipher_type; - __u8 auth_key[16]; /* SMB2_NTLMV2_SESSKEY_SIZE */ - __u8 smb3encryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */ - __u8 smb3decryptionkey[32]; /* SMB3_ENC_DEC_KEY_SIZE */ + __u8 session_key_length; + __u8 server_in_key_length; + __u8 server_out_key_length; + __u8 data[]; + /* + * return this struct with the keys appended at the end: + * __u8 session_key[session_key_length]; + * __u8 server_in_key[server_in_key_length]; + * __u8 server_out_key[server_out_key_length]; + */ } __packed; struct smb3_notify { diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h index b53a87db282f..554d64fe171e 100644 --- a/fs/cifs/cifspdu.h +++ b/fs/cifs/cifspdu.h @@ -148,7 +148,8 @@ #define SMB3_SIGN_KEY_SIZE (16) /* - * Size of the smb3 encryption/decryption keys + * Size of the smb3 encryption/decryption key storage. + * This size is big enough to store any cipher key types. 
*/ #define SMB3_ENC_DEC_KEY_SIZE (32) diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index 28ec8d7c521a..d67d281ab863 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -33,6 +33,7 @@ #include "cifsfs.h" #include "cifs_ioctl.h" #include "smb2proto.h" +#include "smb2glob.h" #include <linux/btrfs.h> static long cifs_ioctl_query_info(unsigned int xid, struct file *filep, @@ -214,48 +215,112 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg) return 0; } -static int cifs_dump_full_key(struct cifs_tcon *tcon, unsigned long arg) +static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug_info __user *in) { - struct smb3_full_key_debug_info pfull_key_inf; - __u64 suid; - struct list_head *tmp; + struct smb3_full_key_debug_info out; struct cifs_ses *ses; + int rc = 0; bool found = false; + u8 __user *end; - if (!smb3_encryption_required(tcon)) - return -EOPNOTSUPP; + if (!smb3_encryption_required(tcon)) { + rc = -EOPNOTSUPP; + goto out; + } + + /* copy user input into our output buffer */ + if (copy_from_user(&out, in, sizeof(out))) { + rc = -EINVAL; + goto out; + } + + if (!out.session_id) { + /* if ses id is 0, use current user session */ + ses = tcon->ses; + } else { + /* otherwise if a session id is given, look for it in all our sessions */ + struct cifs_ses *ses_it = NULL; + struct TCP_Server_Info *server_it = NULL; - ses = tcon->ses; /* default to user id for current user */ - if (get_user(suid, (__u64 __user *)arg)) - suid = 0; - if (suid) { - /* search to see if there is a session with a matching SMB UID */ spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp, &tcon->ses->server->smb_ses_list) { - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); - if (ses->Suid == suid) { - found = true; - break; + list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) { + list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) { + if (ses_it->Suid == out.session_id) { + ses = ses_it; + /* + * since we are using the session outside the crit + * section, we need to make sure it won't be released + * so increment its refcount + */ + ses->ses_count++; + found = true; + goto search_end; + } } } +search_end: spin_unlock(&cifs_tcp_ses_lock); - if (found == false) - return -EINVAL; - } /* else uses default user's SMB UID (ie current user) */ - - pfull_key_inf.cipher_type = le16_to_cpu(ses->server->cipher_type); - pfull_key_inf.Suid = ses->Suid; - memcpy(pfull_key_inf.auth_key, ses->auth_key.response, - 16 /* SMB2_NTLMV2_SESSKEY_SIZE */); - memcpy(pfull_key_inf.smb3decryptionkey, ses->smb3decryptionkey, - 32 /* SMB3_ENC_DEC_KEY_SIZE */); - memcpy(pfull_key_inf.smb3encryptionkey, - ses->smb3encryptionkey, 32 /* SMB3_ENC_DEC_KEY_SIZE */); - if (copy_to_user((void __user *)arg, &pfull_key_inf, - sizeof(struct smb3_full_key_debug_info))) - return -EFAULT; + if (!found) { + rc = -ENOENT; + goto out; + } + } - return 0; + switch (ses->server->cipher_type) { + case SMB2_ENCRYPTION_AES128_CCM: + case SMB2_ENCRYPTION_AES128_GCM: + out.session_key_length = CIFS_SESS_KEY_SIZE; + out.server_in_key_length = out.server_out_key_length = SMB3_GCM128_CRYPTKEY_SIZE; + break; + case SMB2_ENCRYPTION_AES256_CCM: + case SMB2_ENCRYPTION_AES256_GCM: + out.session_key_length = CIFS_SESS_KEY_SIZE; + out.server_in_key_length = out.server_out_key_length = SMB3_GCM256_CRYPTKEY_SIZE; + break; + default: + rc = -EOPNOTSUPP; + goto out; + } + + /* check if user buffer is big enough to store all the keys */ + if (out.in_size < sizeof(out) + out.session_key_length + 
out.server_in_key_length + + out.server_out_key_length) { + rc = -ENOBUFS; + goto out; + } + + out.session_id = ses->Suid; + out.cipher_type = le16_to_cpu(ses->server->cipher_type); + + /* overwrite user input with our output */ + if (copy_to_user(in, &out, sizeof(out))) { + rc = -EINVAL; + goto out; + } + + /* append all the keys at the end of the user buffer */ + end = in->data; + if (copy_to_user(end, ses->auth_key.response, out.session_key_length)) { + rc = -EINVAL; + goto out; + } + end += out.session_key_length; + + if (copy_to_user(end, ses->smb3encryptionkey, out.server_in_key_length)) { + rc = -EINVAL; + goto out; + } + end += out.server_in_key_length; + + if (copy_to_user(end, ses->smb3decryptionkey, out.server_out_key_length)) { + rc = -EINVAL; + goto out; + } + +out: + if (found) + cifs_put_smb_ses(ses); + return rc; } long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) @@ -371,6 +436,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) rc = -EOPNOTSUPP; break; case CIFS_DUMP_KEY: + /* + * Dump encryption keys. This is an old ioctl that only + * handles AES-128-{CCM,GCM}. + */ if (pSMBFile == NULL) break; if (!capable(CAP_SYS_ADMIN)) { @@ -398,11 +467,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) else rc = 0; break; - /* - * Dump full key (32 bytes instead of 16 bytes) is - * needed if GCM256 (stronger encryption) negotiated - */ case CIFS_DUMP_FULL_KEY: + /* + * Dump encryption keys (handles any key sizes) + */ if (pSMBFile == NULL) break; if (!capable(CAP_SYS_ADMIN)) { @@ -410,8 +478,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) break; } tcon = tlink_tcon(pSMBFile->tlink); - rc = cifs_dump_full_key(tcon, arg); - + rc = cifs_dump_full_key(tcon, (void __user *)arg); break; case CIFS_IOC_NOTIFY: if (!S_ISDIR(inode->i_mode)) { diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 9f24eb88297a..c205f93e0a10 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -958,6 +958,13 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses) /* Internal types */ server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES; + /* + * SMB3.0 supports only 1 cipher and doesn't have a encryption neg context + * Set the cipher type manually. 
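/*
 * A minimal userspace sketch of how the reworked smb3_full_key_debug_info
 * layout above might be consumed (illustrative, not part of the patch).
 * It assumes only what the hunks show: the CIFS_DUMP_FULL_KEY ioctl from
 * fs/cifs/cifs_ioctl.h and a descriptor "fd" opened on a file in an
 * encrypted SMB3 mount. The buffer is sized for the largest keys the
 * handler above can return (16-byte session key, two 32-byte GCM256 keys),
 * in_size tells the kernel how much room it has, and session_id == 0
 * selects the current user's session:
 *
 *	size_t len = sizeof(struct smb3_full_key_debug_info) + 16 + 32 + 32;
 *	struct smb3_full_key_debug_info *info = calloc(1, len);
 *
 *	info->in_size = len;
 *	info->session_id = 0;
 *	if (ioctl(fd, CIFS_DUMP_FULL_KEY, info) == 0) {
 *		__u8 *session_key = info->data;
 *		__u8 *server_in_key = session_key + info->session_key_length;
 *		__u8 *server_out_key = server_in_key + info->server_in_key_length;
 *	}
 *
 * On success the three keys are appended to the struct in the order
 * session_key, server_in_key, server_out_key; a buffer too small to hold
 * them gets -ENOBUFS back, per the in_size check above.
 */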
+ */ + if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) + server->cipher_type = SMB2_ENCRYPTION_AES128_CCM; + security_blob = smb2_get_data_area_len(&blob_offset, &blob_length, (struct smb2_sync_hdr *)rsp); /* diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h index d6df908dccad..dafcb6ab050d 100644 --- a/fs/cifs/trace.h +++ b/fs/cifs/trace.h @@ -12,6 +12,11 @@ #include <linux/tracepoint.h> +/* + * Please use this 3-part article as a reference for writing new tracepoints: + * https://lwn.net/Articles/379903/ + */ + /* For logging errors in read or write */ DECLARE_EVENT_CLASS(smb3_rw_err_class, TP_PROTO(unsigned int xid, @@ -529,16 +534,16 @@ DECLARE_EVENT_CLASS(smb3_exit_err_class, TP_ARGS(xid, func_name, rc), TP_STRUCT__entry( __field(unsigned int, xid) - __field(const char *, func_name) + __string(func_name, func_name) __field(int, rc) ), TP_fast_assign( __entry->xid = xid; - __entry->func_name = func_name; + __assign_str(func_name, func_name); __entry->rc = rc; ), TP_printk("\t%s: xid=%u rc=%d", - __entry->func_name, __entry->xid, __entry->rc) + __get_str(func_name), __entry->xid, __entry->rc) ) #define DEFINE_SMB3_EXIT_ERR_EVENT(name) \ @@ -583,14 +588,14 @@ DECLARE_EVENT_CLASS(smb3_enter_exit_class, TP_ARGS(xid, func_name), TP_STRUCT__entry( __field(unsigned int, xid) - __field(const char *, func_name) + __string(func_name, func_name) ), TP_fast_assign( __entry->xid = xid; - __entry->func_name = func_name; + __assign_str(func_name, func_name); ), TP_printk("\t%s: xid=%u", - __entry->func_name, __entry->xid) + __get_str(func_name), __entry->xid) ) #define DEFINE_SMB3_ENTER_EXIT_EVENT(name) \ @@ -857,16 +862,16 @@ DECLARE_EVENT_CLASS(smb3_reconnect_class, TP_STRUCT__entry( __field(__u64, currmid) __field(__u64, conn_id) - __field(char *, hostname) + __string(hostname, hostname) ), TP_fast_assign( __entry->currmid = currmid; __entry->conn_id = conn_id; - __entry->hostname = hostname; + __assign_str(hostname, hostname); ), TP_printk("conn_id=0x%llx server=%s current_mid=%llu", __entry->conn_id, - __entry->hostname, + __get_str(hostname), __entry->currmid) ) @@ -891,7 +896,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class, TP_STRUCT__entry( __field(__u64, currmid) __field(__u64, conn_id) - __field(char *, hostname) + __string(hostname, hostname) __field(int, credits) __field(int, credits_to_add) __field(int, in_flight) @@ -899,7 +904,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class, TP_fast_assign( __entry->currmid = currmid; __entry->conn_id = conn_id; - __entry->hostname = hostname; + __assign_str(hostname, hostname); __entry->credits = credits; __entry->credits_to_add = credits_to_add; __entry->in_flight = in_flight; @@ -907,7 +912,7 @@ DECLARE_EVENT_CLASS(smb3_credit_class, TP_printk("conn_id=0x%llx server=%s current_mid=%llu " "credits=%d credit_change=%d in_flight=%d", __entry->conn_id, - __entry->hostname, + __get_str(hostname), __entry->currmid, __entry->credits, __entry->credits_to_add, diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 1d252164d97b..8129a430d789 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -45,10 +45,13 @@ static unsigned int debugfs_allow __ro_after_init = DEFAULT_DEBUGFS_ALLOW_BITS; static int debugfs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, struct iattr *ia) { - int ret = security_locked_down(LOCKDOWN_DEBUGFS); + int ret; - if (ret && (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))) - return ret; + if (ia->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) { + ret = 
security_locked_down(LOCKDOWN_DEBUGFS); + if (ret) + return ret; + } return simple_setattr(&init_user_ns, dentry, ia); } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index a0b542d84cd9..493a83e3f590 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -911,8 +911,11 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) current->backing_dev_info = inode_to_bdi(inode); buffered = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops); current->backing_dev_info = NULL; - if (unlikely(buffered <= 0)) + if (unlikely(buffered <= 0)) { + if (!ret) + ret = buffered; goto out_unlock; + } /* * We need to ensure that the page cache pages are written to diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index ea7fc5c641c7..d9cb261f55b0 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -582,6 +582,16 @@ out_locked: spin_unlock(&gl->gl_lockref.lock); } +static bool is_system_glock(struct gfs2_glock *gl) +{ + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); + + if (gl == m_ip->i_gl) + return true; + return false; +} + /** * do_xmote - Calls the DLM to change the state of a lock * @gl: The lock state @@ -671,17 +681,25 @@ skip_inval: * to see sd_log_error and withdraw, and in the meantime, requeue the * work for later. * + * We make a special exception for some system glocks, such as the + * system statfs inode glock, which needs to be granted before the + * gfs2_quotad daemon can exit, and that exit needs to finish before + * we can unmount the withdrawn file system. + * * However, if we're just unlocking the lock (say, for unmount, when * gfs2_gl_hash_clear calls clear_glock) and recovery is complete * then it's okay to tell dlm to unlock it. */ if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp))) gfs2_withdraw_delayed(sdp); - if (glock_blocked_by_withdraw(gl)) { - if (target != LM_ST_UNLOCKED || - test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) { + if (glock_blocked_by_withdraw(gl) && + (target != LM_ST_UNLOCKED || + test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags))) { + if (!is_system_glock(gl)) { gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD); goto out; + } else { + clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); } } @@ -1466,9 +1484,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh) glock_blocked_by_withdraw(gl) && gh->gh_gl != sdp->sd_jinode_gl) { sdp->sd_glock_dqs_held++; + spin_unlock(&gl->gl_lockref.lock); might_sleep(); wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY, TASK_UNINTERRUPTIBLE); + spin_lock(&gl->gl_lockref.lock); } if (gh->gh_flags & GL_NOCACHE) handle_callback(gl, LM_ST_UNLOCKED, 0, false); @@ -1775,6 +1795,7 @@ __acquires(&lru_lock) while(!list_empty(list)) { gl = list_first_entry(list, struct gfs2_glock, gl_lru); list_del_init(&gl->gl_lru); + clear_bit(GLF_LRU, &gl->gl_flags); if (!spin_trylock(&gl->gl_lockref.lock)) { add_back_to_lru: list_add(&gl->gl_lru, &lru_list); @@ -1820,7 +1841,6 @@ static long gfs2_scan_glock_lru(int nr) if (!test_bit(GLF_LOCK, &gl->gl_flags)) { list_move(&gl->gl_lru, &dispose); atomic_dec(&lru_count); - clear_bit(GLF_LRU, &gl->gl_flags); freed++; continue; } diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 454095e9fedf..54d3fbeb3002 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -396,7 +396,7 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) struct timespec64 atime; u16 height, depth; umode_t mode = be32_to_cpu(str->di_mode); - bool is_new = ip->i_inode.i_flags & I_NEW; + bool is_new = ip->i_inode.i_state & I_NEW; if 
(unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) goto corrupt; diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 97d54e581a7b..42c15cfc0821 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -926,10 +926,10 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags) } /** - * ail_drain - drain the ail lists after a withdraw + * gfs2_ail_drain - drain the ail lists after a withdraw * @sdp: Pointer to GFS2 superblock */ -static void ail_drain(struct gfs2_sbd *sdp) +void gfs2_ail_drain(struct gfs2_sbd *sdp) { struct gfs2_trans *tr; @@ -956,6 +956,7 @@ static void ail_drain(struct gfs2_sbd *sdp) list_del(&tr->tr_list); gfs2_trans_free(sdp, tr); } + gfs2_drain_revokes(sdp); spin_unlock(&sdp->sd_ail_lock); } @@ -1162,7 +1163,6 @@ out_withdraw: if (tr && list_empty(&tr->tr_list)) list_add(&tr->tr_list, &sdp->sd_ail1_list); spin_unlock(&sdp->sd_ail_lock); - ail_drain(sdp); /* frees all transactions */ tr = NULL; goto out_end; } diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h index eea58015710e..fc905c2af53c 100644 --- a/fs/gfs2/log.h +++ b/fs/gfs2/log.h @@ -93,5 +93,6 @@ extern int gfs2_logd(void *data); extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl); extern void gfs2_flush_revokes(struct gfs2_sbd *sdp); +extern void gfs2_ail_drain(struct gfs2_sbd *sdp); #endif /* __LOG_DOT_H__ */ diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 221e7118cc3b..8ee05d25dfa6 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -885,7 +885,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) gfs2_log_write_page(sdp, page); } -static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) +void gfs2_drain_revokes(struct gfs2_sbd *sdp) { struct list_head *head = &sdp->sd_log_revokes; struct gfs2_bufdata *bd; @@ -900,6 +900,11 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) } } +static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) +{ + gfs2_drain_revokes(sdp); +} + static void revoke_lo_before_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, int pass) { diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index 31b6dd0d2e5d..f707601597dc 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h @@ -20,6 +20,7 @@ extern void gfs2_log_submit_bio(struct bio **biop, int opf); extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); extern int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, bool keep_cache); +extern void gfs2_drain_revokes(struct gfs2_sbd *sdp); static inline unsigned int buf_limit(struct gfs2_sbd *sdp) { return sdp->sd_ldptrs; diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c index 3e08027a6c81..f4325b44956d 100644 --- a/fs/gfs2/util.c +++ b/fs/gfs2/util.c @@ -131,6 +131,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp) if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc) return; + gfs2_ail_drain(sdp); /* frees all transactions */ inode = sdp->sd_jdesc->jd_inode; ip = GFS2_I(inode); i_gl = ip->i_gl; diff --git a/fs/io-wq.c b/fs/io-wq.c index 5361a9b4b47b..b3e8624a37d0 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -979,13 +979,16 @@ static bool io_task_work_match(struct callback_head *cb, void *data) return cwd->wqe->wq == data; } +void io_wq_exit_start(struct io_wq *wq) +{ + set_bit(IO_WQ_BIT_EXIT, &wq->state); +} + static void io_wq_exit_workers(struct io_wq *wq) { struct callback_head *cb; int node; - set_bit(IO_WQ_BIT_EXIT, &wq->state); - if 
(!wq->task) return; @@ -1003,13 +1006,16 @@ static void io_wq_exit_workers(struct io_wq *wq) struct io_wqe *wqe = wq->wqes[node]; io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL); - spin_lock_irq(&wq->hash->wait.lock); - list_del_init(&wq->wqes[node]->wait.entry); - spin_unlock_irq(&wq->hash->wait.lock); } rcu_read_unlock(); io_worker_ref_put(wq); wait_for_completion(&wq->worker_done); + + for_each_node(node) { + spin_lock_irq(&wq->hash->wait.lock); + list_del_init(&wq->wqes[node]->wait.entry); + spin_unlock_irq(&wq->hash->wait.lock); + } put_task_struct(wq->task); wq->task = NULL; } @@ -1020,8 +1026,6 @@ static void io_wq_destroy(struct io_wq *wq) cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node); - io_wq_exit_workers(wq); - for_each_node(node) { struct io_wqe *wqe = wq->wqes[node]; struct io_cb_cancel_data match = { @@ -1036,16 +1040,13 @@ static void io_wq_destroy(struct io_wq *wq) kfree(wq); } -void io_wq_put(struct io_wq *wq) -{ - if (refcount_dec_and_test(&wq->refs)) - io_wq_destroy(wq); -} - void io_wq_put_and_exit(struct io_wq *wq) { + WARN_ON_ONCE(!test_bit(IO_WQ_BIT_EXIT, &wq->state)); + io_wq_exit_workers(wq); - io_wq_put(wq); + if (refcount_dec_and_test(&wq->refs)) + io_wq_destroy(wq); } static bool io_wq_worker_affinity(struct io_worker *worker, void *data) diff --git a/fs/io-wq.h b/fs/io-wq.h index 0e6d310999e8..af2df0680ee2 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -122,7 +122,7 @@ struct io_wq_data { }; struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data); -void io_wq_put(struct io_wq *wq); +void io_wq_exit_start(struct io_wq *wq); void io_wq_put_and_exit(struct io_wq *wq); void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work); diff --git a/fs/io_uring.c b/fs/io_uring.c index 5f82954004f6..42380ed563c4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -8228,6 +8228,7 @@ static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages, { int i, ret; + imu->acct_pages = 0; for (i = 0; i < nr_pages; i++) { if (!PageCompound(pages[i])) { imu->acct_pages++; @@ -9039,11 +9040,16 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx) struct io_tctx_node *node; unsigned long index; - tctx->io_wq = NULL; xa_for_each(&tctx->xa, index, node) io_uring_del_task_file(index); - if (wq) + if (wq) { + /* + * Must be after io_uring_del_task_file() (removes nodes under + * uring_lock) to avoid race with io_uring_try_cancel_iowq(). 
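/*
 * The io-wq teardown contract implied by the hunks above, spelled out for
 * clarity (illustrative summary, not part of the patch): callers must now
 * flag the exit first and tear the queue down later, so no new workers are
 * spawned while request cancellation is still in progress:
 *
 *	io_wq_exit_start(tctx->io_wq);
 *	...cancel and wait for in-flight requests...
 *	io_wq_put_and_exit(tctx->io_wq);
 *
 * io_wq_put_and_exit() now warns if io_wq_exit_start() was skipped, and
 * the old standalone io_wq_put() is gone.
 */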
+ */ + tctx->io_wq = NULL; io_wq_put_and_exit(wq); + } } static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked) @@ -9078,6 +9084,9 @@ static void io_uring_cancel_sqpoll(struct io_sq_data *sqd) if (!current->io_uring) return; + if (tctx->io_wq) + io_wq_exit_start(tctx->io_wq); + WARN_ON_ONCE(!sqd || sqd->thread != current); atomic_inc(&tctx->in_idle); @@ -9112,6 +9121,9 @@ void __io_uring_cancel(struct files_struct *files) DEFINE_WAIT(wait); s64 inflight; + if (tctx->io_wq) + io_wq_exit_start(tctx->io_wq); + /* make sure overflow events are dropped */ atomic_inc(&tctx->in_idle); do { diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index d158a500c25c..d2103852475f 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -718,7 +718,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo, if (unlikely(!p)) goto out_err; fl->fh_array[i]->size = be32_to_cpup(p++); - if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) { + if (fl->fh_array[i]->size > NFS_MAXFHSIZE) { printk(KERN_ERR "NFS: Too big fh %d received %d\n", i, fl->fh_array[i]->size); goto out_err; diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 93e60e921f92..bc0c698f3350 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -362,7 +362,7 @@ static const struct kernel_param_ops param_ops_nfs_timeout = { .set = param_set_nfs_timeout, .get = param_get_nfs_timeout, }; -#define param_check_nfs_timeout(name, p) __param_check(name, p, int); +#define param_check_nfs_timeout(name, p) __param_check(name, p, int) module_param(nfs_mountpoint_expiry_timeout, nfs_timeout, 0644); MODULE_PARM_DESC(nfs_mountpoint_expiry_timeout, diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 57b3821d975a..a1e5c6b85ded 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -211,7 +211,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) case SEEK_HOLE: case SEEK_DATA: ret = nfs42_proc_llseek(filep, offset, whence); - if (ret != -ENOTSUPP) + if (ret != -EOPNOTSUPP) return ret; fallthrough; default: diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 87d04f2c9385..0cd965882232 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1706,7 +1706,7 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, rcu_read_unlock(); trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0); - if (!signal_pending(current)) { + if (!fatal_signal_pending(current)) { if (schedule_timeout(5*HZ) == 0) status = -EAGAIN; else @@ -3487,7 +3487,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, write_sequnlock(&state->seqlock); trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); - if (signal_pending(current)) + if (fatal_signal_pending(current)) status = -EINTR; else if (schedule_timeout(5*HZ) != 0) diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 6c20b28d9d7c..cf9cc62ec48e 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -1094,15 +1094,16 @@ nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, struct nfs_page *prev = NULL; unsigned int size; - if (mirror->pg_count != 0) { - prev = nfs_list_entry(mirror->pg_list.prev); - } else { + if (list_empty(&mirror->pg_list)) { if (desc->pg_ops->pg_init) desc->pg_ops->pg_init(desc, req); if (desc->pg_error < 0) return 0; mirror->pg_base = req->wb_pgbase; - } + mirror->pg_count = 0; + mirror->pg_recoalesce = 0; + } else + prev = nfs_list_entry(mirror->pg_list.prev); if (desc->pg_maxretrans && req->wb_nio > desc->pg_maxretrans) { if 
(NFS_SERVER(desc->pg_inode)->flags & NFS_MOUNT_SOFTERR) @@ -1127,18 +1128,13 @@ static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) { struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); - if (!list_empty(&mirror->pg_list)) { int error = desc->pg_ops->pg_doio(desc); if (error < 0) desc->pg_error = error; - else + if (list_empty(&mirror->pg_list)) mirror->pg_bytes_written += mirror->pg_count; } - if (list_empty(&mirror->pg_list)) { - mirror->pg_count = 0; - mirror->pg_base = 0; - } } static void @@ -1227,10 +1223,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) do { list_splice_init(&mirror->pg_list, &head); - mirror->pg_bytes_written -= mirror->pg_count; - mirror->pg_count = 0; - mirror->pg_base = 0; - mirror->pg_recoalesce = 0; while (!list_empty(&head)) { struct nfs_page *req; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 03e0b34c4a64..2c01ee805306 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1317,6 +1317,11 @@ _pnfs_return_layout(struct inode *ino) { struct pnfs_layout_hdr *lo = NULL; struct nfs_inode *nfsi = NFS_I(ino); + struct pnfs_layout_range range = { + .iomode = IOMODE_ANY, + .offset = 0, + .length = NFS4_MAX_UINT64, + }; LIST_HEAD(tmp_list); const struct cred *cred; nfs4_stateid stateid; @@ -1344,16 +1349,10 @@ _pnfs_return_layout(struct inode *ino) } valid_layout = pnfs_layout_is_valid(lo); pnfs_clear_layoutcommit(ino, &tmp_list); - pnfs_mark_matching_lsegs_return(lo, &tmp_list, NULL, 0); + pnfs_mark_matching_lsegs_return(lo, &tmp_list, &range, 0); - if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) { - struct pnfs_layout_range range = { - .iomode = IOMODE_ANY, - .offset = 0, - .length = NFS4_MAX_UINT64, - }; + if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range); - } /* Don't send a LAYOUTRETURN if list was initially empty */ if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) || @@ -2678,7 +2677,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range); void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) { - u64 rd_size = req->wb_bytes; + u64 rd_size; pnfs_generic_pg_check_layout(pgio); pnfs_generic_pg_check_range(pgio, req); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 19a212f9725d..fe58525cfed4 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1379,7 +1379,7 @@ static const struct kernel_param_ops param_ops_portnr = { .set = param_set_portnr, .get = param_get_uint, }; -#define param_check_portnr(name, p) __param_check(name, p, unsigned int); +#define param_check_portnr(name, p) __param_check(name, p, unsigned int) module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644); module_param_named(callback_nr_threads, nfs_callback_nr_threads, ushort, 0644); diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 71fefb30e015..be5b6d2c01e7 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -424,11 +424,18 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, * events generated by the listener process itself, without disclosing * the pids of other processes. */ - if (!capable(CAP_SYS_ADMIN) && + if (FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) && task_tgid(current) != event->pid) metadata.pid = 0; - if (path && path->mnt && path->dentry) { + /* + * For now, fid mode is required for an unprivileged listener and + * fid mode does not report fd in events. 
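/*
 * Userspace view of the unprivileged fanotify support above (illustrative
 * sketch, not part of the patch; the watched path is an assumption of the
 * example): an ordinary user may create a group only in fid mode, may only
 * place inode marks, and receives events with pid == 0 for processes other
 * than itself and with no open fd:
 *
 *	#include <sys/fanotify.h>
 *	#include <fcntl.h>
 *
 *	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_FID, O_RDONLY);
 *	fanotify_mark(fd, FAN_MARK_ADD, FAN_OPEN, AT_FDCWD, "/tmp/watched");
 *
 * FAN_MARK_MOUNT or FAN_MARK_FILESYSTEM on such a group fails with EPERM,
 * as does any of the FANOTIFY_ADMIN_INIT_FLAGS at init time.
 */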
Keep this check anyway + * for safety in case fid mode requirement is relaxed in the future + * to allow unprivileged listener to get events with no fd and no fid. + */ + if (!FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV) && + path && path->mnt && path->dentry) { fd = create_fd(group, path, &f); if (fd < 0) return fd; @@ -1040,6 +1047,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) int f_flags, fd; unsigned int fid_mode = flags & FANOTIFY_FID_BITS; unsigned int class = flags & FANOTIFY_CLASS_BITS; + unsigned int internal_flags = 0; pr_debug("%s: flags=%x event_f_flags=%x\n", __func__, flags, event_f_flags); @@ -1053,6 +1061,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) */ if ((flags & FANOTIFY_ADMIN_INIT_FLAGS) || !fid_mode) return -EPERM; + + /* + * Setting the internal flag FANOTIFY_UNPRIV on the group + * prevents setting mount/filesystem marks on this group and + * prevents reporting pid and open fd in events. + */ + internal_flags |= FANOTIFY_UNPRIV; } #ifdef CONFIG_AUDITSYSCALL @@ -1105,7 +1120,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) goto out_destroy_group; } - group->fanotify_data.flags = flags; + group->fanotify_data.flags = flags | internal_flags; group->memcg = get_mem_cgroup_from_mm(current->mm); group->fanotify_data.merge_hash = fanotify_alloc_merge_hash(); @@ -1305,11 +1320,13 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, group = f.file->private_data; /* - * An unprivileged user is not allowed to watch a mount point nor - * a filesystem. + * An unprivileged user is not allowed to setup mount nor filesystem + * marks. This also includes setting up such marks by a group that + * was initialized by an unprivileged user. */ ret = -EPERM; - if (!capable(CAP_SYS_ADMIN) && + if ((!capable(CAP_SYS_ADMIN) || + FAN_GROUP_FLAG(group, FANOTIFY_UNPRIV)) && mark_type != FAN_MARK_INODE) goto fput_and_out; @@ -1460,6 +1477,7 @@ static int __init fanotify_user_setup(void) max_marks = clamp(max_marks, FANOTIFY_OLD_DEFAULT_MAX_MARKS, FANOTIFY_DEFAULT_MAX_USER_MARKS); + BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9); diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c index a712b2aaa9ac..57f0d5d9f934 100644 --- a/fs/notify/fdinfo.c +++ b/fs/notify/fdinfo.c @@ -144,7 +144,7 @@ void fanotify_show_fdinfo(struct seq_file *m, struct file *f) struct fsnotify_group *group = f->private_data; seq_printf(m, "fanotify flags:%x event-flags:%x\n", - group->fanotify_data.flags, + group->fanotify_data.flags & FANOTIFY_INIT_FLAGS, group->fanotify_data.f_flags); show_fdinfo(m, f, fanotify_fdinfo); diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c index e32a1833d523..bbfea8022a3b 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.c +++ b/fs/xfs/libxfs/xfs_ag_resv.c @@ -325,10 +325,22 @@ out: error2 = xfs_alloc_pagf_init(mp, tp, pag->pag_agno, 0); if (error2) return error2; - ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved + - xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved <= - pag->pagf_freeblks + pag->pagf_flcount); + + /* + * If there isn't enough space in the AG to satisfy the + * reservation, let the caller know that there wasn't enough + * space. 
Callers are responsible for deciding what to do + * next, since (in theory) we can stumble along with + * insufficient reservation if data blocks are being freed to + * replenish the AG's free space. + */ + if (!error && + xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved + + xfs_perag_resv(pag, XFS_AG_RESV_RMAPBT)->ar_reserved > + pag->pagf_freeblks + pag->pagf_flcount) + error = -ENOSPC; } + return error; } diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 7e3b9b01431e..a3e0e6f672d6 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -605,7 +605,6 @@ xfs_bmap_btree_to_extents( ASSERT(cur); ASSERT(whichfork != XFS_COW_FORK); - ASSERT(!xfs_need_iread_extents(ifp)); ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE); ASSERT(be16_to_cpu(rblock->bb_level) == 1); ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1); @@ -5350,7 +5349,6 @@ __xfs_bunmapi( xfs_fsblock_t sum; xfs_filblks_t len = *rlen; /* length to unmap in file */ xfs_fileoff_t max_len; - xfs_agnumber_t prev_agno = NULLAGNUMBER, agno; xfs_fileoff_t end; struct xfs_iext_cursor icur; bool done = false; @@ -5442,16 +5440,6 @@ __xfs_bunmapi( del = got; wasdel = isnullstartblock(del.br_startblock); - /* - * Make sure we don't touch multiple AGF headers out of order - * in a single transaction, as that could cause AB-BA deadlocks. - */ - if (!wasdel && !isrt) { - agno = XFS_FSB_TO_AGNO(mp, del.br_startblock); - if (prev_agno != NULLAGNUMBER && prev_agno > agno) - break; - prev_agno = agno; - } if (got.br_startoff < start) { del.br_startoff = start; del.br_blockcount -= start - got.br_startoff; diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index 5c9a7440d9e4..f3254a4f4cb4 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -559,8 +559,17 @@ xfs_dinode_calc_crc( /* * Validate di_extsize hint. * - * The rules are documented at xfs_ioctl_setattr_check_extsize(). - * These functions must be kept in sync with each other. + * 1. Extent size hint is only valid for directories and regular files. + * 2. FS_XFLAG_EXTSIZE is only valid for regular files. + * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories. + * 4. Hint cannot be larger than MAXTEXTLEN. + * 5. Can be changed on directories at any time. + * 6. Hint value of 0 turns off hints, clears inode flags. + * 7. Extent size must be a multiple of the appropriate block size. + * For realtime files, this is the rt extent size. + * 8. For non-realtime files, the extent size hint must be limited + * to half the AG size to avoid alignment extending the extent beyond the + * limits of the AG. */ xfs_failaddr_t xfs_inode_validate_extsize( @@ -580,6 +589,28 @@ xfs_inode_validate_extsize( inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT); extsize_bytes = XFS_FSB_TO_B(mp, extsize); + /* + * This comment describes a historic gap in this verifier function. + * + * On older kernels, the extent size hint verifier doesn't check that + * the extent size hint is an integer multiple of the realtime extent + * size on a directory with both RTINHERIT and EXTSZINHERIT flags set. + * The verifier has always enforced the alignment rule for regular + * files with the REALTIME flag set. + * + * If a directory with a misaligned extent size hint is allowed to + * propagate that hint into a new regular realtime file, the result + * is that the inode cluster buffer verifier will trigger a corruption + * shutdown the next time it is run. 
+ * + * Unfortunately, there could be filesystems with these misconfigured + * directories in the wild, so we cannot add a check to this verifier + * at this time because that will result a new source of directory + * corruption errors when reading an existing filesystem. Instead, we + * permit the misconfiguration to pass through the verifiers so that + * callers of this function can correct and mitigate externally. + */ + if (rt_flag) blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog; else @@ -616,8 +647,15 @@ xfs_inode_validate_extsize( /* * Validate di_cowextsize hint. * - * The rules are documented at xfs_ioctl_setattr_check_cowextsize(). - * These functions must be kept in sync with each other. + * 1. CoW extent size hint can only be set if reflink is enabled on the fs. + * The inode does not have to have any shared blocks, but it must be a v3. + * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files; + * for a directory, the hint is propagated to new files. + * 3. Can be changed on files & directories at any time. + * 4. Hint value of 0 turns off hints, clears inode flags. + * 5. Extent size must be a multiple of the appropriate block size. + * 6. The extent size hint must be limited to half the AG size to avoid + * alignment extending the extent beyond the limits of the AG. */ xfs_failaddr_t xfs_inode_validate_cowextsize( diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c index 78324e043e25..8d595a5c4abd 100644 --- a/fs/xfs/libxfs/xfs_trans_inode.c +++ b/fs/xfs/libxfs/xfs_trans_inode.c @@ -143,6 +143,23 @@ xfs_trans_log_inode( } /* + * Inode verifiers on older kernels don't check that the extent size + * hint is an integer multiple of the rt extent size on a directory + * with both rtinherit and extszinherit flags set. If we're logging a + * directory that is misconfigured in this way, clear the hint. + */ + if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) && + (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) && + (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) { + xfs_info_once(ip->i_mount, + "Correcting misaligned extent size hint in inode 0x%llx.", ip->i_ino); + ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE | + XFS_DIFLAG_EXTSZINHERIT); + ip->i_extsize = 0; + flags |= XFS_ILOG_CORE; + } + + /* * Record the specific change for fdatasync optimisation. This allows * fdatasync to skip log forces for inodes that are only timestamp * dirty. diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 0369eb22c1bb..e4c2da4566f1 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -690,6 +690,7 @@ xfs_inode_inherit_flags( const struct xfs_inode *pip) { unsigned int di_flags = 0; + xfs_failaddr_t failaddr; umode_t mode = VFS_I(ip)->i_mode; if (S_ISDIR(mode)) { @@ -729,6 +730,24 @@ xfs_inode_inherit_flags( di_flags |= XFS_DIFLAG_FILESTREAM; ip->i_diflags |= di_flags; + + /* + * Inode verifiers on older kernels only check that the extent size + * hint is an integer multiple of the rt extent size on realtime files. + * They did not check the hint alignment on a directory with both + * rtinherit and extszinherit flags set. If the misaligned hint is + * propagated from a directory into a new realtime file, new file + * allocations will fail due to math errors in the rt allocator and/or + * trip the verifiers. Validate the hint settings in the new file so + * that we don't let broken hints propagate. 
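/*
 * Worked illustration of "misaligned" in this context (the numbers are an
 * assumption of the example, not from the patch): with 4 KiB filesystem
 * blocks and a realtime extent size of 4 blocks (16 KiB), a directory
 * carrying XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_EXTSZINHERIT needs an extent
 * size hint that is a whole multiple of 16 KiB. A hint of 6 blocks (24 KiB)
 * is misaligned because
 *
 *	i_extsize % sb_rextsize == 6 % 4 == 2,
 *
 * so rather than being copied into the new realtime file, the hint and its
 * flags are cleared by the validation just below, and the
 * xfs_trans_log_inode() hunk above clears the stale hint on the directory
 * itself the next time that inode is logged.
 */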
+ */ + failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize, + VFS_I(ip)->i_mode, ip->i_diflags); + if (failaddr) { + ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE | + XFS_DIFLAG_EXTSZINHERIT); + ip->i_extsize = 0; + } } /* Propagate di_flags2 from a parent inode to a child inode. */ @@ -737,12 +756,22 @@ xfs_inode_inherit_flags2( struct xfs_inode *ip, const struct xfs_inode *pip) { + xfs_failaddr_t failaddr; + if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) { ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE; ip->i_cowextsize = pip->i_cowextsize; } if (pip->i_diflags2 & XFS_DIFLAG2_DAX) ip->i_diflags2 |= XFS_DIFLAG2_DAX; + + /* Don't let invalid cowextsize hints propagate. */ + failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize, + VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2); + if (failaddr) { + ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE; + ip->i_cowextsize = 0; + } } /* diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 3925bfcb2365..1fe4c1fc0aea 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1267,20 +1267,8 @@ out_error: } /* - * extent size hint validation is somewhat cumbersome. Rules are: - * - * 1. extent size hint is only valid for directories and regular files - * 2. FS_XFLAG_EXTSIZE is only valid for regular files - * 3. FS_XFLAG_EXTSZINHERIT is only valid for directories. - * 4. can only be changed on regular files if no extents are allocated - * 5. can be changed on directories at any time - * 6. extsize hint of 0 turns off hints, clears inode flags. - * 7. Extent size must be a multiple of the appropriate block size. - * 8. for non-realtime files, the extent size hint must be limited - * to half the AG size to avoid alignment extending the extent beyond the - * limits of the AG. - * - * Please keep this function in sync with xfs_scrub_inode_extsize. + * Validate a proposed extent size hint. For regular files, the hint can only + * be changed if no extents are allocated. */ static int xfs_ioctl_setattr_check_extsize( @@ -1288,86 +1276,65 @@ xfs_ioctl_setattr_check_extsize( struct fileattr *fa) { struct xfs_mount *mp = ip->i_mount; - xfs_extlen_t size; - xfs_fsblock_t extsize_fsb; + xfs_failaddr_t failaddr; + uint16_t new_diflags; if (!fa->fsx_valid) return 0; if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents && - ((ip->i_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize)) + XFS_FSB_TO_B(mp, ip->i_extsize) != fa->fsx_extsize) return -EINVAL; - if (fa->fsx_extsize == 0) - return 0; - - extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize); - if (extsize_fsb > MAXEXTLEN) + if (fa->fsx_extsize & mp->m_blockmask) return -EINVAL; - if (XFS_IS_REALTIME_INODE(ip) || - (fa->fsx_xflags & FS_XFLAG_REALTIME)) { - size = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog; - } else { - size = mp->m_sb.sb_blocksize; - if (extsize_fsb > mp->m_sb.sb_agblocks / 2) + new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags); + + /* + * Inode verifiers on older kernels don't check that the extent size + * hint is an integer multiple of the rt extent size on a directory + * with both rtinherit and extszinherit flags set. Don't let sysadmins + * misconfigure directories. 
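/*
 * A hedged userspace sketch of how this check is reached via
 * FS_IOC_FSSETXATTR (the descriptor, sizes and geometry are assumptions of
 * the example, not from the patch): on a filesystem with a 16 KiB realtime
 * extent size, marking a directory rtinherit+extszinherit with a 24 KiB
 * hint is now rejected:
 *
 *	struct fsxattr fsx;
 *
 *	ioctl(dirfd, FS_IOC_FSGETXATTR, &fsx);
 *	fsx.fsx_xflags |= FS_XFLAG_RTINHERIT | FS_XFLAG_EXTSZINHERIT;
 *	fsx.fsx_extsize = 24 * 1024;
 *	ioctl(dirfd, FS_IOC_FSSETXATTR, &fsx);
 *
 * The second ioctl now fails with EINVAL because 24 KiB is not a multiple
 * of the 16 KiB realtime extent size; a proper multiple (e.g. 32 KiB)
 * still succeeds, and values that are not even a multiple of the block
 * size are caught by the new m_blockmask test as well.
 */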
+ */ + if ((new_diflags & XFS_DIFLAG_RTINHERIT) && + (new_diflags & XFS_DIFLAG_EXTSZINHERIT)) { + unsigned int rtextsize_bytes; + + rtextsize_bytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize); + if (fa->fsx_extsize % rtextsize_bytes) return -EINVAL; } - if (fa->fsx_extsize % size) - return -EINVAL; - - return 0; + failaddr = xfs_inode_validate_extsize(ip->i_mount, + XFS_B_TO_FSB(mp, fa->fsx_extsize), + VFS_I(ip)->i_mode, new_diflags); + return failaddr != NULL ? -EINVAL : 0; } -/* - * CoW extent size hint validation rules are: - * - * 1. CoW extent size hint can only be set if reflink is enabled on the fs. - * The inode does not have to have any shared blocks, but it must be a v3. - * 2. FS_XFLAG_COWEXTSIZE is only valid for directories and regular files; - * for a directory, the hint is propagated to new files. - * 3. Can be changed on files & directories at any time. - * 4. CoW extsize hint of 0 turns off hints, clears inode flags. - * 5. Extent size must be a multiple of the appropriate block size. - * 6. The extent size hint must be limited to half the AG size to avoid - * alignment extending the extent beyond the limits of the AG. - * - * Please keep this function in sync with xfs_scrub_inode_cowextsize. - */ static int xfs_ioctl_setattr_check_cowextsize( struct xfs_inode *ip, struct fileattr *fa) { struct xfs_mount *mp = ip->i_mount; - xfs_extlen_t size; - xfs_fsblock_t cowextsize_fsb; + xfs_failaddr_t failaddr; + uint64_t new_diflags2; + uint16_t new_diflags; if (!fa->fsx_valid) return 0; - if (!(fa->fsx_xflags & FS_XFLAG_COWEXTSIZE)) - return 0; - - if (!xfs_sb_version_hasreflink(&ip->i_mount->m_sb)) + if (fa->fsx_cowextsize & mp->m_blockmask) return -EINVAL; - if (fa->fsx_cowextsize == 0) - return 0; + new_diflags = xfs_flags2diflags(ip, fa->fsx_xflags); + new_diflags2 = xfs_flags2diflags2(ip, fa->fsx_xflags); - cowextsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_cowextsize); - if (cowextsize_fsb > MAXEXTLEN) - return -EINVAL; - - size = mp->m_sb.sb_blocksize; - if (cowextsize_fsb > mp->m_sb.sb_agblocks / 2) - return -EINVAL; - - if (fa->fsx_cowextsize % size) - return -EINVAL; - - return 0; + failaddr = xfs_inode_validate_cowextsize(ip->i_mount, + XFS_B_TO_FSB(mp, fa->fsx_cowextsize), + VFS_I(ip)->i_mode, new_diflags, new_diflags2); + return failaddr != NULL ? -EINVAL : 0; } static int diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h index 3c392b1512ac..7ec1a9207517 100644 --- a/fs/xfs/xfs_message.h +++ b/fs/xfs/xfs_message.h @@ -73,6 +73,8 @@ do { \ xfs_printk_once(xfs_warn, dev, fmt, ##__VA_ARGS__) #define xfs_notice_once(dev, fmt, ...) \ xfs_printk_once(xfs_notice, dev, fmt, ##__VA_ARGS__) +#define xfs_info_once(dev, fmt, ...) \ + xfs_printk_once(xfs_info, dev, fmt, ##__VA_ARGS__) void assfail(struct xfs_mount *mp, char *expr, char *f, int l); void asswarn(struct xfs_mount *mp, char *expr, char *f, int l); diff --git a/include/linux/device.h b/include/linux/device.h index 38a2071cf776..f1a00040fa53 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -570,7 +570,7 @@ struct device { * @flags: Link flags. * @rpm_active: Whether or not the consumer device is runtime-PM-active. * @kref: Count repeated addition of the same link. - * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. + * @rm_work: Work structure used for removing the link. * @supplier_preactivated: Supplier has been made active before consumer probe. 
*/ struct device_link { @@ -583,9 +583,7 @@ struct device_link { u32 flags; refcount_t rpm_active; struct kref kref; -#ifdef CONFIG_SRCU - struct rcu_head rcu_head; -#endif + struct work_struct rm_work; bool supplier_preactivated; /* Owned by consumer probe. */ }; diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index bad41bcb25df..a16dbeced152 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -51,6 +51,10 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */ #define FANOTIFY_INIT_FLAGS (FANOTIFY_ADMIN_INIT_FLAGS | \ FANOTIFY_USER_INIT_FLAGS) +/* Internal group flags */ +#define FANOTIFY_UNPRIV 0x80000000 +#define FANOTIFY_INTERNAL_GROUP_FLAGS (FANOTIFY_UNPRIV) + #define FANOTIFY_MARK_TYPE_BITS (FAN_MARK_INODE | FAN_MARK_MOUNT | \ FAN_MARK_FILESYSTEM) diff --git a/include/linux/fb.h b/include/linux/fb.h index a8dccd23c249..ecfbcc0553a5 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -659,6 +659,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch, /* drivers/video/fb_defio.c */ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma); extern void fb_deferred_io_init(struct fb_info *info); +extern void fb_deferred_io_open(struct fb_info *info, + struct inode *inode, + struct file *file); extern void fb_deferred_io_cleanup(struct fb_info *info); extern int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync); diff --git a/include/linux/hid.h b/include/linux/hid.h index 271021e20a3f..10e922cee4eb 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -1167,8 +1167,7 @@ static inline void hid_hw_wait(struct hid_device *hdev) */ static inline u32 hid_report_len(struct hid_report *report) { - /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ - return ((report->size - 1) >> 3) + 1 + (report->id > 0); + return DIV_ROUND_UP(report->size, 8) + (report->id > 0); } int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size, diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 232e1bd507a7..9b0487c88571 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -332,12 +332,30 @@ static inline struct host1x_device *to_host1x_device(struct device *dev) int host1x_device_init(struct host1x_device *device); int host1x_device_exit(struct host1x_device *device); -int __host1x_client_register(struct host1x_client *client, - struct lock_class_key *key); -#define host1x_client_register(class) \ - ({ \ - static struct lock_class_key __key; \ - __host1x_client_register(class, &__key); \ +void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key); +void host1x_client_exit(struct host1x_client *client); + +#define host1x_client_init(client) \ + ({ \ + static struct lock_class_key __key; \ + __host1x_client_init(client, &__key); \ + }) + +int __host1x_client_register(struct host1x_client *client); + +/* + * Note that this wrapper calls __host1x_client_init() for compatibility + * with existing callers. Callers that want to separately initialize and + * register a host1x client must first initialize using either of the + * __host1x_client_init() or host1x_client_init() functions and then use + * the low-level __host1x_client_register() function to avoid the client + * getting reinitialized. 
+ */ +#define host1x_client_register(client) \ + ({ \ + static struct lock_class_key __key; \ + __host1x_client_init(client, &__key); \ + __host1x_client_register(client); \ }) int host1x_client_unregister(struct host1x_client *client); diff --git a/include/linux/init.h b/include/linux/init.h index 045ad1650ed1..d82b4b2e1d25 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -242,7 +242,8 @@ extern bool initcall_debug; asm(".section \"" __sec "\", \"a\" \n" \ __stringify(__name) ": \n" \ ".long " __stringify(__stub) " - . \n" \ - ".previous \n"); + ".previous \n"); \ + static_assert(__same_type(initcall_t, &fn)); #else #define ____define_initcall(fn, __unused, __name, __sec) \ static initcall_t __name __used \ diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2f34487e21f2..76102efbf079 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -10,6 +10,7 @@ #include <linux/spinlock.h> #include <linux/signal.h> #include <linux/sched.h> +#include <linux/sched/stat.h> #include <linux/bug.h> #include <linux/minmax.h> #include <linux/mm.h> @@ -146,7 +147,7 @@ static inline bool is_error_page(struct page *page) */ #define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) -#define KVM_REQ_PENDING_TIMER 2 +#define KVM_REQ_UNBLOCK 2 #define KVM_REQ_UNHALT 3 #define KVM_REQUEST_ARCH_BASE 8 @@ -265,6 +266,11 @@ static inline bool kvm_vcpu_mapped(struct kvm_host_map *map) return !!map->hva; } +static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop) +{ + return single_task_running() && !need_resched() && ktime_before(cur, stop); +} + /* * Sometimes a large or cross-page mmio needs to be broken up into separate * exits for userspace servicing. 
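/*
 * Concrete shape of the host1x_client_init() / __host1x_client_register()
 * split introduced in include/linux/host1x.h above (illustrative; the
 * client being embedded as ->base in a driver structure is an assumption
 * of the example): a driver that must initialize once but may register and
 * unregister repeatedly now does
 *
 *	host1x_client_init(&client->base);
 *	int err = __host1x_client_register(&client->base);
 *	...
 *	host1x_client_unregister(&client->base);
 *	host1x_client_exit(&client->base);
 *
 * while existing callers can keep using host1x_client_register(), which
 * still performs both the init and the register step.
 */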
diff --git a/include/linux/pci.h b/include/linux/pci.h index c20211e59a57..24306504226a 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -2344,6 +2344,7 @@ int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, struct device_node; struct irq_domain; struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); +bool pci_host_of_has_msi_map(struct device *dev); /* Arch may override this (weak) */ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); @@ -2351,6 +2352,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); #else /* CONFIG_OF */ static inline struct irq_domain * pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } +static inline bool pci_host_of_has_msi_map(struct device *dev) { return false; } #endif /* CONFIG_OF */ static inline struct device_node * diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index d81fe8b364d0..61b622e334ee 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -368,6 +368,8 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size, unsigned int num_prealloc, unsigned int max_req); void xprt_free(struct rpc_xprt *); +void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task); +bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req); static inline int xprt_enable_swap(struct rpc_xprt *xprt) diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 1358a0ceb4d0..0bc29c4516e7 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -81,7 +81,7 @@ struct snd_compr_stream; #define SND_SOC_DAIFMT_CBP_CFP (1 << 12) /* codec clk provider & frame provider */ #define SND_SOC_DAIFMT_CBC_CFP (2 << 12) /* codec clk consumer & frame provider */ #define SND_SOC_DAIFMT_CBP_CFC (3 << 12) /* codec clk provider & frame consumer */ -#define SND_SOC_DAIFMT_CBC_CFC (4 << 12) /* codec clk consumer & frame follower */ +#define SND_SOC_DAIFMT_CBC_CFC (4 << 12) /* codec clk consumer & frame consumer */ /* previous definitions kept for backwards-compatibility, do not use in new contributions */ #define SND_SOC_DAIFMT_CBM_CFM SND_SOC_DAIFMT_CBP_CFP diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index ee93428ced9a..225ec87d4f22 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -611,6 +611,7 @@ #define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */ #define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */ #define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */ +#define KEY_EMOJI_PICKER 0x249 /* Show/hide emoji picker (HUTRR101) */ #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 3fd9a7e9d90c..79d9c44d1ad7 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -8,6 +8,7 @@ * Note: you must update KVM_API_VERSION if you change this interface. */ +#include <linux/const.h> #include <linux/types.h> #include <linux/compiler.h> #include <linux/ioctl.h> @@ -1879,8 +1880,8 @@ struct kvm_hyperv_eventfd { * conversion after harvesting an entry. Also, it must not skip any * dirty bits, so that dirty bits are always harvested in sequence. 
*/ -#define KVM_DIRTY_GFN_F_DIRTY BIT(0) -#define KVM_DIRTY_GFN_F_RESET BIT(1) +#define KVM_DIRTY_GFN_F_DIRTY _BITUL(0) +#define KVM_DIRTY_GFN_F_RESET _BITUL(1) #define KVM_DIRTY_GFN_F_MASK 0x3 /* diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 6ecd3f3a52b5..9f58049ac16d 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -1105,28 +1105,30 @@ static int seccomp_do_user_notification(int this_syscall, up(&match->notif->request); wake_up_poll(&match->wqh, EPOLLIN | EPOLLRDNORM); - mutex_unlock(&match->notify_lock); /* * This is where we wait for a reply from userspace. */ -wait: - err = wait_for_completion_interruptible(&n.ready); - mutex_lock(&match->notify_lock); - if (err == 0) { - /* Check if we were woken up by a addfd message */ + do { + mutex_unlock(&match->notify_lock); + err = wait_for_completion_interruptible(&n.ready); + mutex_lock(&match->notify_lock); + if (err != 0) + goto interrupted; + addfd = list_first_entry_or_null(&n.addfd, struct seccomp_kaddfd, list); - if (addfd && n.state != SECCOMP_NOTIFY_REPLIED) { + /* Check if we were woken up by a addfd message */ + if (addfd) seccomp_handle_addfd(addfd); - mutex_unlock(&match->notify_lock); - goto wait; - } - ret = n.val; - err = n.error; - flags = n.flags; - } + } while (n.state != SECCOMP_NOTIFY_REPLIED); + + ret = n.val; + err = n.error; + flags = n.flags; + +interrupted: /* If there were any pending addfd calls, clear them out */ list_for_each_entry_safe(addfd, tmp, &n.addfd, list) { /* The process went away before we got a chance to handle it */ diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index a1071cdefb5a..af9302141bcf 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c @@ -275,7 +275,7 @@ static void __percpu_ref_switch_mode(struct percpu_ref *ref, wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch, percpu_ref_switch_lock); - if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD)) + if (data->force_atomic || percpu_ref_is_dying(ref)) __percpu_ref_switch_to_atomic(ref, confirm_switch); else __percpu_ref_switch_to_percpu(ref); @@ -385,7 +385,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref, spin_lock_irqsave(&percpu_ref_switch_lock, flags); - WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD, + WARN_ONCE(percpu_ref_is_dying(ref), "%s called more than once on %ps!", __func__, ref->data->release); @@ -465,7 +465,7 @@ void percpu_ref_resurrect(struct percpu_ref *ref) spin_lock_irqsave(&percpu_ref_switch_lock, flags); - WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)); + WARN_ON_ONCE(!percpu_ref_is_dying(ref)); WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count)); ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index f555d335e910..42623d6b8f0e 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -1677,13 +1677,6 @@ call_reserveresult(struct rpc_task *task) return; } - /* - * Even though there was an error, we may have acquired - * a request slot somehow. Make sure not to leak it. 
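/*
 * For reference (recalled from <linux/percpu-refcount.h>, not shown in the
 * patch): percpu_ref_is_dying() is the existing inline helper
 *
 *	static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
 *	{
 *		return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
 *	}
 *
 * so the lib/percpu-refcount.c hunks above are behaviour-preserving; they
 * only replace the open-coded __PERCPU_REF_DEAD tests with the named
 * accessor.
 */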
- */ - if (task->tk_rqstp) - xprt_release(task); - switch (status) { case -ENOMEM: rpc_delay(task, HZ >> 2); diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index e5b5a960a69b..3509a7f139b9 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -70,6 +70,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net); static __be32 xprt_alloc_xid(struct rpc_xprt *xprt); static void xprt_destroy(struct rpc_xprt *xprt); +static void xprt_request_init(struct rpc_task *task); static DEFINE_SPINLOCK(xprt_list_lock); static LIST_HEAD(xprt_list); @@ -1606,17 +1607,40 @@ xprt_transmit(struct rpc_task *task) spin_unlock(&xprt->queue_lock); } -static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) +static void xprt_complete_request_init(struct rpc_task *task) +{ + if (task->tk_rqstp) + xprt_request_init(task); +} + +void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) { set_bit(XPRT_CONGESTED, &xprt->state); - rpc_sleep_on(&xprt->backlog, task, NULL); + rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init); +} +EXPORT_SYMBOL_GPL(xprt_add_backlog); + +static bool __xprt_set_rq(struct rpc_task *task, void *data) +{ + struct rpc_rqst *req = data; + + if (task->tk_rqstp == NULL) { + memset(req, 0, sizeof(*req)); /* mark unused */ + task->tk_rqstp = req; + return true; + } + return false; } -static void xprt_wake_up_backlog(struct rpc_xprt *xprt) +bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req) { - if (rpc_wake_up_next(&xprt->backlog) == NULL) + if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) { clear_bit(XPRT_CONGESTED, &xprt->state); + return false; + } + return true; } +EXPORT_SYMBOL_GPL(xprt_wake_up_backlog); static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task) { @@ -1626,7 +1650,7 @@ static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task goto out; spin_lock(&xprt->reserve_lock); if (test_bit(XPRT_CONGESTED, &xprt->state)) { - rpc_sleep_on(&xprt->backlog, task, NULL); + xprt_add_backlog(xprt, task); ret = true; } spin_unlock(&xprt->reserve_lock); @@ -1703,11 +1727,11 @@ EXPORT_SYMBOL_GPL(xprt_alloc_slot); void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) { spin_lock(&xprt->reserve_lock); - if (!xprt_dynamic_free_slot(xprt, req)) { + if (!xprt_wake_up_backlog(xprt, req) && + !xprt_dynamic_free_slot(xprt, req)) { memset(req, 0, sizeof(*req)); /* mark unused */ list_add(&req->rq_list, &xprt->free); } - xprt_wake_up_backlog(xprt); spin_unlock(&xprt->reserve_lock); } EXPORT_SYMBOL_GPL(xprt_free_slot); @@ -1894,10 +1918,10 @@ void xprt_release(struct rpc_task *task) xdr_free_bvec(&req->rq_snd_buf); if (req->rq_cred != NULL) put_rpccred(req->rq_cred); - task->tk_rqstp = NULL; if (req->rq_release_snd_buf) req->rq_release_snd_buf(req); + task->tk_rqstp = NULL; if (likely(!bc_prealloc(req))) xprt->ops->free_slot(xprt, req); else diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 649f7d8b9733..c335c1361564 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -628,8 +628,9 @@ out_mapping_err: return false; } -/* The tail iovec might not reside in the same page as the - * head iovec. +/* The tail iovec may include an XDR pad for the page list, + * as well as additional content, and may not reside in the + * same page as the head iovec. 
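/*
 * Net effect of the sunrpc backlog rework above, summarized (an
 * interpretive sketch, not text from the patch): when a slot is freed,
 * xprt_free_slot() first offers the rpc_rqst directly to the first task
 * sleeping on xprt->backlog via
 *
 *	xprt_wake_up_backlog(xprt, req);
 *
 * whose __xprt_set_rq() callback hands req to that task as its tk_rqstp,
 * and only if no task is waiting does the request go back onto the free
 * list. This removes the window in which a newly woken backlog task could
 * lose the slot to another caller, and xprtrdma now reuses the same
 * xprt_add_backlog()/xprt_wake_up_backlog() helpers in its slot paths.
 */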
*/ static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req, struct xdr_buf *xdr, @@ -747,19 +748,27 @@ static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, struct xdr_buf *xdr) { - struct kvec *tail = &xdr->tail[0]; - if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len)) return false; - /* If there is a Read chunk, the page list is handled + /* If there is a Read chunk, the page list is being handled * via explicit RDMA, and thus is skipped here. */ - if (tail->iov_len) { - if (!rpcrdma_prepare_tail_iov(req, xdr, - offset_in_page(tail->iov_base), - tail->iov_len)) + /* Do not include the tail if it is only an XDR pad */ + if (xdr->tail[0].iov_len > 3) { + unsigned int page_base, len; + + /* If the content in the page list is an odd length, + * xdr_write_pages() adds a pad at the beginning of + * the tail iovec. Force the tail's non-pad content to + * land at the next XDR position in the Send message. + */ + page_base = offset_in_page(xdr->tail[0].iov_base); + len = xdr->tail[0].iov_len; + page_base += len & 3; + len -= len & 3; + if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len)) return false; kref_get(&req->rl_kref); } diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 09953597d055..19a49d26b1e4 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -520,9 +520,8 @@ xprt_rdma_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) return; out_sleep: - set_bit(XPRT_CONGESTED, &xprt->state); - rpc_sleep_on(&xprt->backlog, task, NULL); task->tk_status = -EAGAIN; + xprt_add_backlog(xprt, task); } /** @@ -537,10 +536,11 @@ xprt_rdma_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *rqst) struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt, rx_xprt); - memset(rqst, 0, sizeof(*rqst)); - rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst)); - if (unlikely(!rpc_wake_up_next(&xprt->backlog))) - clear_bit(XPRT_CONGESTED, &xprt->state); + rpcrdma_reply_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst)); + if (!xprt_wake_up_backlog(xprt, rqst)) { + memset(rqst, 0, sizeof(*rqst)); + rpcrdma_buffer_put(&r_xprt->rx_buf, rpcr_to_rdmar(rqst)); + } } static bool rpcrdma_check_regbuf(struct rpcrdma_xprt *r_xprt, diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 1e965a380896..649c23518ec0 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -1201,6 +1201,20 @@ rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt) } /** + * rpcrdma_reply_put - Put reply buffers back into pool + * @buffers: buffer pool + * @req: object to return + * + */ +void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req) +{ + if (req->rl_reply) { + rpcrdma_rep_put(buffers, req->rl_reply); + req->rl_reply = NULL; + } +} + +/** * rpcrdma_buffer_get - Get a request buffer * @buffers: Buffer pool from which to obtain a buffer * @@ -1228,9 +1242,7 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers) */ void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req) { - if (req->rl_reply) - rpcrdma_rep_put(buffers, req->rl_reply); - req->rl_reply = NULL; + rpcrdma_reply_put(buffers, req); spin_lock(&buffers->rb_lock); list_add(&req->rl_list, &buffers->rb_send_bufs); diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 436ad7312614..5d231d94e944 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -479,6 +479,7 @@ struct rpcrdma_req *rpcrdma_buffer_get(struct 
rpcrdma_buffer *); void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req); void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep); +void rpcrdma_reply_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req); bool rpcrdma_regbuf_realloc(struct rpcrdma_regbuf *rb, size_t size, gfp_t flags); diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 47aa47a2b07c..316d04945587 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1010,6 +1010,8 @@ static int xs_tcp_send_request(struct rpc_rqst *req) kernel_sock_shutdown(transport->sock, SHUT_RDWR); return -ENOTCONN; } + if (!transport->inet) + return -ENOTCONN; xs_pktdump("packet data:", req->rq_svec->iov_base, diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c index 21dbf63d6e41..9ec93d90e8a5 100644 --- a/samples/vfio-mdev/mdpy-fb.c +++ b/samples/vfio-mdev/mdpy-fb.c @@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev, if (format != DRM_FORMAT_XRGB8888) { pci_err(pdev, "format mismatch (0x%x != 0x%x)\n", format, DRM_FORMAT_XRGB8888); - return -EINVAL; + ret = -EINVAL; + goto err_release_regions; } if (width < 100 || width > 10000) { pci_err(pdev, "width (%d) out of range\n", width); - return -EINVAL; + ret = -EINVAL; + goto err_release_regions; } if (height < 100 || height > 10000) { pci_err(pdev, "height (%d) out of range\n", height); - return -EINVAL; + ret = -EINVAL; + goto err_release_regions; } pci_info(pdev, "mdpy found: %dx%d framebuffer\n", width, height); info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev); - if (!info) + if (!info) { + ret = -ENOMEM; goto err_release_regions; + } pci_set_drvdata(pdev, info); par = info->par; diff --git a/sound/core/control_led.c b/sound/core/control_led.c index 25f57c14f294..a90e31dbde61 100644 --- a/sound/core/control_led.c +++ b/sound/core/control_led.c @@ -17,6 +17,9 @@ MODULE_LICENSE("GPL"); #define MAX_LED (((SNDRV_CTL_ELEM_ACCESS_MIC_LED - SNDRV_CTL_ELEM_ACCESS_SPK_LED) \ >> SNDRV_CTL_ELEM_ACCESS_LED_SHIFT) + 1) +#define to_led_card_dev(_dev) \ + container_of(_dev, struct snd_ctl_led_card, dev) + enum snd_ctl_led_mode { MODE_FOLLOW_MUTE = 0, MODE_FOLLOW_ROUTE, @@ -371,6 +374,21 @@ static void snd_ctl_led_disconnect(struct snd_card *card) snd_ctl_led_refresh(); } +static void snd_ctl_led_card_release(struct device *dev) +{ + struct snd_ctl_led_card *led_card = to_led_card_dev(dev); + + kfree(led_card); +} + +static void snd_ctl_led_release(struct device *dev) +{ +} + +static void snd_ctl_led_dev_release(struct device *dev) +{ +} + /* * sysfs */ @@ -663,6 +681,7 @@ static void snd_ctl_led_sysfs_add(struct snd_card *card) led_card->number = card->number; led_card->led = led; device_initialize(&led_card->dev); + led_card->dev.release = snd_ctl_led_card_release; if (dev_set_name(&led_card->dev, "card%d", card->number) < 0) goto cerr; led_card->dev.parent = &led->dev; @@ -681,7 +700,6 @@ cerr: put_device(&led_card->dev); cerr2: printk(KERN_ERR "snd_ctl_led: unable to add card%d", card->number); - kfree(led_card); } } @@ -700,8 +718,7 @@ static void snd_ctl_led_sysfs_remove(struct snd_card *card) snprintf(link_name, sizeof(link_name), "led-%s", led->name); sysfs_remove_link(&card->ctl_dev.kobj, link_name); sysfs_remove_link(&led_card->dev.kobj, "card"); - device_del(&led_card->dev); - kfree(led_card); + device_unregister(&led_card->dev); led->cards[card->number] = NULL; } } @@ -723,6 +740,7 @@ static int __init snd_ctl_led_init(void) device_initialize(&snd_ctl_led_dev); 
snd_ctl_led_dev.class = sound_class; + snd_ctl_led_dev.release = snd_ctl_led_dev_release; dev_set_name(&snd_ctl_led_dev, "ctl-led"); if (device_add(&snd_ctl_led_dev)) { put_device(&snd_ctl_led_dev); @@ -733,15 +751,16 @@ static int __init snd_ctl_led_init(void) INIT_LIST_HEAD(&led->controls); device_initialize(&led->dev); led->dev.parent = &snd_ctl_led_dev; + led->dev.release = snd_ctl_led_release; led->dev.groups = snd_ctl_led_dev_attr_groups; dev_set_name(&led->dev, led->name); if (device_add(&led->dev)) { put_device(&led->dev); for (; group > 0; group--) { led = &snd_ctl_leds[group - 1]; - device_del(&led->dev); + device_unregister(&led->dev); } - device_del(&snd_ctl_led_dev); + device_unregister(&snd_ctl_led_dev); return -ENOMEM; } } @@ -767,9 +786,9 @@ static void __exit snd_ctl_led_exit(void) } for (group = 0; group < MAX_LED; group++) { led = &snd_ctl_leds[group]; - device_del(&led->dev); + device_unregister(&led->dev); } - device_del(&snd_ctl_led_dev); + device_unregister(&snd_ctl_led_dev); snd_ctl_led_clean(NULL); } diff --git a/sound/core/timer.c b/sound/core/timer.c index 6898b1ac0d7f..92b7008fcdb8 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event) return; if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) return; + event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */ list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) - ts->ccallback(ts, event + 100, &tstamp, resolution); + ts->ccallback(ts, event, &tstamp, resolution); } /* start/continue a master timer */ diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index ab5ff7867eb9..d8be146793ee 100644 --- a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -331,6 +331,10 @@ static const struct config_entry config_table[] = { .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, .device = 0x51c8, }, + { + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, + .device = 0x51cc, + }, #endif }; diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index a31009afc025..5462f771c2f9 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2917,6 +2917,7 @@ static int hda_codec_runtime_resume(struct device *dev) #ifdef CONFIG_PM_SLEEP static int hda_codec_pm_prepare(struct device *dev) { + dev->power.power_state = PMSG_SUSPEND; return pm_runtime_suspended(dev); } @@ -2924,6 +2925,10 @@ static void hda_codec_pm_complete(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); + /* If no other pm-functions are called between prepare() and complete() */ + if (dev->power.power_state.event == PM_EVENT_SUSPEND) + dev->power.power_state = PMSG_RESUME; + if (pm_runtime_suspended(dev) && (codec->jackpoll_interval || hda_codec_need_resume(codec) || codec->forced_resume)) pm_request_resume(dev); diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index b638fc2ef6f7..1f8018f9ce57 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -3520,6 +3520,7 @@ static int cap_sw_put(struct snd_kcontrol *kcontrol, static const struct snd_kcontrol_new cap_sw_temp = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Switch", + .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .info = cap_sw_info, .get = cap_sw_get, .put = cap_sw_put, diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 79ade335c8a0..470753b36c8a 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2485,6 +2485,9 
@@ static const struct pci_device_id azx_ids[] = { /* Alderlake-P */ { PCI_DEVICE(0x8086, 0x51c8), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Alderlake-M */ + { PCI_DEVICE(0x8086, 0x51cc), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Elkhart Lake */ { PCI_DEVICE(0x8086, 0x4b55), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index 726507d0b04c..8629e84fef23 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -2206,10 +2206,9 @@ static void cs8409_cs42l42_fixups(struct hda_codec *codec, break; case HDA_FIXUP_ACT_PROBE: - /* Set initial volume on Bullseye to -26 dB */ - if (codec->fixup_id == CS8409_BULLSEYE) - snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID, - HDA_INPUT, 0, 0xff, 0x19); + /* Set initial DMIC volume to -26 dB */ + snd_hda_codec_amp_init_stereo(codec, CS8409_CS42L42_DMIC_ADC_PIN_NID, + HDA_INPUT, 0, 0xff, 0x19); snd_hda_gen_add_kctl(&spec->gen, NULL, &cs8409_cs42l42_hp_volume_mixer); snd_hda_gen_add_kctl(&spec->gen, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 552e2cb73291..43e37145eb5d 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2603,6 +2603,28 @@ static const struct hda_model_fixup alc882_fixup_models[] = { {} }; +static const struct snd_hda_pin_quirk alc882_pin_fixup_tbl[] = { + SND_HDA_PIN_QUIRK(0x10ec1220, 0x1043, "ASUS", ALC1220_FIXUP_CLEVO_P950, + {0x14, 0x01014010}, + {0x15, 0x01011012}, + {0x16, 0x01016011}, + {0x18, 0x01a19040}, + {0x19, 0x02a19050}, + {0x1a, 0x0181304f}, + {0x1b, 0x0221401f}, + {0x1e, 0x01456130}), + SND_HDA_PIN_QUIRK(0x10ec1220, 0x1462, "MS-7C35", ALC1220_FIXUP_CLEVO_P950, + {0x14, 0x01015010}, + {0x15, 0x01011012}, + {0x16, 0x01011011}, + {0x18, 0x01a11040}, + {0x19, 0x02a19050}, + {0x1a, 0x0181104f}, + {0x1b, 0x0221401f}, + {0x1e, 0x01451130}), + {} +}; + /* * BIOS auto configuration */ @@ -2644,6 +2666,7 @@ static int patch_alc882(struct hda_codec *codec) snd_hda_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl, alc882_fixups); + snd_hda_pick_pin_fixup(codec, alc882_pin_fixup_tbl, alc882_fixups, true); snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE); alc_auto_parse_customize_define(codec); @@ -6543,6 +6566,8 @@ enum { ALC295_FIXUP_ASUS_DACS, ALC295_FIXUP_HP_OMEN, ALC285_FIXUP_HP_SPECTRE_X360, + ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, + ALC623_FIXUP_LENOVO_THINKSTATION_P340, }; static const struct hda_fixup alc269_fixups[] = { @@ -8109,6 +8134,18 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1, }, + [ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_ideapad_s740_coef, + .chained = true, + .chain_id = ALC285_FIXUP_THINKPAD_HEADSET_JACK, + }, + [ALC623_FIXUP_LENOVO_THINKSTATION_P340] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_no_shutup, + .chained = true, + .chain_id = ALC283_FIXUP_HEADSET_MIC, + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -8266,6 +8303,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 
0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN), SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), @@ -8291,6 +8329,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP), SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884c, "HP EliteBook 840 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x886d, "HP ZBook Fury 17.3 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), + SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), + SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), + SND_PCI_QUIRK(0x103c, 0x8896, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8898, "HP EliteBook 845 G8 Notebook PC", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), @@ -8427,7 +8469,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1558, 0xc019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1558, 0xc022, "Clevo NH77[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), - SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), @@ -8477,6 +8519,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME), SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF), + SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), @@ -8692,6 +8735,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC245_FIXUP_HP_X360_AMP, .name = "alc245-hp-x360-amp"}, {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"}, {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"}, + {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"}, + {.id = ALC623_FIXUP_LENOVO_THINKSTATION_P340, .name = "alc623-lenovo-thinkstation-p340"}, {} }; #define ALC225_STANDARD_PINS \ diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c index f22bb2bdf527..8148b0d22e88 100644 --- a/sound/soc/amd/raven/acp3x-pcm-dma.c +++ b/sound/soc/amd/raven/acp3x-pcm-dma.c @@ -235,10 +235,6 @@ static int acp3x_dma_open(struct snd_soc_component *component, return ret; } - if (!adata->play_stream && !adata->capture_stream && - !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream) - rv_writel(1, 
adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB); - i2s_data->acp3x_base = adata->acp3x_base; runtime->private_data = i2s_data; return ret; @@ -365,12 +361,6 @@ static int acp3x_dma_close(struct snd_soc_component *component, } } - /* Disable ACP irq, when the current stream is being closed and - * another stream is also not active. - */ - if (!adata->play_stream && !adata->capture_stream && - !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream) - rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB); return 0; } diff --git a/sound/soc/amd/raven/acp3x.h b/sound/soc/amd/raven/acp3x.h index 03fe93913e12..c3f0c8b7545d 100644 --- a/sound/soc/amd/raven/acp3x.h +++ b/sound/soc/amd/raven/acp3x.h @@ -77,6 +77,7 @@ #define ACP_POWER_OFF_IN_PROGRESS 0x03 #define ACP3x_ITER_IRER_SAMP_LEN_MASK 0x38 +#define ACP_EXT_INTR_STAT_CLEAR_MASK 0xFFFFFFFF struct acp3x_platform_info { u16 play_i2s_instance; diff --git a/sound/soc/amd/raven/pci-acp3x.c b/sound/soc/amd/raven/pci-acp3x.c index d3536fd6a124..a013a607b3d4 100644 --- a/sound/soc/amd/raven/pci-acp3x.c +++ b/sound/soc/amd/raven/pci-acp3x.c @@ -76,6 +76,19 @@ static int acp3x_reset(void __iomem *acp3x_base) return -ETIMEDOUT; } +static void acp3x_enable_interrupts(void __iomem *acp_base) +{ + rv_writel(0x01, acp_base + mmACP_EXTERNAL_INTR_ENB); +} + +static void acp3x_disable_interrupts(void __iomem *acp_base) +{ + rv_writel(ACP_EXT_INTR_STAT_CLEAR_MASK, acp_base + + mmACP_EXTERNAL_INTR_STAT); + rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_CNTL); + rv_writel(0x00, acp_base + mmACP_EXTERNAL_INTR_ENB); +} + static int acp3x_init(struct acp3x_dev_data *adata) { void __iomem *acp3x_base = adata->acp3x_base; @@ -93,6 +106,7 @@ static int acp3x_init(struct acp3x_dev_data *adata) pr_err("ACP3x reset failed\n"); return ret; } + acp3x_enable_interrupts(acp3x_base); return 0; } @@ -100,6 +114,7 @@ static int acp3x_deinit(void __iomem *acp3x_base) { int ret; + acp3x_disable_interrupts(acp3x_base); /* Reset */ ret = acp3x_reset(acp3x_base); if (ret) { diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c index 34aed80db0eb..37d4600b6f2c 100644 --- a/sound/soc/codecs/ak5558.c +++ b/sound/soc/codecs/ak5558.c @@ -307,7 +307,7 @@ static struct snd_soc_dai_driver ak5558_dai = { }; static struct snd_soc_dai_driver ak5552_dai = { - .name = "ak5558-aif", + .name = "ak5552-aif", .capture = { .stream_name = "Capture", .channels_min = 1, diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c index f4067230ac42..88e79b9f52ed 100644 --- a/sound/soc/codecs/cs35l32.c +++ b/sound/soc/codecs/cs35l32.c @@ -261,6 +261,9 @@ static const struct regmap_config cs35l32_regmap = { .readable_reg = cs35l32_readable_register, .precious_reg = cs35l32_precious_register, .cache_type = REGCACHE_RBTREE, + + .use_single_read = true, + .use_single_write = true, }; static int cs35l32_handle_of_data(struct i2c_client *i2c_client, diff --git a/sound/soc/codecs/cs35l33.c b/sound/soc/codecs/cs35l33.c index 7ad7b733af9b..e8f3dcfd144d 100644 --- a/sound/soc/codecs/cs35l33.c +++ b/sound/soc/codecs/cs35l33.c @@ -1201,6 +1201,7 @@ static int cs35l33_i2c_probe(struct i2c_client *i2c_client, dev_err(&i2c_client->dev, "CS35L33 Device ID (%X). 
Expected ID %X\n", devid, CS35L33_CHIP_ID); + ret = -EINVAL; goto err_enable; } diff --git a/sound/soc/codecs/cs35l34.c b/sound/soc/codecs/cs35l34.c index 110ee2d06358..3d3c3c34dfe2 100644 --- a/sound/soc/codecs/cs35l34.c +++ b/sound/soc/codecs/cs35l34.c @@ -800,6 +800,9 @@ static struct regmap_config cs35l34_regmap = { .readable_reg = cs35l34_readable_register, .precious_reg = cs35l34_precious_register, .cache_type = REGCACHE_RBTREE, + + .use_single_read = true, + .use_single_write = true, }; static int cs35l34_handle_of_data(struct i2c_client *i2c_client, diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c index bf982e145e94..77473c226f9e 100644 --- a/sound/soc/codecs/cs42l42.c +++ b/sound/soc/codecs/cs42l42.c @@ -399,6 +399,9 @@ static const struct regmap_config cs42l42_regmap = { .reg_defaults = cs42l42_reg_defaults, .num_reg_defaults = ARRAY_SIZE(cs42l42_reg_defaults), .cache_type = REGCACHE_RBTREE, + + .use_single_read = true, + .use_single_write = true, }; static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false); diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c index c44a5cdb796e..7cdffdf6b8cf 100644 --- a/sound/soc/codecs/cs42l56.c +++ b/sound/soc/codecs/cs42l56.c @@ -1175,7 +1175,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, struct cs42l56_platform_data *pdata = dev_get_platdata(&i2c_client->dev); int ret, i; - unsigned int devid = 0; + unsigned int devid; unsigned int alpha_rev, metal_rev; unsigned int reg; @@ -1245,6 +1245,11 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client, } ret = regmap_read(cs42l56->regmap, CS42L56_CHIP_ID_1, &reg); + if (ret) { + dev_err(&i2c_client->dev, "Failed to read chip ID: %d\n", ret); + return ret; + } + devid = reg & CS42L56_CHIP_ID_MASK; if (devid != CS42L56_DEVID) { dev_err(&i2c_client->dev, diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c index c3f974ec78e5..e92bacaab53f 100644 --- a/sound/soc/codecs/cs42l73.c +++ b/sound/soc/codecs/cs42l73.c @@ -1268,6 +1268,9 @@ static const struct regmap_config cs42l73_regmap = { .volatile_reg = cs42l73_volatile_register, .readable_reg = cs42l73_readable_register, .cache_type = REGCACHE_RBTREE, + + .use_single_read = true, + .use_single_write = true, }; static int cs42l73_i2c_probe(struct i2c_client *i2c_client, diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c index 3d67cbf9eaaa..abe0cc0bc03a 100644 --- a/sound/soc/codecs/cs53l30.c +++ b/sound/soc/codecs/cs53l30.c @@ -912,6 +912,9 @@ static struct regmap_config cs53l30_regmap = { .writeable_reg = cs53l30_writeable_register, .readable_reg = cs53l30_readable_register, .cache_type = REGCACHE_RBTREE, + + .use_single_read = true, + .use_single_write = true, }; static int cs53l30_i2c_probe(struct i2c_client *client, diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c index bd3c523a8617..13009d08b09a 100644 --- a/sound/soc/codecs/da7219.c +++ b/sound/soc/codecs/da7219.c @@ -2181,10 +2181,7 @@ static int da7219_register_dai_clks(struct snd_soc_component *component) ret); goto err; } - - da7219->dai_clks[i] = devm_clk_hw_get_clk(dev, dai_clk_hw, NULL); - if (IS_ERR(da7219->dai_clks[i])) - return PTR_ERR(da7219->dai_clks[i]); + da7219->dai_clks[i] = dai_clk_hw->clk; /* For DT setup onecell data, otherwise create lookup */ if (np) { diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c index b0ebfc8d180c..171ab7f519c0 100644 --- a/sound/soc/codecs/lpass-rx-macro.c +++ b/sound/soc/codecs/lpass-rx-macro.c @@ -3579,6 +3579,7
@@ static const struct of_device_id rx_macro_dt_match[] = { { .compatible = "qcom,sm8250-lpass-rx-macro" }, { } }; +MODULE_DEVICE_TABLE(of, rx_macro_dt_match); static struct platform_driver rx_macro_driver = { .driver = { diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c index acd2fbc0ca7c..27a0d5defd27 100644 --- a/sound/soc/codecs/lpass-tx-macro.c +++ b/sound/soc/codecs/lpass-tx-macro.c @@ -1846,6 +1846,7 @@ static const struct of_device_id tx_macro_dt_match[] = { { .compatible = "qcom,sm8250-lpass-tx-macro" }, { } }; +MODULE_DEVICE_TABLE(of, tx_macro_dt_match); static struct platform_driver tx_macro_driver = { .driver = { .name = "tx_macro", diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c index 4be24e7f51c8..f8e49e45ce33 100644 --- a/sound/soc/codecs/max98088.c +++ b/sound/soc/codecs/max98088.c @@ -41,6 +41,7 @@ struct max98088_priv { enum max98088_type devtype; struct max98088_pdata *pdata; struct clk *mclk; + unsigned char mclk_prescaler; unsigned int sysclk; struct max98088_cdata dai[2]; int eq_textcnt; @@ -998,13 +999,16 @@ static int max98088_dai1_hw_params(struct snd_pcm_substream *substream, /* Configure NI when operating as master */ if (snd_soc_component_read(component, M98088_REG_14_DAI1_FORMAT) & M98088_DAI_MAS) { + unsigned long pclk; + if (max98088->sysclk == 0) { dev_err(component->dev, "Invalid system clock frequency\n"); return -EINVAL; } ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL) * (unsigned long long int)rate; - do_div(ni, (unsigned long long int)max98088->sysclk); + pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler); + ni = DIV_ROUND_CLOSEST_ULL(ni, pclk); snd_soc_component_write(component, M98088_REG_12_DAI1_CLKCFG_HI, (ni >> 8) & 0x7F); snd_soc_component_write(component, M98088_REG_13_DAI1_CLKCFG_LO, @@ -1065,13 +1069,16 @@ static int max98088_dai2_hw_params(struct snd_pcm_substream *substream, /* Configure NI when operating as master */ if (snd_soc_component_read(component, M98088_REG_1C_DAI2_FORMAT) & M98088_DAI_MAS) { + unsigned long pclk; + if (max98088->sysclk == 0) { dev_err(component->dev, "Invalid system clock frequency\n"); return -EINVAL; } ni = 65536ULL * (rate < 50000 ? 96ULL : 48ULL) * (unsigned long long int)rate; - do_div(ni, (unsigned long long int)max98088->sysclk); + pclk = DIV_ROUND_CLOSEST(max98088->sysclk, max98088->mclk_prescaler); + ni = DIV_ROUND_CLOSEST_ULL(ni, pclk); snd_soc_component_write(component, M98088_REG_1A_DAI2_CLKCFG_HI, (ni >> 8) & 0x7F); snd_soc_component_write(component, M98088_REG_1B_DAI2_CLKCFG_LO, @@ -1113,8 +1120,10 @@ static int max98088_dai_set_sysclk(struct snd_soc_dai *dai, */ if ((freq >= 10000000) && (freq < 20000000)) { snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x10); + max98088->mclk_prescaler = 1; } else if ((freq >= 20000000) && (freq < 30000000)) { snd_soc_component_write(component, M98088_REG_10_SYS_CLK, 0x20); + max98088->mclk_prescaler = 2; } else { dev_err(component->dev, "Invalid master clock frequency\n"); return -EINVAL; diff --git a/sound/soc/codecs/rt711-sdca.c b/sound/soc/codecs/rt711-sdca.c index cc36739f7fcf..24a084e0b48a 100644 --- a/sound/soc/codecs/rt711-sdca.c +++ b/sound/soc/codecs/rt711-sdca.c @@ -683,13 +683,13 @@ static int rt711_sdca_set_fu1e_capture_ctl(struct rt711_sdca_priv *rt711) ch_r = (rt711->fu1e_dapm_mute || rt711->fu1e_mixer_r_mute) ? 
0x01 : 0x00; err = regmap_write(rt711->regmap, - SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU1E, + SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E, RT711_SDCA_CTL_FU_MUTE, CH_L), ch_l); if (err < 0) return err; err = regmap_write(rt711->regmap, - SDW_SDCA_CTL(FUNC_NUM_JACK_CODEC, RT711_SDCA_ENT_USER_FU1E, + SDW_SDCA_CTL(FUNC_NUM_MIC_ARRAY, RT711_SDCA_ENT_USER_FU1E, RT711_SDCA_CTL_FU_MUTE, CH_R), ch_r); if (err < 0) return err; diff --git a/sound/soc/codecs/sti-sas.c b/sound/soc/codecs/sti-sas.c index ffdf7e559515..82a24e330065 100644 --- a/sound/soc/codecs/sti-sas.c +++ b/sound/soc/codecs/sti-sas.c @@ -408,6 +408,7 @@ static const struct of_device_id sti_sas_dev_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, sti_sas_dev_match); static int sti_sas_driver_probe(struct platform_device *pdev) { diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig index 0917d65d6921..556c284f49dd 100644 --- a/sound/soc/fsl/Kconfig +++ b/sound/soc/fsl/Kconfig @@ -119,6 +119,7 @@ config SND_SOC_FSL_RPMSG tristate "NXP Audio Base On RPMSG support" depends on COMMON_CLK depends on RPMSG + depends on SND_IMX_SOC || SND_IMX_SOC = n select SND_SOC_IMX_RPMSG if SND_IMX_SOC != n help Say Y if you want to add rpmsg audio support for the Freescale CPUs. diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c index 2c8a2fcb7922..5e71382467e8 100644 --- a/sound/soc/generic/audio-graph-card.c +++ b/sound/soc/generic/audio-graph-card.c @@ -209,7 +209,7 @@ static void graph_parse_mclk_fs(struct device_node *top, static int graph_parse_node(struct asoc_simple_priv *priv, struct device_node *ep, struct link_info *li, - int is_cpu) + int *cpu) { struct device *dev = simple_priv_to_dev(priv); struct device_node *top = dev->of_node; @@ -217,9 +217,9 @@ static int graph_parse_node(struct asoc_simple_priv *priv, struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link); struct snd_soc_dai_link_component *dlc; struct asoc_simple_dai *dai; - int ret, single = 0; + int ret; - if (is_cpu) { + if (cpu) { dlc = asoc_link_to_cpu(dai_link, 0); dai = simple_props_to_dai_cpu(dai_props, 0); } else { @@ -229,7 +229,7 @@ static int graph_parse_node(struct asoc_simple_priv *priv, graph_parse_mclk_fs(top, ep, dai_props); - ret = asoc_simple_parse_dai(ep, dlc, &single); + ret = asoc_simple_parse_dai(ep, dlc, cpu); if (ret < 0) return ret; @@ -241,9 +241,6 @@ static int graph_parse_node(struct asoc_simple_priv *priv, if (ret < 0) return ret; - if (is_cpu) - asoc_simple_canonicalize_cpu(dlc, single); - return 0; } @@ -276,33 +273,29 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv, struct link_info *li) { struct device *dev = simple_priv_to_dev(priv); - struct snd_soc_card *card = simple_priv_to_card(priv); struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link); struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link); struct device_node *top = dev->of_node; struct device_node *ep = li->cpu ? 
cpu_ep : codec_ep; - struct device_node *port; - struct device_node *ports; - struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0); - struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0); char dai_name[64]; int ret; - port = of_get_parent(ep); - ports = of_get_parent(port); - dev_dbg(dev, "link_of DPCM (%pOF)\n", ep); if (li->cpu) { + struct snd_soc_card *card = simple_priv_to_card(priv); + struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0); + int is_single_links = 0; + /* Codec is dummy */ /* FE settings */ dai_link->dynamic = 1; dai_link->dpcm_merged_format = 1; - ret = graph_parse_node(priv, cpu_ep, li, 1); + ret = graph_parse_node(priv, cpu_ep, li, &is_single_links); if (ret) - goto out_put_node; + return ret; snprintf(dai_name, sizeof(dai_name), "fe.%pOFP.%s", cpus->of_node, cpus->dai_name); @@ -318,8 +311,13 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv, */ if (card->component_chaining && !soc_component_is_pcm(cpus)) dai_link->no_pcm = 1; + + asoc_simple_canonicalize_cpu(cpus, is_single_links); } else { - struct snd_soc_codec_conf *cconf; + struct snd_soc_codec_conf *cconf = simple_props_to_codec_conf(dai_props, 0); + struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0); + struct device_node *port; + struct device_node *ports; /* CPU is dummy */ @@ -327,22 +325,25 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv, dai_link->no_pcm = 1; dai_link->be_hw_params_fixup = asoc_simple_be_hw_params_fixup; - cconf = simple_props_to_codec_conf(dai_props, 0); - - ret = graph_parse_node(priv, codec_ep, li, 0); + ret = graph_parse_node(priv, codec_ep, li, NULL); if (ret < 0) - goto out_put_node; + return ret; snprintf(dai_name, sizeof(dai_name), "be.%pOFP.%s", codecs->of_node, codecs->dai_name); /* check "prefix" from top node */ + port = of_get_parent(ep); + ports = of_get_parent(port); snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node, "prefix"); if (of_node_name_eq(ports, "ports")) snd_soc_of_parse_node_prefix(ports, cconf, codecs->of_node, "prefix"); snd_soc_of_parse_node_prefix(port, cconf, codecs->of_node, "prefix"); + + of_node_put(ports); + of_node_put(port); } graph_parse_convert(dev, ep, &dai_props->adata); @@ -351,11 +352,8 @@ static int graph_dai_link_of_dpcm(struct asoc_simple_priv *priv, ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name); -out_put_node: li->link++; - of_node_put(ports); - of_node_put(port); return ret; } @@ -369,20 +367,23 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv, struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0); struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0); char dai_name[64]; - int ret; + int ret, is_single_links = 0; dev_dbg(dev, "link_of (%pOF)\n", cpu_ep); - ret = graph_parse_node(priv, cpu_ep, li, 1); + ret = graph_parse_node(priv, cpu_ep, li, &is_single_links); if (ret < 0) return ret; - ret = graph_parse_node(priv, codec_ep, li, 0); + ret = graph_parse_node(priv, codec_ep, li, NULL); if (ret < 0) return ret; snprintf(dai_name, sizeof(dai_name), "%s-%s", cpus->dai_name, codecs->dai_name); + + asoc_simple_canonicalize_cpu(cpus, is_single_links); + ret = graph_link_init(priv, cpu_ep, codec_ep, li, dai_name); if (ret < 0) return ret; diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index a1373be4558f..0015f534d42d 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -93,12 +93,11 @@ static 
void simple_parse_convert(struct device *dev, } static void simple_parse_mclk_fs(struct device_node *top, - struct device_node *cpu, - struct device_node *codec, + struct device_node *np, struct simple_dai_props *props, char *prefix) { - struct device_node *node = of_get_parent(cpu); + struct device_node *node = of_get_parent(np); char prop[128]; snprintf(prop, sizeof(prop), "%smclk-fs", PREFIX); @@ -106,12 +105,71 @@ static void simple_parse_mclk_fs(struct device_node *top, snprintf(prop, sizeof(prop), "%smclk-fs", prefix); of_property_read_u32(node, prop, &props->mclk_fs); - of_property_read_u32(cpu, prop, &props->mclk_fs); - of_property_read_u32(codec, prop, &props->mclk_fs); + of_property_read_u32(np, prop, &props->mclk_fs); of_node_put(node); } +static int simple_parse_node(struct asoc_simple_priv *priv, + struct device_node *np, + struct link_info *li, + char *prefix, + int *cpu) +{ + struct device *dev = simple_priv_to_dev(priv); + struct device_node *top = dev->of_node; + struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link); + struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link); + struct snd_soc_dai_link_component *dlc; + struct asoc_simple_dai *dai; + int ret; + + if (cpu) { + dlc = asoc_link_to_cpu(dai_link, 0); + dai = simple_props_to_dai_cpu(dai_props, 0); + } else { + dlc = asoc_link_to_codec(dai_link, 0); + dai = simple_props_to_dai_codec(dai_props, 0); + } + + simple_parse_mclk_fs(top, np, dai_props, prefix); + + ret = asoc_simple_parse_dai(np, dlc, cpu); + if (ret) + return ret; + + ret = asoc_simple_parse_clk(dev, np, dai, dlc); + if (ret) + return ret; + + ret = asoc_simple_parse_tdm(np, dai); + if (ret) + return ret; + + return 0; +} + +static int simple_link_init(struct asoc_simple_priv *priv, + struct device_node *node, + struct device_node *codec, + struct link_info *li, + char *prefix, char *name) +{ + struct device *dev = simple_priv_to_dev(priv); + struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link); + int ret; + + ret = asoc_simple_parse_daifmt(dev, node, codec, + prefix, &dai_link->dai_fmt); + if (ret < 0) + return 0; + + dai_link->init = asoc_simple_dai_init; + dai_link->ops = &simple_ops; + + return asoc_simple_set_dailink_name(dev, dai_link, name); +} + static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv, struct device_node *np, struct device_node *codec, @@ -121,24 +179,21 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv, struct device *dev = simple_priv_to_dev(priv); struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link); struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link); - struct asoc_simple_dai *dai; - struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0); - struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0); - struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0); struct device_node *top = dev->of_node; struct device_node *node = of_get_parent(np); char *prefix = ""; + char dai_name[64]; int ret; dev_dbg(dev, "link_of DPCM (%pOF)\n", np); - li->link++; - /* For single DAI link & old style of DT node */ if (is_top) prefix = PREFIX; if (li->cpu) { + struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0); + struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0); int is_single_links = 0; /* Codec is dummy */ @@ -147,25 +202,16 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv, dai_link->dynamic = 
1; dai_link->dpcm_merged_format = 1; - dai = simple_props_to_dai_cpu(dai_props, 0); - - ret = asoc_simple_parse_dai(np, cpus, &is_single_links); - if (ret) - goto out_put_node; - - ret = asoc_simple_parse_clk(dev, np, dai, cpus); + ret = simple_parse_node(priv, np, li, prefix, &is_single_links); if (ret < 0) goto out_put_node; - ret = asoc_simple_set_dailink_name(dev, dai_link, - "fe.%s", - cpus->dai_name); - if (ret < 0) - goto out_put_node; + snprintf(dai_name, sizeof(dai_name), "fe.%s", cpus->dai_name); asoc_simple_canonicalize_cpu(cpus, is_single_links); asoc_simple_canonicalize_platform(platforms, cpus); } else { + struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0); struct snd_soc_codec_conf *cconf; /* CPU is dummy */ @@ -174,22 +220,13 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv, dai_link->no_pcm = 1; dai_link->be_hw_params_fixup = asoc_simple_be_hw_params_fixup; - dai = simple_props_to_dai_codec(dai_props, 0); cconf = simple_props_to_codec_conf(dai_props, 0); - ret = asoc_simple_parse_dai(np, codecs, NULL); + ret = simple_parse_node(priv, np, li, prefix, NULL); if (ret < 0) goto out_put_node; - ret = asoc_simple_parse_clk(dev, np, dai, codecs); - if (ret < 0) - goto out_put_node; - - ret = asoc_simple_set_dailink_name(dev, dai_link, - "be.%s", - codecs->dai_name); - if (ret < 0) - goto out_put_node; + snprintf(dai_name, sizeof(dai_name), "be.%s", codecs->dai_name); /* check "prefix" from top node */ snd_soc_of_parse_node_prefix(top, cconf, codecs->of_node, @@ -201,23 +238,14 @@ static int simple_dai_link_of_dpcm(struct asoc_simple_priv *priv, } simple_parse_convert(dev, np, &dai_props->adata); - simple_parse_mclk_fs(top, np, codec, dai_props, prefix); - - ret = asoc_simple_parse_tdm(np, dai); - if (ret) - goto out_put_node; - - ret = asoc_simple_parse_daifmt(dev, node, codec, - prefix, &dai_link->dai_fmt); - if (ret < 0) - goto out_put_node; snd_soc_dai_link_set_capabilities(dai_link); - dai_link->ops = &simple_ops; - dai_link->init = asoc_simple_dai_init; + ret = simple_link_init(priv, node, codec, li, prefix, dai_name); out_put_node: + li->link++; + of_node_put(node); return ret; } @@ -230,23 +258,19 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv, { struct device *dev = simple_priv_to_dev(priv); struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, li->link); - struct simple_dai_props *dai_props = simple_priv_to_props(priv, li->link); - struct asoc_simple_dai *cpu_dai = simple_props_to_dai_cpu(dai_props, 0); - struct asoc_simple_dai *codec_dai = simple_props_to_dai_codec(dai_props, 0); struct snd_soc_dai_link_component *cpus = asoc_link_to_cpu(dai_link, 0); struct snd_soc_dai_link_component *codecs = asoc_link_to_codec(dai_link, 0); struct snd_soc_dai_link_component *platforms = asoc_link_to_platform(dai_link, 0); - struct device_node *top = dev->of_node; struct device_node *cpu = NULL; struct device_node *node = NULL; struct device_node *plat = NULL; + char dai_name[64]; char prop[128]; char *prefix = ""; int ret, single_cpu = 0; cpu = np; node = of_get_parent(np); - li->link++; dev_dbg(dev, "link_of (%pOF)\n", node); @@ -257,18 +281,11 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv, snprintf(prop, sizeof(prop), "%splat", prefix); plat = of_get_child_by_name(node, prop); - ret = asoc_simple_parse_daifmt(dev, node, codec, - prefix, &dai_link->dai_fmt); - if (ret < 0) - goto dai_link_of_err; - - simple_parse_mclk_fs(top, cpu, codec, dai_props, prefix); - - ret = asoc_simple_parse_dai(cpu, cpus, 
&single_cpu); + ret = simple_parse_node(priv, cpu, li, prefix, &single_cpu); if (ret < 0) goto dai_link_of_err; - ret = asoc_simple_parse_dai(codec, codecs, NULL); + ret = simple_parse_node(priv, codec, li, prefix, NULL); if (ret < 0) goto dai_link_of_err; @@ -276,39 +293,20 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv, if (ret < 0) goto dai_link_of_err; - ret = asoc_simple_parse_tdm(cpu, cpu_dai); - if (ret < 0) - goto dai_link_of_err; - - ret = asoc_simple_parse_tdm(codec, codec_dai); - if (ret < 0) - goto dai_link_of_err; - - ret = asoc_simple_parse_clk(dev, cpu, cpu_dai, cpus); - if (ret < 0) - goto dai_link_of_err; - - ret = asoc_simple_parse_clk(dev, codec, codec_dai, codecs); - if (ret < 0) - goto dai_link_of_err; - - ret = asoc_simple_set_dailink_name(dev, dai_link, - "%s-%s", - cpus->dai_name, - codecs->dai_name); - if (ret < 0) - goto dai_link_of_err; - - dai_link->ops = &simple_ops; - dai_link->init = asoc_simple_dai_init; + snprintf(dai_name, sizeof(dai_name), + "%s-%s", cpus->dai_name, codecs->dai_name); asoc_simple_canonicalize_cpu(cpus, single_cpu); asoc_simple_canonicalize_platform(platforms, cpus); + ret = simple_link_init(priv, node, codec, li, prefix, dai_name); + dai_link_of_err: of_node_put(plat); of_node_put(node); + li->link++; + return ret; } diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index df2f5d55e8ff..22dbd9d93c1e 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ b/sound/soc/intel/boards/bytcr_rt5640.c @@ -574,6 +574,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF1 | BYT_RT5640_MCLK_EN), }, + { /* Glavey TM800A550L */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + /* Above strings are too generic, also match on BIOS version */ + DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), @@ -652,6 +663,20 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_MONO_SPEAKER | BYT_RT5640_MCLK_EN), }, + { /* Lenovo Miix 3-830 */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 3-830"), + }, + .driver_data = (void *)(BYT_RT5640_IN1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_MONO_SPEAKER | + BYT_RT5640_DIFF_MIC | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { /* Linx Linx7 tablet */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"), diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c index c62d2612e8f5..28c7497344e3 100644 --- a/sound/soc/qcom/lpass-cpu.c +++ b/sound/soc/qcom/lpass-cpu.c @@ -835,18 +835,8 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev) if (dai_id == LPASS_DP_RX) continue; - drvdata->mi2s_osr_clk[dai_id] = devm_clk_get(dev, + drvdata->mi2s_osr_clk[dai_id] = devm_clk_get_optional(dev, variant->dai_osr_clk_names[i]); - if (IS_ERR(drvdata->mi2s_osr_clk[dai_id])) { - dev_warn(dev, - "%s() error getting optional %s: %ld\n", - __func__, - variant->dai_osr_clk_names[i], - PTR_ERR(drvdata->mi2s_osr_clk[dai_id])); - - drvdata->mi2s_osr_clk[dai_id] = NULL; - } - drvdata->mi2s_bit_clk[dai_id] = devm_clk_get(dev, variant->dai_bit_clk_names[i]); if (IS_ERR(drvdata->mi2s_bit_clk[dai_id])) { diff --git 
a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c index 8d7bab433fb3..c1f9f0f58464 100644 --- a/sound/soc/sof/intel/hda-dai.c +++ b/sound/soc/sof/intel/hda-dai.c @@ -421,11 +421,16 @@ static int ssp_dai_hw_params(struct snd_pcm_substream *substream, struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream); struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, SOF_AUDIO_PCM_DRV_NAME); struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component); + struct sof_ipc_fw_version *v = &sdev->fw_ready.version; struct sof_ipc_dai_config *config; struct snd_sof_dai *sof_dai; struct sof_ipc_reply reply; int ret; + /* DAI_CONFIG IPC during hw_params is not supported in older firmware */ + if (v->abi_version < SOF_ABI_VER(3, 18, 0)) + return 0; + list_for_each_entry(sof_dai, &sdev->dai_list, list) { if (!sof_dai->cpu_dai_name || !sof_dai->dai_config) continue; diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c index c1561237ee24..3aa1cf262402 100644 --- a/sound/soc/stm/stm32_sai_sub.c +++ b/sound/soc/stm/stm32_sai_sub.c @@ -484,10 +484,7 @@ static int stm32_sai_add_mclk_provider(struct stm32_sai_sub_data *sai) dev_err(dev, "mclk register returned %d\n", ret); return ret; } - - sai->sai_mclk = devm_clk_hw_get_clk(dev, hw, NULL); - if (IS_ERR(sai->sai_mclk)) - return PTR_ERR(sai->sai_mclk); + sai->sai_mclk = hw->clk; /* register mclk provider */ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw); diff --git a/sound/usb/format.c b/sound/usb/format.c index e6ff317a6785..2287f8c65315 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -436,7 +436,7 @@ static bool check_valid_altsetting_v2v3(struct snd_usb_audio *chip, int iface, if (snd_BUG_ON(altsetting >= 64 - 8)) return false; - err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR, + err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, UAC2_AS_VAL_ALT_SETTINGS << 8, iface, &raw_data, sizeof(raw_data)); diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index fda66b2dbb01..37ad77524c0b 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -3060,7 +3060,7 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer) case USB_ID(0x1235, 0x8203): /* Focusrite Scarlett 6i6 2nd Gen */ case USB_ID(0x1235, 0x8204): /* Focusrite Scarlett 18i8 2nd Gen */ case USB_ID(0x1235, 0x8201): /* Focusrite Scarlett 18i20 2nd Gen */ - err = snd_scarlett_gen2_controls_create(mixer); + err = snd_scarlett_gen2_init(mixer); break; case USB_ID(0x041e, 0x323b): /* Creative Sound Blaster E1 */ diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c index 560c2ade829d..4caf379d5b99 100644 --- a/sound/usb/mixer_scarlett_gen2.c +++ b/sound/usb/mixer_scarlett_gen2.c @@ -635,7 +635,7 @@ static int scarlett2_usb( /* send a second message to get the response */ err = snd_usb_ctl_msg(mixer->chip->dev, - usb_sndctrlpipe(mixer->chip->dev, 0), + usb_rcvctrlpipe(mixer->chip->dev, 0), SCARLETT2_USB_VENDOR_SPECIFIC_CMD_RESP, USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN, 0, @@ -1997,38 +1997,11 @@ static int scarlett2_mixer_status_create(struct usb_mixer_interface *mixer) return usb_submit_urb(mixer->urb, GFP_KERNEL); } -/* Entry point */ -int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer) +static int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer, + const struct scarlett2_device_info *info) { - const struct 
scarlett2_device_info *info; int err; - /* only use UAC_VERSION_2 */ - if (!mixer->protocol) - return 0; - - switch (mixer->chip->usb_id) { - case USB_ID(0x1235, 0x8203): - info = &s6i6_gen2_info; - break; - case USB_ID(0x1235, 0x8204): - info = &s18i8_gen2_info; - break; - case USB_ID(0x1235, 0x8201): - info = &s18i20_gen2_info; - break; - default: /* device not (yet) supported */ - return -EINVAL; - } - - if (!(mixer->chip->setup & SCARLETT2_ENABLE)) { - usb_audio_err(mixer->chip, - "Focusrite Scarlett Gen 2 Mixer Driver disabled; " - "use options snd_usb_audio device_setup=1 " - "to enable and report any issues to g@b4.vu"); - return 0; - } - /* Initialise private data, routing, sequence number */ err = scarlett2_init_private(mixer, info); if (err < 0) @@ -2073,3 +2046,51 @@ int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer) return 0; } + +int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer) +{ + struct snd_usb_audio *chip = mixer->chip; + const struct scarlett2_device_info *info; + int err; + + /* only use UAC_VERSION_2 */ + if (!mixer->protocol) + return 0; + + switch (chip->usb_id) { + case USB_ID(0x1235, 0x8203): + info = &s6i6_gen2_info; + break; + case USB_ID(0x1235, 0x8204): + info = &s18i8_gen2_info; + break; + case USB_ID(0x1235, 0x8201): + info = &s18i20_gen2_info; + break; + default: /* device not (yet) supported */ + return -EINVAL; + } + + if (!(chip->setup & SCARLETT2_ENABLE)) { + usb_audio_info(chip, + "Focusrite Scarlett Gen 2 Mixer Driver disabled; " + "use options snd_usb_audio vid=0x%04x pid=0x%04x " + "device_setup=1 to enable and report any issues " + "to g@b4.vu", + USB_ID_VENDOR(chip->usb_id), + USB_ID_PRODUCT(chip->usb_id)); + return 0; + } + + usb_audio_info(chip, + "Focusrite Scarlett Gen 2 Mixer Driver enabled pid=0x%04x", + USB_ID_PRODUCT(chip->usb_id)); + + err = snd_scarlett_gen2_controls_create(mixer, info); + if (err < 0) + usb_audio_err(mixer->chip, + "Error initialising Scarlett Mixer Driver: %d", + err); + + return err; +} diff --git a/sound/usb/mixer_scarlett_gen2.h b/sound/usb/mixer_scarlett_gen2.h index 52e1dad77afd..668c6b0cb50a 100644 --- a/sound/usb/mixer_scarlett_gen2.h +++ b/sound/usb/mixer_scarlett_gen2.h @@ -2,6 +2,6 @@ #ifndef __USB_MIXER_SCARLETT_GEN2_H #define __USB_MIXER_SCARLETT_GEN2_H -int snd_scarlett_gen2_controls_create(struct usb_mixer_interface *mixer); +int snd_scarlett_gen2_init(struct usb_mixer_interface *mixer); #endif /* __USB_MIXER_SCARLETT_GEN2_H */ diff --git a/tools/arch/mips/include/uapi/asm/perf_regs.h b/tools/arch/mips/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000000..d0f4ecd616cf --- /dev/null +++ b/tools/arch/mips/include/uapi/asm/perf_regs.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _ASM_MIPS_PERF_REGS_H +#define _ASM_MIPS_PERF_REGS_H + +enum perf_event_mips_regs { + PERF_REG_MIPS_PC, + PERF_REG_MIPS_R1, + PERF_REG_MIPS_R2, + PERF_REG_MIPS_R3, + PERF_REG_MIPS_R4, + PERF_REG_MIPS_R5, + PERF_REG_MIPS_R6, + PERF_REG_MIPS_R7, + PERF_REG_MIPS_R8, + PERF_REG_MIPS_R9, + PERF_REG_MIPS_R10, + PERF_REG_MIPS_R11, + PERF_REG_MIPS_R12, + PERF_REG_MIPS_R13, + PERF_REG_MIPS_R14, + PERF_REG_MIPS_R15, + PERF_REG_MIPS_R16, + PERF_REG_MIPS_R17, + PERF_REG_MIPS_R18, + PERF_REG_MIPS_R19, + PERF_REG_MIPS_R20, + PERF_REG_MIPS_R21, + PERF_REG_MIPS_R22, + PERF_REG_MIPS_R23, + PERF_REG_MIPS_R24, + PERF_REG_MIPS_R25, + PERF_REG_MIPS_R26, + PERF_REG_MIPS_R27, + PERF_REG_MIPS_R28, + PERF_REG_MIPS_R29, + PERF_REG_MIPS_R30, + PERF_REG_MIPS_R31, + 
PERF_REG_MIPS_MAX = PERF_REG_MIPS_R31 + 1, +}; +#endif /* _ASM_MIPS_PERF_REGS_H */ diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 3fd9a7e9d90c..79d9c44d1ad7 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -8,6 +8,7 @@ * Note: you must update KVM_API_VERSION if you change this interface. */ +#include <linux/const.h> #include <linux/types.h> #include <linux/compiler.h> #include <linux/ioctl.h> @@ -1879,8 +1880,8 @@ struct kvm_hyperv_eventfd { * conversion after harvesting an entry. Also, it must not skip any * dirty bits, so that dirty bits are always harvested in sequence. */ -#define KVM_DIRTY_GFN_F_DIRTY BIT(0) -#define KVM_DIRTY_GFN_F_RESET BIT(1) +#define KVM_DIRTY_GFN_F_DIRTY _BITUL(0) +#define KVM_DIRTY_GFN_F_RESET _BITUL(1) #define KVM_DIRTY_GFN_F_MASK 0x3 /* diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 406a9519145e..73df23dd664c 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -90,7 +90,6 @@ endif ifeq ($(ARCH),mips) NO_PERF_REGS := 0 CFLAGS += -I$(OUTPUT)arch/mips/include/generated - CFLAGS += -I../../arch/mips/include/uapi -I../../arch/mips/include/generated/uapi LIBUNWIND_LIBS = -lunwind -lunwind-mips endif diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 3337b5f93336..84803abeb942 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -2714,6 +2714,12 @@ int cmd_record(int argc, const char **argv) rec->no_buildid = true; } + if (rec->opts.record_cgroup && !perf_can_record_cgroup()) { + pr_err("Kernel has no cgroup sampling support.\n"); + err = -EINVAL; + goto out_opts; + } + if (rec->opts.kcore) rec->data.is_dir = true; diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh index dd8ff287e930..c783558332b8 100755 --- a/tools/perf/check-headers.sh +++ b/tools/perf/check-headers.sh @@ -39,6 +39,7 @@ arch/x86/lib/x86-opcode-map.txt arch/x86/tools/gen-insn-attr-x86.awk arch/arm/include/uapi/asm/perf_regs.h arch/arm64/include/uapi/asm/perf_regs.h +arch/mips/include/uapi/asm/perf_regs.h arch/powerpc/include/uapi/asm/perf_regs.h arch/s390/include/uapi/asm/perf_regs.h arch/x86/include/uapi/asm/perf_regs.h diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 20cb91ef06ff..2f6b67189b42 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -443,6 +443,8 @@ int main(int argc, const char **argv) const char *cmd; char sbuf[STRERR_BUFSIZE]; + perf_debug_setup(); + /* libsubcmd init */ exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT); pager_init(PERF_PAGER_ENVIRONMENT); @@ -531,8 +533,6 @@ int main(int argc, const char **argv) */ pthread__block_sigwinch(); - perf_debug_setup(); - while (1) { static int done_help; diff --git a/tools/perf/pmu-events/arch/powerpc/power10/cache.json b/tools/perf/pmu-events/arch/powerpc/power10/cache.json index 616f29098c71..605be14f441c 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/cache.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/cache.json @@ -1,46 +1,56 @@ [ { - "EventCode": "1003C", + "EventCode": "0x1003C", "EventName": "PM_EXEC_STALL_DMISS_L2L3", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from either the local L2 or local L3." 
}, { - "EventCode": "34056", + "EventCode": "0x1E054", + "EventName": "PM_EXEC_STALL_DMISS_L21_L31", + "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip." + }, + { + "EventCode": "0x34054", + "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT", + "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict." + }, + { + "EventCode": "0x34056", "EventName": "PM_EXEC_STALL_LOAD_FINISH", - "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ." + "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction." }, { - "EventCode": "3006C", + "EventCode": "0x3006C", "EventName": "PM_RUN_CYC_SMT2_MODE", "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode." }, { - "EventCode": "300F4", + "EventCode": "0x300F4", "EventName": "PM_RUN_INST_CMPL_CONC", "BriefDescription": "PowerPC instructions completed by this thread when all threads in the core had the run-latch set." }, { - "EventCode": "4C016", + "EventCode": "0x4C016", "EventName": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, with a dispatch conflict." }, { - "EventCode": "4D014", + "EventCode": "0x4D014", "EventName": "PM_EXEC_STALL_LOAD", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a load instruction executing in the Load Store Unit." }, { - "EventCode": "4D016", + "EventCode": "0x4D016", "EventName": "PM_EXEC_STALL_PTESYNC", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a PTESYNC instruction executing in the Load Store Unit." }, { - "EventCode": "401EA", + "EventCode": "0x401EA", "EventName": "PM_THRESH_EXC_128", "BriefDescription": "Threshold counter exceeded a value of 128." }, { - "EventCode": "400F6", + "EventCode": "0x400F6", "EventName": "PM_BR_MPRED_CMPL", "BriefDescription": "A mispredicted branch completed. Includes direction and target." } diff --git a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json index 703cd431ae5b..54acb55e2c8c 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json @@ -1,6 +1,6 @@ [ { - "EventCode": "4016E", + "EventCode": "0x4016E", "EventName": "PM_THRESH_NOT_MET", "BriefDescription": "Threshold counter did not meet threshold." 
} diff --git a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json index eac8609dcc90..558f9530f54e 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json @@ -1,216 +1,246 @@ [ { - "EventCode": "10004", + "EventCode": "0x10004", "EventName": "PM_EXEC_STALL_TRANSLATION", "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss or ERAT miss and waited for it to resolve." }, { - "EventCode": "10010", + "EventCode": "0x10006", + "EventName": "PM_DISP_STALL_HELD_OTHER_CYC", + "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason." + }, + { + "EventCode": "0x10010", "EventName": "PM_PMC4_OVERFLOW", "BriefDescription": "The event selected for PMC4 caused the event counter to overflow." }, { - "EventCode": "10020", + "EventCode": "0x10020", "EventName": "PM_PMC4_REWIND", "BriefDescription": "The speculative event selected for PMC4 rewinds and the counter for PMC4 is not charged." }, { - "EventCode": "10038", + "EventCode": "0x10038", "EventName": "PM_DISP_STALL_TRANSLATION", "BriefDescription": "Cycles when dispatch was stalled for this thread because the MMU was handling a translation miss." }, { - "EventCode": "1003A", + "EventCode": "0x1003A", "EventName": "PM_DISP_STALL_BR_MPRED_IC_L2", "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2 after suffering a branch mispredict." }, { - "EventCode": "1E050", + "EventCode": "0x1D05E", + "EventName": "PM_DISP_STALL_HELD_HALT_CYC", + "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management." + }, + { + "EventCode": "0x1E050", "EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC", "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR." }, { - "EventCode": "1F054", + "EventCode": "0x1F054", "EventName": "PM_DTLB_HIT", "BriefDescription": "The PTE required by the instruction was resident in the TLB (data TLB access). When MMCR1[16]=0 this event counts only demand hits. When MMCR1[16]=1 this event includes demand and prefetch. Applies to both HPT and RPT." }, { - "EventCode": "101E8", + "EventCode": "0x10064", + "EventName": "PM_DISP_STALL_IC_L2", + "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2." + }, + { + "EventCode": "0x101E8", "EventName": "PM_THRESH_EXC_256", "BriefDescription": "Threshold counter exceeded a count of 256." }, { - "EventCode": "101EC", + "EventCode": "0x101EC", "EventName": "PM_THRESH_MET", "BriefDescription": "Threshold exceeded." }, { - "EventCode": "100F2", + "EventCode": "0x100F2", "EventName": "PM_1PLUS_PPC_CMPL", "BriefDescription": "Cycles in which at least one instruction is completed by this thread." }, { - "EventCode": "100F6", + "EventCode": "0x100F6", "EventName": "PM_IERAT_MISS", "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event." }, { - "EventCode": "100F8", + "EventCode": "0x100F8", "EventName": "PM_DISP_STALL_CYC", "BriefDescription": "Cycles the ICT has no itags assigned to this thread (no instructions were dispatched during these cycles)." 
}, { - "EventCode": "20114", + "EventCode": "0x20006", + "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC", + "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue." + }, + { + "EventCode": "0x20114", "EventName": "PM_MRK_L2_RC_DISP", "BriefDescription": "Marked instruction RC dispatched in L2." }, { - "EventCode": "2C010", + "EventCode": "0x2C010", "EventName": "PM_EXEC_STALL_LSU", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Load Store Unit. This does not include simple fixed point instructions." }, { - "EventCode": "2C016", + "EventCode": "0x2C016", "EventName": "PM_DISP_STALL_IERAT_ONLY_MISS", "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction ERAT miss." }, { - "EventCode": "2C01E", + "EventCode": "0x2C01E", "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3", "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3 after suffering a branch mispredict." }, { - "EventCode": "2D01A", + "EventCode": "0x2D01A", "EventName": "PM_DISP_STALL_IC_MISS", "BriefDescription": "Cycles when dispatch was stalled for this thread due to an Icache Miss." }, { - "EventCode": "2D01C", - "EventName": "PM_CMPL_STALL_STCX", - "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing." - }, - { - "EventCode": "2E018", + "EventCode": "0x2E018", "EventName": "PM_DISP_STALL_FETCH", "BriefDescription": "Cycles when dispatch was stalled for this thread because Fetch was being held." }, { - "EventCode": "2E01A", + "EventCode": "0x2E01A", "EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC", "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the XVFC mapper/SRB was full." }, { - "EventCode": "2C142", + "EventCode": "0x2C142", "EventName": "PM_MRK_XFER_FROM_SRC_PMC2", "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads." }, { - "EventCode": "24050", + "EventCode": "0x24050", "EventName": "PM_IOPS_DISP", "BriefDescription": "Internal Operations dispatched. PM_IOPS_DISP / PM_INST_DISP will show the average number of internal operations per PowerPC instruction." }, { - "EventCode": "2405E", + "EventCode": "0x2405E", "EventName": "PM_ISSUE_CANCEL", "BriefDescription": "An instruction issued and the issue was later cancelled. Only one cancel per PowerPC instruction." }, { - "EventCode": "200FA", + "EventCode": "0x200FA", "EventName": "PM_BR_TAKEN_CMPL", "BriefDescription": "Branch Taken instruction completed." }, { - "EventCode": "30012", + "EventCode": "0x30004", + "EventName": "PM_DISP_STALL_FLUSH", + "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC." + }, + { + "EventCode": "0x3000A", + "EventName": "PM_DISP_STALL_ITLB_MISS", + "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss." 
+ }, + { + "EventCode": "0x30012", "EventName": "PM_FLUSH_COMPLETION", "BriefDescription": "The instruction that was next to complete (oldest in the pipeline) did not complete because it suffered a flush." }, { - "EventCode": "30014", + "EventCode": "0x30014", "EventName": "PM_EXEC_STALL_STORE", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store instruction executing in the Load Store Unit." }, { - "EventCode": "30018", + "EventCode": "0x30018", "EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC", "BriefDescription": "Cycles in which the NTC instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together." }, { - "EventCode": "30026", + "EventCode": "0x30026", "EventName": "PM_EXEC_STALL_STORE_MISS", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store whose cache line was not resident in the L1 and was waiting for allocation of the missing line into the L1." }, { - "EventCode": "3012A", + "EventCode": "0x3012A", "EventName": "PM_MRK_L2_RC_DONE", "BriefDescription": "L2 RC machine completed the transaction for the marked instruction." }, { - "EventCode": "3F046", + "EventCode": "0x3F046", "EventName": "PM_ITLB_HIT_1G", "BriefDescription": "Instruction TLB hit (IERAT reload) page size 1G, which implies Radix Page Table translation is in use. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches." }, { - "EventCode": "34058", + "EventCode": "0x34058", "EventName": "PM_DISP_STALL_BR_MPRED_ICMISS", "BriefDescription": "Cycles when dispatch was stalled after a mispredicted branch resulted in an instruction cache miss." }, { - "EventCode": "3D05C", + "EventCode": "0x3D05C", "EventName": "PM_DISP_STALL_HELD_RENAME_CYC", "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC." }, { - "EventCode": "3E052", + "EventCode": "0x3E052", "EventName": "PM_DISP_STALL_IC_L3", "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3." }, { - "EventCode": "3E054", + "EventCode": "0x3E054", "EventName": "PM_LD_MISS_L1", "BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load." }, { - "EventCode": "301EA", + "EventCode": "0x301EA", "EventName": "PM_THRESH_EXC_1024", "BriefDescription": "Threshold counter exceeded a value of 1024." }, { - "EventCode": "300FA", + "EventCode": "0x300FA", "EventName": "PM_INST_FROM_L3MISS", "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss." }, { - "EventCode": "40006", + "EventCode": "0x40006", "EventName": "PM_ISSUE_KILL", "BriefDescription": "Cycles in which an instruction or group of instructions were cancelled after being issued. This event increments once per occurrence, regardless of how many instructions are included in the issue group." }, { - "EventCode": "40116", + "EventCode": "0x40116", "EventName": "PM_MRK_LARX_FIN", "BriefDescription": "Marked load and reserve instruction (LARX) finished. 
LARX and STCX are instructions used to acquire a lock." }, { - "EventCode": "4C010", + "EventCode": "0x4C010", "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3MISS", "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from sources beyond the local L3 after suffering a mispredicted branch." }, { - "EventCode": "4D01E", + "EventCode": "0x4D01E", "EventName": "PM_DISP_STALL_BR_MPRED", "BriefDescription": "Cycles when dispatch was stalled for this thread due to a mispredicted branch." }, { - "EventCode": "4E010", + "EventCode": "0x4E010", "EventName": "PM_DISP_STALL_IC_L3MISS", "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from any source beyond the local L3." }, { - "EventCode": "4E01A", + "EventCode": "0x4E01A", "EventName": "PM_DISP_STALL_HELD_CYC", "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason." }, { - "EventCode": "44056", + "EventCode": "0x4003C", + "EventName": "PM_DISP_STALL_HELD_SYNC_CYC", + "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch." + }, + { + "EventCode": "0x44056", "EventName": "PM_VECTOR_ST_CMPL", "BriefDescription": "Vector store instructions completed." } diff --git a/tools/perf/pmu-events/arch/powerpc/power10/locks.json b/tools/perf/pmu-events/arch/powerpc/power10/locks.json index 016d8de0e14a..b5a0d6521963 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/locks.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/locks.json @@ -1,11 +1,11 @@ [ { - "EventCode": "1E058", + "EventCode": "0x1E058", "EventName": "PM_STCX_FAIL_FIN", "BriefDescription": "Conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock." }, { - "EventCode": "4E050", + "EventCode": "0x4E050", "EventName": "PM_STCX_PASS_FIN", "BriefDescription": "Conditional store instruction (STCX) passed. LARX and STCX are instructions used to acquire a lock." } diff --git a/tools/perf/pmu-events/arch/powerpc/power10/marked.json b/tools/perf/pmu-events/arch/powerpc/power10/marked.json index 93a5a5910648..58b5dfe3a273 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/marked.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/marked.json @@ -1,146 +1,141 @@ [ { - "EventCode": "1002C", + "EventCode": "0x1002C", "EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS", "BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request." }, { - "EventCode": "10132", + "EventCode": "0x10132", "EventName": "PM_MRK_INST_ISSUED", "BriefDescription": "Marked instruction issued. Note that stores always get issued twice, the address gets issued to the LSU and the data gets issued to the VSU. Also, issues can sometimes get killed/cancelled and cause multiple sequential issues for the same instruction." }, { - "EventCode": "101E0", + "EventCode": "0x101E0", "EventName": "PM_MRK_INST_DISP", "BriefDescription": "The thread has dispatched a randomly sampled marked instruction." }, { - "EventCode": "101E2", + "EventCode": "0x101E2", "EventName": "PM_MRK_BR_TAKEN_CMPL", "BriefDescription": "Marked Branch Taken instruction completed." }, { - "EventCode": "20112", + "EventCode": "0x20112", "EventName": "PM_MRK_NTF_FIN", "BriefDescription": "The marked instruction became the oldest in the pipeline before it finished. It excludes instructions that finish at dispatch." 
}, { - "EventCode": "2C01C", + "EventCode": "0x2C01C", "EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a remote chip." }, { - "EventCode": "20138", + "EventCode": "0x20138", "EventName": "PM_MRK_ST_NEST", "BriefDescription": "A store has been sampled/marked and is at the point of execution where it has completed in the core and can no longer be flushed. At this point the store is sent to the L2." }, { - "EventCode": "2013A", + "EventCode": "0x2013A", "EventName": "PM_MRK_BRU_FIN", "BriefDescription": "Marked Branch instruction finished." }, { - "EventCode": "2C144", + "EventCode": "0x2C144", "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC2", "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[15:27]." }, { - "EventCode": "24156", + "EventCode": "0x24156", "EventName": "PM_MRK_STCX_FIN", "BriefDescription": "Marked conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock." }, { - "EventCode": "24158", + "EventCode": "0x24158", "EventName": "PM_MRK_INST", "BriefDescription": "An instruction was marked. Includes both Random Instruction Sampling (RIS) at decode time and Random Event Sampling (RES) at the time the configured event happens." }, { - "EventCode": "2415C", + "EventCode": "0x2415C", "EventName": "PM_MRK_BR_CMPL", "BriefDescription": "A marked branch completed. All branches are included." }, { - "EventCode": "200FD", + "EventCode": "0x200FD", "EventName": "PM_L1_ICACHE_MISS", "BriefDescription": "Demand iCache Miss." }, { - "EventCode": "30130", + "EventCode": "0x30130", "EventName": "PM_MRK_INST_FIN", "BriefDescription": "marked instruction finished. Excludes instructions that finish at dispatch. Note that stores always finish twice since the address gets issued to the LSU and the data gets issued to the VSU." }, { - "EventCode": "34146", + "EventCode": "0x34146", "EventName": "PM_MRK_LD_CMPL", "BriefDescription": "Marked loads completed." }, { - "EventCode": "3E158", + "EventCode": "0x3E158", "EventName": "PM_MRK_STCX_FAIL", "BriefDescription": "Marked conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock." }, { - "EventCode": "3E15A", + "EventCode": "0x3E15A", "EventName": "PM_MRK_ST_FIN", "BriefDescription": "The marked instruction was a store of any kind." }, { - "EventCode": "30068", + "EventCode": "0x30068", "EventName": "PM_L1_ICACHE_RELOADED_PREF", "BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)." }, { - "EventCode": "301E4", + "EventCode": "0x301E4", "EventName": "PM_MRK_BR_MPRED_CMPL", "BriefDescription": "Marked Branch Mispredicted. Includes direction and target." }, { - "EventCode": "300F6", + "EventCode": "0x300F6", "EventName": "PM_LD_DEMAND_MISS_L1", "BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish." }, { - "EventCode": "300FE", + "EventCode": "0x300FE", "EventName": "PM_DATA_FROM_L3MISS", "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss." }, { - "EventCode": "40012", + "EventCode": "0x40012", "EventName": "PM_L1_ICACHE_RELOADED_ALL", "BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch." 
}, { - "EventCode": "40134", + "EventCode": "0x40134", "EventName": "PM_MRK_INST_TIMEO", "BriefDescription": "Marked instruction finish timeout (instruction was lost)." }, { - "EventCode": "4003C", - "EventName": "PM_DISP_STALL_HELD_SYNC_CYC", - "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch." - }, - { - "EventCode": "4505A", + "EventCode": "0x4505A", "EventName": "PM_SP_FLOP_CMPL", "BriefDescription": "Single Precision floating point instructions completed." }, { - "EventCode": "4D058", + "EventCode": "0x4D058", "EventName": "PM_VECTOR_FLOP_CMPL", "BriefDescription": "Vector floating point instructions completed." }, { - "EventCode": "4D05A", + "EventCode": "0x4D05A", "EventName": "PM_NON_MATH_FLOP_CMPL", "BriefDescription": "Non Math instructions completed." }, { - "EventCode": "401E0", + "EventCode": "0x401E0", "EventName": "PM_MRK_INST_CMPL", "BriefDescription": "marked instruction completed." }, { - "EventCode": "400FE", + "EventCode": "0x400FE", "EventName": "PM_DATA_FROM_MEMORY", "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss." } diff --git a/tools/perf/pmu-events/arch/powerpc/power10/memory.json b/tools/perf/pmu-events/arch/powerpc/power10/memory.json index b01141eeebee..843b51f531e9 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/memory.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/memory.json @@ -1,191 +1,186 @@ [ { - "EventCode": "1000A", + "EventCode": "0x1000A", "EventName": "PM_PMC3_REWIND", "BriefDescription": "The speculative event selected for PMC3 rewinds and the counter for PMC3 is not charged." }, { - "EventCode": "1C040", + "EventCode": "0x1C040", "EventName": "PM_XFER_FROM_SRC_PMC1", "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads." }, { - "EventCode": "1C142", + "EventCode": "0x1C142", "EventName": "PM_MRK_XFER_FROM_SRC_PMC1", "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads." }, { - "EventCode": "1C144", + "EventCode": "0x1C144", "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC1", "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[0:12]." }, { - "EventCode": "1C056", + "EventCode": "0x1C056", "EventName": "PM_DERAT_MISS_4K", "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "1C058", + "EventCode": "0x1C058", "EventName": "PM_DTLB_MISS_16G", "BriefDescription": "Data TLB reload (after a miss) page size 16G. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "1C05C", + "EventCode": "0x1C05C", "EventName": "PM_DTLB_MISS_2M", "BriefDescription": "Data TLB reload (after a miss) page size 2M. 
Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "1E056", + "EventCode": "0x1E056", "EventName": "PM_EXEC_STALL_STORE_PIPE", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the store unit. This does not include cycles spent handling store misses, PTESYNC instructions or TLBIE instructions." }, { - "EventCode": "1F150", + "EventCode": "0x1F150", "EventName": "PM_MRK_ST_L2_CYC", "BriefDescription": "Cycles from L2 RC dispatch to L2 RC completion." }, { - "EventCode": "10062", + "EventCode": "0x10062", "EventName": "PM_LD_L3MISS_PEND_CYC", "BriefDescription": "Cycles L3 miss was pending for this thread." }, { - "EventCode": "20010", + "EventCode": "0x20010", "EventName": "PM_PMC1_OVERFLOW", "BriefDescription": "The event selected for PMC1 caused the event counter to overflow." }, { - "EventCode": "2001A", + "EventCode": "0x2001A", "EventName": "PM_ITLB_HIT", "BriefDescription": "The PTE required to translate the instruction address was resident in the TLB (instruction TLB access/IERAT reload). Applies to both HPT and RPT. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches." }, { - "EventCode": "2003E", + "EventCode": "0x2003E", "EventName": "PM_PTESYNC_FIN", "BriefDescription": "Ptesync instruction finished in the store unit. Only one ptesync can finish at a time." }, { - "EventCode": "2C040", + "EventCode": "0x2C040", "EventName": "PM_XFER_FROM_SRC_PMC2", "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads." }, { - "EventCode": "2C054", + "EventCode": "0x2C054", "EventName": "PM_DERAT_MISS_64K", "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "2C056", + "EventCode": "0x2C056", "EventName": "PM_DTLB_MISS_4K", "BriefDescription": "Data TLB reload (after a miss) page size 4K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "2D154", + "EventCode": "0x2D154", "EventName": "PM_MRK_DERAT_MISS_64K", "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "200F6", + "EventCode": "0x200F6", "EventName": "PM_DERAT_MISS", "BriefDescription": "DERAT Reloaded to satisfy a DERAT miss. All page sizes are counted by this event. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "3000A", - "EventName": "PM_DISP_STALL_ITLB_MISS", - "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss." 
- }, - { - "EventCode": "30016", + "EventCode": "0x30016", "EventName": "PM_EXEC_STALL_DERAT_DTLB_MISS", "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss and waited for it resolve." }, { - "EventCode": "3C040", + "EventCode": "0x3C040", "EventName": "PM_XFER_FROM_SRC_PMC3", "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads." }, { - "EventCode": "3C142", + "EventCode": "0x3C142", "EventName": "PM_MRK_XFER_FROM_SRC_PMC3", "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads." }, { - "EventCode": "3C144", + "EventCode": "0x3C144", "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC3", "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[30:42]." }, { - "EventCode": "3C054", + "EventCode": "0x3C054", "EventName": "PM_DERAT_MISS_16M", "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "3C056", + "EventCode": "0x3C056", "EventName": "PM_DTLB_MISS_64K", "BriefDescription": "Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "3C058", + "EventCode": "0x3C058", "EventName": "PM_LARX_FIN", "BriefDescription": "Load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock." }, { - "EventCode": "301E2", + "EventCode": "0x301E2", "EventName": "PM_MRK_ST_CMPL", "BriefDescription": "Marked store completed and sent to nest. Note that this count excludes cache-inhibited stores." }, { - "EventCode": "300FC", + "EventCode": "0x300FC", "EventName": "PM_DTLB_MISS", "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. It includes pages of all sizes for demand and prefetch activity." }, { - "EventCode": "4D02C", + "EventCode": "0x4D02C", "EventName": "PM_PMC1_REWIND", "BriefDescription": "The speculative event selected for PMC1 rewinds and the counter for PMC1 is not charged." }, { - "EventCode": "4003E", + "EventCode": "0x4003E", "EventName": "PM_LD_CMPL", "BriefDescription": "Loads completed." }, { - "EventCode": "4C040", + "EventCode": "0x4C040", "EventName": "PM_XFER_FROM_SRC_PMC4", "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads." }, { - "EventCode": "4C142", + "EventCode": "0x4C142", "EventName": "PM_MRK_XFER_FROM_SRC_PMC4", "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. 
If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads." }, { - "EventCode": "4C144", + "EventCode": "0x4C144", "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC4", "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[45:57]." }, { - "EventCode": "4C056", + "EventCode": "0x4C056", "EventName": "PM_DTLB_MISS_16M", "BriefDescription": "Data TLB reload (after a miss) page size 16M. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "4C05A", + "EventCode": "0x4C05A", "EventName": "PM_DTLB_MISS_1G", "BriefDescription": "Data TLB reload (after a miss) page size 1G. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "4C15E", + "EventCode": "0x4C15E", "EventName": "PM_MRK_DTLB_MISS_64K", "BriefDescription": "Marked Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "4D056", + "EventCode": "0x4D056", "EventName": "PM_NON_FMA_FLOP_CMPL", "BriefDescription": "Non FMA instruction completed." }, { - "EventCode": "40164", + "EventCode": "0x40164", "EventName": "PM_MRK_DERAT_MISS_2M", "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." } diff --git a/tools/perf/pmu-events/arch/powerpc/power10/others.json b/tools/perf/pmu-events/arch/powerpc/power10/others.json index a119e56cbf1c..7d0de1a2860b 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/others.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/others.json @@ -1,296 +1,271 @@ [ { - "EventCode": "10016", + "EventCode": "0x10016", "EventName": "PM_VSU0_ISSUE", "BriefDescription": "VSU instructions issued to VSU pipe 0." }, { - "EventCode": "1001C", + "EventCode": "0x1001C", "EventName": "PM_ULTRAVISOR_INST_CMPL", "BriefDescription": "PowerPC instructions that completed while the thread was in ultravisor state." }, { - "EventCode": "100F0", + "EventCode": "0x100F0", "EventName": "PM_CYC", "BriefDescription": "Processor cycles." }, { - "EventCode": "10134", + "EventCode": "0x10134", "EventName": "PM_MRK_ST_DONE_L2", "BriefDescription": "Marked stores completed in L2 (RC machine done)." }, { - "EventCode": "1505E", + "EventCode": "0x1505E", "EventName": "PM_LD_HIT_L1", "BriefDescription": "Loads that finished without experiencing an L1 miss." }, { - "EventCode": "1D05E", - "EventName": "PM_DISP_STALL_HELD_HALT_CYC", - "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management." - }, - { - "EventCode": "1E054", - "EventName": "PM_EXEC_STALL_DMISS_L21_L31", - "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip." - }, - { - "EventCode": "1E05A", - "EventName": "PM_CMPL_STALL_LWSYNC", - "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete." 
- }, - { - "EventCode": "1F056", + "EventCode": "0x1F056", "EventName": "PM_DISP_SS0_2_INSTR_CYC", "BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions." }, { - "EventCode": "1F15C", + "EventCode": "0x1F15C", "EventName": "PM_MRK_STCX_L2_CYC", "BriefDescription": "Cycles spent in the nest portion of a marked Stcx instruction. It starts counting when the operation starts to drain to the L2 and it stops counting when the instruction retires from the Instruction Completion Table (ICT) in the Instruction Sequencing Unit (ISU)." }, { - "EventCode": "10066", + "EventCode": "0x10066", "EventName": "PM_ADJUNCT_CYC", "BriefDescription": "Cycles in which the thread is in Adjunct state. MSR[S HV PR] bits = 011." }, { - "EventCode": "101E4", + "EventCode": "0x101E4", "EventName": "PM_MRK_L1_ICACHE_MISS", "BriefDescription": "Marked Instruction suffered an icache Miss." }, { - "EventCode": "101EA", + "EventCode": "0x101EA", "EventName": "PM_MRK_L1_RELOAD_VALID", "BriefDescription": "Marked demand reload." }, { - "EventCode": "100F4", + "EventCode": "0x100F4", "EventName": "PM_FLOP_CMPL", "BriefDescription": "Floating Point Operations Completed. Includes any type. It counts once for each 1, 2, 4 or 8 flop instruction. Use PM_1|2|4|8_FLOP_CMPL events to count flops." }, { - "EventCode": "100FA", + "EventCode": "0x100FA", "EventName": "PM_RUN_LATCH_ANY_THREAD_CYC", "BriefDescription": "Cycles when at least one thread has the run latch set." }, { - "EventCode": "100FC", + "EventCode": "0x100FC", "EventName": "PM_LD_REF_L1", "BriefDescription": "All L1 D cache load references counted at finish, gated by reject. In P9 and earlier this event counted only cacheable loads but in P10 both cacheable and non-cacheable loads are included." }, { - "EventCode": "20006", - "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC", - "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue." - }, - { - "EventCode": "2000C", + "EventCode": "0x2000C", "EventName": "PM_RUN_LATCH_ALL_THREADS_CYC", "BriefDescription": "Cycles when the run latch is set for all threads." }, { - "EventCode": "2E010", + "EventCode": "0x2E010", "EventName": "PM_ADJUNCT_INST_CMPL", "BriefDescription": "PowerPC instructions that completed while the thread is in Adjunct state." }, { - "EventCode": "2E014", + "EventCode": "0x2E014", "EventName": "PM_STCX_FIN", "BriefDescription": "Conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock." }, { - "EventCode": "20130", + "EventCode": "0x20130", "EventName": "PM_MRK_INST_DECODED", "BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only." }, { - "EventCode": "20132", + "EventCode": "0x20132", "EventName": "PM_MRK_DFU_ISSUE", "BriefDescription": "The marked instruction was a decimal floating point operation issued to the VSU. Measured at issue time." }, { - "EventCode": "20134", + "EventCode": "0x20134", "EventName": "PM_MRK_FXU_ISSUE", "BriefDescription": "The marked instruction was a fixed point operation issued to the VSU. Measured at issue time." }, { - "EventCode": "2505C", + "EventCode": "0x2505C", "EventName": "PM_VSU_ISSUE", "BriefDescription": "At least one VSU instruction was issued to one of the VSU pipes. Up to 4 per cycle. Includes fixed point operations." 
}, { - "EventCode": "2F054", + "EventCode": "0x2F054", "EventName": "PM_DISP_SS1_2_INSTR_CYC", "BriefDescription": "Cycles in which Superslice 1 dispatches either 1 or 2 instructions." }, { - "EventCode": "2F056", + "EventCode": "0x2F056", "EventName": "PM_DISP_SS1_4_INSTR_CYC", "BriefDescription": "Cycles in which Superslice 1 dispatches either 3 or 4 instructions." }, { - "EventCode": "2006C", + "EventCode": "0x2006C", "EventName": "PM_RUN_CYC_SMT4_MODE", "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT4 mode." }, { - "EventCode": "201E0", + "EventCode": "0x201E0", "EventName": "PM_MRK_DATA_FROM_MEMORY", "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss for a marked load." }, { - "EventCode": "201E4", + "EventCode": "0x201E4", "EventName": "PM_MRK_DATA_FROM_L3MISS", "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load." }, { - "EventCode": "201E8", + "EventCode": "0x201E8", "EventName": "PM_THRESH_EXC_512", "BriefDescription": "Threshold counter exceeded a value of 512." }, { - "EventCode": "200F2", + "EventCode": "0x200F2", "EventName": "PM_INST_DISP", "BriefDescription": "PowerPC instructions dispatched." }, { - "EventCode": "30132", + "EventCode": "0x30132", "EventName": "PM_MRK_VSU_FIN", "BriefDescription": "VSU marked instructions finished. Excludes simple FX instructions issued to the Store Unit." }, { - "EventCode": "30038", + "EventCode": "0x30038", "EventName": "PM_EXEC_STALL_DMISS_LMEM", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCapp cache, or local OpenCapp memory." }, { - "EventCode": "3F04A", + "EventCode": "0x3F04A", "EventName": "PM_LSU_ST5_FIN", "BriefDescription": "LSU Finished an internal operation in ST2 port." }, { - "EventCode": "34054", - "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT", - "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict." - }, - { - "EventCode": "3405A", + "EventCode": "0x3405A", "EventName": "PM_PRIVILEGED_INST_CMPL", "BriefDescription": "PowerPC Instructions that completed while the thread is in Privileged state." }, { - "EventCode": "3F150", + "EventCode": "0x3F150", "EventName": "PM_MRK_ST_DRAIN_CYC", "BriefDescription": "cycles to drain st from core to L2." }, { - "EventCode": "3F054", + "EventCode": "0x3F054", "EventName": "PM_DISP_SS0_4_INSTR_CYC", "BriefDescription": "Cycles in which Superslice 0 dispatches either 3 or 4 instructions." }, { - "EventCode": "3F056", + "EventCode": "0x3F056", "EventName": "PM_DISP_SS0_8_INSTR_CYC", "BriefDescription": "Cycles in which Superslice 0 dispatches either 5, 6, 7 or 8 instructions." }, { - "EventCode": "30162", + "EventCode": "0x30162", "EventName": "PM_MRK_ISSUE_DEPENDENT_LOAD", "BriefDescription": "The marked instruction was dependent on a load. It is eligible for issue kill." }, { - "EventCode": "40114", + "EventCode": "0x40114", "EventName": "PM_MRK_START_PROBE_NOP_DISP", "BriefDescription": "Marked Start probe nop dispatched. Instruction AND R0,R0,R0." }, { - "EventCode": "4001C", + "EventCode": "0x4001C", "EventName": "PM_VSU_FIN", "BriefDescription": "VSU instructions finished." 
}, { - "EventCode": "4C01A", + "EventCode": "0x4C01A", "EventName": "PM_EXEC_STALL_DMISS_OFF_NODE", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a distant chip." }, { - "EventCode": "4D012", + "EventCode": "0x4D012", "EventName": "PM_PMC3_SAVED", "BriefDescription": "The conditions for the speculative event selected for PMC3 are met and PMC3 is charged." }, { - "EventCode": "4D022", + "EventCode": "0x4D022", "EventName": "PM_HYPERVISOR_INST_CMPL", "BriefDescription": "PowerPC instructions that completed while the thread is in hypervisor state." }, { - "EventCode": "4D026", + "EventCode": "0x4D026", "EventName": "PM_ULTRAVISOR_CYC", "BriefDescription": "Cycles when the thread is in Ultravisor state. MSR[S HV PR]=110." }, { - "EventCode": "4D028", + "EventCode": "0x4D028", "EventName": "PM_PRIVILEGED_CYC", "BriefDescription": "Cycles when the thread is in Privileged state. MSR[S HV PR]=x00." }, { - "EventCode": "40030", + "EventCode": "0x40030", "EventName": "PM_INST_FIN", "BriefDescription": "Instructions finished." }, { - "EventCode": "44146", + "EventCode": "0x44146", "EventName": "PM_MRK_STCX_CORE_CYC", "BriefDescription": "Cycles spent in the core portion of a marked Stcx instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2." }, { - "EventCode": "44054", + "EventCode": "0x44054", "EventName": "PM_VECTOR_LD_CMPL", "BriefDescription": "Vector load instructions completed." }, { - "EventCode": "45054", + "EventCode": "0x45054", "EventName": "PM_FMA_CMPL", "BriefDescription": "Two floating point instructions completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only." }, { - "EventCode": "45056", + "EventCode": "0x45056", "EventName": "PM_SCALAR_FLOP_CMPL", "BriefDescription": "Scalar floating point instructions completed." }, { - "EventCode": "4505C", + "EventCode": "0x4505C", "EventName": "PM_MATH_FLOP_CMPL", "BriefDescription": "Math floating point instructions completed." }, { - "EventCode": "4D05E", + "EventCode": "0x4D05E", "EventName": "PM_BR_CMPL", "BriefDescription": "A branch completed. All branches are included." }, { - "EventCode": "4E15E", + "EventCode": "0x4E15E", "EventName": "PM_MRK_INST_FLUSHED", "BriefDescription": "The marked instruction was flushed." }, { - "EventCode": "401E6", + "EventCode": "0x401E6", "EventName": "PM_MRK_INST_FROM_L3MISS", "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked instruction." }, { - "EventCode": "401E8", + "EventCode": "0x401E8", "EventName": "PM_MRK_DATA_FROM_L2MISS", "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss for a marked load." }, { - "EventCode": "400F0", + "EventCode": "0x400F0", "EventName": "PM_LD_DEMAND_MISS_L1_FIN", "BriefDescription": "Load Missed L1, counted at finish time." }, { - "EventCode": "400FA", + "EventCode": "0x400FA", "EventName": "PM_RUN_INST_CMPL", "BriefDescription": "Completed PowerPC instructions gated by the run latch." 
} diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json index b61b5cc157ee..b8aded6045fa 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json @@ -1,296 +1,291 @@ [ { - "EventCode": "100FE", + "EventCode": "0x100FE", "EventName": "PM_INST_CMPL", "BriefDescription": "PowerPC instructions completed." }, { - "EventCode": "10006", - "EventName": "PM_DISP_STALL_HELD_OTHER_CYC", - "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason." - }, - { - "EventCode": "1000C", + "EventCode": "0x1000C", "EventName": "PM_LSU_LD0_FIN", "BriefDescription": "LSU Finished an internal operation in LD0 port." }, { - "EventCode": "1000E", + "EventCode": "0x1000E", "EventName": "PM_MMA_ISSUED", "BriefDescription": "MMA instructions issued." }, { - "EventCode": "10012", + "EventCode": "0x10012", "EventName": "PM_LSU_ST0_FIN", "BriefDescription": "LSU Finished an internal operation in ST0 port." }, { - "EventCode": "10014", + "EventCode": "0x10014", "EventName": "PM_LSU_ST4_FIN", "BriefDescription": "LSU Finished an internal operation in ST4 port." }, { - "EventCode": "10018", + "EventCode": "0x10018", "EventName": "PM_IC_DEMAND_CYC", "BriefDescription": "Cycles in which an instruction reload is pending to satisfy a demand miss." }, { - "EventCode": "10022", + "EventCode": "0x10022", "EventName": "PM_PMC2_SAVED", "BriefDescription": "The conditions for the speculative event selected for PMC2 are met and PMC2 is charged." }, { - "EventCode": "10024", + "EventCode": "0x10024", "EventName": "PM_PMC5_OVERFLOW", "BriefDescription": "The event selected for PMC5 caused the event counter to overflow." }, { - "EventCode": "10058", + "EventCode": "0x10058", "EventName": "PM_EXEC_STALL_FIN_AT_DISP", "BriefDescription": "Cycles in which the oldest instruction in the pipeline finished at dispatch and did not require execution in the LSU, BRU or VSU." }, { - "EventCode": "1005A", + "EventCode": "0x1005A", "EventName": "PM_FLUSH_MPRED", "BriefDescription": "A flush occurred due to a mispredicted branch. Includes target and direction." }, { - "EventCode": "1C05A", + "EventCode": "0x1C05A", "EventName": "PM_DERAT_MISS_2M", "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M. Implies radix translation. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches." }, { - "EventCode": "10064", - "EventName": "PM_DISP_STALL_IC_L2", - "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2." + "EventCode": "0x1E05A", + "EventName": "PM_CMPL_STALL_LWSYNC", + "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete." }, { - "EventCode": "10068", + "EventCode": "0x10068", "EventName": "PM_BR_FIN", "BriefDescription": "A branch instruction finished. Includes predicted/mispredicted/unconditional." }, { - "EventCode": "1006A", + "EventCode": "0x1006A", "EventName": "PM_FX_LSU_FIN", "BriefDescription": "Simple fixed point instruction issued to the store unit. Measured at finish time." }, { - "EventCode": "1006C", + "EventCode": "0x1006C", "EventName": "PM_RUN_CYC_ST_MODE", "BriefDescription": "Cycles when the run latch is set and the core is in ST mode." 
}, { - "EventCode": "20004", + "EventCode": "0x20004", "EventName": "PM_ISSUE_STALL", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was dispatched but not issued yet." }, { - "EventCode": "2000A", + "EventCode": "0x2000A", "EventName": "PM_HYPERVISOR_CYC", "BriefDescription": "Cycles when the thread is in Hypervisor state. MSR[S HV PR]=010." }, { - "EventCode": "2000E", + "EventCode": "0x2000E", "EventName": "PM_LSU_LD1_FIN", "BriefDescription": "LSU Finished an internal operation in LD1 port." }, { - "EventCode": "2C014", + "EventCode": "0x2C014", "EventName": "PM_CMPL_STALL_SPECIAL", "BriefDescription": "Cycles in which the oldest instruction in the pipeline required special handling before completing." }, { - "EventCode": "2C018", + "EventCode": "0x2C018", "EventName": "PM_EXEC_STALL_DMISS_L3MISS", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a source beyond the local L2 or local L3." }, { - "EventCode": "2D010", + "EventCode": "0x2D010", "EventName": "PM_LSU_ST1_FIN", "BriefDescription": "LSU Finished an internal operation in ST1 port." }, { - "EventCode": "2D012", + "EventCode": "0x2D012", "EventName": "PM_VSU1_ISSUE", "BriefDescription": "VSU instructions issued to VSU pipe 1." }, { - "EventCode": "2D018", + "EventCode": "0x2D018", "EventName": "PM_EXEC_STALL_VSU", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the VSU (includes FXU, VSU, CRU)." }, { - "EventCode": "2E01E", + "EventCode": "0x2D01C", + "EventName": "PM_CMPL_STALL_STCX", + "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing." + }, + { + "EventCode": "0x2E01E", "EventName": "PM_EXEC_STALL_NTC_FLUSH", - "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children." + "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous NTF instruction is still completing and the new NTF instruction is stalled at dispatch." }, { - "EventCode": "2013C", + "EventCode": "0x2013C", "EventName": "PM_MRK_FX_LSU_FIN", "BriefDescription": "The marked instruction was simple fixed point that was issued to the store unit. Measured at finish time." }, { - "EventCode": "2405A", + "EventCode": "0x2405A", "EventName": "PM_NTC_FIN", "BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. Note that instructions can finish out of order, therefore not all the instructions that finish have a Next-to-complete status." }, { - "EventCode": "201E2", + "EventCode": "0x201E2", "EventName": "PM_MRK_LD_MISS_L1", "BriefDescription": "Marked DL1 Demand Miss counted at finish time." }, { - "EventCode": "200F4", + "EventCode": "0x200F4", "EventName": "PM_RUN_CYC", "BriefDescription": "Processor cycles gated by the run latch." 
}, { - "EventCode": "30004", - "EventName": "PM_DISP_STALL_FLUSH", - "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC." - }, - { - "EventCode": "30008", + "EventCode": "0x30008", "EventName": "PM_EXEC_STALL", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting to finish in one of the execution units (BRU, LSU, VSU). Only cycles between issue and finish are counted in this category." }, { - "EventCode": "3001A", + "EventCode": "0x3001A", "EventName": "PM_LSU_ST2_FIN", "BriefDescription": "LSU Finished an internal operation in ST2 port." }, { - "EventCode": "30020", + "EventCode": "0x30020", "EventName": "PM_PMC2_REWIND", "BriefDescription": "The speculative event selected for PMC2 rewinds and the counter for PMC2 is not charged." }, { - "EventCode": "30022", + "EventCode": "0x30022", "EventName": "PM_PMC4_SAVED", "BriefDescription": "The conditions for the speculative event selected for PMC4 are met and PMC4 is charged." }, { - "EventCode": "30024", + "EventCode": "0x30024", "EventName": "PM_PMC6_OVERFLOW", "BriefDescription": "The event selected for PMC6 caused the event counter to overflow." }, { - "EventCode": "30028", + "EventCode": "0x30028", "EventName": "PM_CMPL_STALL_MEM_ECC", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC." }, { - "EventCode": "30036", + "EventCode": "0x30036", "EventName": "PM_EXEC_STALL_SIMPLE_FX", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a simple fixed point instruction executing in the Load Store Unit." }, { - "EventCode": "3003A", + "EventCode": "0x3003A", "EventName": "PM_CMPL_STALL_EXCEPTION", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete." }, { - "EventCode": "3F044", + "EventCode": "0x3F044", "EventName": "PM_VSU2_ISSUE", "BriefDescription": "VSU instructions issued to VSU pipe 2." }, { - "EventCode": "30058", + "EventCode": "0x30058", "EventName": "PM_TLBIE_FIN", "BriefDescription": "TLBIE instructions finished in the LSU. Two TLBIEs can finish each cycle. All will be counted." }, { - "EventCode": "3D058", + "EventCode": "0x3D058", "EventName": "PM_SCALAR_FSQRT_FDIV_ISSUE", "BriefDescription": "Scalar versions of four floating point operations: fdiv,fsqrt (xvdivdp, xvdivsp, xvsqrtdp, xvsqrtsp)." }, { - "EventCode": "30066", + "EventCode": "0x30066", "EventName": "PM_LSU_FIN", "BriefDescription": "LSU Finished an internal operation (up to 4 per cycle)." }, { - "EventCode": "40004", + "EventCode": "0x40004", "EventName": "PM_FXU_ISSUE", "BriefDescription": "A fixed point instruction was issued to the VSU." }, { - "EventCode": "40008", + "EventCode": "0x40008", "EventName": "PM_NTC_ALL_FIN", "BriefDescription": "Cycles in which both instructions in the ICT entry pair show as finished. These are the cycles between finish and completion for the oldest pair of instructions in the pipeline." }, { - "EventCode": "40010", + "EventCode": "0x40010", "EventName": "PM_PMC3_OVERFLOW", "BriefDescription": "The event selected for PMC3 caused the event counter to overflow." 
}, { - "EventCode": "4C012", + "EventCode": "0x4C012", "EventName": "PM_EXEC_STALL_DERAT_ONLY_MISS", "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered an ERAT miss and waited for it resolve." }, { - "EventCode": "4C018", + "EventCode": "0x4C018", "EventName": "PM_CMPL_STALL", "BriefDescription": "Cycles in which the oldest instruction in the pipeline cannot complete because the thread was blocked for any reason." }, { - "EventCode": "4C01E", + "EventCode": "0x4C01E", "EventName": "PM_LSU_ST3_FIN", "BriefDescription": "LSU Finished an internal operation in ST3 port." }, { - "EventCode": "4D018", + "EventCode": "0x4D018", "EventName": "PM_EXEC_STALL_BRU", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Branch unit." }, { - "EventCode": "4D01A", + "EventCode": "0x4D01A", "EventName": "PM_CMPL_STALL_HWSYNC", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a hwsync waiting for response from L2 before completing." }, { - "EventCode": "4D01C", + "EventCode": "0x4D01C", "EventName": "PM_EXEC_STALL_TLBIEL", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIEL instruction executing in the Load Store Unit. TLBIEL instructions have lower overhead than TLBIE instructions because they don't get set to the nest." }, { - "EventCode": "4E012", + "EventCode": "0x4E012", "EventName": "PM_EXEC_STALL_UNKNOWN", "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the NTF finishes and completions came too close together." }, { - "EventCode": "4D020", + "EventCode": "0x4D020", "EventName": "PM_VSU3_ISSUE", "BriefDescription": "VSU instruction was issued to VSU pipe 3." }, { - "EventCode": "40132", + "EventCode": "0x40132", "EventName": "PM_MRK_LSU_FIN", "BriefDescription": "LSU marked instruction finish." }, { - "EventCode": "45058", + "EventCode": "0x45058", "EventName": "PM_IC_MISS_CMPL", "BriefDescription": "Non-speculative icache miss, counted at completion." }, { - "EventCode": "4D050", + "EventCode": "0x4D050", "EventName": "PM_VSU_NON_FLOP_CMPL", "BriefDescription": "Non-floating point VSU instructions completed." }, { - "EventCode": "4D052", + "EventCode": "0x4D052", "EventName": "PM_2FLOP_CMPL", "BriefDescription": "Double Precision vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg completed." }, { - "EventCode": "400F2", + "EventCode": "0x400F2", "EventName": "PM_1PLUS_PPC_DISP", "BriefDescription": "Cycles at least one Instr Dispatched." }, { - "EventCode": "400F8", + "EventCode": "0x400F8", "EventName": "PM_FLUSH", "BriefDescription": "Flush (any type)." } diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json index ea122a91ceb0..b5d1bd39cfb2 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json @@ -1,21 +1,21 @@ [ { - "EventCode": "301E8", + "EventCode": "0x301E8", "EventName": "PM_THRESH_EXC_64", "BriefDescription": "Threshold counter exceeded a value of 64." }, { - "EventCode": "45050", + "EventCode": "0x45050", "EventName": "PM_1FLOP_CMPL", "BriefDescription": "One floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)." 
}, { - "EventCode": "45052", + "EventCode": "0x45052", "EventName": "PM_4FLOP_CMPL", "BriefDescription": "Four floating point instructions completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)." }, { - "EventCode": "4D054", + "EventCode": "0x4D054", "EventName": "PM_8FLOP_CMPL", "BriefDescription": "Four Double Precision vector instructions completed." } diff --git a/tools/perf/pmu-events/arch/powerpc/power10/translation.json b/tools/perf/pmu-events/arch/powerpc/power10/translation.json index 5a714e3dd71a..db3766dca07c 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/translation.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/translation.json @@ -1,56 +1,56 @@ [ { - "EventCode": "1F15E", + "EventCode": "0x1F15E", "EventName": "PM_MRK_START_PROBE_NOP_CMPL", "BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed." }, { - "EventCode": "20016", + "EventCode": "0x20016", "EventName": "PM_ST_FIN", "BriefDescription": "Store finish count. Includes speculative activity." }, { - "EventCode": "20018", + "EventCode": "0x20018", "EventName": "PM_ST_FWD", "BriefDescription": "Store forwards that finished." }, { - "EventCode": "2011C", + "EventCode": "0x2011C", "EventName": "PM_MRK_NTF_CYC", "BriefDescription": "Cycles during which the marked instruction is the oldest in the pipeline (NTF or NTC)." }, { - "EventCode": "2E01C", + "EventCode": "0x2E01C", "EventName": "PM_EXEC_STALL_TLBIE", "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIE instruction executing in the Load Store Unit." }, { - "EventCode": "201E6", + "EventCode": "0x201E6", "EventName": "PM_THRESH_EXC_32", "BriefDescription": "Threshold counter exceeded a value of 32." }, { - "EventCode": "200F0", + "EventCode": "0x200F0", "EventName": "PM_ST_CMPL", "BriefDescription": "Stores completed from S2Q (2nd-level store queue). This event includes regular stores, stcx and cache inhibited stores. The following operations are excluded (pteupdate, snoop tlbie complete, store atomics, miso, load atomic payloads, tlbie, tlbsync, slbieg, isync, msgsnd, slbiag, cpabort, copy, tcheck, tend, stsync, dcbst, icbi, dcbf, hwsync, lwsync, ptesync, eieio, msgsync)." }, { - "EventCode": "200FE", + "EventCode": "0x200FE", "EventName": "PM_DATA_FROM_L2MISS", "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss." }, { - "EventCode": "30010", + "EventCode": "0x30010", "EventName": "PM_PMC2_OVERFLOW", "BriefDescription": "The event selected for PMC2 caused the event counter to overflow." }, { - "EventCode": "4D010", + "EventCode": "0x4D010", "EventName": "PM_PMC1_SAVED", "BriefDescription": "The conditions for the speculative event selected for PMC1 are met and PMC1 is charged." }, { - "EventCode": "4D05C", + "EventCode": "0x4D05C", "EventName": "PM_DPP_FLOP_CMPL", "BriefDescription": "Double-Precision or Quad-Precision instructions completed." 
} diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c index 7422b0ea8790..9604446f8360 100644 --- a/tools/perf/pmu-events/jevents.c +++ b/tools/perf/pmu-events/jevents.c @@ -960,7 +960,7 @@ static int get_maxfds(void) struct rlimit rlim; if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) - return min((int)rlim.rlim_max / 2, 512); + return min(rlim.rlim_max / 2, (rlim_t)512); return 512; } diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record index 4a7b8deef3fd..8c10955eff93 100644 --- a/tools/perf/tests/attr/base-record +++ b/tools/perf/tests/attr/base-record @@ -16,7 +16,7 @@ pinned=0 exclusive=0 exclude_user=0 exclude_kernel=0|1 -exclude_hv=0 +exclude_hv=0|1 exclude_idle=0 mmap=1 comm=1 diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c index ddb52f748c8e..5ed674a2f55e 100644 --- a/tools/perf/util/bpf_counter.c +++ b/tools/perf/util/bpf_counter.c @@ -451,10 +451,10 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd, goto out; } - err = -1; link = bpf_program__attach(skel->progs.on_switch); - if (!link) { + if (IS_ERR(link)) { pr_err("Failed to attach leader program\n"); + err = PTR_ERR(link); goto out; } @@ -521,9 +521,10 @@ static int bperf__load(struct evsel *evsel, struct target *target) evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id); if (evsel->bperf_leader_link_fd < 0 && - bperf_reload_leader_program(evsel, attr_map_fd, &entry)) + bperf_reload_leader_program(evsel, attr_map_fd, &entry)) { + err = -1; goto out; - + } /* * The bpf_link holds reference to the leader program, and the * leader program holds reference to the maps. Therefore, if @@ -550,6 +551,7 @@ static int bperf__load(struct evsel *evsel, struct target *target) /* Step 2: load the follower skeleton */ evsel->follower_skel = bperf_follower_bpf__open(); if (!evsel->follower_skel) { + err = -1; pr_err("Failed to open follower skeleton\n"); goto out; } diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index b2f4920e19a6..7d2ba8419b0c 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data) if ((tag == DW_TAG_formal_parameter || tag == DW_TAG_variable) && die_compare_name(die_mem, fvp->name) && - /* Does the DIE have location information or external instance? */ + /* + * Does the DIE have location information or const value + * or external instance? 
+ */ (dwarf_attr(die_mem, DW_AT_external, &attr) || - dwarf_attr(die_mem, DW_AT_location, &attr))) + dwarf_attr(die_mem, DW_AT_location, &attr) || + dwarf_attr(die_mem, DW_AT_const_value, &attr))) return DIE_FIND_CB_END; if (dwarf_haspc(die_mem, fvp->addr)) return DIE_FIND_CB_CONTINUE; diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index 9130f6fad8d5..bc5e4f294e9e 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -144,6 +144,7 @@ static void perf_env__purge_bpf(struct perf_env *env) node = rb_entry(next, struct bpf_prog_info_node, rb_node); next = rb_next(&node->rb_node); rb_erase(&node->rb_node, root); + free(node->info_linear); free(node); } diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 4a3cd1b5bb33..a8d8463f8ee5 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -428,6 +428,7 @@ struct evsel *evsel__clone(struct evsel *orig) evsel->auto_merge_stats = orig->auto_merge_stats; evsel->collect_stat = orig->collect_stat; evsel->weak_group = orig->weak_group; + evsel->use_config_name = orig->use_config_name; if (evsel__copy_config_terms(evsel, orig) < 0) goto out_err; diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 75cf5dbfe208..bdad52a06438 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -83,8 +83,10 @@ struct evsel { bool collect_stat; bool weak_group; bool bpf_counter; + bool use_config_name; int bpf_fd; struct bpf_object *bpf_obj; + struct list_head config_terms; }; /* @@ -116,10 +118,8 @@ struct evsel { bool merged_stat; bool reset_group; bool errored; - bool use_config_name; struct hashmap *per_pkg_mask; struct evsel *leader; - struct list_head config_terms; int err; int cpu_iter; struct { diff --git a/tools/perf/util/perf_api_probe.c b/tools/perf/util/perf_api_probe.c index 829af17a0867..020411682a3c 100644 --- a/tools/perf/util/perf_api_probe.c +++ b/tools/perf/util/perf_api_probe.c @@ -103,6 +103,11 @@ static void perf_probe_build_id(struct evsel *evsel) evsel->core.attr.build_id = 1; } +static void perf_probe_cgroup(struct evsel *evsel) +{ + evsel->core.attr.cgroup = 1; +} + bool perf_can_sample_identifier(void) { return perf_probe_api(perf_probe_sample_identifier); @@ -182,3 +187,8 @@ bool perf_can_record_build_id(void) { return perf_probe_api(perf_probe_build_id); } + +bool perf_can_record_cgroup(void) +{ + return perf_probe_api(perf_probe_cgroup); +} diff --git a/tools/perf/util/perf_api_probe.h b/tools/perf/util/perf_api_probe.h index f12ca55f509a..b104168efb15 100644 --- a/tools/perf/util/perf_api_probe.h +++ b/tools/perf/util/perf_api_probe.h @@ -12,5 +12,6 @@ bool perf_can_record_switch_events(void); bool perf_can_record_text_poke_events(void); bool perf_can_sample_identifier(void); bool perf_can_record_build_id(void); +bool perf_can_record_cgroup(void); #endif // __PERF_API_PROBE_H diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 866f2d514d72..b029c29ce227 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr, immediate_value_is_supported()) { Dwarf_Sword snum; + if (!tvar) + return 0; + dwarf_formsdata(&attr, &snum); ret = asprintf(&tvar->value, "\\%ld", (long)snum); diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index a76fff5e7d83..ca326f98c7a2 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -541,7 +541,7 @@ static void uniquify_event_name(struct 
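perf_can_record_cgroup() above plugs into the existing perf_probe_api() machinery, which presumably probes a feature by opening a throwaway event with the new attribute bit set and checking whether the kernel accepts it. A rough standalone sketch of that idea, assuming a perf_event.h new enough to define perf_event_attr.cgroup; this is not the literal probe perf performs:

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int can_sample_cgroup(void)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_DUMMY;
            attr.exclude_kernel = 1;
            attr.cgroup = 1;                /* the bit being probed */

            /* pid 0, any CPU: count for the calling task. */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 0;               /* older kernels reject the unknown bit */
            close(fd);
            return 1;
    }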
evsel *counter) char *config; int ret = 0; - if (counter->uniquified_name || + if (counter->uniquified_name || counter->use_config_name || !counter->pmu_name || !strncmp(counter->name, counter->pmu_name, strlen(counter->pmu_name))) return; @@ -555,10 +555,8 @@ static void uniquify_event_name(struct evsel *counter) } } else { if (perf_pmu__has_hybrid()) { - if (!counter->use_config_name) { - ret = asprintf(&new_name, "%s/%s/", - counter->pmu_name, counter->name); - } + ret = asprintf(&new_name, "%s/%s/", + counter->pmu_name, counter->name); } else { ret = asprintf(&new_name, "%s [%s]", counter->name, counter->pmu_name); diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 4c56aa837434..a73345730ba9 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -2412,6 +2412,7 @@ int cleanup_sdt_note_list(struct list_head *sdt_notes) list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) { list_del_init(&pos->note_list); + zfree(&pos->args); zfree(&pos->name); zfree(&pos->provider); free(pos); diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index bd83158e0e0b..524c857a049c 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -41,5 +41,6 @@ /kvm_create_max_vcpus /kvm_page_table_test /memslot_modification_stress_test +/memslot_perf_test /set_memory_region_test /steal_time diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index e439d027939d..daaee1888b12 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -33,7 +33,7 @@ ifeq ($(ARCH),s390) UNAME_M := s390x endif -LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c +LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c @@ -74,6 +74,7 @@ TEST_GEN_PROGS_x86_64 += hardware_disable_test TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus TEST_GEN_PROGS_x86_64 += kvm_page_table_test TEST_GEN_PROGS_x86_64 += memslot_modification_stress_test +TEST_GEN_PROGS_x86_64 += memslot_perf_test TEST_GEN_PROGS_x86_64 += set_memory_region_test TEST_GEN_PROGS_x86_64 += steal_time diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c index 5f7a229c3af1..b74704305835 100644 --- a/tools/testing/selftests/kvm/demand_paging_test.c +++ b/tools/testing/selftests/kvm/demand_paging_test.c @@ -9,6 +9,7 @@ #define _GNU_SOURCE /* for pipe2 */ +#include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <time.h> @@ -38,6 +39,7 @@ static int nr_vcpus = 1; static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE; +static size_t demand_paging_size; static char *guest_data_prototype; static void *vcpu_worker(void *data) @@ -71,36 +73,51 @@ static void *vcpu_worker(void *data) return NULL; } -static int handle_uffd_page_request(int uffd, uint64_t addr) +static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t addr) { - pid_t tid; + pid_t tid = syscall(__NR_gettid); struct timespec start; struct timespec ts_diff; - struct uffdio_copy copy; int r; - tid = 
syscall(__NR_gettid); + clock_gettime(CLOCK_MONOTONIC, &start); - copy.src = (uint64_t)guest_data_prototype; - copy.dst = addr; - copy.len = perf_test_args.host_page_size; - copy.mode = 0; + if (uffd_mode == UFFDIO_REGISTER_MODE_MISSING) { + struct uffdio_copy copy; - clock_gettime(CLOCK_MONOTONIC, &start); + copy.src = (uint64_t)guest_data_prototype; + copy.dst = addr; + copy.len = demand_paging_size; + copy.mode = 0; - r = ioctl(uffd, UFFDIO_COPY, &copy); - if (r == -1) { - pr_info("Failed Paged in 0x%lx from thread %d with errno: %d\n", - addr, tid, errno); - return r; + r = ioctl(uffd, UFFDIO_COPY, &copy); + if (r == -1) { + pr_info("Failed UFFDIO_COPY in 0x%lx from thread %d with errno: %d\n", + addr, tid, errno); + return r; + } + } else if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR) { + struct uffdio_continue cont = {0}; + + cont.range.start = addr; + cont.range.len = demand_paging_size; + + r = ioctl(uffd, UFFDIO_CONTINUE, &cont); + if (r == -1) { + pr_info("Failed UFFDIO_CONTINUE in 0x%lx from thread %d with errno: %d\n", + addr, tid, errno); + return r; + } + } else { + TEST_FAIL("Invalid uffd mode %d", uffd_mode); } ts_diff = timespec_elapsed(start); - PER_PAGE_DEBUG("UFFDIO_COPY %d \t%ld ns\n", tid, + PER_PAGE_DEBUG("UFFD page-in %d \t%ld ns\n", tid, timespec_to_ns(ts_diff)); PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n", - perf_test_args.host_page_size, addr, tid); + demand_paging_size, addr, tid); return 0; } @@ -108,6 +125,7 @@ static int handle_uffd_page_request(int uffd, uint64_t addr) bool quit_uffd_thread; struct uffd_handler_args { + int uffd_mode; int uffd; int pipefd; useconds_t delay; @@ -169,7 +187,7 @@ static void *uffd_handler_thread_fn(void *arg) if (r == -1) { if (errno == EAGAIN) continue; - pr_info("Read of uffd gor errno %d", errno); + pr_info("Read of uffd got errno %d\n", errno); return NULL; } @@ -184,7 +202,7 @@ static void *uffd_handler_thread_fn(void *arg) if (delay) usleep(delay); addr = msg.arg.pagefault.address; - r = handle_uffd_page_request(uffd, addr); + r = handle_uffd_page_request(uffd_args->uffd_mode, uffd, addr); if (r < 0) return NULL; pages++; @@ -198,43 +216,53 @@ static void *uffd_handler_thread_fn(void *arg) return NULL; } -static int setup_demand_paging(struct kvm_vm *vm, - pthread_t *uffd_handler_thread, int pipefd, - useconds_t uffd_delay, - struct uffd_handler_args *uffd_args, - void *hva, uint64_t len) +static void setup_demand_paging(struct kvm_vm *vm, + pthread_t *uffd_handler_thread, int pipefd, + int uffd_mode, useconds_t uffd_delay, + struct uffd_handler_args *uffd_args, + void *hva, void *alias, uint64_t len) { + bool is_minor = (uffd_mode == UFFDIO_REGISTER_MODE_MINOR); int uffd; struct uffdio_api uffdio_api; struct uffdio_register uffdio_register; + uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY; - uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); - if (uffd == -1) { - pr_info("uffd creation failed\n"); - return -1; + PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n", + is_minor ? "MINOR" : "MISSING", + is_minor ? "UFFDIO_CONINUE" : "UFFDIO_COPY"); + + /* In order to get minor faults, prefault via the alias. 
*/ + if (is_minor) { + size_t p; + + expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE; + + TEST_ASSERT(alias != NULL, "Alias required for minor faults"); + for (p = 0; p < (len / demand_paging_size); ++p) { + memcpy(alias + (p * demand_paging_size), + guest_data_prototype, demand_paging_size); + } } + uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); + TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno); + uffdio_api.api = UFFD_API; uffdio_api.features = 0; - if (ioctl(uffd, UFFDIO_API, &uffdio_api) == -1) { - pr_info("ioctl uffdio_api failed\n"); - return -1; - } + TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1, + "ioctl UFFDIO_API failed: %" PRIu64, + (uint64_t)uffdio_api.api); uffdio_register.range.start = (uint64_t)hva; uffdio_register.range.len = len; - uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING; - if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) == -1) { - pr_info("ioctl uffdio_register failed\n"); - return -1; - } - - if ((uffdio_register.ioctls & UFFD_API_RANGE_IOCTLS) != - UFFD_API_RANGE_IOCTLS) { - pr_info("unexpected userfaultfd ioctl set\n"); - return -1; - } + uffdio_register.mode = uffd_mode; + TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1, + "ioctl UFFDIO_REGISTER failed"); + TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) == + expected_ioctls, "missing userfaultfd ioctls"); + uffd_args->uffd_mode = uffd_mode; uffd_args->uffd = uffd; uffd_args->pipefd = pipefd; uffd_args->delay = uffd_delay; @@ -243,13 +271,12 @@ static int setup_demand_paging(struct kvm_vm *vm, PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n", hva, hva + len); - - return 0; } struct test_params { - bool use_uffd; + int uffd_mode; useconds_t uffd_delay; + enum vm_mem_backing_src_type src_type; bool partition_vcpu_memory_access; }; @@ -267,14 +294,16 @@ static void run_test(enum vm_guest_mode mode, void *arg) int r; vm = perf_test_create_vm(mode, nr_vcpus, guest_percpu_mem_size, - VM_MEM_SRC_ANONYMOUS); + p->src_type); perf_test_args.wr_fract = 1; - guest_data_prototype = malloc(perf_test_args.host_page_size); + demand_paging_size = get_backing_src_pagesz(p->src_type); + + guest_data_prototype = malloc(demand_paging_size); TEST_ASSERT(guest_data_prototype, "Failed to allocate buffer for guest data pattern"); - memset(guest_data_prototype, 0xAB, perf_test_args.host_page_size); + memset(guest_data_prototype, 0xAB, demand_paging_size); vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads)); TEST_ASSERT(vcpu_threads, "Memory allocation failed"); @@ -282,7 +311,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size, p->partition_vcpu_memory_access); - if (p->use_uffd) { + if (p->uffd_mode) { uffd_handler_threads = malloc(nr_vcpus * sizeof(*uffd_handler_threads)); TEST_ASSERT(uffd_handler_threads, "Memory allocation failed"); @@ -296,6 +325,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { vm_paddr_t vcpu_gpa; void *vcpu_hva; + void *vcpu_alias; uint64_t vcpu_mem_size; @@ -310,8 +340,9 @@ static void run_test(enum vm_guest_mode mode, void *arg) PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n", vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size); - /* Cache the HVA pointer of the region */ + /* Cache the host addresses of the region */ vcpu_hva = addr_gpa2hva(vm, vcpu_gpa); + vcpu_alias = addr_gpa2alias(vm, vcpu_gpa); /* * Set up user fault fd to handle demand paging @@ -321,13 +352,11 @@ static void 
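The reworked setup_demand_paging() above handles both userfaultfd registration modes: MISSING faults are resolved by copying data in with UFFDIO_COPY, while MINOR faults require the page cache to be pre-populated through a second mapping of the same backing file (the "alias") so the handler only has to install page tables with UFFDIO_CONTINUE. A condensed sketch of the MINOR flow outside the test harness, assuming a userfaultfd header and kernel with MINOR-mode support for the chosen backing; error handling and feature negotiation are elided:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* One memfd mapped twice: the "guest" view is registered with userfaultfd,
     * the alias view is used to prefault the shared pages up front. */
    static int minor_mode_setup(size_t len, char **guest_out, char **alias_out)
    {
            int fd = memfd_create("uffd_minor_demo", MFD_CLOEXEC);
            char *guest, *alias;
            int uffd;

            ftruncate(fd, len);
            guest = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            alias = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

            memset(alias, 0xAB, len);               /* prefault via the alias */

            uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);

            struct uffdio_api api = { .api = UFFD_API };
            ioctl(uffd, UFFDIO_API, &api);

            struct uffdio_register uffd_reg = {
                    .range = { .start = (unsigned long)guest, .len = len },
                    .mode  = UFFDIO_REGISTER_MODE_MINOR,
            };
            ioctl(uffd, UFFDIO_REGISTER, &uffd_reg);

            *guest_out = guest;
            *alias_out = alias;
            return uffd;
    }

    /* In the fault-handling thread, a fault at the page-aligned address "addr"
     * is then resolved with UFFDIO_CONTINUE rather than UFFDIO_COPY:
     *
     *      struct uffdio_continue cont = {
     *              .range = { .start = addr, .len = page_size },
     *      };
     *      ioctl(uffd, UFFDIO_CONTINUE, &cont);
     */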
run_test(enum vm_guest_mode mode, void *arg) O_CLOEXEC | O_NONBLOCK); TEST_ASSERT(!r, "Failed to set up pipefd"); - r = setup_demand_paging(vm, - &uffd_handler_threads[vcpu_id], - pipefds[vcpu_id * 2], - p->uffd_delay, &uffd_args[vcpu_id], - vcpu_hva, vcpu_mem_size); - if (r < 0) - exit(-r); + setup_demand_paging(vm, &uffd_handler_threads[vcpu_id], + pipefds[vcpu_id * 2], p->uffd_mode, + p->uffd_delay, &uffd_args[vcpu_id], + vcpu_hva, vcpu_alias, + vcpu_mem_size); } } @@ -355,7 +384,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) pr_info("All vCPU threads joined\n"); - if (p->use_uffd) { + if (p->uffd_mode) { char c; /* Tell the user fault fd handler threads to quit */ @@ -377,7 +406,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) free(guest_data_prototype); free(vcpu_threads); - if (p->use_uffd) { + if (p->uffd_mode) { free(uffd_handler_threads); free(uffd_args); free(pipefds); @@ -387,17 +416,19 @@ static void run_test(enum vm_guest_mode mode, void *arg) static void help(char *name) { puts(""); - printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n" - " [-b memory] [-v vcpus] [-o]\n", name); + printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-d uffd_delay_usec]\n" + " [-b memory] [-t type] [-v vcpus] [-o]\n", name); guest_modes_help(); - printf(" -u: use User Fault FD to handle vCPU page\n" - " faults.\n"); + printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n" + " UFFD registration mode: 'MISSING' or 'MINOR'.\n"); printf(" -d: add a delay in usec to the User Fault\n" " FD handler to simulate demand paging\n" " overheads. Ignored without -u.\n"); printf(" -b: specify the size of the memory region which should be\n" " demand paged by each vCPU. e.g. 10M or 3G.\n" " Default: 1G\n"); + printf(" -t: The type of backing memory to use. 
Default: anonymous\n"); + backing_src_help(); printf(" -v: specify the number of vCPUs to run.\n"); printf(" -o: Overlap guest memory accesses instead of partitioning\n" " them into a separate region of memory for each vCPU.\n"); @@ -409,19 +440,24 @@ int main(int argc, char *argv[]) { int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS); struct test_params p = { + .src_type = VM_MEM_SRC_ANONYMOUS, .partition_vcpu_memory_access = true, }; int opt; guest_modes_append_default(); - while ((opt = getopt(argc, argv, "hm:ud:b:v:o")) != -1) { + while ((opt = getopt(argc, argv, "hm:u:d:b:t:v:o")) != -1) { switch (opt) { case 'm': guest_modes_cmdline(optarg); break; case 'u': - p.use_uffd = true; + if (!strcmp("MISSING", optarg)) + p.uffd_mode = UFFDIO_REGISTER_MODE_MISSING; + else if (!strcmp("MINOR", optarg)) + p.uffd_mode = UFFDIO_REGISTER_MODE_MINOR; + TEST_ASSERT(p.uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'."); break; case 'd': p.uffd_delay = strtoul(optarg, NULL, 0); @@ -430,6 +466,9 @@ int main(int argc, char *argv[]) case 'b': guest_percpu_mem_size = parse_size(optarg); break; + case 't': + p.src_type = parse_backing_src_type(optarg); + break; case 'v': nr_vcpus = atoi(optarg); TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus, @@ -445,6 +484,11 @@ int main(int argc, char *argv[]) } } + if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR && + !backing_src_is_shared(p.src_type)) { + TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -t"); + } + for_each_guest_mode(run_test, &p); return 0; diff --git a/tools/testing/selftests/kvm/hardware_disable_test.c b/tools/testing/selftests/kvm/hardware_disable_test.c index 5aadf84c91c0..4b8db3bce610 100644 --- a/tools/testing/selftests/kvm/hardware_disable_test.c +++ b/tools/testing/selftests/kvm/hardware_disable_test.c @@ -132,6 +132,36 @@ static void run_test(uint32_t run) TEST_ASSERT(false, "%s: [%d] child escaped the ninja\n", __func__, run); } +void wait_for_child_setup(pid_t pid) +{ + /* + * Wait for the child to post to the semaphore, but wake up periodically + * to check if the child exited prematurely. + */ + for (;;) { + const struct timespec wait_period = { .tv_sec = 1 }; + int status; + + if (!sem_timedwait(sem, &wait_period)) + return; + + /* Child is still running, keep waiting. */ + if (pid != waitpid(pid, &status, WNOHANG)) + continue; + + /* + * Child is no longer running, which is not expected. + * + * If it exited with a non-zero status, we explicitly forward + * the child's status in case it exited with KSFT_SKIP. 
+ */ + if (WIFEXITED(status)) + exit(WEXITSTATUS(status)); + else + TEST_ASSERT(false, "Child exited unexpectedly"); + } +} + int main(int argc, char **argv) { uint32_t i; @@ -148,7 +178,7 @@ int main(int argc, char **argv) run_test(i); /* This function always exits */ pr_debug("%s: [%d] waiting semaphore\n", __func__, i); - sem_wait(sem); + wait_for_child_setup(pid); r = (rand() % DELAY_US_MAX) + 1; pr_debug("%s: [%d] waiting %dus\n", __func__, i, r); usleep(r); diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index a8f022794ce3..fcd8e3855111 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -77,6 +77,7 @@ struct vm_guest_mode_params { }; extern const struct vm_guest_mode_params vm_guest_mode_params[]; +int open_kvm_dev_path_or_exit(void); int kvm_check_cap(long cap); int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap); int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id, @@ -146,6 +147,7 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa); void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva); vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva); +void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa); /* * Address Guest Virtual to Guest Physical @@ -302,7 +304,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm); unsigned int vm_get_page_size(struct kvm_vm *vm); unsigned int vm_get_page_shift(struct kvm_vm *vm); -unsigned int vm_get_max_gfn(struct kvm_vm *vm); +uint64_t vm_get_max_gfn(struct kvm_vm *vm); int vm_get_fd(struct kvm_vm *vm); unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size); diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index fade3130eb01..d79be15dd3d2 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -17,6 +17,7 @@ #include <errno.h> #include <unistd.h> #include <fcntl.h> +#include <sys/mman.h> #include "kselftest.h" static inline int _no_printf(const char *format, ...) { return 0; } @@ -84,6 +85,8 @@ enum vm_mem_backing_src_type { VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB, VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB, VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB, + VM_MEM_SRC_SHMEM, + VM_MEM_SRC_SHARED_HUGETLB, NUM_SRC_TYPES, }; @@ -100,4 +103,13 @@ size_t get_backing_src_pagesz(uint32_t i); void backing_src_help(void); enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name); +/* + * Whether or not the given source type is shared memory (as opposed to + * anonymous). + */ +static inline bool backing_src_is_shared(enum vm_mem_backing_src_type t) +{ + return vm_mem_backing_src_alias(t)->flag & MAP_SHARED; +} + #endif /* SELFTEST_KVM_TEST_UTIL_H */ diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index fc83f6c5902d..28e528c19d28 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -32,6 +32,34 @@ static void *align(void *x, size_t size) } /* + * Open KVM_DEV_PATH if available, otherwise exit the entire program. + * + * Input Args: + * flags - The flags to pass when opening KVM_DEV_PATH. + * + * Return: + * The opened file descriptor of /dev/kvm. + */ +static int _open_kvm_dev_path_or_exit(int flags) +{ + int fd; + + fd = open(KVM_DEV_PATH, flags); + if (fd < 0) { + print_skip("%s not available, is KVM loaded? 
(errno: %d)", + KVM_DEV_PATH, errno); + exit(KSFT_SKIP); + } + + return fd; +} + +int open_kvm_dev_path_or_exit(void) +{ + return _open_kvm_dev_path_or_exit(O_RDONLY); +} + +/* * Capability * * Input Args: @@ -52,10 +80,7 @@ int kvm_check_cap(long cap) int ret; int kvm_fd; - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); - + kvm_fd = open_kvm_dev_path_or_exit(); ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap); TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n" " rc: %i errno: %i", ret, errno); @@ -128,9 +153,7 @@ void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) static void vm_open(struct kvm_vm *vm, int perm) { - vm->kvm_fd = open(KVM_DEV_PATH, perm); - if (vm->kvm_fd < 0) - exit(KSFT_SKIP); + vm->kvm_fd = _open_kvm_dev_path_or_exit(perm); if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) { print_skip("immediate_exit not available"); @@ -203,7 +226,9 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) TEST_ASSERT(vm != NULL, "Insufficient Memory"); INIT_LIST_HEAD(&vm->vcpus); - INIT_LIST_HEAD(&vm->userspace_mem_regions); + vm->regions.gpa_tree = RB_ROOT; + vm->regions.hva_tree = RB_ROOT; + hash_init(vm->regions.slot_hash); vm->mode = mode; vm->type = 0; @@ -295,7 +320,7 @@ struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus, */ uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus; uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2; - uint64_t pages = DEFAULT_GUEST_PHY_PAGES + vcpu_pages + extra_pg_pages; + uint64_t pages = DEFAULT_GUEST_PHY_PAGES + extra_mem_pages + vcpu_pages + extra_pg_pages; struct kvm_vm *vm; int i; @@ -355,13 +380,14 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages, */ void kvm_vm_restart(struct kvm_vm *vmp, int perm) { + int ctr; struct userspace_mem_region *region; vm_open(vmp, perm); if (vmp->has_irqchip) vm_create_irqchip(vmp); - list_for_each_entry(region, &vmp->userspace_mem_regions, list) { + hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region); TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n" " rc: %i errno: %i\n" @@ -424,14 +450,21 @@ uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) static struct userspace_mem_region * userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) { - struct userspace_mem_region *region; + struct rb_node *node; - list_for_each_entry(region, &vm->userspace_mem_regions, list) { + for (node = vm->regions.gpa_tree.rb_node; node; ) { + struct userspace_mem_region *region = + container_of(node, struct userspace_mem_region, gpa_node); uint64_t existing_start = region->region.guest_phys_addr; uint64_t existing_end = region->region.guest_phys_addr + region->region.memory_size - 1; if (start <= existing_end && end >= existing_start) return region; + + if (start < existing_start) + node = node->rb_left; + else + node = node->rb_right; } return NULL; @@ -546,11 +579,16 @@ void kvm_vm_release(struct kvm_vm *vmp) } static void __vm_mem_region_delete(struct kvm_vm *vm, - struct userspace_mem_region *region) + struct userspace_mem_region *region, + bool unlink) { int ret; - list_del(®ion->list); + if (unlink) { + rb_erase(®ion->gpa_node, &vm->regions.gpa_tree); + rb_erase(®ion->hva_node, &vm->regions.hva_tree); + hash_del(®ion->slot_node); + } region->region.memory_size = 0; ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region); @@ -569,14 
+607,16 @@ static void __vm_mem_region_delete(struct kvm_vm *vm, */ void kvm_vm_free(struct kvm_vm *vmp) { - struct userspace_mem_region *region, *tmp; + int ctr; + struct hlist_node *node; + struct userspace_mem_region *region; if (vmp == NULL) return; /* Free userspace_mem_regions. */ - list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list) - __vm_mem_region_delete(vmp, region); + hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) + __vm_mem_region_delete(vmp, region, false); /* Free sparsebit arrays. */ sparsebit_free(&vmp->vpages_valid); @@ -658,13 +698,64 @@ int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) return 0; } +static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree, + struct userspace_mem_region *region) +{ + struct rb_node **cur, *parent; + + for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) { + struct userspace_mem_region *cregion; + + cregion = container_of(*cur, typeof(*cregion), gpa_node); + parent = *cur; + if (region->region.guest_phys_addr < + cregion->region.guest_phys_addr) + cur = &(*cur)->rb_left; + else { + TEST_ASSERT(region->region.guest_phys_addr != + cregion->region.guest_phys_addr, + "Duplicate GPA in region tree"); + + cur = &(*cur)->rb_right; + } + } + + rb_link_node(&region->gpa_node, parent, cur); + rb_insert_color(&region->gpa_node, gpa_tree); +} + +static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree, + struct userspace_mem_region *region) +{ + struct rb_node **cur, *parent; + + for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) { + struct userspace_mem_region *cregion; + + cregion = container_of(*cur, typeof(*cregion), hva_node); + parent = *cur; + if (region->host_mem < cregion->host_mem) + cur = &(*cur)->rb_left; + else { + TEST_ASSERT(region->host_mem != + cregion->host_mem, + "Duplicate HVA in region tree"); + + cur = &(*cur)->rb_right; + } + } + + rb_link_node(&region->hva_node, parent, cur); + rb_insert_color(&region->hva_node, hva_tree); +} + /* * VM Userspace Memory Region Add * * Input Args: * vm - Virtual Machine - * backing_src - Storage source for this region. - * NULL to use anonymous memory. + * src_type - Storage source for this region. + * NULL to use anonymous memory. + guest_paddr - Starting guest physical address * slot - KVM region slot * npages - Number of physical pages @@ -722,7 +813,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, (uint64_t) region->region.memory_size); /* Confirm no region with the requested slot already exists. 
*/ - list_for_each_entry(region, &vm->userspace_mem_regions, list) { + hash_for_each_possible(vm->regions.slot_hash, region, slot_node, + slot) { if (region->region.slot != slot) continue; @@ -755,11 +847,30 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, if (alignment > 1) region->mmap_size += alignment; + region->fd = -1; + if (backing_src_is_shared(src_type)) { + int memfd_flags = MFD_CLOEXEC; + + if (src_type == VM_MEM_SRC_SHARED_HUGETLB) + memfd_flags |= MFD_HUGETLB; + + region->fd = memfd_create("kvm_selftest", memfd_flags); + TEST_ASSERT(region->fd != -1, + "memfd_create failed, errno: %i", errno); + + ret = ftruncate(region->fd, region->mmap_size); + TEST_ASSERT(ret == 0, "ftruncate failed, errno: %i", errno); + + ret = fallocate(region->fd, + FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, + region->mmap_size); + TEST_ASSERT(ret == 0, "fallocate failed, errno: %i", errno); + } + region->mmap_start = mmap(NULL, region->mmap_size, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS - | vm_mem_backing_src_alias(src_type)->flag, - -1, 0); + vm_mem_backing_src_alias(src_type)->flag, + region->fd, 0); TEST_ASSERT(region->mmap_start != MAP_FAILED, "test_malloc failed, mmap_start: %p errno: %i", region->mmap_start, errno); @@ -793,8 +904,23 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm, ret, errno, slot, flags, guest_paddr, (uint64_t) region->region.memory_size); - /* Add to linked-list of memory regions. */ - list_add(&region->list, &vm->userspace_mem_regions); + /* Add to quick lookup data structures */ + vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); + vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); + hash_add(vm->regions.slot_hash, &region->slot_node, slot); + + /* If shared memory, create an alias. 
*/ + if (region->fd >= 0) { + region->mmap_alias = mmap(NULL, region->mmap_size, + PROT_READ | PROT_WRITE, + vm_mem_backing_src_alias(src_type)->flag, + region->fd, 0); + TEST_ASSERT(region->mmap_alias != MAP_FAILED, + "mmap of alias failed, errno: %i", errno); + + /* Align host alias address */ + region->host_alias = align(region->mmap_alias, alignment); + } } /* @@ -817,10 +943,10 @@ memslot2region(struct kvm_vm *vm, uint32_t memslot) { struct userspace_mem_region *region; - list_for_each_entry(region, &vm->userspace_mem_regions, list) { + hash_for_each_possible(vm->regions.slot_hash, region, slot_node, + memslot) if (region->region.slot == memslot) return region; - } fprintf(stderr, "No mem region with the requested slot found,\n" " requested slot: %u\n", memslot); @@ -905,7 +1031,7 @@ void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) */ void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) { - __vm_mem_region_delete(vm, memslot2region(vm, slot)); + __vm_mem_region_delete(vm, memslot2region(vm, slot), true); } /* @@ -925,9 +1051,7 @@ static int vcpu_mmap_sz(void) { int dev_fd, ret; - dev_fd = open(KVM_DEV_PATH, O_RDONLY); - if (dev_fd < 0) - exit(KSFT_SKIP); + dev_fd = open_kvm_dev_path_or_exit(); ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); TEST_ASSERT(ret >= sizeof(struct kvm_run), @@ -1099,6 +1223,9 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); virt_pgd_alloc(vm, pgd_memslot); + vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages, + KVM_UTIL_MIN_PFN * vm->page_size, + data_memslot); /* * Find an unused range of virtual page addresses of at least @@ -1108,11 +1235,7 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, /* Map the virtual pages. 
*/ for (vm_vaddr_t vaddr = vaddr_start; pages > 0; - pages--, vaddr += vm->page_size) { - vm_paddr_t paddr; - - paddr = vm_phy_page_alloc(vm, - KVM_UTIL_MIN_PFN * vm->page_size, data_memslot); + pages--, vaddr += vm->page_size, paddr += vm->page_size) { virt_pg_map(vm, vaddr, paddr, pgd_memslot); @@ -1177,16 +1300,14 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) { struct userspace_mem_region *region; - list_for_each_entry(region, &vm->userspace_mem_regions, list) { - if ((gpa >= region->region.guest_phys_addr) - && (gpa <= (region->region.guest_phys_addr - + region->region.memory_size - 1))) - return (void *) ((uintptr_t) region->host_mem - + (gpa - region->region.guest_phys_addr)); + region = userspace_mem_region_find(vm, gpa, gpa); + if (!region) { + TEST_FAIL("No vm physical memory at 0x%lx", gpa); + return NULL; } - TEST_FAIL("No vm physical memory at 0x%lx", gpa); - return NULL; + return (void *)((uintptr_t)region->host_mem + + (gpa - region->region.guest_phys_addr)); } /* @@ -1208,15 +1329,22 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) */ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) { - struct userspace_mem_region *region; + struct rb_node *node; - list_for_each_entry(region, &vm->userspace_mem_regions, list) { - if ((hva >= region->host_mem) - && (hva <= (region->host_mem - + region->region.memory_size - 1))) - return (vm_paddr_t) ((uintptr_t) - region->region.guest_phys_addr - + (hva - (uintptr_t) region->host_mem)); + for (node = vm->regions.hva_tree.rb_node; node; ) { + struct userspace_mem_region *region = + container_of(node, struct userspace_mem_region, hva_node); + + if (hva >= region->host_mem) { + if (hva <= (region->host_mem + + region->region.memory_size - 1)) + return (vm_paddr_t)((uintptr_t) + region->region.guest_phys_addr + + (hva - (uintptr_t)region->host_mem)); + + node = node->rb_right; + } else + node = node->rb_left; } TEST_FAIL("No mapping to a guest physical address, hva: %p", hva); @@ -1224,6 +1352,42 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) } /* + * Address VM physical to Host Virtual *alias*. + * + * Input Args: + * vm - Virtual Machine + * gpa - VM physical address + * + * Output Args: None + * + * Return: + * Equivalent address within the host virtual *alias* area, or NULL + * (without failing the test) if the guest memory is not shared (so + * no alias exists). + * + * When vm_create() and related functions are called with a shared memory + * src_type, we also create a writable, shared alias mapping of the + * underlying guest memory. This allows the host to manipulate guest memory + * without mapping that memory in the guest's address space. And, for + * userfaultfd-based demand paging, we can do so without triggering userfaults. 
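The block comment above motivates addr_gpa2alias(): a test can reach guest memory through the shared alias without faulting the guest-visible mapping. A short, purely hypothetical usage sketch; the GPA and size symbols are placeholders rather than definitions from this patch:

    /* Only meaningful when the VM was created with a shared backing source. */
    void *hva   = addr_gpa2hva(vm, test_gpa);
    void *alias = addr_gpa2alias(vm, test_gpa);

    TEST_ASSERT(alias, "backing source is not shared, no alias mapping exists");

    /* Seed the backing pages without ever touching "hva" itself, e.g. before
     * registering "hva" with userfaultfd in MINOR mode. */
    memset(alias, 0xAB, test_size);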
+ */ +void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) +{ + struct userspace_mem_region *region; + uintptr_t offset; + + region = userspace_mem_region_find(vm, gpa, gpa); + if (!region) + return NULL; + + if (!region->host_alias) + return NULL; + + offset = gpa - region->region.guest_phys_addr; + return (void *) ((uintptr_t) region->host_alias + offset); +} + +/* * VM Create IRQ Chip * * Input Args: @@ -1822,6 +1986,7 @@ int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, */ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) { + int ctr; struct userspace_mem_region *region; struct vcpu *vcpu; @@ -1829,7 +1994,7 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); fprintf(stream, "%*sMem Regions:\n", indent, ""); - list_for_each_entry(region, &vm->userspace_mem_regions, list) { + hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx " "host_virt: %p\n", indent + 2, "", (uint64_t) region->region.guest_phys_addr, @@ -2015,10 +2180,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm) if (vm == NULL) { /* Ensure that the KVM vendor-specific module is loaded. */ - f = fopen(KVM_DEV_PATH, "r"); - TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d", - errno); - fclose(f); + close(open_kvm_dev_path_or_exit()); } f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r"); @@ -2041,7 +2203,7 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm) return vm->page_shift; } -unsigned int vm_get_max_gfn(struct kvm_vm *vm) +uint64_t vm_get_max_gfn(struct kvm_vm *vm) { return vm->max_gfn; } diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h index 91ce1b5d480b..a03febc24ba6 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h +++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h @@ -8,6 +8,9 @@ #ifndef SELFTEST_KVM_UTIL_INTERNAL_H #define SELFTEST_KVM_UTIL_INTERNAL_H +#include "linux/hashtable.h" +#include "linux/rbtree.h" + #include "sparsebit.h" struct userspace_mem_region { @@ -16,9 +19,13 @@ struct userspace_mem_region { int fd; off_t offset; void *host_mem; + void *host_alias; void *mmap_start; + void *mmap_alias; size_t mmap_size; - struct list_head list; + struct rb_node gpa_node; + struct rb_node hva_node; + struct hlist_node slot_node; }; struct vcpu { @@ -31,6 +38,12 @@ struct vcpu { uint32_t dirty_gfns_count; }; +struct userspace_mem_regions { + struct rb_root gpa_tree; + struct rb_root hva_tree; + DECLARE_HASHTABLE(slot_hash, 9); +}; + struct kvm_vm { int mode; unsigned long type; @@ -43,7 +56,7 @@ struct kvm_vm { unsigned int va_bits; uint64_t max_gfn; struct list_head vcpus; - struct list_head userspace_mem_regions; + struct userspace_mem_regions regions; struct sparsebit *vpages_valid; struct sparsebit *vpages_mapped; bool has_irqchip; diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c index 81490b9b4e32..abf381800a59 100644 --- a/tools/testing/selftests/kvm/lib/perf_test_util.c +++ b/tools/testing/selftests/kvm/lib/perf_test_util.c @@ -2,6 +2,7 @@ /* * Copyright (C) 2020, Google LLC. 
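kvm_util_internal.h above swaps the flat region list for two rb-trees plus DECLARE_HASHTABLE(slot_hash, 9), i.e. a 512-bucket table keyed by memslot number. Note that the lookups still compare region->region.slot after hashing, because hash_for_each_possible() only narrows the walk to a single bucket and different keys can share one. A minimal sketch of that API, assuming the tools/include copy of linux/hashtable.h:

    #include <stdint.h>
    #include "linux/hashtable.h"

    struct slot_obj {
            uint32_t slot;
            struct hlist_node node;
    };

    static DEFINE_HASHTABLE(slot_table, 9);                 /* 2^9 = 512 buckets */

    static void slot_add(struct slot_obj *obj)
    {
            hash_add(slot_table, &obj->node, obj->slot);    /* key picks the bucket */
    }

    static struct slot_obj *slot_find(uint32_t slot)
    {
            struct slot_obj *obj;

            hash_for_each_possible(slot_table, obj, node, slot)
                    if (obj->slot == slot)                  /* bucket may hold other keys */
                            return obj;
            return NULL;
    }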
*/ +#include <inttypes.h> #include "kvm_util.h" #include "perf_test_util.h" @@ -80,7 +81,8 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus, */ TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm), "Requested more guest memory than address space allows.\n" - " guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n", + " guest pages: %" PRIx64 " max gfn: %" PRIx64 + " vcpus: %d wss: %" PRIx64 "]\n", guest_num_pages, vm_get_max_gfn(vm), vcpus, vcpu_memory_bytes); diff --git a/tools/testing/selftests/kvm/lib/rbtree.c b/tools/testing/selftests/kvm/lib/rbtree.c new file mode 100644 index 000000000000..a703f0194ea3 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/rbtree.c @@ -0,0 +1 @@ +#include "../../../../lib/rbtree.c" diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c index 63d2bc7d757b..6ad6c8276b2e 100644 --- a/tools/testing/selftests/kvm/lib/test_util.c +++ b/tools/testing/selftests/kvm/lib/test_util.c @@ -168,70 +168,87 @@ size_t get_def_hugetlb_pagesz(void) const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i) { + static const int anon_flags = MAP_PRIVATE | MAP_ANONYMOUS; + static const int anon_huge_flags = anon_flags | MAP_HUGETLB; + static const struct vm_mem_backing_src_alias aliases[] = { [VM_MEM_SRC_ANONYMOUS] = { .name = "anonymous", - .flag = 0, + .flag = anon_flags, }, [VM_MEM_SRC_ANONYMOUS_THP] = { .name = "anonymous_thp", - .flag = 0, + .flag = anon_flags, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB] = { .name = "anonymous_hugetlb", - .flag = MAP_HUGETLB, + .flag = anon_huge_flags, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB] = { .name = "anonymous_hugetlb_16kb", - .flag = MAP_HUGETLB | MAP_HUGE_16KB, + .flag = anon_huge_flags | MAP_HUGE_16KB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB] = { .name = "anonymous_hugetlb_64kb", - .flag = MAP_HUGETLB | MAP_HUGE_64KB, + .flag = anon_huge_flags | MAP_HUGE_64KB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB] = { .name = "anonymous_hugetlb_512kb", - .flag = MAP_HUGETLB | MAP_HUGE_512KB, + .flag = anon_huge_flags | MAP_HUGE_512KB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB] = { .name = "anonymous_hugetlb_1mb", - .flag = MAP_HUGETLB | MAP_HUGE_1MB, + .flag = anon_huge_flags | MAP_HUGE_1MB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB] = { .name = "anonymous_hugetlb_2mb", - .flag = MAP_HUGETLB | MAP_HUGE_2MB, + .flag = anon_huge_flags | MAP_HUGE_2MB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB] = { .name = "anonymous_hugetlb_8mb", - .flag = MAP_HUGETLB | MAP_HUGE_8MB, + .flag = anon_huge_flags | MAP_HUGE_8MB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB] = { .name = "anonymous_hugetlb_16mb", - .flag = MAP_HUGETLB | MAP_HUGE_16MB, + .flag = anon_huge_flags | MAP_HUGE_16MB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB] = { .name = "anonymous_hugetlb_32mb", - .flag = MAP_HUGETLB | MAP_HUGE_32MB, + .flag = anon_huge_flags | MAP_HUGE_32MB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB] = { .name = "anonymous_hugetlb_256mb", - .flag = MAP_HUGETLB | MAP_HUGE_256MB, + .flag = anon_huge_flags | MAP_HUGE_256MB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB] = { .name = "anonymous_hugetlb_512mb", - .flag = MAP_HUGETLB | MAP_HUGE_512MB, + .flag = anon_huge_flags | MAP_HUGE_512MB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB] = { .name = "anonymous_hugetlb_1gb", - .flag = MAP_HUGETLB | MAP_HUGE_1GB, + .flag = anon_huge_flags | MAP_HUGE_1GB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB] = { .name = "anonymous_hugetlb_2gb", - .flag = MAP_HUGETLB | MAP_HUGE_2GB, + .flag = anon_huge_flags | MAP_HUGE_2GB, }, [VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB] = { .name = 
"anonymous_hugetlb_16gb", - .flag = MAP_HUGETLB | MAP_HUGE_16GB, + .flag = anon_huge_flags | MAP_HUGE_16GB, + }, + [VM_MEM_SRC_SHMEM] = { + .name = "shmem", + .flag = MAP_SHARED, + }, + [VM_MEM_SRC_SHARED_HUGETLB] = { + .name = "shared_hugetlb", + /* + * No MAP_HUGETLB, we use MFD_HUGETLB instead. Since + * we're using "file backed" memory, we need to specify + * this when the FD is created, not when the area is + * mapped. + */ + .flag = MAP_SHARED, }, }; _Static_assert(ARRAY_SIZE(aliases) == NUM_SRC_TYPES, @@ -250,10 +267,12 @@ size_t get_backing_src_pagesz(uint32_t i) switch (i) { case VM_MEM_SRC_ANONYMOUS: + case VM_MEM_SRC_SHMEM: return getpagesize(); case VM_MEM_SRC_ANONYMOUS_THP: return get_trans_hugepagesz(); case VM_MEM_SRC_ANONYMOUS_HUGETLB: + case VM_MEM_SRC_SHARED_HUGETLB: return get_def_hugetlb_pagesz(); default: return MAP_HUGE_PAGE_SIZE(flag); diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index a8906e60a108..efe235044421 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -657,9 +657,7 @@ struct kvm_cpuid2 *kvm_get_supported_cpuid(void) return cpuid; cpuid = allocate_kvm_cpuid2(); - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid); TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n", @@ -691,9 +689,7 @@ uint64_t kvm_get_feature_msr(uint64_t msr_index) buffer.header.nmsrs = 1; buffer.entry.index = msr_index; - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); r = ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header); TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n" @@ -986,9 +982,7 @@ struct kvm_msr_list *kvm_get_msr_index_list(void) struct kvm_msr_list *list; int nmsrs, r, kvm_fd; - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); nmsrs = kvm_get_num_msrs_fd(kvm_fd); list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0])); @@ -1312,9 +1306,7 @@ struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void) return cpuid; cpuid = allocate_kvm_cpuid2(); - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid); TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_HV_CPUID failed %d %d\n", diff --git a/tools/testing/selftests/kvm/memslot_modification_stress_test.c b/tools/testing/selftests/kvm/memslot_modification_stress_test.c index 6096bf0a5b34..98351ba0933c 100644 --- a/tools/testing/selftests/kvm/memslot_modification_stress_test.c +++ b/tools/testing/selftests/kvm/memslot_modification_stress_test.c @@ -71,14 +71,22 @@ struct memslot_antagonist_args { }; static void add_remove_memslot(struct kvm_vm *vm, useconds_t delay, - uint64_t nr_modifications, uint64_t gpa) + uint64_t nr_modifications) { + const uint64_t pages = 1; + uint64_t gpa; int i; + /* + * Add the dummy memslot just below the perf_test_util memslot, which is + * at the top of the guest physical address space. 
+ */ + gpa = guest_test_phys_mem - pages * vm_get_page_size(vm); + for (i = 0; i < nr_modifications; i++) { usleep(delay); vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, gpa, - DUMMY_MEMSLOT_INDEX, 1, 0); + DUMMY_MEMSLOT_INDEX, pages, 0); vm_mem_region_delete(vm, DUMMY_MEMSLOT_INDEX); } @@ -120,11 +128,7 @@ static void run_test(enum vm_guest_mode mode, void *arg) pr_info("Started all vCPUs\n"); add_remove_memslot(vm, p->memslot_modification_delay, - p->nr_memslot_modifications, - guest_test_phys_mem + - (guest_percpu_mem_size * nr_vcpus) + - perf_test_args.host_page_size + - perf_test_args.guest_page_size); + p->nr_memslot_modifications); run_vcpus = false; diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c new file mode 100644 index 000000000000..9307f25d8130 --- /dev/null +++ b/tools/testing/selftests/kvm/memslot_perf_test.c @@ -0,0 +1,1037 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * A memslot-related performance benchmark. + * + * Copyright (C) 2021 Oracle and/or its affiliates. + * + * Basic guest setup / host vCPU thread code lifted from set_memory_region_test. + */ +#include <pthread.h> +#include <sched.h> +#include <semaphore.h> +#include <stdatomic.h> +#include <stdbool.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/mman.h> +#include <time.h> +#include <unistd.h> + +#include <linux/compiler.h> + +#include <test_util.h> +#include <kvm_util.h> +#include <processor.h> + +#define VCPU_ID 0 + +#define MEM_SIZE ((512U << 20) + 4096) +#define MEM_SIZE_PAGES (MEM_SIZE / 4096) +#define MEM_GPA 0x10000000UL +#define MEM_AUX_GPA MEM_GPA +#define MEM_SYNC_GPA MEM_AUX_GPA +#define MEM_TEST_GPA (MEM_AUX_GPA + 4096) +#define MEM_TEST_SIZE (MEM_SIZE - 4096) +static_assert(MEM_SIZE % 4096 == 0, "invalid mem size"); +static_assert(MEM_TEST_SIZE % 4096 == 0, "invalid mem test size"); + +/* + * 32 MiB is max size that gets well over 100 iterations on 509 slots. + * Considering that each slot needs to have at least one page up to + * 8194 slots in use can then be tested (although with slightly + * limited resolution). + */ +#define MEM_SIZE_MAP ((32U << 20) + 4096) +#define MEM_SIZE_MAP_PAGES (MEM_SIZE_MAP / 4096) +#define MEM_TEST_MAP_SIZE (MEM_SIZE_MAP - 4096) +#define MEM_TEST_MAP_SIZE_PAGES (MEM_TEST_MAP_SIZE / 4096) +static_assert(MEM_SIZE_MAP % 4096 == 0, "invalid map test region size"); +static_assert(MEM_TEST_MAP_SIZE % 4096 == 0, "invalid map test region size"); +static_assert(MEM_TEST_MAP_SIZE_PAGES % 2 == 0, "invalid map test region size"); +static_assert(MEM_TEST_MAP_SIZE_PAGES > 2, "invalid map test region size"); + +/* + * 128 MiB is min size that fills 32k slots with at least one page in each + * while at the same time gets 100+ iterations in such test + */ +#define MEM_TEST_UNMAP_SIZE (128U << 20) +#define MEM_TEST_UNMAP_SIZE_PAGES (MEM_TEST_UNMAP_SIZE / 4096) +/* 2 MiB chunk size like a typical huge page */ +#define MEM_TEST_UNMAP_CHUNK_PAGES (2U << (20 - 12)) +static_assert(MEM_TEST_UNMAP_SIZE <= MEM_TEST_SIZE, + "invalid unmap test region size"); +static_assert(MEM_TEST_UNMAP_SIZE % 4096 == 0, + "invalid unmap test region size"); +static_assert(MEM_TEST_UNMAP_SIZE_PAGES % + (2 * MEM_TEST_UNMAP_CHUNK_PAGES) == 0, + "invalid unmap test region size"); + +/* + * For the move active test the middle of the test area is placed on + * a memslot boundary: half lies in the memslot being moved, half in + * other memslot(s). 
+ * + * When running this test with 32k memslots (32764, really) each memslot + * contains 4 pages. + * The last one additionally contains the remaining 21 pages of memory, + * for the total size of 25 pages. + * Hence, the maximum size here is 50 pages. + */ +#define MEM_TEST_MOVE_SIZE_PAGES (50) +#define MEM_TEST_MOVE_SIZE (MEM_TEST_MOVE_SIZE_PAGES * 4096) +#define MEM_TEST_MOVE_GPA_DEST (MEM_GPA + MEM_SIZE) +static_assert(MEM_TEST_MOVE_SIZE <= MEM_TEST_SIZE, + "invalid move test region size"); + +#define MEM_TEST_VAL_1 0x1122334455667788 +#define MEM_TEST_VAL_2 0x99AABBCCDDEEFF00 + +struct vm_data { + struct kvm_vm *vm; + pthread_t vcpu_thread; + uint32_t nslots; + uint64_t npages; + uint64_t pages_per_slot; + void **hva_slots; + bool mmio_ok; + uint64_t mmio_gpa_min; + uint64_t mmio_gpa_max; +}; + +struct sync_area { + atomic_bool start_flag; + atomic_bool exit_flag; + atomic_bool sync_flag; + void *move_area_ptr; +}; + +/* + * Technically, we need also for the atomic bool to be address-free, which + * is recommended, but not strictly required, by C11 for lockless + * implementations. + * However, in practice both GCC and Clang fulfill this requirement on + * all KVM-supported platforms. + */ +static_assert(ATOMIC_BOOL_LOCK_FREE == 2, "atomic bool is not lockless"); + +static sem_t vcpu_ready; + +static bool map_unmap_verify; + +static bool verbose; +#define pr_info_v(...) \ + do { \ + if (verbose) \ + pr_info(__VA_ARGS__); \ + } while (0) + +static void *vcpu_worker(void *data) +{ + struct vm_data *vm = data; + struct kvm_run *run; + struct ucall uc; + uint64_t cmd; + + run = vcpu_state(vm->vm, VCPU_ID); + while (1) { + vcpu_run(vm->vm, VCPU_ID); + + if (run->exit_reason == KVM_EXIT_IO) { + cmd = get_ucall(vm->vm, VCPU_ID, &uc); + if (cmd != UCALL_SYNC) + break; + + sem_post(&vcpu_ready); + continue; + } + + if (run->exit_reason != KVM_EXIT_MMIO) + break; + + TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit"); + TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read"); + TEST_ASSERT(run->mmio.len == 8, + "Unexpected exit mmio size = %u", run->mmio.len); + TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min && + run->mmio.phys_addr <= vm->mmio_gpa_max, + "Unexpected exit mmio address = 0x%llx", + run->mmio.phys_addr); + } + + if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT) + TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0], + __FILE__, uc.args[1], uc.args[2]); + + return NULL; +} + +static void wait_for_vcpu(void) +{ + struct timespec ts; + + TEST_ASSERT(!clock_gettime(CLOCK_REALTIME, &ts), + "clock_gettime() failed: %d\n", errno); + + ts.tv_sec += 2; + TEST_ASSERT(!sem_timedwait(&vcpu_ready, &ts), + "sem_timedwait() failed: %d\n", errno); +} + +static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages) +{ + uint64_t gpage, pgoffs; + uint32_t slot, slotoffs; + void *base; + + TEST_ASSERT(gpa >= MEM_GPA, "Too low gpa to translate"); + TEST_ASSERT(gpa < MEM_GPA + data->npages * 4096, + "Too high gpa to translate"); + gpa -= MEM_GPA; + + gpage = gpa / 4096; + pgoffs = gpa % 4096; + slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1); + slotoffs = gpage - (slot * data->pages_per_slot); + + if (rempages) { + uint64_t slotpages; + + if (slot == data->nslots - 1) + slotpages = data->npages - slot * data->pages_per_slot; + else + slotpages = data->pages_per_slot; + + TEST_ASSERT(!pgoffs, + "Asking for remaining pages in slot but gpa not page aligned"); + *rempages = slotpages - slotoffs; + } + + base = data->hva_slots[slot]; + return 
(uint8_t *)base + slotoffs * 4096 + pgoffs; +} + +static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot) +{ + TEST_ASSERT(slot < data->nslots, "Too high slot number"); + + return MEM_GPA + slot * data->pages_per_slot * 4096; +} + +static struct vm_data *alloc_vm(void) +{ + struct vm_data *data; + + data = malloc(sizeof(*data)); + TEST_ASSERT(data, "malloc(vmdata) failed"); + + data->vm = NULL; + data->hva_slots = NULL; + + return data; +} + +static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots, + void *guest_code, uint64_t mempages, + struct timespec *slot_runtime) +{ + uint32_t max_mem_slots; + uint64_t rempages; + uint64_t guest_addr; + uint32_t slot; + struct timespec tstart; + struct sync_area *sync; + + max_mem_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS); + TEST_ASSERT(max_mem_slots > 1, + "KVM_CAP_NR_MEMSLOTS should be greater than 1"); + TEST_ASSERT(nslots > 1 || nslots == -1, + "Slot count cap should be greater than 1"); + if (nslots != -1) + max_mem_slots = min(max_mem_slots, (uint32_t)nslots); + pr_info_v("Allowed number of memory slots: %"PRIu32"\n", max_mem_slots); + + TEST_ASSERT(mempages > 1, + "Can't test without any memory"); + + data->npages = mempages; + data->nslots = max_mem_slots - 1; + data->pages_per_slot = mempages / data->nslots; + if (!data->pages_per_slot) { + *maxslots = mempages + 1; + return false; + } + + rempages = mempages % data->nslots; + data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots); + TEST_ASSERT(data->hva_slots, "malloc() fail"); + + data->vm = vm_create_default(VCPU_ID, 1024, guest_code); + + pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n", + max_mem_slots - 1, data->pages_per_slot, rempages); + + clock_gettime(CLOCK_MONOTONIC, &tstart); + for (slot = 1, guest_addr = MEM_GPA; slot < max_mem_slots; slot++) { + uint64_t npages; + + npages = data->pages_per_slot; + if (slot == max_mem_slots - 1) + npages += rempages; + + vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS, + guest_addr, slot, npages, + 0); + guest_addr += npages * 4096; + } + *slot_runtime = timespec_elapsed(tstart); + + for (slot = 0, guest_addr = MEM_GPA; slot < max_mem_slots - 1; slot++) { + uint64_t npages; + uint64_t gpa; + + npages = data->pages_per_slot; + if (slot == max_mem_slots - 2) + npages += rempages; + + gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, + slot + 1); + TEST_ASSERT(gpa == guest_addr, + "vm_phy_pages_alloc() failed\n"); + + data->hva_slots[slot] = addr_gpa2hva(data->vm, guest_addr); + memset(data->hva_slots[slot], 0, npages * 4096); + + guest_addr += npages * 4096; + } + + virt_map(data->vm, MEM_GPA, MEM_GPA, mempages, 0); + + sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); + atomic_init(&sync->start_flag, false); + atomic_init(&sync->exit_flag, false); + atomic_init(&sync->sync_flag, false); + + data->mmio_ok = false; + + return true; +} + +static void launch_vm(struct vm_data *data) +{ + pr_info_v("Launching the test VM\n"); + + pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data); + + /* Ensure the guest thread is spun up. 
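prepare_vm() above spreads the test pages evenly across the data memslots and lets the last slot absorb the remainder. A tiny worked example of that split, with illustrative numbers only:

    /* Illustrative values, not taken from the test. */
    uint64_t mempages = 131073;                     /* 512 MiB of 4 KiB pages + 1 */
    uint32_t nslots   = 8;

    uint64_t pages_per_slot = mempages / nslots;    /* 16384 */
    uint64_t rempages       = mempages % nslots;    /* 1     */

    /* Slots 0..6 back 16384 pages each; the last slot backs 16384 + 1,
     * mirroring the "npages += rempages" branch in prepare_vm(). */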
*/ + wait_for_vcpu(); +} + +static void free_vm(struct vm_data *data) +{ + kvm_vm_free(data->vm); + free(data->hva_slots); + free(data); +} + +static void wait_guest_exit(struct vm_data *data) +{ + pthread_join(data->vcpu_thread, NULL); +} + +static void let_guest_run(struct sync_area *sync) +{ + atomic_store_explicit(&sync->start_flag, true, memory_order_release); +} + +static void guest_spin_until_start(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + + while (!atomic_load_explicit(&sync->start_flag, memory_order_acquire)) + ; +} + +static void make_guest_exit(struct sync_area *sync) +{ + atomic_store_explicit(&sync->exit_flag, true, memory_order_release); +} + +static bool _guest_should_exit(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + + return atomic_load_explicit(&sync->exit_flag, memory_order_acquire); +} + +#define guest_should_exit() unlikely(_guest_should_exit()) + +/* + * noinline so we can easily see how much time the host spends waiting + * for the guest. + * For the same reason use alarm() instead of polling clock_gettime() + * to implement a wait timeout. + */ +static noinline void host_perform_sync(struct sync_area *sync) +{ + alarm(2); + + atomic_store_explicit(&sync->sync_flag, true, memory_order_release); + while (atomic_load_explicit(&sync->sync_flag, memory_order_acquire)) + ; + + alarm(0); +} + +static bool guest_perform_sync(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + bool expected; + + do { + if (guest_should_exit()) + return false; + + expected = true; + } while (!atomic_compare_exchange_weak_explicit(&sync->sync_flag, + &expected, false, + memory_order_acq_rel, + memory_order_relaxed)); + + return true; +} + +static void guest_code_test_memslot_move(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + uintptr_t base = (typeof(base))READ_ONCE(sync->move_area_ptr); + + GUEST_SYNC(0); + + guest_spin_until_start(); + + while (!guest_should_exit()) { + uintptr_t ptr; + + for (ptr = base; ptr < base + MEM_TEST_MOVE_SIZE; + ptr += 4096) + *(uint64_t *)ptr = MEM_TEST_VAL_1; + + /* + * No host sync here since the MMIO exits are so expensive + * that the host would spend most of its time waiting for + * the guest and so instead of measuring memslot move + * performance we would measure the performance and + * likelihood of MMIO exits + */ + } + + GUEST_DONE(); +} + +static void guest_code_test_memslot_map(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + + GUEST_SYNC(0); + + guest_spin_until_start(); + + while (1) { + uintptr_t ptr; + + for (ptr = MEM_TEST_GPA; + ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; ptr += 4096) + *(uint64_t *)ptr = MEM_TEST_VAL_1; + + if (!guest_perform_sync()) + break; + + for (ptr = MEM_TEST_GPA + MEM_TEST_MAP_SIZE / 2; + ptr < MEM_TEST_GPA + MEM_TEST_MAP_SIZE; ptr += 4096) + *(uint64_t *)ptr = MEM_TEST_VAL_2; + + if (!guest_perform_sync()) + break; + } + + GUEST_DONE(); +} + +static void guest_code_test_memslot_unmap(void) +{ + struct sync_area *sync = (typeof(sync))MEM_SYNC_GPA; + + GUEST_SYNC(0); + + guest_spin_until_start(); + + while (1) { + uintptr_t ptr = MEM_TEST_GPA; + + /* + * We can afford to access (map) just a small number of pages + * per host sync as otherwise the host will spend + * a significant amount of its time waiting for the guest + * (instead of doing unmap operations), so this will + * effectively turn this test into a map performance test. + * + * Just access a single page to be on the safe side. 
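host_perform_sync() and guest_perform_sync() above rendezvous over the single atomic sync_flag in the shared sync area: the host publishes the flag with release semantics and spins until it reads false again, while the guest consumes it with an acquire/release compare-and-swap. The same protocol, distilled into a self-contained two-thread sketch (ordinary pthreads standing in for the host and guest sides):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool sync_flag;

    static void *guest_side(void *arg)
    {
            for (int i = 0; i < 3; i++) {
                    bool expected;

                    /* ... per-iteration guest work would go here ... */
                    do {
                            expected = true;
                    } while (!atomic_compare_exchange_weak_explicit(&sync_flag,
                                            &expected, false,
                                            memory_order_acq_rel,
                                            memory_order_relaxed));
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t thr;

            atomic_init(&sync_flag, false);
            pthread_create(&thr, NULL, guest_side, NULL);

            for (int i = 0; i < 3; i++) {
                    /* host_perform_sync(): publish, then wait for the consumer. */
                    atomic_store_explicit(&sync_flag, true, memory_order_release);
                    while (atomic_load_explicit(&sync_flag, memory_order_acquire))
                            ;
                    printf("sync %d done\n", i);
            }
            pthread_join(thr, NULL);
            return 0;
    }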
+ */ + *(uint64_t *)ptr = MEM_TEST_VAL_1; + + if (!guest_perform_sync()) + break; + + ptr += MEM_TEST_UNMAP_SIZE / 2; + *(uint64_t *)ptr = MEM_TEST_VAL_2; + + if (!guest_perform_sync()) + break; + } + + GUEST_DONE(); +} + +static void guest_code_test_memslot_rw(void) +{ + GUEST_SYNC(0); + + guest_spin_until_start(); + + while (1) { + uintptr_t ptr; + + for (ptr = MEM_TEST_GPA; + ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096) + *(uint64_t *)ptr = MEM_TEST_VAL_1; + + if (!guest_perform_sync()) + break; + + for (ptr = MEM_TEST_GPA + 4096 / 2; + ptr < MEM_TEST_GPA + MEM_TEST_SIZE; ptr += 4096) { + uint64_t val = *(uint64_t *)ptr; + + GUEST_ASSERT_1(val == MEM_TEST_VAL_2, val); + *(uint64_t *)ptr = 0; + } + + if (!guest_perform_sync()) + break; + } + + GUEST_DONE(); +} + +static bool test_memslot_move_prepare(struct vm_data *data, + struct sync_area *sync, + uint64_t *maxslots, bool isactive) +{ + uint64_t movesrcgpa, movetestgpa; + + movesrcgpa = vm_slot2gpa(data, data->nslots - 1); + + if (isactive) { + uint64_t lastpages; + + vm_gpa2hva(data, movesrcgpa, &lastpages); + if (lastpages < MEM_TEST_MOVE_SIZE_PAGES / 2) { + *maxslots = 0; + return false; + } + } + + movetestgpa = movesrcgpa - (MEM_TEST_MOVE_SIZE / (isactive ? 2 : 1)); + sync->move_area_ptr = (void *)movetestgpa; + + if (isactive) { + data->mmio_ok = true; + data->mmio_gpa_min = movesrcgpa; + data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1; + } + + return true; +} + +static bool test_memslot_move_prepare_active(struct vm_data *data, + struct sync_area *sync, + uint64_t *maxslots) +{ + return test_memslot_move_prepare(data, sync, maxslots, true); +} + +static bool test_memslot_move_prepare_inactive(struct vm_data *data, + struct sync_area *sync, + uint64_t *maxslots) +{ + return test_memslot_move_prepare(data, sync, maxslots, false); +} + +static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync) +{ + uint64_t movesrcgpa; + + movesrcgpa = vm_slot2gpa(data, data->nslots - 1); + vm_mem_region_move(data->vm, data->nslots - 1 + 1, + MEM_TEST_MOVE_GPA_DEST); + vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa); +} + +static void test_memslot_do_unmap(struct vm_data *data, + uint64_t offsp, uint64_t count) +{ + uint64_t gpa, ctr; + + for (gpa = MEM_TEST_GPA + offsp * 4096, ctr = 0; ctr < count; ) { + uint64_t npages; + void *hva; + int ret; + + hva = vm_gpa2hva(data, gpa, &npages); + TEST_ASSERT(npages, "Empty memory slot at gptr 0x%"PRIx64, gpa); + npages = min(npages, count - ctr); + ret = madvise(hva, npages * 4096, MADV_DONTNEED); + TEST_ASSERT(!ret, + "madvise(%p, MADV_DONTNEED) on VM memory should not fail for gptr 0x%"PRIx64, + hva, gpa); + ctr += npages; + gpa += npages * 4096; + } + TEST_ASSERT(ctr == count, + "madvise(MADV_DONTNEED) should exactly cover all of the requested area"); +} + +static void test_memslot_map_unmap_check(struct vm_data *data, + uint64_t offsp, uint64_t valexp) +{ + uint64_t gpa; + uint64_t *val; + + if (!map_unmap_verify) + return; + + gpa = MEM_TEST_GPA + offsp * 4096; + val = (typeof(val))vm_gpa2hva(data, gpa, NULL); + TEST_ASSERT(*val == valexp, + "Guest written values should read back correctly before unmap (%"PRIu64" vs %"PRIu64" @ %"PRIx64")", + *val, valexp, gpa); + *val = 0; +} + +static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync) +{ + /* + * Unmap the second half of the test area while guest writes to (maps) + * the first half. 
+ */ + test_memslot_do_unmap(data, MEM_TEST_MAP_SIZE_PAGES / 2, + MEM_TEST_MAP_SIZE_PAGES / 2); + + /* + * Wait for the guest to finish writing the first half of the test + * area, verify the written value on the first and the last page of + * this area and then unmap it. + * Meanwhile, the guest is writing to (mapping) the second half of + * the test area. + */ + host_perform_sync(sync); + test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1); + test_memslot_map_unmap_check(data, + MEM_TEST_MAP_SIZE_PAGES / 2 - 1, + MEM_TEST_VAL_1); + test_memslot_do_unmap(data, 0, MEM_TEST_MAP_SIZE_PAGES / 2); + + + /* + * Wait for the guest to finish writing the second half of the test + * area and verify the written value on the first and the last page + * of this area. + * The area will be unmapped at the beginning of the next loop + * iteration. + * Meanwhile, the guest is writing to (mapping) the first half of + * the test area. + */ + host_perform_sync(sync); + test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES / 2, + MEM_TEST_VAL_2); + test_memslot_map_unmap_check(data, MEM_TEST_MAP_SIZE_PAGES - 1, + MEM_TEST_VAL_2); +} + +static void test_memslot_unmap_loop_common(struct vm_data *data, + struct sync_area *sync, + uint64_t chunk) +{ + uint64_t ctr; + + /* + * Wait for the guest to finish mapping page(s) in the first half + * of the test area, verify the written value and then perform unmap + * of this area. + * Meanwhile, the guest is writing to (mapping) page(s) in the second + * half of the test area. + */ + host_perform_sync(sync); + test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1); + for (ctr = 0; ctr < MEM_TEST_UNMAP_SIZE_PAGES / 2; ctr += chunk) + test_memslot_do_unmap(data, ctr, chunk); + + /* Likewise, but for the opposite host / guest areas */ + host_perform_sync(sync); + test_memslot_map_unmap_check(data, MEM_TEST_UNMAP_SIZE_PAGES / 2, + MEM_TEST_VAL_2); + for (ctr = MEM_TEST_UNMAP_SIZE_PAGES / 2; + ctr < MEM_TEST_UNMAP_SIZE_PAGES; ctr += chunk) + test_memslot_do_unmap(data, ctr, chunk); +} + +static void test_memslot_unmap_loop(struct vm_data *data, + struct sync_area *sync) +{ + test_memslot_unmap_loop_common(data, sync, 1); +} + +static void test_memslot_unmap_loop_chunked(struct vm_data *data, + struct sync_area *sync) +{ + test_memslot_unmap_loop_common(data, sync, MEM_TEST_UNMAP_CHUNK_PAGES); +} + +static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync) +{ + uint64_t gptr; + + for (gptr = MEM_TEST_GPA + 4096 / 2; + gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096) + *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2; + + host_perform_sync(sync); + + for (gptr = MEM_TEST_GPA; + gptr < MEM_TEST_GPA + MEM_TEST_SIZE; gptr += 4096) { + uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL); + uint64_t val = *vptr; + + TEST_ASSERT(val == MEM_TEST_VAL_1, + "Guest written values should read back correctly (is %"PRIu64" @ %"PRIx64")", + val, gptr); + *vptr = 0; + } + + host_perform_sync(sync); +} + +struct test_data { + const char *name; + uint64_t mem_size; + void (*guest_code)(void); + bool (*prepare)(struct vm_data *data, struct sync_area *sync, + uint64_t *maxslots); + void (*loop)(struct vm_data *data, struct sync_area *sync); +}; + +static bool test_execute(int nslots, uint64_t *maxslots, + unsigned int maxtime, + const struct test_data *tdata, + uint64_t *nloops, + struct timespec *slot_runtime, + struct timespec *guest_runtime) +{ + uint64_t mem_size = tdata->mem_size ? 
: MEM_SIZE_PAGES; + struct vm_data *data; + struct sync_area *sync; + struct timespec tstart; + bool ret = true; + + data = alloc_vm(); + if (!prepare_vm(data, nslots, maxslots, tdata->guest_code, + mem_size, slot_runtime)) { + ret = false; + goto exit_free; + } + + sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL); + + if (tdata->prepare && + !tdata->prepare(data, sync, maxslots)) { + ret = false; + goto exit_free; + } + + launch_vm(data); + + clock_gettime(CLOCK_MONOTONIC, &tstart); + let_guest_run(sync); + + while (1) { + *guest_runtime = timespec_elapsed(tstart); + if (guest_runtime->tv_sec >= maxtime) + break; + + tdata->loop(data, sync); + + (*nloops)++; + } + + make_guest_exit(sync); + wait_guest_exit(data); + +exit_free: + free_vm(data); + + return ret; +} + +static const struct test_data tests[] = { + { + .name = "map", + .mem_size = MEM_SIZE_MAP_PAGES, + .guest_code = guest_code_test_memslot_map, + .loop = test_memslot_map_loop, + }, + { + .name = "unmap", + .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1, + .guest_code = guest_code_test_memslot_unmap, + .loop = test_memslot_unmap_loop, + }, + { + .name = "unmap chunked", + .mem_size = MEM_TEST_UNMAP_SIZE_PAGES + 1, + .guest_code = guest_code_test_memslot_unmap, + .loop = test_memslot_unmap_loop_chunked, + }, + { + .name = "move active area", + .guest_code = guest_code_test_memslot_move, + .prepare = test_memslot_move_prepare_active, + .loop = test_memslot_move_loop, + }, + { + .name = "move inactive area", + .guest_code = guest_code_test_memslot_move, + .prepare = test_memslot_move_prepare_inactive, + .loop = test_memslot_move_loop, + }, + { + .name = "RW", + .guest_code = guest_code_test_memslot_rw, + .loop = test_memslot_rw_loop + }, +}; + +#define NTESTS ARRAY_SIZE(tests) + +struct test_args { + int tfirst; + int tlast; + int nslots; + int seconds; + int runs; +}; + +static void help(char *name, struct test_args *targs) +{ + int ctr; + + pr_info("usage: %s [-h] [-v] [-d] [-s slots] [-f first_test] [-e last_test] [-l test_length] [-r run_count]\n", + name); + pr_info(" -h: print this help screen.\n"); + pr_info(" -v: enable verbose mode (not for benchmarking).\n"); + pr_info(" -d: enable extra debug checks.\n"); + pr_info(" -s: specify memslot count cap (-1 means no cap; currently: %i)\n", + targs->nslots); + pr_info(" -f: specify the first test to run (currently: %i; max %zu)\n", + targs->tfirst, NTESTS - 1); + pr_info(" -e: specify the last test to run (currently: %i; max %zu)\n", + targs->tlast, NTESTS - 1); + pr_info(" -l: specify the test length in seconds (currently: %i)\n", + targs->seconds); + pr_info(" -r: specify the number of runs per test (currently: %i)\n", + targs->runs); + + pr_info("\nAvailable tests:\n"); + for (ctr = 0; ctr < NTESTS; ctr++) + pr_info("%d: %s\n", ctr, tests[ctr].name); +} + +static bool parse_args(int argc, char *argv[], + struct test_args *targs) +{ + int opt; + + while ((opt = getopt(argc, argv, "hvds:f:e:l:r:")) != -1) { + switch (opt) { + case 'h': + default: + help(argv[0], targs); + return false; + case 'v': + verbose = true; + break; + case 'd': + map_unmap_verify = true; + break; + case 's': + targs->nslots = atoi(optarg); + if (targs->nslots <= 0 && targs->nslots != -1) { + pr_info("Slot count cap has to be positive or -1 for no cap\n"); + return false; + } + break; + case 'f': + targs->tfirst = atoi(optarg); + if (targs->tfirst < 0) { + pr_info("First test to run has to be non-negative\n"); + return false; + } + break; + case 'e': + targs->tlast = atoi(optarg); + if (targs->tlast < 
0 || targs->tlast >= NTESTS) { + pr_info("Last test to run has to be non-negative and less than %zu\n", + NTESTS); + return false; + } + break; + case 'l': + targs->seconds = atoi(optarg); + if (targs->seconds < 0) { + pr_info("Test length in seconds has to be non-negative\n"); + return false; + } + break; + case 'r': + targs->runs = atoi(optarg); + if (targs->runs <= 0) { + pr_info("Runs per test has to be positive\n"); + return false; + } + break; + } + } + + if (optind < argc) { + help(argv[0], targs); + return false; + } + + if (targs->tfirst > targs->tlast) { + pr_info("First test to run cannot be greater than the last test to run\n"); + return false; + } + + return true; +} + +struct test_result { + struct timespec slot_runtime, guest_runtime, iter_runtime; + int64_t slottimens, runtimens; + uint64_t nloops; +}; + +static bool test_loop(const struct test_data *data, + const struct test_args *targs, + struct test_result *rbestslottime, + struct test_result *rbestruntime) +{ + uint64_t maxslots; + struct test_result result; + + result.nloops = 0; + if (!test_execute(targs->nslots, &maxslots, targs->seconds, data, + &result.nloops, + &result.slot_runtime, &result.guest_runtime)) { + if (maxslots) + pr_info("Memslot count too high for this test, decrease the cap (max is %"PRIu64")\n", + maxslots); + else + pr_info("Memslot count may be too high for this test, try adjusting the cap\n"); + + return false; + } + + pr_info("Test took %ld.%.9lds for slot setup + %ld.%.9lds all iterations\n", + result.slot_runtime.tv_sec, result.slot_runtime.tv_nsec, + result.guest_runtime.tv_sec, result.guest_runtime.tv_nsec); + if (!result.nloops) { + pr_info("No full loops done - too short test time or system too loaded?\n"); + return true; + } + + result.iter_runtime = timespec_div(result.guest_runtime, + result.nloops); + pr_info("Done %"PRIu64" iterations, avg %ld.%.9lds each\n", + result.nloops, + result.iter_runtime.tv_sec, + result.iter_runtime.tv_nsec); + result.slottimens = timespec_to_ns(result.slot_runtime); + result.runtimens = timespec_to_ns(result.iter_runtime); + + /* + * Only rank the slot setup time for tests using the whole test memory + * area so they are comparable + */ + if (!data->mem_size && + (!rbestslottime->slottimens || + result.slottimens < rbestslottime->slottimens)) + *rbestslottime = result; + if (!rbestruntime->runtimens || + result.runtimens < rbestruntime->runtimens) + *rbestruntime = result; + + return true; +} + +int main(int argc, char *argv[]) +{ + struct test_args targs = { + .tfirst = 0, + .tlast = NTESTS - 1, + .nslots = -1, + .seconds = 5, + .runs = 1, + }; + struct test_result rbestslottime; + int tctr; + + /* Tell stdout not to buffer its content */ + setbuf(stdout, NULL); + + if (!parse_args(argc, argv, &targs)) + return -1; + + rbestslottime.slottimens = 0; + for (tctr = targs.tfirst; tctr <= targs.tlast; tctr++) { + const struct test_data *data = &tests[tctr]; + unsigned int runctr; + struct test_result rbestruntime; + + if (tctr > targs.tfirst) + pr_info("\n"); + + pr_info("Testing %s performance with %i runs, %d seconds each\n", + data->name, targs.runs, targs.seconds); + + rbestruntime.runtimens = 0; + for (runctr = 0; runctr < targs.runs; runctr++) + if (!test_loop(data, &targs, + &rbestslottime, &rbestruntime)) + break; + + if (rbestruntime.runtimens) + pr_info("Best runtime result was %ld.%.9lds per iteration (with %"PRIu64" iterations)\n", + rbestruntime.iter_runtime.tv_sec, + rbestruntime.iter_runtime.tv_nsec, + rbestruntime.nloops); + } + + if 
(rbestslottime.slottimens) + pr_info("Best slot setup time for the whole test area was %ld.%.9lds\n", + rbestslottime.slot_runtime.tv_sec, + rbestslottime.slot_runtime.tv_nsec); + + return 0; +} diff --git a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c index 9b78e8889638..8c77537af5a1 100644 --- a/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c +++ b/tools/testing/selftests/kvm/x86_64/get_cpuid_test.c @@ -19,7 +19,12 @@ struct { u32 function; u32 index; } mangled_cpuids[] = { + /* + * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR, + * which are not controlled for by this test. + */ {.function = 0xd, .index = 0}, + {.function = 0xd, .index = 1}, }; static void test_guest_cpuids(struct kvm_cpuid2 *guest_cpuid) diff --git a/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c b/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c index cb953df4d7d0..8aed0db1331d 100644 --- a/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c +++ b/tools/testing/selftests/kvm/x86_64/get_msr_index_features.c @@ -37,9 +37,7 @@ static void test_get_msr_index(void) int old_res, res, kvm_fd, r; struct kvm_msr_list *list; - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); old_res = kvm_num_index_msrs(kvm_fd, 0); TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0"); @@ -101,9 +99,7 @@ static void test_get_msr_feature(void) int res, old_res, i, kvm_fd; struct kvm_msr_list *feature_list; - kvm_fd = open(KVM_DEV_PATH, O_RDONLY); - if (kvm_fd < 0) - exit(KSFT_SKIP); + kvm_fd = open_kvm_dev_path_or_exit(); old_res = kvm_num_feature_msrs(kvm_fd, 0); TEST_ASSERT(old_res != 0, "Expecting nmsrs to be > 0"); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 6b4feb92dc79..6a6bc7af0e28 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -307,6 +307,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) { return kvm_make_all_cpus_request_except(kvm, req, NULL); } +EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request); #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL void kvm_flush_remote_tlbs(struct kvm *kvm) @@ -2929,6 +2930,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu) goto out; if (signal_pending(current)) goto out; + if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu)) + goto out; ret = 0; out: @@ -2973,8 +2976,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu) goto out; } poll_end = cur = ktime_get(); - } while (single_task_running() && !need_resched() && - ktime_before(cur, stop)); + } while (kvm_vcpu_can_poll(cur, stop)); } prepare_to_rcuwait(&vcpu->wait); diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c index c9bb3957f58a..28fda42e471b 100644 --- a/virt/lib/irqbypass.c +++ b/virt/lib/irqbypass.c @@ -40,21 +40,17 @@ static int __connect(struct irq_bypass_producer *prod, if (prod->add_consumer) ret = prod->add_consumer(prod, cons); - if (ret) - goto err_add_consumer; - - ret = cons->add_producer(cons, prod); - if (ret) - goto err_add_producer; + if (!ret) { + ret = cons->add_producer(cons, prod); + if (ret && prod->del_consumer) + prod->del_consumer(prod, cons); + } if (cons->start) cons->start(cons); if (prod->start) prod->start(prod); -err_add_producer: - if (prod->del_consumer) - prod->del_consumer(prod, cons); -err_add_consumer: + return ret; } |
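The host/guest handshake used throughout the new test (host_perform_sync() publishing sync_flag with release semantics, guest_perform_sync() consuming it with a weak compare-and-exchange) is plain C11 atomics and can be exercised outside KVM. Below is a minimal standalone sketch of that protocol with two ordinary pthreads standing in for the host thread and the vCPU; it is not part of the patch, and every identifier in it (the producer/consumer helpers, the pass counter, the suggested sync_sketch.c file name) is invented for illustration. The alarm(2)-based watchdog of the real host_perform_sync() is omitted.

/*
 * sync_sketch.c - illustrative only, not part of the patch.
 *
 * Producer/consumer handshake on a single atomic flag, mirroring
 * host_perform_sync()/guest_perform_sync() above:
 *   - the producer sets sync_flag (release) and spins until it reads
 *     false again (acquire), i.e. until the consumer took the request;
 *   - the consumer claims a request by CAS-ing sync_flag true -> false,
 *     bailing out once exit_flag is set.
 *
 * Build: cc -O2 -pthread sync_sketch.c -o sync_sketch
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool sync_flag;	/* "one unit of work requested" */
static atomic_bool exit_flag;	/* "consumer should stop"       */

static void producer_perform_sync(void)
{
	atomic_store_explicit(&sync_flag, true, memory_order_release);
	while (atomic_load_explicit(&sync_flag, memory_order_acquire))
		;
}

static bool consumer_perform_sync(void)
{
	bool expected;

	do {
		if (atomic_load_explicit(&exit_flag, memory_order_acquire))
			return false;

		expected = true;
	} while (!atomic_compare_exchange_weak_explicit(&sync_flag, &expected,
							false,
							memory_order_acq_rel,
							memory_order_relaxed));

	return true;
}

static void *consumer(void *arg)
{
	unsigned long passes = 0;

	while (consumer_perform_sync())
		passes++;	/* one "guest pass" per producer sync */

	printf("consumer completed %lu synchronized passes\n", passes);
	return NULL;
}

int main(void)
{
	pthread_t thread;
	int i;

	atomic_init(&sync_flag, false);
	atomic_init(&exit_flag, false);

	pthread_create(&thread, NULL, consumer, NULL);

	for (i = 0; i < 5; i++)
		producer_perform_sync();

	atomic_store_explicit(&exit_flag, true, memory_order_release);
	pthread_join(thread, NULL);

	return 0;
}

The weak compare-and-exchange on the consumer side is what lets the producer detect completion by simply re-reading the flag, which is also why the test can attribute host-side wait time to a single noinline helper. As a usage note, assuming the selftest builds to a memslot_perf_test binary (the file name is not visible in this hunk), the option parsing above means an invocation such as ./memslot_perf_test -s 512 -l 10 -r 5 would cap the memslot count at 512, run each selected test for 10 seconds and repeat it 5 times.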