 .clang-format                           |  17
 arch/powerpc/kernel/process.c           |  21
 arch/powerpc/mm/nohash/tlb.c            |   1
 arch/x86/hyperv/mmu.c                   |   8
 arch/x86/include/asm/bootparam_utils.h  |   1
 arch/x86/include/asm/uaccess.h          |   4
 drivers/net/virtio_net.c                |   2
 drivers/vhost/test.c                    |  13
 drivers/vhost/vhost.c                   | 520
 drivers/vhost/vhost.h                   |  41
 fs/nfs/inode.c                          |  18
 kernel/sched/core.c                     |  78
 kernel/sched/fair.c                     |   5
 mm/balloon_compaction.c                 |   3
 security/keys/request_key_auth.c        |   6
 sound/pci/hda/hda_auto_parser.c         |   4
 sound/pci/hda/hda_generic.c             |   3
 sound/pci/hda/hda_generic.h             |   1
 sound/pci/hda/patch_realtek.c           |  17
 19 files changed, 126 insertions(+), 637 deletions(-)
diff --git a/.clang-format b/.clang-format
index 2ffd69afc1a8..196ca317bd1f 100644
--- a/.clang-format
+++ b/.clang-format
@@ -107,10 +107,13 @@ ForEachMacros:
   - 'css_for_each_descendant_post'
   - 'css_for_each_descendant_pre'
   - 'device_for_each_child_node'
+  - 'dma_fence_chain_for_each'
   - 'drm_atomic_crtc_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane_state'
   - 'drm_atomic_for_each_plane_damage'
+  - 'drm_client_for_each_connector_iter'
+  - 'drm_client_for_each_modeset'
   - 'drm_connector_for_each_possible_encoder'
   - 'drm_for_each_connector_iter'
   - 'drm_for_each_crtc'
@@ -126,6 +129,7 @@ ForEachMacros:
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
   - 'flow_action_for_each'
+  - 'for_each_active_dev_scope'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
   - 'for_each_available_child_of_node'
@@ -153,6 +157,8 @@ ForEachMacros:
   - 'for_each_cpu_not'
   - 'for_each_cpu_wrap'
   - 'for_each_dev_addr'
+  - 'for_each_dev_scope'
+  - 'for_each_displayid_db'
   - 'for_each_dma_cap_mask'
   - 'for_each_dpcm_be'
   - 'for_each_dpcm_be_rollback'
@@ -169,6 +175,8 @@ ForEachMacros:
   - 'for_each_evictable_lru'
   - 'for_each_fib6_node_rt_rcu'
   - 'for_each_fib6_walker_rt'
+  - 'for_each_free_mem_pfn_range_in_zone'
+  - 'for_each_free_mem_pfn_range_in_zone_from'
   - 'for_each_free_mem_range'
   - 'for_each_free_mem_range_reverse'
   - 'for_each_func_rsrc'
@@ -178,6 +186,7 @@ ForEachMacros:
   - 'for_each_ip_tunnel_rcu'
   - 'for_each_irq_nr'
   - 'for_each_link_codecs'
+  - 'for_each_link_platforms'
   - 'for_each_lru'
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
@@ -302,7 +311,10 @@ ForEachMacros:
   - 'ide_port_for_each_present_dev'
   - 'idr_for_each_entry'
   - 'idr_for_each_entry_continue'
+  - 'idr_for_each_entry_continue_ul'
   - 'idr_for_each_entry_ul'
+  - 'in_dev_for_each_ifa_rcu'
+  - 'in_dev_for_each_ifa_rtnl'
   - 'inet_bind_bucket_for_each'
   - 'inet_lhash2_for_each_icsk_rcu'
   - 'key_for_each'
@@ -343,8 +355,6 @@ ForEachMacros:
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
   - 'media_device_for_each_pad'
-  - 'mp_bvec_for_each_page'
-  - 'mp_bvec_for_each_segment'
   - 'nanddev_io_for_each_page'
   - 'netdev_for_each_lower_dev'
   - 'netdev_for_each_lower_private'
@@ -381,18 +391,19 @@ ForEachMacros:
   - 'radix_tree_for_each_slot'
   - 'radix_tree_for_each_tagged'
   - 'rbtree_postorder_for_each_entry_safe'
+  - 'rdma_for_each_block'
   - 'rdma_for_each_port'
   - 'resource_list_for_each_entry'
   - 'resource_list_for_each_entry_safe'
   - 'rhl_for_each_entry_rcu'
   - 'rhl_for_each_rcu'
   - 'rht_for_each'
-  - 'rht_for_each_from'
   - 'rht_for_each_entry'
   - 'rht_for_each_entry_from'
   - 'rht_for_each_entry_rcu'
   - 'rht_for_each_entry_rcu_from'
   - 'rht_for_each_entry_safe'
+  - 'rht_for_each_from'
   - 'rht_for_each_rcu'
   - 'rht_for_each_rcu_from'
   - '__rq_for_each_bio'
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8fc4de0d22b4..7a84c9f1778e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
 	}
 }
 
-static bool tm_active_with_fp(struct task_struct *tsk)
-{
-	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-		(tsk->thread.ckpt_regs.msr & MSR_FP);
-}
-
-static bool tm_active_with_altivec(struct task_struct *tsk)
-{
-	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-		(tsk->thread.ckpt_regs.msr & MSR_VEC);
-}
 #else
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 bool strict_msr_control;
@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
 
 static int restore_fp(struct task_struct *tsk)
 {
-	if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
+	if (tsk->thread.load_fp) {
 		load_fp_state(&current->thread.fp_state);
 		current->thread.load_fp++;
 		return 1;
@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 
 static int restore_altivec(struct task_struct *tsk)
 {
-	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
-	    (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
 		load_vr_state(&tsk->thread.vr_state);
 		tsk->thread.used_vr = 1;
 		tsk->thread.load_vec++;
@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
 	if (!tsk->thread.regs)
 		return;
 
+	check_if_tm_restore_required(tsk);
+
 	usermsr = tsk->thread.regs->msr;
 
 	if ((usermsr & msr_all_available) == 0)
 		return;
 
 	msr_check_and_set(msr_all_available);
-	check_if_tm_restore_required(tsk);
 
 	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
 
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index d4acf6fa0596..bf60983a58c7 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
 #ifdef CONFIG_PPC_FSL_BOOK3E
 	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 		unsigned int num_cams;
-		int __maybe_unused cpu = smp_processor_id();
 		bool map = true;
 
 		/* use a quarter of the TLBCAM for bolted linear map */
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index e65d7fe6489f..5208ba49c89a 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
 		 * Lower 12 bits encode the number of additional
 		 * pages to flush (in addition to the 'cur' page).
 		 */
-		if (diff >= HV_TLB_FLUSH_UNIT)
+		if (diff >= HV_TLB_FLUSH_UNIT) {
 			gva_list[gva_n] |= ~PAGE_MASK;
-		else if (diff)
+			cur += HV_TLB_FLUSH_UNIT;
+		} else if (diff) {
 			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
+			cur = end;
+		}
 
-		cur += HV_TLB_FLUSH_UNIT;
 		gva_n++;
 
 	} while (cur < end);
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 9e5f3c722c33..981fe923a59f 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -70,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
 			BOOT_PARAM_PRESERVE(eddbuf_entries),
 			BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
 			BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+			BOOT_PARAM_PRESERVE(secure_boot),
 			BOOT_PARAM_PRESERVE(hdr),
 			BOOT_PARAM_PRESERVE(e820_table),
 			BOOT_PARAM_PRESERVE(eddbuf),
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 9c4435307ff8..35c225ede0e4 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -444,8 +444,10 @@ __pu_label:							\
 ({									\
 	int __gu_err;							\
 	__inttype(*(ptr)) __gu_val;					\
+	__typeof__(ptr) __gu_ptr = (ptr);				\
+	__typeof__(size) __gu_size = (size);				\
 	__uaccess_begin_nospec();					\
-	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
+	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT);	\
 	__uaccess_end();						\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 	__builtin_expect(__gu_err, 0);					\
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4f3de0ac8b0b..ba98e0971b84 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 		}
 	}
 
-	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
+	if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
 		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
 			schedule_delayed_work(&vi->refill, 0);
 	}
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 9e90e969af55..7804869c6a31 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -22,6 +22,12 @@
  * Using this limit prevents one virtqueue from starving others. */
 #define VHOST_TEST_WEIGHT 0x80000
 
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * pkts.
+ */
+#define VHOST_TEST_PKT_WEIGHT 256
+
 enum {
 	VHOST_TEST_VQ = 0,
 	VHOST_TEST_VQ_MAX = 1,
@@ -80,10 +86,8 @@ static void handle_vq(struct vhost_test *n)
 		}
 		vhost_add_used_and_signal(&n->dev, vq, head, 0);
 		total_len += len;
-		if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
-			vhost_poll_queue(&vq->poll);
+		if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
 			break;
-		}
 	}
 
 	mutex_unlock(&vq->mutex);
@@ -115,7 +119,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
 	dev = &n->dev;
 	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
 	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
-	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
+	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
+		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
 
 	f->private_data = n;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 0536f8526359..5dc174ac8cac 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -203,7 +203,6 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
 int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
 	__poll_t mask;
-	int ret = 0;
 
 	if (poll->wqh)
 		return 0;
@@ -213,10 +212,10 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
 	if (mask & EPOLLERR) {
 		vhost_poll_stop(poll);
-		ret = -EINVAL;
+		return -EINVAL;
 	}
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(vhost_poll_start);
 
@@ -298,160 +297,6 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
 		__vhost_vq_meta_reset(d->vqs[i]);
 }
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_map_unprefetch(struct vhost_map *map)
-{
-	kfree(map->pages);
-	map->pages = NULL;
-	map->npages = 0;
-	map->addr = NULL;
-}
-
-static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
-{
-	struct vhost_map *map[VHOST_NUM_ADDRS];
-	int i;
-
-	spin_lock(&vq->mmu_lock);
-	for (i = 0; i < VHOST_NUM_ADDRS; i++) {
-		map[i] = rcu_dereference_protected(vq->maps[i],
-				  lockdep_is_held(&vq->mmu_lock));
-		if (map[i])
-			rcu_assign_pointer(vq->maps[i], NULL);
-	}
-	spin_unlock(&vq->mmu_lock);
-
-	synchronize_rcu();
-
-	for (i = 0; i < VHOST_NUM_ADDRS; i++)
-		if (map[i])
-			vhost_map_unprefetch(map[i]);
-
-}
-
-static void vhost_reset_vq_maps(struct vhost_virtqueue *vq)
-{
-	int i;
-
-	vhost_uninit_vq_maps(vq);
-	for (i = 0; i < VHOST_NUM_ADDRS; i++)
-		vq->uaddrs[i].size = 0;
-}
-
-static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
-				    unsigned long start,
-				    unsigned long end)
-{
-	if (unlikely(!uaddr->size))
-		return false;
-
-	return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
-}
-
-static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
-				      int index,
-				      unsigned long start,
-				      unsigned long end)
-{
-	struct vhost_uaddr *uaddr = &vq->uaddrs[index];
-	struct vhost_map *map;
-	int i;
-
-	if (!vhost_map_range_overlap(uaddr, start, end))
-		return;
-
-	spin_lock(&vq->mmu_lock);
-	++vq->invalidate_count;
-
-	map = rcu_dereference_protected(vq->maps[index],
-					lockdep_is_held(&vq->mmu_lock));
-	if (map) {
-		if (uaddr->write) {
-			for (i = 0; i < map->npages; i++)
-				set_page_dirty(map->pages[i]);
-		}
-		rcu_assign_pointer(vq->maps[index], NULL);
-	}
-	spin_unlock(&vq->mmu_lock);
-
-	if (map) {
-		synchronize_rcu();
-		vhost_map_unprefetch(map);
-	}
-}
-
-static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
-				    int index,
-				    unsigned long start,
-				    unsigned long end)
-{
-	if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
-		return;
-
-	spin_lock(&vq->mmu_lock);
-	--vq->invalidate_count;
-	spin_unlock(&vq->mmu_lock);
-}
-
-static int vhost_invalidate_range_start(struct mmu_notifier *mn,
-					const struct mmu_notifier_range *range)
-{
-	struct vhost_dev *dev = container_of(mn, struct vhost_dev,
-					     mmu_notifier);
-	int i, j;
-
-	if (!mmu_notifier_range_blockable(range))
-		return -EAGAIN;
-
-	for (i = 0; i < dev->nvqs; i++) {
-		struct vhost_virtqueue *vq = dev->vqs[i];
-
-		for (j = 0; j < VHOST_NUM_ADDRS; j++)
-			vhost_invalidate_vq_start(vq, j,
-						  range->start,
-						  range->end);
-	}
-
-	return 0;
-}
-
-static void vhost_invalidate_range_end(struct mmu_notifier *mn,
-				       const struct mmu_notifier_range *range)
-{
-	struct vhost_dev *dev = container_of(mn, struct vhost_dev,
-					     mmu_notifier);
-	int i, j;
-
-	for (i = 0; i < dev->nvqs; i++) {
-		struct vhost_virtqueue *vq = dev->vqs[i];
-
-		for (j = 0; j < VHOST_NUM_ADDRS; j++)
-			vhost_invalidate_vq_end(vq, j,
-						range->start,
-						range->end);
-	}
-}
-
-static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
-	.invalidate_range_start = vhost_invalidate_range_start,
-	.invalidate_range_end = vhost_invalidate_range_end,
-};
-
-static void vhost_init_maps(struct vhost_dev *dev)
-{
-	struct vhost_virtqueue *vq;
-	int i, j;
-
-	dev->mmu_notifier.ops = &vhost_mmu_notifier_ops;
-
-	for (i = 0; i < dev->nvqs; ++i) {
-		vq = dev->vqs[i];
-		for (j = 0; j < VHOST_NUM_ADDRS; j++)
-			RCU_INIT_POINTER(vq->maps[j], NULL);
-	}
-}
-#endif
-
 static void vhost_vq_reset(struct vhost_dev *dev,
 			   struct vhost_virtqueue *vq)
 {
@@ -480,11 +325,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
 	vq->busyloop_timeout = 0;
 	vq->umem = NULL;
 	vq->iotlb = NULL;
-	vq->invalidate_count = 0;
 	__vhost_vq_meta_reset(vq);
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	vhost_reset_vq_maps(vq);
-#endif
 }
 
 static int vhost_worker(void *data)
@@ -634,9 +475,7 @@ void vhost_dev_init(struct vhost_dev *dev,
 	INIT_LIST_HEAD(&dev->read_list);
 	INIT_LIST_HEAD(&dev->pending_list);
 	spin_lock_init(&dev->iotlb_lock);
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	vhost_init_maps(dev);
-#endif
+
 
 	for (i = 0; i < dev->nvqs; ++i) {
 		vq = dev->vqs[i];
@@ -645,7 +484,6 @@ void vhost_dev_init(struct vhost_dev *dev,
 		vq->heads = NULL;
 		vq->dev = dev;
 		mutex_init(&vq->mutex);
-		spin_lock_init(&vq->mmu_lock);
 		vhost_vq_reset(dev, vq);
 		if (vq->handle_kick)
 			vhost_poll_init(&vq->poll, vq->handle_kick,
@@ -725,18 +563,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
 	if (err)
 		goto err_cgroup;
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	err = mmu_notifier_register(&dev->mmu_notifier, dev->mm);
-	if (err)
-		goto err_mmu_notifier;
-#endif
-
 	return 0;
-
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-err_mmu_notifier:
-	vhost_dev_free_iovecs(dev);
-#endif
 err_cgroup:
 	kthread_stop(worker);
 	dev->worker = NULL;
@@ -827,107 +654,6 @@ static void vhost_clear_msg(struct vhost_dev *dev)
 	spin_unlock(&dev->iotlb_lock);
 }
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_setup_uaddr(struct vhost_virtqueue *vq,
-			      int index, unsigned long uaddr,
-			      size_t size, bool write)
-{
-	struct vhost_uaddr *addr = &vq->uaddrs[index];
-
-	addr->uaddr = uaddr;
-	addr->size = size;
-	addr->write = write;
-}
-
-static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
-{
-	vhost_setup_uaddr(vq, VHOST_ADDR_DESC,
-			  (unsigned long)vq->desc,
-			  vhost_get_desc_size(vq, vq->num),
-			  false);
-	vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL,
-			  (unsigned long)vq->avail,
-			  vhost_get_avail_size(vq, vq->num),
-			  false);
-	vhost_setup_uaddr(vq, VHOST_ADDR_USED,
-			  (unsigned long)vq->used,
-			  vhost_get_used_size(vq, vq->num),
-			  true);
-}
-
-static int vhost_map_prefetch(struct vhost_virtqueue *vq,
-			      int index)
-{
-	struct vhost_map *map;
-	struct vhost_uaddr *uaddr = &vq->uaddrs[index];
-	struct page **pages;
-	int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE);
-	int npinned;
-	void *vaddr, *v;
-	int err;
-	int i;
-
-	spin_lock(&vq->mmu_lock);
-
-	err = -EFAULT;
-	if (vq->invalidate_count)
-		goto err;
-
-	err = -ENOMEM;
-	map = kmalloc(sizeof(*map), GFP_ATOMIC);
-	if (!map)
-		goto err;
-
-	pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
-	if (!pages)
-		goto err_pages;
-
-	err = EFAULT;
-	npinned = __get_user_pages_fast(uaddr->uaddr, npages,
-					uaddr->write, pages);
-	if (npinned > 0)
-		release_pages(pages, npinned);
-	if (npinned != npages)
-		goto err_gup;
-
-	for (i = 0; i < npinned; i++)
-		if (PageHighMem(pages[i]))
-			goto err_gup;
-
-	vaddr = v = page_address(pages[0]);
-
-	/* For simplicity, fallback to userspace address if VA is not
-	 * contigious.
-	 */
-	for (i = 1; i < npinned; i++) {
-		v += PAGE_SIZE;
-		if (v != page_address(pages[i]))
-			goto err_gup;
-	}
-
-	map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1));
-	map->npages = npages;
-	map->pages = pages;
-
-	rcu_assign_pointer(vq->maps[index], map);
-	/* No need for a synchronize_rcu(). This function should be
-	 * called by dev->worker so we are serialized with all
-	 * readers.
-	 */
-	spin_unlock(&vq->mmu_lock);
-
-	return 0;
-
-err_gup:
-	kfree(pages);
-err_pages:
-	kfree(map);
-err:
-	spin_unlock(&vq->mmu_lock);
-	return err;
-}
-#endif
-
 void vhost_dev_cleanup(struct vhost_dev *dev)
 {
 	int i;
@@ -957,16 +683,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 		kthread_stop(dev->worker);
 		dev->worker = NULL;
 	}
-	if (dev->mm) {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-		mmu_notifier_unregister(&dev->mmu_notifier, dev->mm);
-#endif
+	if (dev->mm)
 		mmput(dev->mm);
-	}
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	for (i = 0; i < dev->nvqs; i++)
-		vhost_uninit_vq_maps(dev->vqs[i]);
-#endif
 	dev->mm = NULL;
 }
 EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
@@ -1195,26 +913,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
 
 static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	struct vhost_map *map;
-	struct vring_used *used;
-
-	if (!vq->iotlb) {
-		rcu_read_lock();
-
-		map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
-		if (likely(map)) {
-			used = map->addr;
-			*((__virtio16 *)&used->ring[vq->num]) =
-				cpu_to_vhost16(vq, vq->avail_idx);
-			rcu_read_unlock();
-			return 0;
-		}
-
-		rcu_read_unlock();
-	}
-#endif
-
 	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
 			      vhost_avail_event(vq));
 }
@@ -1223,27 +921,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
 				 struct vring_used_elem *head, int idx,
 				 int count)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	struct vhost_map *map;
-	struct vring_used *used;
-	size_t size;
-
-	if (!vq->iotlb) {
-		rcu_read_lock();
-
-		map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
-		if (likely(map)) {
-			used = map->addr;
-			size = count * sizeof(*head);
-			memcpy(used->ring + idx, head, size);
-			rcu_read_unlock();
-			return 0;
-		}
-
-		rcu_read_unlock();
-	}
-#endif
-
 	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
 				  count * sizeof(*head));
 }
@@ -1251,25 +928,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
 
 static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	struct vhost_map *map;
-	struct vring_used *used;
-
-	if (!vq->iotlb) {
-		rcu_read_lock();
-
-		map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
-		if (likely(map)) {
-			used = map->addr;
-			used->flags = cpu_to_vhost16(vq, vq->used_flags);
-			rcu_read_unlock();
-			return 0;
-		}
-
-		rcu_read_unlock();
-	}
-#endif
-
 	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
 			      &vq->used->flags);
 }
@@ -1277,25 +935,6 @@ static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
 
 static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	struct vhost_map *map;
-	struct vring_used *used;
-
-	if (!vq->iotlb) {
-		rcu_read_lock();
-
-		map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
-		if (likely(map)) {
-			used = map->addr;
-			used->idx = cpu_to_vhost16(vq, vq->last_used_idx);
-			rcu_read_unlock();
-			return 0;
-		}
-
-		rcu_read_unlock();
-	}
-#endif
-
 	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
 			      &vq->used->idx);
 }
@@ -1341,50 +980,12 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
 static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
 				      __virtio16 *idx)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	struct vhost_map *map;
-	struct vring_avail *avail;
-
-	if (!vq->iotlb) {
-		rcu_read_lock();
-
-		map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
-		if (likely(map)) {
-			avail = map->addr;
-			*idx = avail->idx;
-			rcu_read_unlock();
-			return 0;
-		}
-
-		rcu_read_unlock();
-	}
-#endif
-
 	return vhost_get_avail(vq, *idx, &vq->avail->idx);
 }
 
 static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
 				       __virtio16 *head, int idx)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	struct vhost_map *map;
-	struct vring_avail *avail;
-
-	if (!vq->iotlb) {
-		rcu_read_lock();
-
-		map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
-		if (likely(map)) {
-			avail = map->addr;
-			*head = avail->ring[idx & (vq->num - 1)];
-			rcu_read_unlock();
-			return 0;
-		}
-
-		rcu_read_unlock();
-	}
-#endif
-
 	return vhost_get_avail(vq, *head,
 			       &vq->avail->ring[idx & (vq->num - 1)]);
 }
@@ -1392,98 +993,24 @@ static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
 static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
 					__virtio16 *flags)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	struct vhost_map *map;
-	struct vring_avail *avail;
-
-	if (!vq->iotlb) {
-		rcu_read_lock();
-
-		map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
-		if (likely(map)) {
-			avail = map->addr;
-			*flags = avail->flags;
-			rcu_read_unlock();
-			return 0;
-		}
-
-		rcu_read_unlock();
-	}
-#endif
-
 	return vhost_get_avail(vq, *flags, &vq->avail->flags);
 }
 
 static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
 				       __virtio16 *event)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	struct vhost_map *map;
-	struct vring_avail *avail;
-
-	if (!vq->iotlb) {
-		rcu_read_lock();
-		map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
-		if (likely(map)) {
-			avail = map->addr;
-			*event = (__virtio16)avail->ring[vq->num];
-			rcu_read_unlock();
-			return 0;
-		}
-		rcu_read_unlock();
-	}
-#endif
-
 	return vhost_get_avail(vq, *event, vhost_used_event(vq));
 }
 
 static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
 				     __virtio16 *idx)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	struct vhost_map *map;
-	struct vring_used *used;
-
-	if (!vq->iotlb) {
-		rcu_read_lock();
-
-		map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
-		if (likely(map)) {
-			used = map->addr;
-			*idx = used->idx;
-			rcu_read_unlock();
-			return 0;
-		}
-
-		rcu_read_unlock();
-	}
-#endif
-
 	return vhost_get_used(vq, *idx, &vq->used->idx);
 }
 
 static inline int vhost_get_desc(struct vhost_virtqueue *vq,
 				 struct vring_desc *desc, int idx)
 {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	struct vhost_map *map;
-	struct vring_desc *d;
-
-	if (!vq->iotlb) {
-		rcu_read_lock();
-
-		map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]);
-		if (likely(map)) {
-			d = map->addr;
-			*desc = *(d + idx);
-			rcu_read_unlock();
-			return 0;
-		}
-
-		rcu_read_unlock();
-	}
-#endif
-
 	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
 }
 
@@ -1824,32 +1351,12 @@ static bool iotlb_access_ok(struct vhost_virtqueue *vq,
 	return true;
 }
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq)
-{
-	struct vhost_map __rcu *map;
-	int i;
-
-	for (i = 0; i < VHOST_NUM_ADDRS; i++) {
-		rcu_read_lock();
-		map = rcu_dereference(vq->maps[i]);
-		rcu_read_unlock();
-		if (unlikely(!map))
-			vhost_map_prefetch(vq, i);
-	}
-}
-#endif
-
 int vq_meta_prefetch(struct vhost_virtqueue *vq)
 {
 	unsigned int num = vq->num;
 
-	if (!vq->iotlb) {
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-		vhost_vq_map_prefetch(vq);
-#endif
+	if (!vq->iotlb)
 		return 1;
-	}
 
 	return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
 			       vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
@@ -2060,16 +1567,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
 
 	mutex_lock(&vq->mutex);
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	/* Unregister MMU notifer to allow invalidation callback
-	 * can access vq->uaddrs[] without holding a lock.
-	 */
-	if (d->mm)
-		mmu_notifier_unregister(&d->mmu_notifier, d->mm);
-
-	vhost_uninit_vq_maps(vq);
-#endif
-
 	switch (ioctl) {
 	case VHOST_SET_VRING_NUM:
 		r = vhost_vring_set_num(d, vq, argp);
@@ -2081,13 +1578,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
 		BUG();
 	}
 
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	vhost_setup_vq_uaddr(vq);
-
-	if (d->mm)
-		mmu_notifier_register(&d->mmu_notifier, d->mm);
-#endif
-
 	mutex_unlock(&vq->mutex);
 
 	return r;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 42a8c2a13ab1..e9ed2722b633 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -12,9 +12,6 @@
 #include <linux/virtio_config.h>
 #include <linux/virtio_ring.h>
 #include <linux/atomic.h>
-#include <linux/pagemap.h>
-#include <linux/mmu_notifier.h>
-#include <asm/cacheflush.h>
 
 struct vhost_work;
 typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -83,24 +80,6 @@ enum vhost_uaddr_type {
 	VHOST_NUM_ADDRS = 3,
 };
 
-struct vhost_map {
-	int npages;
-	void *addr;
-	struct page **pages;
-};
-
-struct vhost_uaddr {
-	unsigned long uaddr;
-	size_t size;
-	bool write;
-};
-
-#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
-#else
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
-#endif
-
 /* The virtqueue structure describes a queue attached to a device. */
 struct vhost_virtqueue {
 	struct vhost_dev *dev;
@@ -111,22 +90,7 @@ struct vhost_virtqueue {
 	struct vring_desc __user *desc;
 	struct vring_avail __user *avail;
 	struct vring_used __user *used;
-
-#if VHOST_ARCH_CAN_ACCEL_UACCESS
-	/* Read by memory accessors, modified by meta data
-	 * prefetching, MMU notifier and vring ioctl().
-	 * Synchonrized through mmu_lock (writers) and RCU (writers
-	 * and readers).
-	 */
-	struct vhost_map __rcu *maps[VHOST_NUM_ADDRS];
-	/* Read by MMU notifier, modified by vring ioctl(),
-	 * synchronized through MMU notifier
-	 * registering/unregistering.
-	 */
-	struct vhost_uaddr uaddrs[VHOST_NUM_ADDRS];
-#endif
 	const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
-
 	struct file *kick;
 	struct eventfd_ctx *call_ctx;
 	struct eventfd_ctx *error_ctx;
@@ -181,8 +145,6 @@ struct vhost_virtqueue {
 	bool user_be;
 #endif
 	u32 busyloop_timeout;
-	spinlock_t mmu_lock;
-	int invalidate_count;
 };
 
 struct vhost_msg_node {
@@ -196,9 +158,6 @@ struct vhost_msg_node {
 
 struct vhost_dev {
 	struct mm_struct *mm;
-#ifdef CONFIG_MMU_NOTIFIER
-	struct mmu_notifier mmu_notifier;
-#endif
 	struct mutex mutex;
 	struct vhost_virtqueue **vqs;
 	int nvqs;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c764cfe456e5..2a03bfeec10a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1403,11 +1403,12 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
 	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
 		return 0;
 
-	/* No fileid? Just exit */
-	if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
-		return 0;
+	if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+		/* Only a mounted-on-fileid? Just exit */
+		if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+			return 0;
 	/* Has the inode gone and changed behind our back? */
-	if (nfsi->fileid != fattr->fileid) {
+	} else if (nfsi->fileid != fattr->fileid) {
 		/* Is this perhaps the mounted-on fileid? */
 		if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
 		    nfsi->fileid == fattr->mounted_on_fileid)
@@ -1807,11 +1808,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 			nfs_display_fhandle_hash(NFS_FH(inode)),
 			atomic_read(&inode->i_count), fattr->valid);
 
-	/* No fileid? Just exit */
-	if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
-		return 0;
+	if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+		/* Only a mounted-on-fileid? Just exit */
+		if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+			return 0;
 	/* Has the inode gone and changed behind our back? */
-	if (nfsi->fileid != fattr->fileid) {
+	} else if (nfsi->fileid != fattr->fileid) {
 		/* Is this perhaps the mounted-on fileid? */
 		if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
 		    nfsi->fileid == fattr->mounted_on_fileid)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 010d578118d6..df9f1fe5689b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5105,37 +5105,40 @@ out_unlock:
 	return retval;
 }
 
-static int sched_read_attr(struct sched_attr __user *uattr,
-			   struct sched_attr *attr,
-			   unsigned int usize)
+/*
+ * Copy the kernel size attribute structure (which might be larger
+ * than what user-space knows about) to user-space.
+ *
+ * Note that all cases are valid: user-space buffer can be larger or
+ * smaller than the kernel-space buffer. The usual case is that both
+ * have the same size.
+ */
+static int
+sched_attr_copy_to_user(struct sched_attr __user *uattr,
+			struct sched_attr *kattr,
+			unsigned int usize)
 {
-	int ret;
+	unsigned int ksize = sizeof(*kattr);
 
 	if (!access_ok(uattr, usize))
 		return -EFAULT;
 
 	/*
-	 * If we're handed a smaller struct than we know of,
-	 * ensure all the unknown bits are 0 - i.e. old
-	 * user-space does not get uncomplete information.
+	 * sched_getattr() ABI forwards and backwards compatibility:
+	 *
+	 * If usize == ksize then we just copy everything to user-space and all is good.
+	 *
+	 * If usize < ksize then we only copy as much as user-space has space for,
+	 * this keeps ABI compatibility as well. We skip the rest.
+	 *
+	 * If usize > ksize then user-space is using a newer version of the ABI,
+	 * which part the kernel doesn't know about. Just ignore it - tooling can
+	 * detect the kernel's knowledge of attributes from the attr->size value
+	 * which is set to ksize in this case.
 	 */
-	if (usize < sizeof(*attr)) {
-		unsigned char *addr;
-		unsigned char *end;
+	kattr->size = min(usize, ksize);
 
-		addr = (void *)attr + usize;
-		end  = (void *)attr + sizeof(*attr);
-
-		for (; addr < end; addr++) {
-			if (*addr)
-				return -EFBIG;
-		}
-
-		attr->size = usize;
-	}
-
-	ret = copy_to_user(uattr, attr, attr->size);
-	if (ret)
+	if (copy_to_user(uattr, kattr, kattr->size))
 		return -EFAULT;
 
 	return 0;
@@ -5145,20 +5148,18 @@ static int sched_read_attr(struct sched_attr __user *uattr,
  * sys_sched_getattr - similar to sched_getparam, but with sched_attr
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
- * @size: sizeof(attr) for fwd/bwd comp.
+ * @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility.
  * @flags: for future extension.
  */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-		unsigned int, size, unsigned int, flags)
+		unsigned int, usize, unsigned int, flags)
 {
-	struct sched_attr attr = {
-		.size = sizeof(struct sched_attr),
-	};
+	struct sched_attr kattr = { };
 	struct task_struct *p;
 	int retval;
 
-	if (!uattr || pid < 0 || size > PAGE_SIZE ||
-	    size < SCHED_ATTR_SIZE_VER0 || flags)
+	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
+	    usize < SCHED_ATTR_SIZE_VER0 || flags)
 		return -EINVAL;
 
 	rcu_read_lock();
@@ -5171,25 +5172,24 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	if (retval)
 		goto out_unlock;
 
-	attr.sched_policy = p->policy;
+	kattr.sched_policy = p->policy;
 	if (p->sched_reset_on_fork)
-		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
 	if (task_has_dl_policy(p))
-		__getparam_dl(p, &attr);
+		__getparam_dl(p, &kattr);
 	else if (task_has_rt_policy(p))
-		attr.sched_priority = p->rt_priority;
+		kattr.sched_priority = p->rt_priority;
 	else
-		attr.sched_nice = task_nice(p);
+		kattr.sched_nice = task_nice(p);
 
 #ifdef CONFIG_UCLAMP_TASK
-	attr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
-	attr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
+	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
+	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
 #endif
 
 	rcu_read_unlock();
 
-	retval = sched_read_attr(uattr, &attr, size);
-	return retval;
+	return sched_attr_copy_to_user(uattr, &kattr, usize);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc9cfeaac8bd..500f5db0de0b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4470,6 +4470,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 	if (likely(cfs_rq->runtime_remaining > 0))
 		return;
 
+	if (cfs_rq->throttled)
+		return;
 	/*
 	 * if we're unable to extend our runtime we resched so that the active
 	 * hierarchy can be throttled
@@ -4673,6 +4675,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
 		if (!cfs_rq_throttled(cfs_rq))
 			goto next;
 
+		/* By the above check, this should never be true */
+		SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
+
 		runtime = -cfs_rq->runtime_remaining + 1;
 		if (runtime > remaining)
 			runtime = remaining;
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 798275a51887..26de020aae7b 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -124,7 +124,8 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
 struct page *balloon_page_alloc(void)
 {
 	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
-				       __GFP_NOMEMALLOC | __GFP_NORETRY);
+				       __GFP_NOMEMALLOC | __GFP_NORETRY |
+				       __GFP_NOWARN);
 	return page;
 }
 EXPORT_SYMBOL_GPL(balloon_page_alloc);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index e73ec040e250..ecba39c93fd9 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -66,6 +66,9 @@ static void request_key_auth_describe(const struct key *key,
 {
 	struct request_key_auth *rka = dereference_key_rcu(key);
 
+	if (!rka)
+		return;
+
 	seq_puts(m, "key:");
 	seq_puts(m, key->description);
 	if (key_is_positive(key))
@@ -83,6 +86,9 @@ static long request_key_auth_read(const struct key *key,
 	size_t datalen;
 	long ret;
 
+	if (!rka)
+		return -EKEYREVOKED;
+
 	datalen = rka->callout_len;
 	ret = datalen;
 
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 92390d457567..18e6546b4467 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -824,6 +824,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
 	while (id >= 0) {
 		const struct hda_fixup *fix = codec->fixup_list + id;
 
+		if (++depth > 10)
+			break;
 		if (fix->chained_before)
 			apply_fixup(codec, fix->chain_id, action, depth + 1);
 
@@ -863,8 +865,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
 		}
 		if (!fix->chained || fix->chained_before)
 			break;
-		if (++depth > 10)
-			break;
 		id = fix->chain_id;
 	}
 }
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 5bf24fb819d2..10d502328b76 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -6009,7 +6009,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
 	if (spec->init_hook)
 		spec->init_hook(codec);
 
-	snd_hda_apply_verbs(codec);
+	if (!spec->skip_verbs)
+		snd_hda_apply_verbs(codec);
 
 	init_multi_out(codec);
 	init_extra_out(codec);
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 5f199dcb0d18..fb9f1a90238b 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -243,6 +243,7 @@ struct hda_gen_spec {
 	unsigned int indep_hp_enabled:1; /* independent HP enabled */
 	unsigned int have_aamix_ctl:1;
 	unsigned int hp_mic_jack_modes:1;
+	unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
 
 	/* additional mute flags (only effective with auto_mute_via_amp=1) */
 	u64 mute_bits;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e333b3e30e31..c1ddfd2fac52 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -837,9 +837,11 @@ static int alc_init(struct hda_codec *codec)
 	if (spec->init_hook)
 		spec->init_hook(codec);
 
+	spec->gen.skip_verbs = 1; /* applied in below */
 	snd_hda_gen_init(codec);
 	alc_fix_pll(codec);
 	alc_auto_init_amp(codec, spec->init_amp);
+	snd_hda_apply_verbs(codec); /* apply verbs here after own init */
 
 	snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
 
@@ -5797,6 +5799,7 @@ enum {
 	ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
 	ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
 	ALC299_FIXUP_PREDATOR_SPK,
+	ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6837,6 +6840,16 @@ static const struct hda_fixup alc269_fixups[] = {
 		{ }
 		}
 	},
+	[ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x14, 0x411111f0 }, /* disable confusing internal speaker */
+			{ 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+	},
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6979,6 +6992,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
 	SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+	SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6995,6 +7009,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
 	SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
 	SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+	SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
 	SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -7072,6 +7087,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
 	SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
 	SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+	SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8946,6 +8962,7 @@ static int patch_alc680(struct hda_codec *codec)
 static const struct hda_device_id snd_hda_id_realtek[] = {
 	HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
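
The fill_gva_list() change in arch/x86/hyperv/mmu.c is subtle enough to deserve a standalone model. The sketch below mirrors the corrected loop in plain user-space C so it can be stepped through; it is an illustration, not kernel code: PAGE_SIZE and HV_TLB_FLUSH_UNIT are redefined locally (the 12-bit extra-pages field implies 4096 pages per full entry). The point of the fix is that 'cur' used to advance by a full flush unit even after a partial final entry, which near the top of the address space could wrap around and make 'cur < end' true again; now it advances by exactly what each entry encodes.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define PAGE_MASK         (~(PAGE_SIZE - 1))
/* One list entry names 1 + 0xfff pages, i.e. 4096 pages (assumed here). */
#define HV_TLB_FLUSH_UNIT (4096UL * PAGE_SIZE)

/* Model of the corrected loop: each entry holds a page-aligned address
 * whose low 12 bits count the *additional* pages it covers. A final
 * partial entry now jumps 'cur' straight to 'end' instead of past it. */
static int fill_gva_list(uint64_t gva_list[], uint64_t cur, uint64_t end)
{
        int gva_n = 0;

        do {
                uint64_t diff = end > cur ? end - cur : 0;

                gva_list[gva_n] = cur & PAGE_MASK;
                if (diff >= HV_TLB_FLUSH_UNIT) {
                        gva_list[gva_n] |= ~PAGE_MASK;
                        cur += HV_TLB_FLUSH_UNIT;
                } else if (diff) {
                        gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
                        cur = end;
                }
                gva_n++;
        } while (cur < end);

        return gva_n;
}

int main(void)
{
        uint64_t list[4];
        /* 4097 pages: one full entry plus one single-page entry. */
        int n = fill_gva_list(list, 0, 4097 * PAGE_SIZE);

        printf("%d entries, last covers %llu extra page(s)\n",
               n, (unsigned long long)(list[n - 1] & ~PAGE_MASK));
        return 0;
}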
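The sched_getattr() rework in kernel/sched/core.c is the one change in this set whose effect is directly visible from user space, so a minimal hedged sketch follows; it is not part of the patch. struct sched_attr is declared locally, trimmed to the 48-byte VER0 layout, because glibc ships no definition. Against an unpatched kernel with CONFIG_UCLAMP_TASK, an old binary like this could get -EFBIG because the kernel's newer (uclamp) fields were non-zero; the patched kernel instead copies min(usize, ksize) bytes and reports what it filled in via attr.size.

#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Local copy of the VER0 layout; field order follows
 * include/uapi/linux/sched/types.h. */
struct sched_attr {
        uint32_t size;           /* set by the kernel to what it filled in */
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;
        uint64_t sched_deadline;
        uint64_t sched_period;
};

int main(void)
{
        struct sched_attr attr = { 0 };

        /* pid 0 = calling thread. Pass the size this binary knows about;
         * the patched kernel copies back at most that much rather than
         * failing when its own struct is larger. */
        if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) == 0)
                printf("policy=%u nice=%d, kernel filled %u bytes\n",
                       attr.sched_policy, attr.sched_nice, attr.size);
        return 0;
}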