From da142f3d373a6ddaca0119615a8db2175ddc4121 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 5 Dec 2025 15:26:55 -0800 Subject: KVM: Remove subtle "struct kvm_stats_desc" pseudo-overlay Remove KVM's internal pseudo-overlay of kvm_stats_desc, which subtly aliases the flexible name[] in the uAPI definition with a fixed-size array of the same name. The unusual embedded structure results in compiler warnings due to -Wflex-array-member-not-at-end, and also necessitates an extra level of dereferencing in KVM. To avoid the "overlay", define the uAPI structure to have a fixed-size name when building for the kernel. Opportunistically clean up the indentation for the stats macros, and replace spaces with tabs. No functional change intended. Reported-by: Gustavo A. R. Silva Closes: https://lore.kernel.org/all/aPfNKRpLfhmhYqfP@kspp Acked-by: Marc Zyngier Acked-by: Christian Borntraeger [..] Acked-by: Anup Patel Reviewed-by: Bibo Mao Acked-by: Gustavo A. R. Silva Link: https://patch.msgid.link/20251205232655.445294-1-seanjc@google.com Signed-off-by: Sean Christopherson --- include/linux/kvm_host.h | 83 ++++++++++++++++++++---------------------------- include/uapi/linux/kvm.h | 8 +++++ 2 files changed, 43 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index d93f75b05ae2..7428d9949382 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1927,56 +1927,43 @@ enum kvm_stat_kind { struct kvm_stat_data { struct kvm *kvm; - const struct _kvm_stats_desc *desc; + const struct kvm_stats_desc *desc; enum kvm_stat_kind kind; }; -struct _kvm_stats_desc { - struct kvm_stats_desc desc; - char name[KVM_STATS_NAME_SIZE]; -}; - -#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \ - .flags = type | unit | base | \ - BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ - BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ - BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ - .exponent = exp, \ - .size = sz, \ +#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \ + .flags = type | unit | base | \ + BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ + BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ + BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ + .exponent = exp, \ + .size = sz, \ .bucket_size = bsz -#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ - { \ - { \ - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ - .offset = offsetof(struct kvm_vm_stat, generic.stat) \ - }, \ - .name = #stat, \ - } -#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ - { \ - { \ - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ - .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \ - }, \ - .name = #stat, \ - } -#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ - { \ - { \ - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ - .offset = offsetof(struct kvm_vm_stat, stat) \ - }, \ - .name = #stat, \ - } -#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ - { \ - { \ - STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ - .offset = offsetof(struct kvm_vcpu_stat, stat) \ - }, \ - .name = #stat, \ - } +#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ +{ \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ + .offset = offsetof(struct kvm_vm_stat, generic.stat), \ + .name = #stat, \ +} +#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ +{ \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ + 
.offset = offsetof(struct kvm_vcpu_stat, generic.stat), \ + .name = #stat, \ +} +#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ +{ \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ + .offset = offsetof(struct kvm_vm_stat, stat), \ + .name = #stat, \ +} +#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ +{ \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ + .offset = offsetof(struct kvm_vcpu_stat, stat), \ + .name = #stat, \ +} /* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */ #define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \ SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz) @@ -2053,7 +2040,7 @@ struct _kvm_stats_desc { STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, - const struct _kvm_stats_desc *desc, + const struct kvm_stats_desc *desc, void *stats, size_t size_stats, char __user *user_buffer, size_t size, loff_t *offset); @@ -2098,9 +2085,9 @@ static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value) extern const struct kvm_stats_header kvm_vm_stats_header; -extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; +extern const struct kvm_stats_desc kvm_vm_stats_desc[]; extern const struct kvm_stats_header kvm_vcpu_stats_header; -extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[]; +extern const struct kvm_stats_desc kvm_vcpu_stats_desc[]; #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index dddb781b0507..76bd54848b11 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -14,6 +14,10 @@ #include #include +#ifdef __KERNEL__ +#include +#endif + #define KVM_API_VERSION 12 /* @@ -1579,7 +1583,11 @@ struct kvm_stats_desc { __u16 size; __u32 offset; __u32 bucket_size; +#ifdef __KERNEL__ + char name[KVM_STATS_NAME_SIZE]; +#else char name[]; +#endif }; #define KVM_GET_STATS_FD _IO(KVMIO, 0xce) -- cgit v1.2.3 From fa655a9ca73f7df32b8ca4d14ce11742f9578288 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Tue, 3 Mar 2026 22:31:01 +0100 Subject: nvme: Annotate struct nvme_dhchap_key with __counted_by Add the __counted_by() compiler attribute to the flexible array member 'key' to improve access bounds-checking via CONFIG_UBSAN_BOUNDS and CONFIG_FORTIFY_SOURCE. Reviewed-by: Christoph Hellwig Signed-off-by: Thorsten Blum Signed-off-by: Keith Busch --- include/linux/nvme-auth.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h index 60e069a6757f..e75c29c51464 100644 --- a/include/linux/nvme-auth.h +++ b/include/linux/nvme-auth.h @@ -11,7 +11,7 @@ struct nvme_dhchap_key { size_t len; u8 hash; - u8 key[]; + u8 key[] __counted_by(len); }; u32 nvme_auth_get_seqnum(void); -- cgit v1.2.3 From 22fd7f7fed2ae3702f90d1985c326354e86b9c75 Mon Sep 17 00:00:00 2001 From: Muhammad Amirul Asyraf Mohamad Jamian Date: Thu, 5 Mar 2026 01:31:51 -0800 Subject: firmware: stratix10-svc: Add Multi SVC clients support In the current implementation, SVC client drivers such as socfpga-hwmon, intel_fcs, stratix10-soc, stratix10-rsu each send an SMC command that triggers a single thread in the stratix10-svc driver. Upon receiving a callback, the initiating client driver sends a stratix10-svc-done signal, terminating the thread without waiting for other pending SMC commands to complete. 
This leads to a timeout issue in the firmware SVC mailbox service when multiple client drivers send SMC commands concurrently. To resolve this issue, a dedicated thread is now created per channel. The stratix10-svc driver will support up to the number of channels defined by SVC_NUM_CHANNEL. Thread synchronization is handled using a mutex to prevent simultaneous issuance of SMC commands by multiple threads. SVC_NUM_DATA_IN_FIFO is reduced from 32 to 8, since each channel now has its own dedicated FIFO and the SDM processes commands one at a time. 8 entries per channel is sufficient while keeping the total aggregate capacity the same (4 channels x 8 = 32 entries). Additionally, a thread task is now validated before invoking kthread_stop when the user aborts, ensuring safe termination. Timeout values have also been adjusted to accommodate the increased load from concurrent client driver activity. Fixes: 7ca5ce896524 ("firmware: add Intel Stratix10 service layer driver") Cc: stable@vger.kernel.org Signed-off-by: Ang Tien Sung Signed-off-by: Fong, Yan Kei Signed-off-by: Muhammad Amirul Asyraf Mohamad Jamian Link: https://lore.kernel.org/all/20260305093151.2678-1-muhammad.amirul.asyraf.mohamad.jamian@altera.com Signed-off-by: Dinh Nguyen --- drivers/firmware/stratix10-svc.c | 228 ++++++++++++--------- .../linux/firmware/intel/stratix10-svc-client.h | 8 +- 2 files changed, 130 insertions(+), 106 deletions(-) (limited to 'include') diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index 6f5c298582ab..e9e35d67ef96 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -37,15 +37,14 @@ * service layer will return error to FPGA manager when timeout occurs, * timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC. */ -#define SVC_NUM_DATA_IN_FIFO 32 +#define SVC_NUM_DATA_IN_FIFO 8 #define SVC_NUM_CHANNEL 4 -#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200 +#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 2000 #define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30 #define BYTE_TO_WORD_SIZE 4 /* stratix10 service layer clients */ #define STRATIX10_RSU "stratix10-rsu" -#define INTEL_FCS "intel-fcs" /* Maximum number of SDM client IDs. 
*/ #define MAX_SDM_CLIENT_IDS 16 @@ -105,11 +104,9 @@ struct stratix10_svc_chan; /** * struct stratix10_svc - svc private data * @stratix10_svc_rsu: pointer to stratix10 RSU device - * @intel_svc_fcs: pointer to the FCS device */ struct stratix10_svc { struct platform_device *stratix10_svc_rsu; - struct platform_device *intel_svc_fcs; }; /** @@ -251,12 +248,10 @@ struct stratix10_async_ctrl { * @num_active_client: number of active service client * @node: list management * @genpool: memory pool pointing to the memory region - * @task: pointer to the thread task which handles SMC or HVC call - * @svc_fifo: a queue for storing service message data * @complete_status: state for completion - * @svc_fifo_lock: protect access to service message data queue * @invoke_fn: function to issue secure monitor call or hypervisor call * @svc: manages the list of client svc drivers + * @sdm_lock: only allows a single command single response to SDM * @actrl: async control structure * * This struct is used to create communication channels for service clients, to @@ -269,12 +264,10 @@ struct stratix10_svc_controller { int num_active_client; struct list_head node; struct gen_pool *genpool; - struct task_struct *task; - struct kfifo svc_fifo; struct completion complete_status; - spinlock_t svc_fifo_lock; svc_invoke_fn *invoke_fn; struct stratix10_svc *svc; + struct mutex sdm_lock; struct stratix10_async_ctrl actrl; }; @@ -283,6 +276,9 @@ struct stratix10_svc_controller { * @ctrl: pointer to service controller which is the provider of this channel * @scl: pointer to service client which owns the channel * @name: service client name associated with the channel + * @task: pointer to the thread task which handles SMC or HVC call + * @svc_fifo: a queue for storing service message data (separate fifo for every channel) + * @svc_fifo_lock: protect access to service message data queue (locking pending fifo) * @lock: protect access to the channel * @async_chan: reference to asynchronous channel object for this channel * @@ -293,6 +289,9 @@ struct stratix10_svc_chan { struct stratix10_svc_controller *ctrl; struct stratix10_svc_client *scl; char *name; + struct task_struct *task; + struct kfifo svc_fifo; + spinlock_t svc_fifo_lock; spinlock_t lock; struct stratix10_async_chan *async_chan; }; @@ -527,10 +526,10 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data, */ static int svc_normal_to_secure_thread(void *data) { - struct stratix10_svc_controller - *ctrl = (struct stratix10_svc_controller *)data; - struct stratix10_svc_data *pdata; - struct stratix10_svc_cb_data *cbdata; + struct stratix10_svc_chan *chan = (struct stratix10_svc_chan *)data; + struct stratix10_svc_controller *ctrl = chan->ctrl; + struct stratix10_svc_data *pdata = NULL; + struct stratix10_svc_cb_data *cbdata = NULL; struct arm_smccc_res res; unsigned long a0, a1, a2, a3, a4, a5, a6, a7; int ret_fifo = 0; @@ -555,12 +554,12 @@ static int svc_normal_to_secure_thread(void *data) a6 = 0; a7 = 0; - pr_debug("smc_hvc_shm_thread is running\n"); + pr_debug("%s: %s: Thread is running!\n", __func__, chan->name); while (!kthread_should_stop()) { - ret_fifo = kfifo_out_spinlocked(&ctrl->svc_fifo, + ret_fifo = kfifo_out_spinlocked(&chan->svc_fifo, pdata, sizeof(*pdata), - &ctrl->svc_fifo_lock); + &chan->svc_fifo_lock); if (!ret_fifo) continue; @@ -569,9 +568,25 @@ static int svc_normal_to_secure_thread(void *data) (unsigned int)pdata->paddr, pdata->command, (unsigned int)pdata->size); + /* SDM can only process one command at a time */ + 
pr_debug("%s: %s: Thread is waiting for mutex!\n", + __func__, chan->name); + if (mutex_lock_interruptible(&ctrl->sdm_lock)) { + /* item already dequeued; notify client to unblock it */ + cbdata->status = BIT(SVC_STATUS_ERROR); + cbdata->kaddr1 = NULL; + cbdata->kaddr2 = NULL; + cbdata->kaddr3 = NULL; + if (pdata->chan->scl) + pdata->chan->scl->receive_cb(pdata->chan->scl, + cbdata); + break; + } + switch (pdata->command) { case COMMAND_RECONFIG_DATA_CLAIM: svc_thread_cmd_data_claim(ctrl, pdata, cbdata); + mutex_unlock(&ctrl->sdm_lock); continue; case COMMAND_RECONFIG: a0 = INTEL_SIP_SMC_FPGA_CONFIG_START; @@ -700,10 +715,11 @@ static int svc_normal_to_secure_thread(void *data) break; default: pr_warn("it shouldn't happen\n"); - break; + mutex_unlock(&ctrl->sdm_lock); + continue; } - pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x", - __func__, + pr_debug("%s: %s: before SMC call -- a0=0x%016x a1=0x%016x", + __func__, chan->name, (unsigned int)a0, (unsigned int)a1); pr_debug(" a2=0x%016x\n", (unsigned int)a2); @@ -712,8 +728,8 @@ static int svc_normal_to_secure_thread(void *data) pr_debug(" a5=0x%016x\n", (unsigned int)a5); ctrl->invoke_fn(a0, a1, a2, a3, a4, a5, a6, a7, &res); - pr_debug("%s: after SMC call -- res.a0=0x%016x", - __func__, (unsigned int)res.a0); + pr_debug("%s: %s: after SMC call -- res.a0=0x%016x", + __func__, chan->name, (unsigned int)res.a0); pr_debug(" res.a1=0x%016x, res.a2=0x%016x", (unsigned int)res.a1, (unsigned int)res.a2); pr_debug(" res.a3=0x%016x\n", (unsigned int)res.a3); @@ -728,6 +744,7 @@ static int svc_normal_to_secure_thread(void *data) cbdata->kaddr2 = NULL; cbdata->kaddr3 = NULL; pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata); + mutex_unlock(&ctrl->sdm_lock); continue; } @@ -801,6 +818,8 @@ static int svc_normal_to_secure_thread(void *data) break; } + + mutex_unlock(&ctrl->sdm_lock); } kfree(cbdata); @@ -1696,22 +1715,33 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg) if (!p_data) return -ENOMEM; - /* first client will create kernel thread */ - if (!chan->ctrl->task) { - chan->ctrl->task = - kthread_run_on_cpu(svc_normal_to_secure_thread, - (void *)chan->ctrl, - cpu, "svc_smc_hvc_thread"); - if (IS_ERR(chan->ctrl->task)) { + /* first caller creates the per-channel kthread */ + if (!chan->task) { + struct task_struct *task; + + task = kthread_run_on_cpu(svc_normal_to_secure_thread, + (void *)chan, + cpu, "svc_smc_hvc_thread"); + if (IS_ERR(task)) { dev_err(chan->ctrl->dev, "failed to create svc_smc_hvc_thread\n"); kfree(p_data); return -EINVAL; } + + spin_lock(&chan->lock); + if (chan->task) { + /* another caller won the race; discard our thread */ + spin_unlock(&chan->lock); + kthread_stop(task); + } else { + chan->task = task; + spin_unlock(&chan->lock); + } } - pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__, - p_msg->payload, p_msg->command, + pr_debug("%s: %s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__, + chan->name, p_msg->payload, p_msg->command, (unsigned int)p_msg->payload_length); if (list_empty(&svc_data_mem)) { @@ -1747,12 +1777,16 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg) p_data->arg[2] = p_msg->arg[2]; p_data->size = p_msg->payload_length; p_data->chan = chan; - pr_debug("%s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", __func__, - (unsigned int)p_data->paddr, p_data->command, - (unsigned int)p_data->size); - ret = kfifo_in_spinlocked(&chan->ctrl->svc_fifo, p_data, + pr_debug("%s: %s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", + __func__, + chan->name, + 
(unsigned int)p_data->paddr, + p_data->command, + (unsigned int)p_data->size); + + ret = kfifo_in_spinlocked(&chan->svc_fifo, p_data, sizeof(*p_data), - &chan->ctrl->svc_fifo_lock); + &chan->svc_fifo_lock); kfree(p_data); @@ -1773,11 +1807,12 @@ EXPORT_SYMBOL_GPL(stratix10_svc_send); */ void stratix10_svc_done(struct stratix10_svc_chan *chan) { - /* stop thread when thread is running AND only one active client */ - if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) { - pr_debug("svc_smc_hvc_shm_thread is stopped\n"); - kthread_stop(chan->ctrl->task); - chan->ctrl->task = NULL; + /* stop thread when thread is running */ + if (chan->task) { + pr_debug("%s: %s: svc_smc_hvc_shm_thread is stopping\n", + __func__, chan->name); + kthread_stop(chan->task); + chan->task = NULL; } } EXPORT_SYMBOL_GPL(stratix10_svc_done); @@ -1817,8 +1852,8 @@ void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan, pmem->paddr = pa; pmem->size = s; list_add_tail(&pmem->node, &svc_data_mem); - pr_debug("%s: va=%p, pa=0x%016x\n", __func__, - pmem->vaddr, (unsigned int)pmem->paddr); + pr_debug("%s: %s: va=%p, pa=0x%016x\n", __func__, + chan->name, pmem->vaddr, (unsigned int)pmem->paddr); return (void *)va; } @@ -1855,6 +1890,13 @@ static const struct of_device_id stratix10_svc_drv_match[] = { {}, }; +static const char * const chan_names[SVC_NUM_CHANNEL] = { + SVC_CLIENT_FPGA, + SVC_CLIENT_RSU, + SVC_CLIENT_FCS, + SVC_CLIENT_HWMON +}; + static int stratix10_svc_drv_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1862,11 +1904,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) struct stratix10_svc_chan *chans; struct gen_pool *genpool; struct stratix10_svc_sh_memory *sh_memory; - struct stratix10_svc *svc; + struct stratix10_svc *svc = NULL; svc_invoke_fn *invoke_fn; size_t fifo_size; - int ret; + int ret, i = 0; /* get SMC or HVC function */ invoke_fn = get_invoke_func(dev); @@ -1905,8 +1947,8 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) controller->num_active_client = 0; controller->chans = chans; controller->genpool = genpool; - controller->task = NULL; controller->invoke_fn = invoke_fn; + INIT_LIST_HEAD(&controller->node); init_completion(&controller->complete_status); ret = stratix10_svc_async_init(controller); @@ -1917,32 +1959,20 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) } fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO; - ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL); - if (ret) { - dev_err(dev, "failed to allocate FIFO\n"); - goto err_async_exit; - } - spin_lock_init(&controller->svc_fifo_lock); - - chans[0].scl = NULL; - chans[0].ctrl = controller; - chans[0].name = SVC_CLIENT_FPGA; - spin_lock_init(&chans[0].lock); + mutex_init(&controller->sdm_lock); - chans[1].scl = NULL; - chans[1].ctrl = controller; - chans[1].name = SVC_CLIENT_RSU; - spin_lock_init(&chans[1].lock); - - chans[2].scl = NULL; - chans[2].ctrl = controller; - chans[2].name = SVC_CLIENT_FCS; - spin_lock_init(&chans[2].lock); - - chans[3].scl = NULL; - chans[3].ctrl = controller; - chans[3].name = SVC_CLIENT_HWMON; - spin_lock_init(&chans[3].lock); + for (i = 0; i < SVC_NUM_CHANNEL; i++) { + chans[i].scl = NULL; + chans[i].ctrl = controller; + chans[i].name = (char *)chan_names[i]; + spin_lock_init(&chans[i].lock); + ret = kfifo_alloc(&chans[i].svc_fifo, fifo_size, GFP_KERNEL); + if (ret) { + dev_err(dev, "failed to allocate FIFO %d\n", i); + goto err_free_fifos; + } + 
spin_lock_init(&chans[i].svc_fifo_lock); + } list_add_tail(&controller->node, &svc_ctrl); platform_set_drvdata(pdev, controller); @@ -1951,7 +1981,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL); if (!svc) { ret = -ENOMEM; - goto err_free_kfifo; + goto err_free_fifos; } controller->svc = svc; @@ -1959,51 +1989,43 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev) if (!svc->stratix10_svc_rsu) { dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU); ret = -ENOMEM; - goto err_free_kfifo; + goto err_free_fifos; } ret = platform_device_add(svc->stratix10_svc_rsu); - if (ret) { - platform_device_put(svc->stratix10_svc_rsu); - goto err_free_kfifo; - } - - svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1); - if (!svc->intel_svc_fcs) { - dev_err(dev, "failed to allocate %s device\n", INTEL_FCS); - ret = -ENOMEM; - goto err_unregister_rsu_dev; - } - - ret = platform_device_add(svc->intel_svc_fcs); - if (ret) { - platform_device_put(svc->intel_svc_fcs); - goto err_unregister_rsu_dev; - } + if (ret) + goto err_put_device; ret = of_platform_default_populate(dev_of_node(dev), NULL, dev); if (ret) - goto err_unregister_fcs_dev; + goto err_unregister_rsu_dev; pr_info("Intel Service Layer Driver Initialized\n"); return 0; -err_unregister_fcs_dev: - platform_device_unregister(svc->intel_svc_fcs); err_unregister_rsu_dev: platform_device_unregister(svc->stratix10_svc_rsu); -err_free_kfifo: - kfifo_free(&controller->svc_fifo); -err_async_exit: + goto err_free_fifos; +err_put_device: + platform_device_put(svc->stratix10_svc_rsu); +err_free_fifos: + /* only remove from list if list_add_tail() was reached */ + if (!list_empty(&controller->node)) + list_del(&controller->node); + /* free only the FIFOs that were successfully allocated */ + while (i--) + kfifo_free(&chans[i].svc_fifo); stratix10_svc_async_exit(controller); err_destroy_pool: gen_pool_destroy(genpool); + return ret; } static void stratix10_svc_drv_remove(struct platform_device *pdev) { + int i; struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); struct stratix10_svc *svc = ctrl->svc; @@ -2011,14 +2033,16 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev) of_platform_depopulate(ctrl->dev); - platform_device_unregister(svc->intel_svc_fcs); platform_device_unregister(svc->stratix10_svc_rsu); - kfifo_free(&ctrl->svc_fifo); - if (ctrl->task) { - kthread_stop(ctrl->task); - ctrl->task = NULL; + for (i = 0; i < SVC_NUM_CHANNEL; i++) { + if (ctrl->chans[i].task) { + kthread_stop(ctrl->chans[i].task); + ctrl->chans[i].task = NULL; + } + kfifo_free(&ctrl->chans[i].svc_fifo); } + if (ctrl->genpool) gen_pool_destroy(ctrl->genpool); list_del(&ctrl->node); diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h index d290060f4c73..91013161e9db 100644 --- a/include/linux/firmware/intel/stratix10-svc-client.h +++ b/include/linux/firmware/intel/stratix10-svc-client.h @@ -68,12 +68,12 @@ * timeout value used in Stratix10 FPGA manager driver. 
* timeout value used in RSU driver */ -#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300 -#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720 -#define SVC_RSU_REQUEST_TIMEOUT_MS 300 +#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 5000 +#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 5000 +#define SVC_RSU_REQUEST_TIMEOUT_MS 2000 #define SVC_FCS_REQUEST_TIMEOUT_MS 2000 #define SVC_COMPLETED_TIMEOUT_MS 30000 -#define SVC_HWMON_REQUEST_TIMEOUT_MS 300 +#define SVC_HWMON_REQUEST_TIMEOUT_MS 2000 struct stratix10_svc_chan; -- cgit v1.2.3 From 6ffd853b0b10e1e292cef0bfd0997986471254de Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 1 Mar 2026 16:51:44 -0800 Subject: build_bug.h: correct function parameters names in kernel-doc Use the correct function (or macro) names to avoid kernel-doc warnings: Warning: include/linux/build_bug.h:38 function parameter 'cond' not described in 'BUILD_BUG_ON_MSG' Warning: include/linux/build_bug.h:38 function parameter 'msg' not described in 'BUILD_BUG_ON_MSG' Warning: include/linux/build_bug.h:76 function parameter 'expr' not described in 'static_assert' Link: https://lkml.kernel.org/r/20260302005144.3467019-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap Reviewed-by: SeongJae Park Signed-off-by: Andrew Morton --- include/linux/build_bug.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index 2cfbb4c65c78..d3dc5dc5f916 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h @@ -32,7 +32,8 @@ /** * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied * error message. - * @condition: the condition which the compiler should know is false. + * @cond: the condition which the compiler should know is false. + * @msg: build-time error message * * See BUILD_BUG_ON for description. */ @@ -60,6 +61,7 @@ /** * static_assert - check integer constant expression at build time + * @expr: expression to be checked * * static_assert() is a wrapper for the C11 _Static_assert, with a * little macro magic to make the message optional (defaulting to the -- cgit v1.2.3 From b2e48c429ec54715d16fefa719dd2fbded2e65be Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 10 Mar 2026 21:28:53 +0100 Subject: sched/mmcid: Prevent CID stalls due to concurrent forks A newly forked task is accounted as MMCID user before the task is visible in the process' thread list and the global task list. This creates the following problem: CPU1 CPU2 fork() sched_mm_cid_fork(tnew1) tnew1->mm.mm_cid_users++; tnew1->mm_cid.cid = getcid() -> preemption fork() sched_mm_cid_fork(tnew2) tnew2->mm.mm_cid_users++; // Reaches the per CPU threshold mm_cid_fixup_tasks_to_cpus() for_each_other(current, p) .... As tnew1 is not visible yet, this fails to fix up the already allocated CID of tnew1. As a consequence a subsequent schedule in might fail to acquire a (transitional) CID and the machine stalls. Move the invocation of sched_mm_cid_fork() after the new task becomes visible in the thread and the task list to prevent this. This also makes it symmetrical vs. exit() where the task is removed as CID user before the task is removed from the thread and task lists. 
Fixes: fbd0e71dc370 ("sched/mmcid: Provide CID ownership mode fixup functions") Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Tested-by: Matthieu Baerts (NGI0) Link: https://patch.msgid.link/20260310202525.969061974@kernel.org --- include/linux/sched.h | 2 -- kernel/fork.c | 2 -- kernel/sched/core.c | 22 +++++++++++++++------- 3 files changed, 15 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index a7b4a980eb2f..5a5d3dbc9cdf 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2354,7 +2354,6 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo #ifdef CONFIG_SCHED_MM_CID void sched_mm_cid_before_execve(struct task_struct *t); void sched_mm_cid_after_execve(struct task_struct *t); -void sched_mm_cid_fork(struct task_struct *t); void sched_mm_cid_exit(struct task_struct *t); static __always_inline int task_mm_cid(struct task_struct *t) { @@ -2363,7 +2362,6 @@ static __always_inline int task_mm_cid(struct task_struct *t) #else static inline void sched_mm_cid_before_execve(struct task_struct *t) { } static inline void sched_mm_cid_after_execve(struct task_struct *t) { } -static inline void sched_mm_cid_fork(struct task_struct *t) { } static inline void sched_mm_cid_exit(struct task_struct *t) { } static __always_inline int task_mm_cid(struct task_struct *t) { diff --git a/kernel/fork.c b/kernel/fork.c index 65113a304518..7febf4c2889e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1586,7 +1586,6 @@ static int copy_mm(u64 clone_flags, struct task_struct *tsk) tsk->mm = mm; tsk->active_mm = mm; - sched_mm_cid_fork(tsk); return 0; } @@ -2498,7 +2497,6 @@ bad_fork_cleanup_namespaces: exit_nsproxy_namespaces(p); bad_fork_cleanup_mm: if (p->mm) { - sched_mm_cid_exit(p); mm_clear_owner(p->mm, p); mmput(p->mm); } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b7f77c165a6e..d25427855b5d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4729,8 +4729,11 @@ void sched_cancel_fork(struct task_struct *p) scx_cancel_fork(p); } +static void sched_mm_cid_fork(struct task_struct *t); + void sched_post_fork(struct task_struct *p) { + sched_mm_cid_fork(p); uclamp_post_fork(p); scx_post_fork(p); } @@ -10646,12 +10649,13 @@ static void mm_cid_do_fixup_tasks_to_cpus(struct mm_struct *mm) * possible switch back to per task mode happens either in the * deferred handler function or in the next fork()/exit(). * - * The caller has already transferred. The newly incoming task is - * already accounted for, but not yet visible. + * The caller has already transferred so remove it from the users + * count. The incoming task is already visible and has mm_cid.active, + * but has task::mm_cid::cid == UNSET. Still it needs to be accounted + * for. Concurrent fork()s might add more threads, but all of them have + * task::mm_cid::active = 0, so they don't affect the accounting here. 
*/ - users = mm->mm_cid.users - 2; - if (!users) - return; + users = mm->mm_cid.users - 1; guard(rcu)(); for_other_threads(current, t) { @@ -10688,12 +10692,15 @@ static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm) return mm_update_max_cids(mm); } -void sched_mm_cid_fork(struct task_struct *t) +static void sched_mm_cid_fork(struct task_struct *t) { struct mm_struct *mm = t->mm; bool percpu; - WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET); + if (!mm) + return; + + WARN_ON_ONCE(t->mm_cid.cid != MM_CID_UNSET); guard(mutex)(&mm->mm_cid.mutex); scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) { @@ -10885,6 +10892,7 @@ void mm_init_cid(struct mm_struct *mm, struct task_struct *p) } #else /* CONFIG_SCHED_MM_CID */ static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { } +static inline void sched_mm_cid_fork(struct task_struct *t) { } #endif /* !CONFIG_SCHED_MM_CID */ static DEFINE_PER_CPU(struct sched_change_ctx, sched_change_ctx); -- cgit v1.2.3 From 192d852129b1b7c4f0ddbab95d0de1efd5ee1405 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 10 Mar 2026 21:29:09 +0100 Subject: sched/mmcid: Avoid full tasklist walks Chasing vfork()'ed tasks on a CID ownership mode switch requires a full task list walk, which is obviously expensive on large systems. Avoid that by keeping a list of tasks using a mm MMCID entity in mm::mm_cid and walk this list instead. This removes the proven to be flaky counting logic and avoids a full task list walk in the case of vfork()'ed tasks. Fixes: fbd0e71dc370 ("sched/mmcid: Provide CID ownership mode fixup functions") Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra (Intel) Tested-by: Matthieu Baerts (NGI0) Link: https://patch.msgid.link/20260310202526.183824481@kernel.org --- include/linux/rseq_types.h | 6 +++++- kernel/fork.c | 1 + kernel/sched/core.c | 54 +++++++++++----------------------------------- 3 files changed, 18 insertions(+), 43 deletions(-) (limited to 'include') diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h index da5fa6f40294..0b42045988db 100644 --- a/include/linux/rseq_types.h +++ b/include/linux/rseq_types.h @@ -133,10 +133,12 @@ struct rseq_data { }; * @active: MM CID is active for the task * @cid: The CID associated to the task either permanently or * borrowed from the CPU + * @node: Queued in the per MM MMCID list */ struct sched_mm_cid { unsigned int active; unsigned int cid; + struct hlist_node node; }; /** @@ -157,6 +159,7 @@ struct mm_cid_pcpu { * @work: Regular work to handle the affinity mode change case * @lock: Spinlock to protect against affinity setting which can't take @mutex * @mutex: Mutex to serialize forks and exits related to this mm + * @user_list: List of the MM CID users of a MM * @nr_cpus_allowed: The number of CPUs in the per MM allowed CPUs map. The map * is growth only. * @users: The number of tasks sharing this MM. 
Separate from mm::mm_users @@ -177,13 +180,14 @@ struct mm_mm_cid { raw_spinlock_t lock; struct mutex mutex; + struct hlist_head user_list; /* Low frequency modified */ unsigned int nr_cpus_allowed; unsigned int users; unsigned int pcpu_thrs; unsigned int update_deferred; -}____cacheline_aligned_in_smp; +} ____cacheline_aligned; #else /* CONFIG_SCHED_MM_CID */ struct mm_mm_cid { }; struct sched_mm_cid { }; diff --git a/kernel/fork.c b/kernel/fork.c index 7febf4c2889e..bc2bf58b93b6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1000,6 +1000,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) #ifdef CONFIG_SCHED_MM_CID tsk->mm_cid.cid = MM_CID_UNSET; tsk->mm_cid.active = 0; + INIT_HLIST_NODE(&tsk->mm_cid.node); #endif return tsk; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f56156f91d08..496dff740dca 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -10620,13 +10620,10 @@ static inline void mm_cid_transit_to_cpu(struct task_struct *t, struct mm_cid_pc } } -static bool mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm) +static void mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm) { /* Remote access to mm::mm_cid::pcpu requires rq_lock */ guard(task_rq_lock)(t); - /* If the task is not active it is not in the users count */ - if (!t->mm_cid.active) - return false; if (cid_on_task(t->mm_cid.cid)) { /* If running on the CPU, put the CID in transit mode, otherwise drop it */ if (task_rq(t)->curr == t) @@ -10634,51 +10631,21 @@ static bool mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm else mm_unset_cid_on_task(t); } - return true; } -static void mm_cid_do_fixup_tasks_to_cpus(struct mm_struct *mm) +static void mm_cid_fixup_tasks_to_cpus(void) { - struct task_struct *p, *t; - unsigned int users; - - /* - * This can obviously race with a concurrent affinity change, which - * increases the number of allowed CPUs for this mm, but that does - * not affect the mode and only changes the CID constraints. A - * possible switch back to per task mode happens either in the - * deferred handler function or in the next fork()/exit(). - * - * The caller has already transferred so remove it from the users - * count. The incoming task is already visible and has mm_cid.active, - * but has task::mm_cid::cid == UNSET. Still it needs to be accounted - * for. Concurrent fork()s might add more threads, but all of them have - * task::mm_cid::active = 0, so they don't affect the accounting here. - */ - users = mm->mm_cid.users - 1; - - guard(rcu)(); - for_other_threads(current, t) { - if (mm_cid_fixup_task_to_cpu(t, mm)) - users--; - } + struct mm_struct *mm = current->mm; + struct task_struct *t; - if (!users) - return; + lockdep_assert_held(&mm->mm_cid.mutex); - /* Happens only for VM_CLONE processes. */ - for_each_process_thread(p, t) { - if (t == current || t->mm != mm) - continue; - mm_cid_fixup_task_to_cpu(t, mm); + hlist_for_each_entry(t, &mm->mm_cid.user_list, mm_cid.node) { + /* Current has already transferred before invoking the fixup. 
*/ + if (t != current) + mm_cid_fixup_task_to_cpu(t, mm); } -} - -static void mm_cid_fixup_tasks_to_cpus(void) -{ - struct mm_struct *mm = current->mm; - mm_cid_do_fixup_tasks_to_cpus(mm); mm_cid_complete_transit(mm, MM_CID_ONCPU); } @@ -10687,6 +10654,7 @@ static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm) lockdep_assert_held(&mm->mm_cid.lock); t->mm_cid.active = 1; + hlist_add_head(&t->mm_cid.node, &mm->mm_cid.user_list); mm->mm_cid.users++; return mm_update_max_cids(mm); } @@ -10744,6 +10712,7 @@ static bool sched_mm_cid_remove_user(struct task_struct *t) /* Clear the transition bit */ t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid); mm_unset_cid_on_task(t); + hlist_del_init(&t->mm_cid.node); t->mm->mm_cid.users--; return mm_update_max_cids(t->mm); } @@ -10886,6 +10855,7 @@ void mm_init_cid(struct mm_struct *mm, struct task_struct *p) mutex_init(&mm->mm_cid.mutex); mm->mm_cid.irq_work = IRQ_WORK_INIT_HARD(mm_cid_irq_work); INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn); + INIT_HLIST_HEAD(&mm->mm_cid.user_list); cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask); bitmap_zero(mm_cidmask(mm), num_possible_cpus()); } -- cgit v1.2.3 From 227312b4a65c373d5d8b4683b7fc36203fedc516 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 28 Feb 2026 15:52:58 +0100 Subject: HID: input: Add HID_BATTERY_QUIRK_DYNAMIC for Elan touchscreens Elan touchscreens have a HID-battery device for the stylus which is always there even if there is no stylus. This is causing upower to report an empty battery for the stylus and some desktop-environments will show a notification about this, which is quite annoying. Because of this the HID-battery is being ignored on all Elan I2c and USB touchscreens, but this causes there to be no battery reporting for the stylus at all. This adds a new HID_BATTERY_QUIRK_DYNAMIC and uses these for the Elan touchscreens. This new quirks causes the present value of the battery to start at 0, which will make userspace ignore it and only sets present to 1 after receiving a battery input report which only happens when the stylus gets in range. Reported-by: ggrundik@gmail.com Closes: https://bugzilla.kernel.org/show_bug.cgi?id=221118 Signed-off-by: Hans de Goede Reviewed-by: Sebastian Reichel Signed-off-by: Jiri Kosina --- drivers/hid/hid-input.c | 14 +++++++++++--- include/linux/hid.h | 1 + 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 67ca1e88ce13..8fc20df99b97 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -354,6 +354,7 @@ static enum power_supply_property hidinput_battery_props[] = { #define HID_BATTERY_QUIRK_FEATURE (1 << 1) /* ask for feature report */ #define HID_BATTERY_QUIRK_IGNORE (1 << 2) /* completely ignore the battery */ #define HID_BATTERY_QUIRK_AVOID_QUERY (1 << 3) /* do not query the battery */ +#define HID_BATTERY_QUIRK_DYNAMIC (1 << 4) /* report present only after life signs */ static const struct hid_device_id hid_battery_quirks[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, @@ -398,8 +399,8 @@ static const struct hid_device_id hid_battery_quirks[] = { * Elan HID touchscreens seem to all report a non present battery, * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C and USB HID devices. 
*/ - { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE }, - { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE }, + { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_DYNAMIC }, + { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_DYNAMIC }, {} }; @@ -456,11 +457,14 @@ static int hidinput_get_battery_property(struct power_supply *psy, int ret = 0; switch (prop) { - case POWER_SUPPLY_PROP_PRESENT: case POWER_SUPPLY_PROP_ONLINE: val->intval = 1; break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = dev->battery_present; + break; + case POWER_SUPPLY_PROP_CAPACITY: if (dev->battery_status != HID_BATTERY_REPORTED && !dev->battery_avoid_query) { @@ -573,6 +577,8 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, if (quirks & HID_BATTERY_QUIRK_AVOID_QUERY) dev->battery_avoid_query = true; + dev->battery_present = (quirks & HID_BATTERY_QUIRK_DYNAMIC) ? false : true; + dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); if (IS_ERR(dev->battery)) { error = PTR_ERR(dev->battery); @@ -628,6 +634,7 @@ static void hidinput_update_battery(struct hid_device *dev, unsigned int usage, return; if (hidinput_update_battery_charge_status(dev, usage, value)) { + dev->battery_present = true; power_supply_changed(dev->battery); return; } @@ -643,6 +650,7 @@ static void hidinput_update_battery(struct hid_device *dev, unsigned int usage, if (dev->battery_status != HID_BATTERY_REPORTED || capacity != dev->battery_capacity || ktime_after(ktime_get_coarse(), dev->battery_ratelimit_time)) { + dev->battery_present = true; dev->battery_capacity = capacity; dev->battery_status = HID_BATTERY_REPORTED; dev->battery_ratelimit_time = diff --git a/include/linux/hid.h b/include/linux/hid.h index 2990b9f94cb5..31324609af4d 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -682,6 +682,7 @@ struct hid_device { __s32 battery_charge_status; enum hid_battery_status battery_status; bool battery_avoid_query; + bool battery_present; ktime_t battery_ratelimit_time; #endif -- cgit v1.2.3 From 416909962e7cdf29fd01ac523c953f37708df93d Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 17 Feb 2026 22:07:47 -0500 Subject: USB: usbcore: Introduce usb_bulk_msg_killable() The synchronous message API in usbcore (usb_control_msg(), usb_bulk_msg(), and so on) uses uninterruptible waits. However, drivers may call these routines in the context of a user thread, which means it ought to be possible to at least kill them. For this reason, introduce a new usb_bulk_msg_killable() function which behaves the same as usb_bulk_msg() except for using wait_for_completion_killable_timeout() instead of wait_for_completion_timeout(). The same can be done later for usb_control_msg() later on, if it turns out to be needed. 
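For illustration only (a hypothetical driver snippet, not part of this patch; udev, ep_num, buf and len are placeholders from the driver's own context), a synchronous bulk transfer issued from a user thread would use the new helper like this:

	int actual = 0;
	int ret;

	/* Killable synchronous bulk read, 5 second timeout. */
	ret = usb_bulk_msg_killable(udev, usb_rcvbulkpipe(udev, ep_num),
				    buf, len, &actual, 5000);
	if (ret)
		dev_err(&udev->dev, "bulk read failed or was killed: %d\n", ret);
	else
		dev_dbg(&udev->dev, "received %d bytes\n", actual);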
Signed-off-by: Alan Stern Suggested-by: Oliver Neukum Link: https://lore.kernel.org/linux-usb/3acfe838-6334-4f6d-be7c-4bb01704b33d@rowland.harvard.edu/ Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") CC: stable@vger.kernel.org Link: https://patch.msgid.link/248628b4-cc83-4e81-a620-3ce4e0376d41@rowland.harvard.edu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/message.c | 79 ++++++++++++++++++++++++++++++++++++++++------ include/linux/usb.h | 5 +-- 2 files changed, 72 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index ea970ddf8879..d97ec7e8c280 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -42,16 +42,17 @@ static void usb_api_blocking_completion(struct urb *urb) /* - * Starts urb and waits for completion or timeout. Note that this call - * is NOT interruptible. Many device driver i/o requests should be - * interruptible and therefore these drivers should implement their - * own interruptible routines. + * Starts urb and waits for completion or timeout. + * Whether or not the wait is killable depends on the flag passed in. + * For example, compare usb_bulk_msg() and usb_bulk_msg_killable(). */ -static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length) +static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length, + bool killable) { struct api_context ctx; unsigned long expire; int retval; + long rc; init_completion(&ctx.done); urb->context = &ctx; @@ -61,12 +62,21 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length) goto out; expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; - if (!wait_for_completion_timeout(&ctx.done, expire)) { + if (killable) + rc = wait_for_completion_killable_timeout(&ctx.done, expire); + else + rc = wait_for_completion_timeout(&ctx.done, expire); + if (rc <= 0) { usb_kill_urb(urb); - retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status); + if (ctx.status != -ENOENT) + retval = ctx.status; + else if (rc == 0) + retval = -ETIMEDOUT; + else + retval = rc; dev_dbg(&urb->dev->dev, - "%s timed out on ep%d%s len=%u/%u\n", + "%s timed out or killed on ep%d%s len=%u/%u\n", current->comm, usb_endpoint_num(&urb->ep->desc), usb_urb_dir_in(urb) ? "in" : "out", @@ -100,7 +110,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev, usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data, len, usb_api_blocking_completion, NULL); - retv = usb_start_wait_urb(urb, timeout, &length); + retv = usb_start_wait_urb(urb, timeout, &length, false); if (retv < 0) return retv; else @@ -385,10 +395,59 @@ int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, usb_fill_bulk_urb(urb, usb_dev, pipe, data, len, usb_api_blocking_completion, NULL); - return usb_start_wait_urb(urb, timeout, actual_length); + return usb_start_wait_urb(urb, timeout, actual_length, false); } EXPORT_SYMBOL_GPL(usb_bulk_msg); +/** + * usb_bulk_msg_killable - Builds a bulk urb, sends it off and waits for completion in a killable state + * @usb_dev: pointer to the usb device to send the message to + * @pipe: endpoint "pipe" to send the message to + * @data: pointer to the data to send + * @len: length in bytes of the data to send + * @actual_length: pointer to a location to put the actual length transferred + * in bytes + * @timeout: time in msecs to wait for the message to complete before + * timing out (if 0 the wait is forever) + * + * Context: task context, might sleep. 
+ * + * This function is just like usb_blk_msg() except that it waits in a + * killable state. + * + * Return: + * If successful, 0. Otherwise a negative error number. The number of actual + * bytes transferred will be stored in the @actual_length parameter. + * + */ +int usb_bulk_msg_killable(struct usb_device *usb_dev, unsigned int pipe, + void *data, int len, int *actual_length, int timeout) +{ + struct urb *urb; + struct usb_host_endpoint *ep; + + ep = usb_pipe_endpoint(usb_dev, pipe); + if (!ep || len < 0) + return -EINVAL; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return -ENOMEM; + + if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_INT) { + pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); + usb_fill_int_urb(urb, usb_dev, pipe, data, len, + usb_api_blocking_completion, NULL, + ep->desc.bInterval); + } else + usb_fill_bulk_urb(urb, usb_dev, pipe, data, len, + usb_api_blocking_completion, NULL); + + return usb_start_wait_urb(urb, timeout, actual_length, true); +} +EXPORT_SYMBOL_GPL(usb_bulk_msg_killable); + /*-------------------------------------------------------------------*/ static void sg_clean(struct usb_sg_request *io) diff --git a/include/linux/usb.h b/include/linux/usb.h index fbfcc70b07fb..57ceeb02a7cb 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1868,8 +1868,9 @@ extern int usb_control_msg(struct usb_device *dev, unsigned int pipe, extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe, void *data, int len, int *actual_length, int timeout); extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, - void *data, int len, int *actual_length, - int timeout); + void *data, int len, int *actual_length, int timeout); +extern int usb_bulk_msg_killable(struct usb_device *usb_dev, unsigned int pipe, + void *data, int len, int *actual_length, int timeout); /* wrappers around usb_control_msg() for the most common standard requests */ int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request, -- cgit v1.2.3 From 1015c27a5e1a63efae2b18a9901494474b4d1dc3 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 17 Feb 2026 22:10:32 -0500 Subject: USB: core: Limit the length of unkillable synchronous timeouts The usb_control_msg(), usb_bulk_msg(), and usb_interrupt_msg() APIs in usbcore allow unlimited timeout durations. And since they use uninterruptible waits, this leaves open the possibility of hanging a task for an indefinitely long time, with no way to kill it short of unplugging the target device. To prevent this sort of problem, enforce a maximum limit on the length of these unkillable timeouts. The limit chosen here, somewhat arbitrarily, is 60 seconds. On many systems (although not all) this is short enough to avoid triggering the kernel's hung-task detector. In addition, clear up the ambiguity of negative timeout values by treating them the same as 0, i.e., using the maximum allowed timeout. 
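For illustration only (a hypothetical caller, not part of this patch; udev, pipe, buf, len and ret are placeholders), the caller-visible difference between the two synchronous APIs after this change is:

	int actual;

	/* Unkillable wait: a timeout of 0 (or a negative value) is now
	 * treated as USB_MAX_SYNCHRONOUS_TIMEOUT (60000 ms) instead of
	 * "wait forever". */
	ret = usb_bulk_msg(udev, pipe, buf, len, &actual, 0);

	/* Killable wait: a timeout <= 0 still means an unlimited wait,
	 * but the task can be killed while it sleeps. */
	ret = usb_bulk_msg_killable(udev, pipe, buf, len, &actual, 0);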
Signed-off-by: Alan Stern Link: https://lore.kernel.org/linux-usb/3acfe838-6334-4f6d-be7c-4bb01704b33d@rowland.harvard.edu/ Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") CC: stable@vger.kernel.org Link: https://patch.msgid.link/15fc9773-a007-47b0-a703-df89a8cf83dd@rowland.harvard.edu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/message.c | 27 +++++++++++++-------------- include/linux/usb.h | 3 +++ 2 files changed, 16 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index d97ec7e8c280..2ab120ce2fa8 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -45,6 +45,8 @@ static void usb_api_blocking_completion(struct urb *urb) * Starts urb and waits for completion or timeout. * Whether or not the wait is killable depends on the flag passed in. * For example, compare usb_bulk_msg() and usb_bulk_msg_killable(). + * + * For non-killable waits, we enforce a maximum limit on the timeout value. */ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length, bool killable) @@ -61,7 +63,9 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length, if (unlikely(retval)) goto out; - expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; + if (!killable && (timeout <= 0 || timeout > USB_MAX_SYNCHRONOUS_TIMEOUT)) + timeout = USB_MAX_SYNCHRONOUS_TIMEOUT; + expire = (timeout > 0) ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; if (killable) rc = wait_for_completion_killable_timeout(&ctx.done, expire); else @@ -127,8 +131,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev, * @index: USB message index value * @data: pointer to the data to send * @size: length in bytes of the data to send - * @timeout: time in msecs to wait for the message to complete before timing - * out (if 0 the wait is forever) + * @timeout: time in msecs to wait for the message to complete before timing out * * Context: task context, might sleep. * @@ -183,8 +186,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg); * @index: USB message index value * @driver_data: pointer to the data to send * @size: length in bytes of the data to send - * @timeout: time in msecs to wait for the message to complete before timing - * out (if 0 the wait is forever) + * @timeout: time in msecs to wait for the message to complete before timing out * @memflags: the flags for memory allocation for buffers * * Context: !in_interrupt () @@ -242,8 +244,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg_send); * @index: USB message index value * @driver_data: pointer to the data to be filled in by the message * @size: length in bytes of the data to be received - * @timeout: time in msecs to wait for the message to complete before timing - * out (if 0 the wait is forever) + * @timeout: time in msecs to wait for the message to complete before timing out * @memflags: the flags for memory allocation for buffers * * Context: !in_interrupt () @@ -314,8 +315,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg_recv); * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes - * @timeout: time in msecs to wait for the message to complete before - * timing out (if 0 the wait is forever) + * @timeout: time in msecs to wait for the message to complete before timing out * * Context: task context, might sleep. 
* @@ -347,8 +347,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg); * @len: length in bytes of the data to send * @actual_length: pointer to a location to put the actual length transferred * in bytes - * @timeout: time in msecs to wait for the message to complete before - * timing out (if 0 the wait is forever) + * @timeout: time in msecs to wait for the message to complete before timing out * * Context: task context, might sleep. * @@ -408,12 +407,12 @@ EXPORT_SYMBOL_GPL(usb_bulk_msg); * @actual_length: pointer to a location to put the actual length transferred * in bytes * @timeout: time in msecs to wait for the message to complete before - * timing out (if 0 the wait is forever) + * timing out (if <= 0, the wait is as long as possible) * * Context: task context, might sleep. * - * This function is just like usb_blk_msg() except that it waits in a - * killable state. + * This function is just like usb_blk_msg(), except that it waits in a + * killable state and there is no limit on the timeout length. * * Return: * If successful, 0. Otherwise a negative error number. The number of actual diff --git a/include/linux/usb.h b/include/linux/usb.h index 57ceeb02a7cb..04277af4bb9d 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1862,6 +1862,9 @@ void usb_free_noncoherent(struct usb_device *dev, size_t size, * SYNCHRONOUS CALL SUPPORT * *-------------------------------------------------------------------*/ +/* Maximum value allowed for timeout in synchronous routines below */ +#define USB_MAX_SYNCHRONOUS_TIMEOUT 60000 /* ms */ + extern int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, __u8 requesttype, __u16 value, __u16 index, void *data, __u16 size, int timeout); -- cgit v1.2.3 From 9f6a983cfa22ac662c86e60816d3a357d4b551e9 Mon Sep 17 00:00:00 2001 From: Jie Deng Date: Fri, 27 Feb 2026 16:49:31 +0800 Subject: usb: core: new quirk to handle devices with zero configurations Some USB devices incorrectly report bNumConfigurations as 0 in their device descriptor, which causes the USB core to reject them during enumeration. logs: usb 1-2: device descriptor read/64, error -71 usb 1-2: no configurations usb 1-2: can't read configurations, error -22 However, these devices actually work correctly when treated as having a single configuration. Add a new quirk USB_QUIRK_FORCE_ONE_CONFIG to handle such devices. When this quirk is set, assume the device has 1 configuration instead of failing with -EINVAL. This quirk is applied to the device with VID:PID 5131:2007 which exhibits this behavior. 
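For devices with the same defect that are not (yet) in the static quirk table, the behavior can also be requested at boot time through usbcore's dynamic quirks parameter using the new 'q' flag documented above, for example (shown here with this patch's IDs):

	usbcore.quirks=5131:2007:q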
Signed-off-by: Jie Deng Link: https://patch.msgid.link/20260227084931.1527461-1-dengjie03@kylinos.cn Signed-off-by: Greg Kroah-Hartman --- Documentation/admin-guide/kernel-parameters.txt | 3 +++ drivers/usb/core/config.c | 6 +++++- drivers/usb/core/quirks.c | 5 +++++ include/linux/usb/quirks.h | 3 +++ 4 files changed, 16 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index cb850e5290c2..7d907efe9f49 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -8183,6 +8183,9 @@ Kernel parameters p = USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT (Reduce timeout of the SET_ADDRESS request from 5000 ms to 500 ms); + q = USB_QUIRK_FORCE_ONE_CONFIG (Device + claims zero configurations, + forcing to 1); Example: quirks=0781:5580:bk,0a5c:5834:gij usbhid.mousepoll= diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 1cd5fa61dc76..6a1fd967e0a6 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -927,7 +927,11 @@ int usb_get_configuration(struct usb_device *dev) dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG; } - if (ncfg < 1) { + if (ncfg < 1 && dev->quirks & USB_QUIRK_FORCE_ONE_CONFIG) { + dev_info(ddev, "Device claims zero configurations, forcing to 1\n"); + dev->descriptor.bNumConfigurations = 1; + ncfg = 1; + } else if (ncfg < 1) { dev_err(ddev, "no configurations\n"); return -EINVAL; } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index e347236d83e8..7bd408db05f4 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -140,6 +140,8 @@ static int quirks_param_set(const char *value, const struct kernel_param *kp) case 'p': flags |= USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT; break; + case 'q': + flags |= USB_QUIRK_FORCE_ONE_CONFIG; /* Ignore unrecognized flag characters */ } } @@ -589,6 +591,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* VCOM device */ { USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Noji-MCS SmartCard Reader */ + { USB_DEVICE(0x5131, 0x2007), .driver_info = USB_QUIRK_FORCE_ONE_CONFIG }, + /* INTEL VALUE SSD */ { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 2f7bd2fdc616..b3cc7beab4a3 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -78,4 +78,7 @@ /* skip BOS descriptor request */ #define USB_QUIRK_NO_BOS BIT(17) +/* Device claims zero configurations, forcing to 1 */ +#define USB_QUIRK_FORCE_ONE_CONFIG BIT(18) + #endif /* __LINUX_USB_QUIRKS_H */ -- cgit v1.2.3 From 96189080265e6bb5dde3a4afbaf947af493e3f82 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 9 Mar 2026 14:21:37 -0600 Subject: io_uring: ensure ctx->rings is stable for task work flags manipulation If DEFER_TASKRUN | SETUP_TASKRUN is used and task work is added while the ring is being resized, it's possible for the OR'ing of IORING_SQ_TASKRUN to happen in the small window of swapping into the new rings and the old rings being freed. Prevent this by adding a 2nd ->rings pointer, ->rings_rcu, which is protected by RCU. The task work flags manipulation is inside RCU already, and if the resize ring freeing is done post an RCU synchronize, then there's no need to add locking to the fast path of task work additions. 
Note: this is only done for DEFER_TASKRUN, as that's the only setup mode that supports ring resizing. If this ever changes, then they too need to use the io_ctx_mark_taskrun() helper. Link: https://lore.kernel.org/io-uring/20260309062759.482210-1-naup96721@gmail.com/ Cc: stable@vger.kernel.org Fixes: 79cfe9e59c2a ("io_uring/register: add IORING_REGISTER_RESIZE_RINGS") Reported-by: Hao-Yu Yang Suggested-by: Pavel Begunkov Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 1 + io_uring/io_uring.c | 2 ++ io_uring/register.c | 11 +++++++++++ io_uring/tw.c | 22 ++++++++++++++++++++-- 4 files changed, 34 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 3e4a82a6f817..dd1420bfcb73 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -388,6 +388,7 @@ struct io_ring_ctx { * regularly bounce b/w CPUs. */ struct { + struct io_rings __rcu *rings_rcu; struct llist_head work_llist; struct llist_head retry_llist; unsigned long check_cq; diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index ccab8562d273..20fdc442e014 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -2066,6 +2066,7 @@ static void io_rings_free(struct io_ring_ctx *ctx) io_free_region(ctx->user, &ctx->sq_region); io_free_region(ctx->user, &ctx->ring_region); ctx->rings = NULL; + RCU_INIT_POINTER(ctx->rings_rcu, NULL); ctx->sq_sqes = NULL; } @@ -2703,6 +2704,7 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, if (ret) return ret; ctx->rings = rings = io_region_get_ptr(&ctx->ring_region); + rcu_assign_pointer(ctx->rings_rcu, rings); if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) ctx->sq_array = (u32 *)((char *)rings + rl->sq_array_offset); diff --git a/io_uring/register.c b/io_uring/register.c index a839b22fd392..5f3eb018fb32 100644 --- a/io_uring/register.c +++ b/io_uring/register.c @@ -633,7 +633,15 @@ overflow: ctx->sq_entries = p->sq_entries; ctx->cq_entries = p->cq_entries; + /* + * Just mark any flag we may have missed and that the application + * should act on unconditionally. Worst case it'll be an extra + * syscall. + */ + atomic_or(IORING_SQ_TASKRUN | IORING_SQ_NEED_WAKEUP, &n.rings->sq_flags); ctx->rings = n.rings; + rcu_assign_pointer(ctx->rings_rcu, n.rings); + ctx->sq_sqes = n.sq_sqes; swap_old(ctx, o, n, ring_region); swap_old(ctx, o, n, sq_region); @@ -642,6 +650,9 @@ overflow: out: spin_unlock(&ctx->completion_lock); mutex_unlock(&ctx->mmap_lock); + /* Wait for concurrent io_ctx_mark_taskrun() */ + if (to_free == &o) + synchronize_rcu_expedited(); io_register_free_rings(ctx, to_free); if (ctx->sq_data) diff --git a/io_uring/tw.c b/io_uring/tw.c index 1ee2b8ab07c8..2f2b4ac4b126 100644 --- a/io_uring/tw.c +++ b/io_uring/tw.c @@ -152,6 +152,21 @@ void tctx_task_work(struct callback_head *cb) WARN_ON_ONCE(ret); } +/* + * Sets IORING_SQ_TASKRUN in the sq_flags shared with userspace, using the + * RCU protected rings pointer to be safe against concurrent ring resizing. 
+ */ +static void io_ctx_mark_taskrun(struct io_ring_ctx *ctx) +{ + lockdep_assert_in_rcu_read_lock(); + + if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) { + struct io_rings *rings = rcu_dereference(ctx->rings_rcu); + + atomic_or(IORING_SQ_TASKRUN, &rings->sq_flags); + } +} + void io_req_local_work_add(struct io_kiocb *req, unsigned flags) { struct io_ring_ctx *ctx = req->ctx; @@ -206,8 +221,7 @@ void io_req_local_work_add(struct io_kiocb *req, unsigned flags) */ if (!head) { - if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) - atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); + io_ctx_mark_taskrun(ctx); if (ctx->has_evfd) io_eventfd_signal(ctx, false); } @@ -231,6 +245,10 @@ void io_req_normal_work_add(struct io_kiocb *req) if (!llist_add(&req->io_task_work.node, &tctx->task_list)) return; + /* + * Doesn't need to use ->rings_rcu, as resizing isn't supported for + * !DEFER_TASKRUN. + */ if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); -- cgit v1.2.3 From 8431c602f551549f082bbfa67f3003f2d8e3e132 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 11 Mar 2026 12:31:10 +0000 Subject: ip_tunnel: adapt iptunnel_xmit_stats() to NETDEV_PCPU_STAT_DSTATS Blamed commits forgot that vxlan/geneve use udp_tunnel[6]_xmit_skb() which call iptunnel_xmit_stats(). iptunnel_xmit_stats() was assuming tunnels were only using NETDEV_PCPU_STAT_TSTATS. @syncp offset in pcpu_sw_netstats and pcpu_dstats is different. 32bit kernels would either have corruptions or freezes if the syncp sequence was overwritten. This patch also moves pcpu_stat_type closer to dev->{t,d}stats to avoid a potential cache line miss since iptunnel_xmit_stats() needs to read it. Fixes: 6fa6de302246 ("geneve: Handle stats using NETDEV_PCPU_STAT_DSTATS.") Fixes: be226352e8dc ("vxlan: Handle stats using NETDEV_PCPU_STAT_DSTATS.") Signed-off-by: Eric Dumazet Reviewed-by: Guillaume Nault Link: https://patch.msgid.link/20260311123110.1471930-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 3 +-- include/net/ip_tunnels.h | 30 +++++++++++++++++++++++------- 2 files changed, 24 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ae269a2e7f4d..d7aac6f185bc 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2155,6 +2155,7 @@ struct net_device { unsigned long state; unsigned int flags; unsigned short hard_header_len; + enum netdev_stat_type pcpu_stat_type:8; netdev_features_t features; struct inet6_dev __rcu *ip6_ptr; __cacheline_group_end(net_device_read_txrx); @@ -2404,8 +2405,6 @@ struct net_device { void *ml_priv; enum netdev_ml_priv_type ml_priv_type; - enum netdev_stat_type pcpu_stat_type:8; - #if IS_ENABLED(CONFIG_GARP) struct garp_port __rcu *garp_port; #endif diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 80662f812080..1f577a4f8ce9 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -665,13 +665,29 @@ static inline int iptunnel_pull_offloads(struct sk_buff *skb) static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len) { if (pkt_len > 0) { - struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats); - - u64_stats_update_begin(&tstats->syncp); - u64_stats_add(&tstats->tx_bytes, pkt_len); - u64_stats_inc(&tstats->tx_packets); - u64_stats_update_end(&tstats->syncp); - put_cpu_ptr(tstats); + if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) { + struct pcpu_dstats *dstats = get_cpu_ptr(dev->dstats); + + 
u64_stats_update_begin(&dstats->syncp); + u64_stats_add(&dstats->tx_bytes, pkt_len); + u64_stats_inc(&dstats->tx_packets); + u64_stats_update_end(&dstats->syncp); + put_cpu_ptr(dstats); + return; + } + if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) { + struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats); + + u64_stats_update_begin(&tstats->syncp); + u64_stats_add(&tstats->tx_bytes, pkt_len); + u64_stats_inc(&tstats->tx_packets); + u64_stats_update_end(&tstats->syncp); + put_cpu_ptr(tstats); + return; + } + pr_err_once("iptunnel_xmit_stats pcpu_stat_type=%d\n", + dev->pcpu_stat_type); + WARN_ON_ONCE(1); return; } -- cgit v1.2.3 From 598adea720b97572c7028635cb1c59b3684e128c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 11 Mar 2026 16:24:02 +0100 Subject: netfilter: revert nft_set_rbtree: validate open interval overlap This reverts commit 648946966a08 ("netfilter: nft_set_rbtree: validate open interval overlap"). There have been reports of nft failing to laod valid rulesets after this patch was merged into -stable. I can reproduce several such problem with recent nft versions, including nft 1.1.6 which is widely shipped by distributions. We currently have little choice here. This commit can be resurrected at some point once the nftables fix that triggers the false overlap positive has appeared in common distros (see e83e32c8d1cd ("mnl: restore create element command with large batches" in nftables.git). Fixes: 648946966a08 ("netfilter: nft_set_rbtree: validate open interval overlap") Acked-by: Pablo Neira Ayuso Signed-off-by: Florian Westphal --- include/net/netfilter/nf_tables.h | 4 --- net/netfilter/nf_tables_api.c | 21 +++--------- net/netfilter/nft_set_rbtree.c | 71 ++++++--------------------------------- 3 files changed, 14 insertions(+), 82 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index e2d2bfc1f989..6299af4ef423 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -277,8 +277,6 @@ struct nft_userdata { unsigned char data[]; }; -#define NFT_SET_ELEM_INTERNAL_LAST 0x1 - /* placeholder structure for opaque set element backend representation. 
*/ struct nft_elem_priv { }; @@ -288,7 +286,6 @@ struct nft_elem_priv { }; * @key: element key * @key_end: closing element key * @data: element data - * @flags: flags * @priv: element private data and extensions */ struct nft_set_elem { @@ -304,7 +301,6 @@ struct nft_set_elem { u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)]; struct nft_data val; } data; - u32 flags; struct nft_elem_priv *priv; }; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index dacec5f8a11c..4ccdd33cf133 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -7156,8 +7156,7 @@ static u32 nft_set_maxsize(const struct nft_set *set) } static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, - const struct nlattr *attr, u32 nlmsg_flags, - bool last) + const struct nlattr *attr, u32 nlmsg_flags) { struct nft_expr *expr_array[NFT_SET_EXPR_MAX] = {}; struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; @@ -7444,11 +7443,6 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (flags) *nft_set_ext_flags(ext) = flags; - if (last) - elem.flags = NFT_SET_ELEM_INTERNAL_LAST; - else - elem.flags = 0; - if (obj) *nft_set_ext_obj(ext) = obj; @@ -7613,8 +7607,7 @@ static int nf_tables_newsetelem(struct sk_buff *skb, nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { - err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags, - nla_is_last(attr, rem)); + err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags); if (err < 0) { NL_SET_BAD_ATTR(extack, attr); return err; @@ -7738,7 +7731,7 @@ static void nft_trans_elems_destroy_abort(const struct nft_ctx *ctx, } static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, - const struct nlattr *attr, bool last) + const struct nlattr *attr) { struct nlattr *nla[NFTA_SET_ELEM_MAX + 1]; struct nft_set_ext_tmpl tmpl; @@ -7806,11 +7799,6 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, if (flags) *nft_set_ext_flags(ext) = flags; - if (last) - elem.flags = NFT_SET_ELEM_INTERNAL_LAST; - else - elem.flags = 0; - trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set); if (trans == NULL) goto fail_trans; @@ -7961,8 +7949,7 @@ static int nf_tables_delsetelem(struct sk_buff *skb, return nft_set_flush(&ctx, set, genmask); nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) { - err = nft_del_setelem(&ctx, set, attr, - nla_is_last(attr, rem)); + err = nft_del_setelem(&ctx, set, attr); if (err == -ENOENT && NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYSETELEM) continue; diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index ee3d4f5b9ff7..fe8bd497d74a 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -304,19 +304,10 @@ static void nft_rbtree_set_start_cookie(struct nft_rbtree *priv, priv->start_rbe_cookie = (unsigned long)rbe; } -static void nft_rbtree_set_start_cookie_open(struct nft_rbtree *priv, - const struct nft_rbtree_elem *rbe, - unsigned long open_interval) -{ - priv->start_rbe_cookie = (unsigned long)rbe | open_interval; -} - -#define NFT_RBTREE_OPEN_INTERVAL 1UL - static bool nft_rbtree_cmp_start_cookie(struct nft_rbtree *priv, const struct nft_rbtree_elem *rbe) { - return (priv->start_rbe_cookie & ~NFT_RBTREE_OPEN_INTERVAL) == (unsigned long)rbe; + return priv->start_rbe_cookie == (unsigned long)rbe; } static bool nft_rbtree_insert_same_interval(const struct net *net, @@ -346,14 +337,13 @@ static bool 
nft_rbtree_insert_same_interval(const struct net *net, static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, struct nft_rbtree_elem *new, - struct nft_elem_priv **elem_priv, u64 tstamp, bool last) + struct nft_elem_priv **elem_priv, u64 tstamp) { struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL, *rbe_prev; struct rb_node *node, *next, *parent, **p, *first = NULL; struct nft_rbtree *priv = nft_set_priv(set); u8 cur_genmask = nft_genmask_cur(net); u8 genmask = nft_genmask_next(net); - unsigned long open_interval = 0; int d; /* Descend the tree to search for an existing element greater than the @@ -459,18 +449,10 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, } } - if (nft_rbtree_interval_null(set, new)) { + if (nft_rbtree_interval_null(set, new)) + priv->start_rbe_cookie = 0; + else if (nft_rbtree_interval_start(new) && priv->start_rbe_cookie) priv->start_rbe_cookie = 0; - } else if (nft_rbtree_interval_start(new) && priv->start_rbe_cookie) { - if (nft_set_is_anonymous(set)) { - priv->start_rbe_cookie = 0; - } else if (priv->start_rbe_cookie & NFT_RBTREE_OPEN_INTERVAL) { - /* Previous element is an open interval that partially - * overlaps with an existing non-open interval. - */ - return -ENOTEMPTY; - } - } /* - new start element matching existing start element: full overlap * reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given. @@ -478,27 +460,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) && nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) { *elem_priv = &rbe_ge->priv; - - /* - Corner case: new start element of open interval (which - * comes as last element in the batch) overlaps the start of - * an existing interval with an end element: partial overlap. - */ - node = rb_first(&priv->root); - rbe = __nft_rbtree_next_active(node, genmask); - if (rbe && nft_rbtree_interval_end(rbe)) { - rbe = nft_rbtree_next_active(rbe, genmask); - if (rbe && - nft_rbtree_interval_start(rbe) && - !nft_rbtree_cmp(set, new, rbe)) { - if (last) - return -ENOTEMPTY; - - /* Maybe open interval? */ - open_interval = NFT_RBTREE_OPEN_INTERVAL; - } - } - nft_rbtree_set_start_cookie_open(priv, rbe_ge, open_interval); - + nft_rbtree_set_start_cookie(priv, rbe_ge); return -EEXIST; } @@ -553,12 +515,6 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new)) return -ENOTEMPTY; - /* - start element overlaps an open interval but end element is new: - * partial overlap, reported as -ENOEMPTY. 
- */ - if (!rbe_ge && priv->start_rbe_cookie && nft_rbtree_interval_end(new)) - return -ENOTEMPTY; - /* Accepted element: pick insertion point depending on key value */ parent = NULL; p = &priv->root.rb_node; @@ -668,7 +624,6 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set, struct nft_elem_priv **elem_priv) { struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem->priv); - bool last = !!(elem->flags & NFT_SET_ELEM_INTERNAL_LAST); struct nft_rbtree *priv = nft_set_priv(set); u64 tstamp = nft_net_tstamp(net); int err; @@ -685,12 +640,8 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set, cond_resched(); write_lock_bh(&priv->lock); - err = __nft_rbtree_insert(net, set, rbe, elem_priv, tstamp, last); + err = __nft_rbtree_insert(net, set, rbe, elem_priv, tstamp); write_unlock_bh(&priv->lock); - - if (nft_rbtree_interval_end(rbe)) - priv->start_rbe_cookie = 0; - } while (err == -EAGAIN); return err; @@ -778,7 +729,6 @@ nft_rbtree_deactivate(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem) { struct nft_rbtree_elem *rbe, *this = nft_elem_priv_cast(elem->priv); - bool last = !!(elem->flags & NFT_SET_ELEM_INTERNAL_LAST); struct nft_rbtree *priv = nft_set_priv(set); const struct rb_node *parent = priv->root.rb_node; u8 genmask = nft_genmask_next(net); @@ -819,10 +769,9 @@ nft_rbtree_deactivate(const struct net *net, const struct nft_set *set, continue; } - if (nft_rbtree_interval_start(rbe)) { - if (!last) - nft_rbtree_set_start_cookie(priv, rbe); - } else if (!nft_rbtree_deactivate_same_interval(net, priv, rbe)) + if (nft_rbtree_interval_start(rbe)) + nft_rbtree_set_start_cookie(priv, rbe); + else if (!nft_rbtree_deactivate_same_interval(net, priv, rbe)) return NULL; nft_rbtree_flush(net, set, &rbe->priv); -- cgit v1.2.3 From 0548a13b5a145b16e4da0628b5936baf35f51b43 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 12 Mar 2026 12:38:59 +0100 Subject: nf_tables: nft_dynset: fix possible stateful expression memleak in error path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If cloning the second stateful expression in the element via GFP_ATOMIC fails, then the first stateful expression remains in place without being released.   
unreferenced object (percpu) 0x607b97e9cab8 (size 16):     comm "softirq", pid 0, jiffies 4294931867     hex dump (first 16 bytes on cpu 3):       00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00     backtrace (crc 0):       pcpu_alloc_noprof+0x453/0xd80       nft_counter_clone+0x9c/0x190 [nf_tables]       nft_expr_clone+0x8f/0x1b0 [nf_tables]       nft_dynset_new+0x2cb/0x5f0 [nf_tables]       nft_rhash_update+0x236/0x11c0 [nf_tables]       nft_dynset_eval+0x11f/0x670 [nf_tables]       nft_do_chain+0x253/0x1700 [nf_tables]       nft_do_chain_ipv4+0x18d/0x270 [nf_tables]       nf_hook_slow+0xaa/0x1e0       ip_local_deliver+0x209/0x330 Fixes: 563125a73ac3 ("netfilter: nftables: generalize set extension to support for several expressions") Reported-by: Gurpreet Shergill Signed-off-by: Pablo Neira Ayuso Signed-off-by: Florian Westphal --- include/net/netfilter/nf_tables.h | 2 ++ net/netfilter/nf_tables_api.c | 4 ++-- net/netfilter/nft_dynset.c | 10 +++++++++- 3 files changed, 13 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 6299af4ef423..ec8a8ec9c0aa 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -874,6 +874,8 @@ struct nft_elem_priv *nft_set_elem_init(const struct nft_set *set, u64 timeout, u64 expiration, gfp_t gfp); int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set, struct nft_expr *expr_array[]); +void nft_set_elem_expr_destroy(const struct nft_ctx *ctx, + struct nft_set_elem_expr *elem_expr); void nft_set_elem_destroy(const struct nft_set *set, const struct nft_elem_priv *elem_priv, bool destroy_expr); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 4ccdd33cf133..9b1c8d0a35fb 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -6744,8 +6744,8 @@ static void __nft_set_elem_expr_destroy(const struct nft_ctx *ctx, } } -static void nft_set_elem_expr_destroy(const struct nft_ctx *ctx, - struct nft_set_elem_expr *elem_expr) +void nft_set_elem_expr_destroy(const struct nft_ctx *ctx, + struct nft_set_elem_expr *elem_expr) { struct nft_expr *expr; u32 size; diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 7807d8129664..9123277be03c 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -30,18 +30,26 @@ static int nft_dynset_expr_setup(const struct nft_dynset *priv, const struct nft_set_ext *ext) { struct nft_set_elem_expr *elem_expr = nft_set_ext_expr(ext); + struct nft_ctx ctx = { + .net = read_pnet(&priv->set->net), + .family = priv->set->table->family, + }; struct nft_expr *expr; int i; for (i = 0; i < priv->num_exprs; i++) { expr = nft_setelem_expr_at(elem_expr, elem_expr->size); if (nft_expr_clone(expr, priv->expr_array[i], GFP_ATOMIC) < 0) - return -1; + goto err_out; elem_expr->size += priv->expr_array[i]->ops->size; } return 0; +err_out: + nft_set_elem_expr_destroy(&ctx, elem_expr); + + return -1; } struct nft_elem_priv *nft_dynset_new(struct nft_set *set, -- cgit v1.2.3 From b7405dcf7385445e10821777143f18c3ce20fa04 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 15 Mar 2026 10:41:52 +0000 Subject: bonding: prevent potential infinite loop in bond_header_parse() bond_header_parse() can loop if a stack of two bonding devices is setup, because skb->dev always points to the hierarchy top. 
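As an illustration (simplified call chain, assuming bond1 is enslaved under
bond0 and a frame is parsed on bond0), the old prototype recursed like this:

    /*
     * dev_parse_header(skb, haddr)           skb->dev == bond0
     *   bond_header_parse(skb, haddr)        bond = netdev_priv(skb->dev) -> bond0
     *     slave_ops->parse(skb, haddr)       active slave is bond1, same bonding ops
     *       bond_header_parse(skb, haddr)    bond = netdev_priv(skb->dev) -> bond0 again
     *         slave_ops->parse(skb, haddr)   and so on, never reaching a real NIC
     */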
Add new "const struct net_device *dev" parameter to (struct header_ops)->parse() method to make sure the recursion is bounded, and that the final leaf parse method is called. Fixes: 950803f72547 ("bonding: fix type confusion in bond_setup_by_slave()") Signed-off-by: Eric Dumazet Reviewed-by: Jiayuan Chen Tested-by: Jiayuan Chen Cc: Jay Vosburgh Cc: Andrew Lunn Link: https://patch.msgid.link/20260315104152.1436867-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- drivers/firewire/net.c | 5 +++-- drivers/net/bonding/bond_main.c | 8 +++++--- include/linux/etherdevice.h | 3 ++- include/linux/if_ether.h | 3 ++- include/linux/netdevice.h | 6 ++++-- net/ethernet/eth.c | 9 +++------ net/ipv4/ip_gre.c | 3 ++- net/mac802154/iface.c | 4 +++- net/phonet/af_phonet.c | 5 ++++- 9 files changed, 28 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index f1a2bee39bf1..82b3b6d9ed2d 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -257,9 +257,10 @@ static void fwnet_header_cache_update(struct hh_cache *hh, memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len); } -static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr) +static int fwnet_header_parse(const struct sk_buff *skb, const struct net_device *dev, + unsigned char *haddr) { - memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN); + memcpy(haddr, dev->dev_addr, FWNET_ALEN); return FWNET_ALEN; } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 707419270ebf..33f414d03ab9 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1530,9 +1530,11 @@ static int bond_header_create(struct sk_buff *skb, struct net_device *bond_dev, return ret; } -static int bond_header_parse(const struct sk_buff *skb, unsigned char *haddr) +static int bond_header_parse(const struct sk_buff *skb, + const struct net_device *dev, + unsigned char *haddr) { - struct bonding *bond = netdev_priv(skb->dev); + struct bonding *bond = netdev_priv(dev); const struct header_ops *slave_ops; struct slave *slave; int ret = 0; @@ -1542,7 +1544,7 @@ static int bond_header_parse(const struct sk_buff *skb, unsigned char *haddr) if (slave) { slave_ops = READ_ONCE(slave->dev->header_ops); if (slave_ops && slave_ops->parse) - ret = slave_ops->parse(skb, haddr); + ret = slave_ops->parse(skb, slave->dev, haddr); } rcu_read_unlock(); return ret; diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 9a1eacf35d37..df8f88f63a70 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -42,7 +42,8 @@ extern const struct header_ops eth_header_ops; int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len); -int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); +int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev, + unsigned char *haddr); int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 61b7335aa037..ca9afa824aa4 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h @@ -40,7 +40,8 @@ static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb) return (struct ethhdr *)skb_inner_mac_header(skb); } -int eth_header_parse(const struct sk_buff *skb, unsigned char 
*haddr); +int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev, + unsigned char *haddr); extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d7aac6f185bc..7ca01eb3f7d2 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -311,7 +311,9 @@ struct header_ops { int (*create) (struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned int len); - int (*parse)(const struct sk_buff *skb, unsigned char *haddr); + int (*parse)(const struct sk_buff *skb, + const struct net_device *dev, + unsigned char *haddr); int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); void (*cache_update)(struct hh_cache *hh, const struct net_device *dev, @@ -3445,7 +3447,7 @@ static inline int dev_parse_header(const struct sk_buff *skb, if (!dev->header_ops || !dev->header_ops->parse) return 0; - return dev->header_ops->parse(skb, haddr); + return dev->header_ops->parse(skb, dev, haddr); } static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c index 13a63b48b7ee..d9faadbe9b6c 100644 --- a/net/ethernet/eth.c +++ b/net/ethernet/eth.c @@ -193,14 +193,11 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) } EXPORT_SYMBOL(eth_type_trans); -/** - * eth_header_parse - extract hardware address from packet - * @skb: packet to extract header from - * @haddr: destination buffer - */ -int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr) +int eth_header_parse(const struct sk_buff *skb, const struct net_device *dev, + unsigned char *haddr) { const struct ethhdr *eth = eth_hdr(skb); + memcpy(haddr, eth->h_source, ETH_ALEN); return ETH_ALEN; } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index e13244729ad8..35f0baa99d40 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -919,7 +919,8 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, return -(t->hlen + sizeof(*iph)); } -static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) +static int ipgre_header_parse(const struct sk_buff *skb, const struct net_device *dev, + unsigned char *haddr) { const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb); memcpy(haddr, &iph->saddr, 4); diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 9e4631fade90..000be60d9580 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -469,7 +469,9 @@ static int mac802154_header_create(struct sk_buff *skb, } static int -mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr) +mac802154_header_parse(const struct sk_buff *skb, + const struct net_device *dev, + unsigned char *haddr) { struct ieee802154_hdr hdr; diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index 238a9638d2b0..d89225d6bfd3 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c @@ -129,9 +129,12 @@ static int pn_header_create(struct sk_buff *skb, struct net_device *dev, return 1; } -static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr) +static int pn_header_parse(const struct sk_buff *skb, + const struct net_device *dev, + unsigned char *haddr) { const u8 *media = skb_mac_header(skb); + *haddr = *media; return 1; } -- cgit v1.2.3 From 66360460cab63c248ca5b1070a01c0c29133b960 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 15 Mar 2026 11:54:22 -0400 Subject: 
net/sched: teql: Fix double-free in teql_master_xmit Whenever a TEQL devices has a lockless Qdisc as root, qdisc_reset should be called using the seq_lock to avoid racing with the datapath. Failure to do so may cause crashes like the following: [ 238.028993][ T318] BUG: KASAN: double-free in skb_release_data (net/core/skbuff.c:1139) [ 238.029328][ T318] Free of addr ffff88810c67ec00 by task poc_teql_uaf_ke/318 [ 238.029749][ T318] [ 238.029900][ T318] CPU: 3 UID: 0 PID: 318 Comm: poc_teql_ke Not tainted 7.0.0-rc3-00149-ge5b31d988a41 #704 PREEMPT(full) [ 238.029906][ T318] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 [ 238.029910][ T318] Call Trace: [ 238.029913][ T318] [ 238.029916][ T318] dump_stack_lvl (lib/dump_stack.c:122) [ 238.029928][ T318] print_report (mm/kasan/report.c:379 mm/kasan/report.c:482) [ 238.029940][ T318] ? skb_release_data (net/core/skbuff.c:1139) [ 238.029944][ T318] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221) ... [ 238.029957][ T318] ? skb_release_data (net/core/skbuff.c:1139) [ 238.029969][ T318] kasan_report_invalid_free (mm/kasan/report.c:221 mm/kasan/report.c:563) [ 238.029979][ T318] ? skb_release_data (net/core/skbuff.c:1139) [ 238.029989][ T318] check_slab_allocation (mm/kasan/common.c:231) [ 238.029995][ T318] kmem_cache_free (mm/slub.c:2637 (discriminator 1) mm/slub.c:6168 (discriminator 1) mm/slub.c:6298 (discriminator 1)) [ 238.030004][ T318] skb_release_data (net/core/skbuff.c:1139) ... [ 238.030025][ T318] sk_skb_reason_drop (net/core/skbuff.c:1256) [ 238.030032][ T318] pfifo_fast_reset (./include/linux/ptr_ring.h:171 ./include/linux/ptr_ring.h:309 ./include/linux/skb_array.h:98 net/sched/sch_generic.c:827) [ 238.030039][ T318] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221) ... [ 238.030054][ T318] qdisc_reset (net/sched/sch_generic.c:1034) [ 238.030062][ T318] teql_destroy (./include/linux/spinlock.h:395 net/sched/sch_teql.c:157) [ 238.030071][ T318] __qdisc_destroy (./include/net/pkt_sched.h:328 net/sched/sch_generic.c:1077) [ 238.030077][ T318] qdisc_graft (net/sched/sch_api.c:1062 net/sched/sch_api.c:1053 net/sched/sch_api.c:1159) [ 238.030089][ T318] ? __pfx_qdisc_graft (net/sched/sch_api.c:1091) [ 238.030095][ T318] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221) [ 238.030102][ T318] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221) [ 238.030106][ T318] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221) [ 238.030114][ T318] tc_get_qdisc (net/sched/sch_api.c:1529 net/sched/sch_api.c:1556) ... 
[ 238.072958][ T318] Allocated by task 303 on cpu 5 at 238.026275s: [ 238.073392][ T318] kasan_save_stack (mm/kasan/common.c:58) [ 238.073884][ T318] kasan_save_track (mm/kasan/common.c:64 (discriminator 5) mm/kasan/common.c:79 (discriminator 5)) [ 238.074230][ T318] __kasan_slab_alloc (mm/kasan/common.c:369) [ 238.074578][ T318] kmem_cache_alloc_node_noprof (./include/linux/kasan.h:253 mm/slub.c:4542 mm/slub.c:4869 mm/slub.c:4921) [ 238.076091][ T318] kmalloc_reserve (net/core/skbuff.c:616 (discriminator 107)) [ 238.076450][ T318] __alloc_skb (net/core/skbuff.c:713) [ 238.076834][ T318] alloc_skb_with_frags (./include/linux/skbuff.h:1383 net/core/skbuff.c:6763) [ 238.077178][ T318] sock_alloc_send_pskb (net/core/sock.c:2997) [ 238.077520][ T318] packet_sendmsg (net/packet/af_packet.c:2926 net/packet/af_packet.c:3019 net/packet/af_packet.c:3108) [ 238.081469][ T318] [ 238.081870][ T318] Freed by task 299 on cpu 1 at 238.028496s: [ 238.082761][ T318] kasan_save_stack (mm/kasan/common.c:58) [ 238.083481][ T318] kasan_save_track (mm/kasan/common.c:64 (discriminator 5) mm/kasan/common.c:79 (discriminator 5)) [ 238.085348][ T318] kasan_save_free_info (mm/kasan/generic.c:587 (discriminator 1)) [ 238.085900][ T318] __kasan_slab_free (mm/kasan/common.c:287) [ 238.086439][ T318] kmem_cache_free (mm/slub.c:6168 (discriminator 3) mm/slub.c:6298 (discriminator 3)) [ 238.087007][ T318] skb_release_data (net/core/skbuff.c:1139) [ 238.087491][ T318] consume_skb (net/core/skbuff.c:1451) [ 238.087757][ T318] teql_master_xmit (net/sched/sch_teql.c:358) [ 238.088116][ T318] dev_hard_start_xmit (./include/linux/netdevice.h:5324 ./include/linux/netdevice.h:5333 net/core/dev.c:3871 net/core/dev.c:3887) [ 238.088468][ T318] sch_direct_xmit (net/sched/sch_generic.c:347) [ 238.088820][ T318] __qdisc_run (net/sched/sch_generic.c:420 (discriminator 1)) [ 238.089166][ T318] __dev_queue_xmit (./include/net/sch_generic.h:229 ./include/net/pkt_sched.h:121 ./include/net/pkt_sched.h:117 net/core/dev.c:4196 net/core/dev.c:4802) Workflow to reproduce: 1. Initialize a TEQL topology (dummy0 and ifb0 as slaves, teql0 up). 2. Start multiple sender workers continuously transmitting packets through teql0 to drive teql_master_xmit(). 3. In parallel, repeatedly delete and re-add the root qdisc on dummy0 and ifb0 via RTNETLINK, forcing frequent teardown and reset activity (teql_destroy() / qdisc_reset()). 4. After running both workloads concurrently for several iterations, KASAN reports slab-use-after-free or double-free in the skb free path. Fix this by moving dev_reset_queue to sch_generic.h and calling it, instead of qdisc_reset, in teql_destroy since it handles both the lock and lockless cases correctly for root qdiscs. 
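For reference, the serialization a lockless (TCQ_F_NOLOCK) root qdisc needs
before it may be reset looks roughly like the sketch below; this is the core
of the relocated dev_reset_queue() helper, which in addition also clears the
__QDISC_STATE_MISSED and __QDISC_STATE_DRAINING bits:

    if (qdisc->flags & TCQ_F_NOLOCK)
            spin_lock_bh(&qdisc->seqlock);  /* fence off the datapath */
    spin_lock_bh(qdisc_lock(qdisc));

    qdisc_reset(qdisc);

    spin_unlock_bh(qdisc_lock(qdisc));
    if (qdisc->flags & TCQ_F_NOLOCK)
            spin_unlock_bh(&qdisc->seqlock);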
Fixes: 96009c7d500e ("sched: replace __QDISC_STATE_RUNNING bit with a spin lock") Reported-by: Xianrui Dong Tested-by: Xianrui Dong Co-developed-by: Victor Nogueira Signed-off-by: Victor Nogueira Signed-off-by: Jamal Hadi Salim Link: https://patch.msgid.link/20260315155422.147256-1-jhs@mojatatu.com Signed-off-by: Jakub Kicinski --- include/net/sch_generic.h | 28 ++++++++++++++++++++++++++++ net/sched/sch_generic.c | 27 --------------------------- net/sched/sch_teql.c | 7 ++----- 3 files changed, 30 insertions(+), 32 deletions(-) (limited to 'include') diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index d5d55cb21686..cafb266a0b80 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -716,6 +716,34 @@ void qdisc_destroy(struct Qdisc *qdisc); void qdisc_put(struct Qdisc *qdisc); void qdisc_put_unlocked(struct Qdisc *qdisc); void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len); + +static inline void dev_reset_queue(struct net_device *dev, + struct netdev_queue *dev_queue, + void *_unused) +{ + struct Qdisc *qdisc; + bool nolock; + + qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); + if (!qdisc) + return; + + nolock = qdisc->flags & TCQ_F_NOLOCK; + + if (nolock) + spin_lock_bh(&qdisc->seqlock); + spin_lock_bh(qdisc_lock(qdisc)); + + qdisc_reset(qdisc); + + spin_unlock_bh(qdisc_lock(qdisc)); + if (nolock) { + clear_bit(__QDISC_STATE_MISSED, &qdisc->state); + clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); + spin_unlock_bh(&qdisc->seqlock); + } +} + #ifdef CONFIG_NET_SCHED int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type, void *type_data); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 98ffe64de51f..9e726c3bd86b 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -1288,33 +1288,6 @@ static void dev_deactivate_queue(struct net_device *dev, } } -static void dev_reset_queue(struct net_device *dev, - struct netdev_queue *dev_queue, - void *_unused) -{ - struct Qdisc *qdisc; - bool nolock; - - qdisc = rtnl_dereference(dev_queue->qdisc_sleeping); - if (!qdisc) - return; - - nolock = qdisc->flags & TCQ_F_NOLOCK; - - if (nolock) - spin_lock_bh(&qdisc->seqlock); - spin_lock_bh(qdisc_lock(qdisc)); - - qdisc_reset(qdisc); - - spin_unlock_bh(qdisc_lock(qdisc)); - if (nolock) { - clear_bit(__QDISC_STATE_MISSED, &qdisc->state); - clear_bit(__QDISC_STATE_DRAINING, &qdisc->state); - spin_unlock_bh(&qdisc->seqlock); - } -} - static bool some_qdisc_is_busy(struct net_device *dev) { unsigned int i; diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 783300d8b019..ec4039a201a2 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c @@ -146,15 +146,12 @@ teql_destroy(struct Qdisc *sch) master->slaves = NEXT_SLAVE(q); if (q == master->slaves) { struct netdev_queue *txq; - spinlock_t *root_lock; txq = netdev_get_tx_queue(master->dev, 0); master->slaves = NULL; - root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc)); - spin_lock_bh(root_lock); - qdisc_reset(rtnl_dereference(txq->qdisc)); - spin_unlock_bh(root_lock); + dev_reset_queue(master->dev, + txq, NULL); } } skb_queue_purge(&dat->q); -- cgit v1.2.3 From a0671125d4f55e1e98d9bde8a0b671941987e208 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 13 Mar 2026 07:55:31 +0100 Subject: clsact: Fix use-after-free in init/destroy rollback asymmetry Fix a use-after-free in the clsact qdisc upon init/destroy rollback asymmetry. 
The latter is achieved by first fully initializing a clsact instance, and then in a second step having a replacement failure for the new clsact qdisc instance. clsact_init() initializes ingress first and then takes care of the egress part. This can fail midway, for example, via tcf_block_get_ext(). Upon failure, the kernel will trigger the clsact_destroy() callback. Commit 1cb6f0bae504 ("bpf: Fix too early release of tcx_entry") details the way how the transition is happening. If tcf_block_get_ext on the q->ingress_block ends up failing, we took the tcx_miniq_inc reference count on the ingress side, but not yet on the egress side. clsact_destroy() tests whether the {ingress,egress}_entry was non-NULL. However, even in midway failure on the replacement, both are in fact non-NULL with a valid egress_entry from the previous clsact instance. What we really need to test for is whether the qdisc instance-specific ingress or egress side previously got initialized. This adds a small helper for checking the miniq initialization called mini_qdisc_pair_inited, and utilizes that upon clsact_destroy() in order to fix the use-after-free scenario. Convert the ingress_destroy() side as well so both are consistent to each other. Fixes: 1cb6f0bae504 ("bpf: Fix too early release of tcx_entry") Reported-by: Keenan Dong Signed-off-by: Daniel Borkmann Cc: Martin KaFai Lau Acked-by: Martin KaFai Lau Link: https://patch.msgid.link/20260313065531.98639-1-daniel@iogearbox.net Signed-off-by: Paolo Abeni --- include/net/sch_generic.h | 5 +++++ net/sched/sch_ingress.c | 14 ++++++++------ 2 files changed, 13 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index cafb266a0b80..c3d657359a3d 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -1457,6 +1457,11 @@ void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, struct tcf_block *block); +static inline bool mini_qdisc_pair_inited(struct mini_Qdisc_pair *miniqp) +{ + return !!miniqp->p_miniq; +} + void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx); int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb)); diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index cc6051d4f2ef..c3e18bae8fbf 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -113,14 +113,15 @@ static void ingress_destroy(struct Qdisc *sch) { struct ingress_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); - struct bpf_mprog_entry *entry = rtnl_dereference(dev->tcx_ingress); + struct bpf_mprog_entry *entry; if (sch->parent != TC_H_INGRESS) return; tcf_block_put_ext(q->block, sch, &q->block_info); - if (entry) { + if (mini_qdisc_pair_inited(&q->miniqp)) { + entry = rtnl_dereference(dev->tcx_ingress); tcx_miniq_dec(entry); if (!tcx_entry_is_active(entry)) { tcx_entry_update(dev, NULL, true); @@ -290,10 +291,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt, static void clsact_destroy(struct Qdisc *sch) { + struct bpf_mprog_entry *ingress_entry, *egress_entry; struct clsact_sched_data *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); - struct bpf_mprog_entry *ingress_entry = rtnl_dereference(dev->tcx_ingress); - struct bpf_mprog_entry *egress_entry = rtnl_dereference(dev->tcx_egress); if (sch->parent != TC_H_CLSACT) return; @@ -301,7 +301,8 @@ static void clsact_destroy(struct Qdisc *sch) 
tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info); - if (ingress_entry) { + if (mini_qdisc_pair_inited(&q->miniqp_ingress)) { + ingress_entry = rtnl_dereference(dev->tcx_ingress); tcx_miniq_dec(ingress_entry); if (!tcx_entry_is_active(ingress_entry)) { tcx_entry_update(dev, NULL, true); @@ -309,7 +310,8 @@ static void clsact_destroy(struct Qdisc *sch) } } - if (egress_entry) { + if (mini_qdisc_pair_inited(&q->miniqp_egress)) { + egress_entry = rtnl_dereference(dev->tcx_egress); tcx_miniq_dec(egress_entry); if (!tcx_entry_is_active(egress_entry)) { tcx_entry_update(dev, NULL, false); -- cgit v1.2.3 From d5ad6ab61cbd89afdb60881f6274f74328af3ee9 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Sat, 14 Mar 2026 06:54:55 +0000 Subject: wifi: mac80211: always free skb on ieee80211_tx_prepare_skb() failure ieee80211_tx_prepare_skb() has three error paths, but only two of them free the skb. The first error path (ieee80211_tx_prepare() returning TX_DROP) does not free it, while invoke_tx_handlers() failure and the fragmentation check both do. Add kfree_skb() to the first error path so all three are consistent, and remove the now-redundant frees in callers (ath9k, mt76, mac80211_hwsim) to avoid double-free. Document the skb ownership guarantee in the function's kdoc. Signed-off-by: Felix Fietkau Link: https://patch.msgid.link/20260314065455.2462900-1-nbd@nbd.name Fixes: 06be6b149f7e ("mac80211: add ieee80211_tx_prepare_skb() helper function") Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath9k/channel.c | 6 ++---- drivers/net/wireless/mediatek/mt76/scan.c | 4 +--- drivers/net/wireless/virtual/mac80211_hwsim.c | 1 - include/net/mac80211.h | 4 +++- net/mac80211/tx.c | 4 +++- 5 files changed, 9 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 121e51ce1bc0..8b27d8cc086a 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -1006,7 +1006,7 @@ static void ath_scan_send_probe(struct ath_softc *sc, skb_set_queue_mapping(skb, IEEE80211_AC_VO); if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, NULL)) - goto error; + return; txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO]; if (ath_tx_start(sc->hw, skb, &txctl)) @@ -1119,10 +1119,8 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp, skb->priority = 7; skb_set_queue_mapping(skb, IEEE80211_AC_VO); - if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) { - dev_kfree_skb_any(skb); + if (!ieee80211_tx_prepare_skb(sc->hw, vif, skb, band, &sta)) return false; - } break; default: return false; diff --git a/drivers/net/wireless/mediatek/mt76/scan.c b/drivers/net/wireless/mediatek/mt76/scan.c index ff9176cdee3d..63b0447e55c1 100644 --- a/drivers/net/wireless/mediatek/mt76/scan.c +++ b/drivers/net/wireless/mediatek/mt76/scan.c @@ -63,10 +63,8 @@ mt76_scan_send_probe(struct mt76_dev *dev, struct cfg80211_ssid *ssid) rcu_read_lock(); - if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL)) { - ieee80211_free_txskb(phy->hw, skb); + if (!ieee80211_tx_prepare_skb(phy->hw, vif, skb, band, NULL)) goto out; - } info = IEEE80211_SKB_CB(skb); if (req->no_cck) diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index f6b890dea7e0..1b6e55eb81a2 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ 
b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -3021,7 +3021,6 @@ static void hw_scan_work(struct work_struct *work) hwsim->tmp_chan->band, NULL)) { rcu_read_unlock(); - kfree_skb(probe); continue; } diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 7f9d96939a4e..adce2144a678 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -7407,7 +7407,9 @@ void ieee80211_report_wowlan_wakeup(struct ieee80211_vif *vif, * @band: the band to transmit on * @sta: optional pointer to get the station to send the frame to * - * Return: %true if the skb was prepared, %false otherwise + * Return: %true if the skb was prepared, %false otherwise. + * On failure, the skb is freed by this function; callers must not + * free it again. * * Note: must be called under RCU lock */ diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 8cdbd417d7be..b7aedaab8483 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1899,8 +1899,10 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, struct ieee80211_tx_data tx; struct sk_buff *skb2; - if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP) + if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP) { + kfree_skb(skb); return false; + } info->band = band; info->control.vif = vif; -- cgit v1.2.3 From b3a6df291fecf5f8a308953b65ca72b7fc9e015d Mon Sep 17 00:00:00 2001 From: Xiang Mei Date: Mon, 16 Mar 2026 18:02:41 -0700 Subject: udp_tunnel: fix NULL deref caused by udp_sock_create6 when CONFIG_IPV6=n When CONFIG_IPV6 is disabled, the udp_sock_create6() function returns 0 (success) without actually creating a socket. Callers such as fou_create() then proceed to dereference the uninitialized socket pointer, resulting in a NULL pointer dereference. The captured NULL deref crash: BUG: kernel NULL pointer dereference, address: 0000000000000018 RIP: 0010:fou_nl_add_doit (net/ipv4/fou_core.c:590 net/ipv4/fou_core.c:764) [...] Call Trace: genl_family_rcv_msg_doit.constprop.0 (net/netlink/genetlink.c:1114) genl_rcv_msg (net/netlink/genetlink.c:1194 net/netlink/genetlink.c:1209) [...] netlink_rcv_skb (net/netlink/af_netlink.c:2550) genl_rcv (net/netlink/genetlink.c:1219) netlink_unicast (net/netlink/af_netlink.c:1319 net/netlink/af_netlink.c:1344) netlink_sendmsg (net/netlink/af_netlink.c:1894) __sock_sendmsg (net/socket.c:727 (discriminator 1) net/socket.c:742 (discriminator 1)) __sys_sendto (./include/linux/file.h:62 (discriminator 1) ./include/linux/file.h:83 (discriminator 1) net/socket.c:2183 (discriminator 1)) __x64_sys_sendto (net/socket.c:2213 (discriminator 1) net/socket.c:2209 (discriminator 1) net/socket.c:2209 (discriminator 1)) do_syscall_64 (arch/x86/entry/syscall_64.c:63 (discriminator 1) arch/x86/entry/syscall_64.c:94 (discriminator 1)) entry_SYSCALL_64_after_hwframe (net/arch/x86/entry/entry_64.S:130) This patch makes udp_sock_create6 return -EPFNOSUPPORT instead, so callers correctly take their error paths. There is only one caller of the vulnerable function and only privileged users can trigger it. 
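For illustration, the caller pattern that crashed with the old stub looks
roughly like this (modeled on fou_create(), simplified and not the exact
code):

    struct socket *sock = NULL;
    int err;

    err = udp_sock_create(net, &cfg, &sock);  /* IPv6 cfg -> udp_sock_create6() */
    if (err < 0)
            goto error;

    /* With CONFIG_IPV6=n the old stub returned 0 without creating a
     * socket, so sock is still NULL here and is dereferenced below. */
    setup_udp_tunnel_sock(net, sock, &tunnel_cfg);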
Fixes: fd384412e199b ("udp_tunnel: Seperate ipv6 functions into its own file.") Reported-by: Weiming Shi Signed-off-by: Xiang Mei Link: https://patch.msgid.link/20260317010241.1893893-1-xmei5@asu.edu Signed-off-by: Jakub Kicinski --- include/net/udp_tunnel.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index d9c6d04bb3b5..fc1fc43345b5 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -52,7 +52,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, struct socket **sockp) { - return 0; + return -EPFNOSUPPORT; } #endif -- cgit v1.2.3