summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMarco Crivellari <marco.crivellari@suse.com>2025-12-24 19:13:01 +0300
committerCorey Minyard <corey@minyard.net>2026-03-05 19:53:20 +0300
commit122d16da1313f1746a4cdd31a620bbb141be7060 (patch)
tree419bf7cdce5d7b7c619d9c6943dfd898487e02a7
parentaf4e9ef3d78420feb8fe58cd9a1ab80c501b3c08 (diff)
downloadlinux-122d16da1313f1746a4cdd31a620bbb141be7060.tar.xz
ipmi: Replace use of system_wq with system_percpu_wq
This patch continues the effort to refactor workqueue APIs, which began with the changes introducing new workqueues and a new alloc_workqueue flag: commit 128ea9f6ccfb ("workqueue: Add system_percpu_wq and system_dfl_wq") commit 930c2ea566af ("workqueue: Add new WQ_PERCPU flag") The point of the refactoring is to eventually alter the default behavior of workqueues to become unbound by default so that their workload placement is optimized by the scheduler. Before that can happen, and after a careful review and conversion of each individual case, workqueue users must be converted to the better-named new workqueues with no intended behaviour changes: system_wq -> system_percpu_wq system_unbound_wq -> system_dfl_wq This way the old obsolete workqueues (system_wq, system_unbound_wq) can be removed in the future. Suggested-by: Tejun Heo <tj@kernel.org> Signed-off-by: Marco Crivellari <marco.crivellari@suse.com> Message-ID: <20251224161301.135382-1-marco.crivellari@suse.com> Signed-off-by: Corey Minyard <corey@minyard.net>
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c10
1 file changed, 5 insertions, 5 deletions
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c41f51c82edd..869ac87a4b6a 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -987,7 +987,7 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
mutex_lock(&intf->user_msgs_mutex);
list_add_tail(&msg->link, &intf->user_msgs);
mutex_unlock(&intf->user_msgs_mutex);
- queue_work(system_wq, &intf->smi_work);
+ queue_work(system_percpu_wq, &intf->smi_work);
}
return rv;
@@ -4977,7 +4977,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
if (run_to_completion)
smi_work(&intf->smi_work);
else
- queue_work(system_wq, &intf->smi_work);
+ queue_work(system_percpu_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
@@ -4987,7 +4987,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
return;
atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
- queue_work(system_wq, &intf->smi_work);
+ queue_work(system_percpu_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -5162,7 +5162,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
flags);
}
- queue_work(system_wq, &intf->smi_work);
+ queue_work(system_percpu_wq, &intf->smi_work);
return need_timer;
}
@@ -5218,7 +5218,7 @@ static void ipmi_timeout(struct timer_list *unused)
if (atomic_read(&stop_operation))
return;
- queue_work(system_wq, &ipmi_timer_work);
+ queue_work(system_percpu_wq, &ipmi_timer_work);
}
static void need_waiter(struct ipmi_smi *intf)