author	Sebastian Sanchez <sebastian.sanchez@intel.com>	2018-05-02 16:43:39 +0300
committer	Doug Ledford <dledford@redhat.com>	2018-05-09 22:53:30 +0300
commit	af8aab71370a692eaf7e7969ba5b1a455ac20113 (patch)
tree	5ea7da221cc96d1f819c87870589fca3a5d21f83 /drivers/infiniband/sw/rdmavt
parent	c872a1f9e3aaf51b091ff19ef6cb1e1a298f3c90 (diff)
download	linux-af8aab71370a692eaf7e7969ba5b1a455ac20113.tar.xz
IB/hfi1: Optimize kthread pointer locking when queuing CQ entries
All threads queuing CQ entries on different CQs are unnecessarily synchronized by a spin lock to check that the CQ kthread worker hasn't been destroyed before queuing a CQ entry. The lock used in 6efaf10f163d ("IB/rdmavt: Avoid queuing work into a destroyed cq kthread worker") is a device-global lock and will perform poorly at scale as completions are entered from a large number of CPUs.

Convert to RCU: the read side, in rvt_cq_enter(), determines that the worker is alive prior to triggering the completion event, and rvt_driver_cq_init() and rvt_cq_exit() apply write-side RCU semantics.

Fixes: 6efaf10f163d ("IB/rdmavt: Avoid queuing work into a destroyed cq kthread worker")
Cc: <stable@vger.kernel.org> # 4.14.x
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Sebastian Sanchez <sebastian.sanchez@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
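In isolation, the locking pattern the patch adopts looks roughly like the sketch below. The names dev_info, queue_completion() and teardown_worker() are illustrative stand-ins for the rdmavt structures, not the driver's actual definitions:

/*
 * Minimal sketch of the RCU-protected worker-pointer pattern.
 * Hypothetical names; not the rdmavt code itself.
 */
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct dev_info {
	struct kthread_worker __rcu *worker;	/* RCU-protected pointer */
	spinlock_t worker_lock;			/* serializes updaters only */
};

/* Hot path, called concurrently from many CPUs: no shared lock taken. */
static void queue_completion(struct dev_info *di, struct kthread_work *work)
{
	struct kthread_worker *worker;

	rcu_read_lock();
	worker = rcu_dereference(di->worker);
	if (likely(worker))
		kthread_queue_work(worker, work);
	rcu_read_unlock();
}

/* Teardown: retract the pointer, wait out the readers, then destroy. */
static void teardown_worker(struct dev_info *di)
{
	struct kthread_worker *worker;

	spin_lock(&di->worker_lock);
	worker = rcu_dereference_protected(di->worker,
					   lockdep_is_held(&di->worker_lock));
	if (!worker) {
		spin_unlock(&di->worker_lock);
		return;
	}
	RCU_INIT_POINTER(di->worker, NULL);
	spin_unlock(&di->worker_lock);
	synchronize_rcu();	/* no CPU can still observe 'worker' */
	kthread_destroy_worker(worker);
}

The synchronize_rcu() call is what makes dropping the global lock from the hot path safe: once it returns, every reader that could have observed the old worker pointer has left its RCU read-side critical section, so kthread_destroy_worker() cannot race with a late kthread_queue_work().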
Diffstat (limited to 'drivers/infiniband/sw/rdmavt')
-rw-r--r--	drivers/infiniband/sw/rdmavt/cq.c	31
1 file changed, 19 insertions, 12 deletions
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index fb52b669bfce..340c17aba3b0 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -120,17 +120,20 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
if (cq->notify == IB_CQ_NEXT_COMP ||
(cq->notify == IB_CQ_SOLICITED &&
(solicited || entry->status != IB_WC_SUCCESS))) {
+ struct kthread_worker *worker;
+
/*
* This will cause send_complete() to be called in
* another thread.
*/
- spin_lock(&cq->rdi->n_cqs_lock);
- if (likely(cq->rdi->worker)) {
+ rcu_read_lock();
+ worker = rcu_dereference(cq->rdi->worker);
+ if (likely(worker)) {
cq->notify = RVT_CQ_NONE;
cq->triggered++;
- kthread_queue_work(cq->rdi->worker, &cq->comptask);
+ kthread_queue_work(worker, &cq->comptask);
}
- spin_unlock(&cq->rdi->n_cqs_lock);
+ rcu_read_unlock();
}
spin_unlock_irqrestore(&cq->lock, flags);
@@ -512,7 +515,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
int cpu;
struct kthread_worker *worker;
- if (rdi->worker)
+ if (rcu_access_pointer(rdi->worker))
return 0;
spin_lock_init(&rdi->n_cqs_lock);
@@ -524,7 +527,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
return PTR_ERR(worker);
set_user_nice(worker->task, MIN_NICE);
- rdi->worker = worker;
+ RCU_INIT_POINTER(rdi->worker, worker);
return 0;
}
@@ -536,15 +539,19 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
{
struct kthread_worker *worker;
- /* block future queuing from send_complete() */
- spin_lock_irq(&rdi->n_cqs_lock);
- worker = rdi->worker;
+ if (!rcu_access_pointer(rdi->worker))
+ return;
+
+ spin_lock(&rdi->n_cqs_lock);
+ worker = rcu_dereference_protected(rdi->worker,
+ lockdep_is_held(&rdi->n_cqs_lock));
if (!worker) {
- spin_unlock_irq(&rdi->n_cqs_lock);
+ spin_unlock(&rdi->n_cqs_lock);
return;
}
- rdi->worker = NULL;
- spin_unlock_irq(&rdi->n_cqs_lock);
+ RCU_INIT_POINTER(rdi->worker, NULL);
+ spin_unlock(&rdi->n_cqs_lock);
+ synchronize_rcu();
kthread_destroy_worker(worker);
}