author | Sebastian Sanchez <sebastian.sanchez@intel.com> | 2018-05-02 16:43:55 +0300 |
---|---|---|
committer | Doug Ledford <dledford@redhat.com> | 2018-05-09 22:53:30 +0300 |
commit | 5d18ee67d4c1735f5c1f757e89228ec68e4f4ef3 (patch) | |
tree | bee1ad21ecea953b9048b94fd05b2c86a4128c27 /drivers/infiniband/sw/rdmavt/cq.c | |
parent | cf38ea100edfcc0ec0a5797966d69ec4e10fe4f1 (diff) | |
download | linux-5d18ee67d4c1735f5c1f757e89228ec68e4f4ef3.tar.xz | |
IB/{hfi1, rdmavt, qib}: Implement CQ completion vector support
Currently the driver doesn't support completion vectors. These
are used to indicate which sets of CQs should be grouped together
into the same vector. A vector is a CQ processing thread that
runs on a specific CPU.
If an application has several CQs bound to different completion
vectors, and each completion vector runs on a different CPU, then
the completion queue workload is balanced across CPUs. This helps
the design scale as more nodes are used.
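For context, this is how an application binds CQs to completion vectors from userspace through the standard libibverbs API. The helper below is a hypothetical illustration, not part of this patch:

```c
/* Hypothetical userspace sketch (libibverbs): spread CQs across the
 * device's completion vectors so their completion processing can run
 * on different CPUs. Error handling is omitted for brevity. */
#include <infiniband/verbs.h>

static struct ibv_cq *create_cq_on_vector(struct ibv_context *ctx,
					  int depth, int vector)
{
	/* ctx->num_comp_vectors is the count the device exposes. */
	return ibv_create_cq(ctx, depth, NULL, NULL,
			     vector % ctx->num_comp_vectors);
}

/* Usage: bind two CQs to different vectors so their completion
 * handling can land on different CPUs:
 *   struct ibv_cq *cq0 = create_cq_on_vector(ctx, 256, 0);
 *   struct ibv_cq *cq1 = create_cq_on_vector(ctx, 256, 1);
 */
```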
Implement CQ completion vector support using a global workqueue
where a CQ entry is queued to the CPU corresponding to the CQ's
completion vector. Since the workqueue is global, it is guaranteed
to exist whenever CQ entries are queued; therefore, the RCU locking
around cq->rdi->worker in the hot path is superfluous.
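A minimal sketch of that hot path, assuming the struct rvt_cq fields this patch introduces; the kick_cq_completion() helper is hypothetical, and the real queueing happens inline in rvt_cq_enter(), as the diff below shows:

```c
#include <linux/workqueue.h>
#include <rdma/rdmavt_cq.h>	/* struct rvt_cq, incl. comp_vector_cpu */

/* One workqueue for all devices, created at module init. */
static struct workqueue_struct *comp_vector_wq;

static void kick_cq_completion(struct rvt_cq *cq)
{
	/* The workqueue outlives every CQ, so no RCU dance is needed
	 * before dereferencing it; just pin the work item to the CPU
	 * assigned to this CQ's completion vector. */
	queue_work_on(cq->comp_vector_cpu, comp_vector_wq, &cq->comptask);
}
```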
Each completion vector is assigned to a different CPU. The number of
completion vectors available is computed by taking the number of
online, physical CPUs from the local NUMA node and subtracting the
CPUs used for kernel receive queues and the general interrupt.
Special use cases (a worked sketch follows this list):
* If there are no CPUs left over for completion vectors, the CPU
already serving the general interrupt is reused; therefore, only
one completion vector is available.
* For multi-HFI systems, the number of completion vectors available
for each device is the total number of completion vectors in
the local NUMA node divided by the number of devices in the same
NUMA node. If there's a division remainder, the first device to
get initialized gets an extra completion vector.
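To make the bookkeeping above concrete, here is a worked sketch with assumed numbers; the helper is illustrative only, not driver code:

```c
#include <stdbool.h>

/* Assumed example: a NUMA node with 14 online physical CPUs, 8 taken
 * by kernel receive queues and 1 by the general interrupt, leaves
 * 14 - 8 - 1 = 5 completion vectors. With 2 HFIs on that node, each
 * device gets 5 / 2 = 2 vectors, and the first device to initialize
 * also takes the remainder, ending up with 3. */
static int comp_vects_for_dev(int node_cpus, int rcv_queue_cpus,
			      int gen_int_cpus, int ndevs, bool first_dev)
{
	int avail = node_cpus - rcv_queue_cpus - gen_int_cpus;

	if (avail <= 0)		/* fall back to the general interrupt CPU */
		return 1;
	return avail / ndevs + (first_dev ? avail % ndevs : 0);
}
```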
Upon CQ creation, an invalid completion vector could be specified.
Handle it as follows (see the clamping sketch after this list):
* If the completion vector is less than 0, set it to 0.
* Otherwise, set the completion vector to the passed value taken
  modulo the number of completion vectors available on the device.
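The clamping appears verbatim in rvt_create_cq() in the diff below; pulled out as a standalone helper (the name is illustrative), it behaves like this:

```c
static int clamp_comp_vector(int requested, int num_comp_vectors)
{
	/* With num_comp_vectors == 4: -2 -> 0, 9 -> 9 % 4 == 1, 3 -> 3. */
	if (requested < 0)
		requested = 0;
	return requested % num_comp_vectors;
}
```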
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Sebastian Sanchez <sebastian.sanchez@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/infiniband/sw/rdmavt/cq.c')
-rw-r--r-- | drivers/infiniband/sw/rdmavt/cq.c | 81 |
1 file changed, 32 insertions(+), 49 deletions(-)
```diff
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 340c17aba3b0..4f1544ad4aff 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -47,11 +47,12 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <linux/kthread.h>
 
 #include "cq.h"
 #include "vt.h"
 #include "trace.h"
 
+static struct workqueue_struct *comp_vector_wq;
+
 /**
  * rvt_cq_enter - add a new entry to the completion queue
  * @cq: completion queue
@@ -120,27 +121,21 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
 	if (cq->notify == IB_CQ_NEXT_COMP ||
 	    (cq->notify == IB_CQ_SOLICITED &&
 	     (solicited || entry->status != IB_WC_SUCCESS))) {
-		struct kthread_worker *worker;
-
 		/*
 		 * This will cause send_complete() to be called in
 		 * another thread.
 		 */
-		rcu_read_lock();
-		worker = rcu_dereference(cq->rdi->worker);
-		if (likely(worker)) {
-			cq->notify = RVT_CQ_NONE;
-			cq->triggered++;
-			kthread_queue_work(worker, &cq->comptask);
-		}
-		rcu_read_unlock();
+		cq->notify = RVT_CQ_NONE;
+		cq->triggered++;
+		queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
+			      &cq->comptask);
 	}
 
 	spin_unlock_irqrestore(&cq->lock, flags);
 }
 EXPORT_SYMBOL(rvt_cq_enter);
 
-static void send_complete(struct kthread_work *work)
+static void send_complete(struct work_struct *work)
 {
 	struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);
 
@@ -192,6 +187,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
 	struct ib_cq *ret;
 	u32 sz;
 	unsigned int entries = attr->cqe;
+	int comp_vector = attr->comp_vector;
 
 	if (attr->flags)
 		return ERR_PTR(-EINVAL);
@@ -199,6 +195,11 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
 	if (entries < 1 || entries > rdi->dparms.props.max_cqe)
 		return ERR_PTR(-EINVAL);
 
+	if (comp_vector < 0)
+		comp_vector = 0;
+
+	comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;
+
 	/* Allocate the completion queue structure. */
 	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node);
 	if (!cq)
@@ -267,14 +268,22 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
 	 * an error.
 	 */
 	cq->rdi = rdi;
+	if (rdi->driver_f.comp_vect_cpu_lookup)
+		cq->comp_vector_cpu =
+			rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
+	else
+		cq->comp_vector_cpu =
+			cpumask_first(cpumask_of_node(rdi->dparms.node));
+
 	cq->ibcq.cqe = entries;
 	cq->notify = RVT_CQ_NONE;
 	spin_lock_init(&cq->lock);
-	kthread_init_work(&cq->comptask, send_complete);
+	INIT_WORK(&cq->comptask, send_complete);
 	cq->queue = wc;
 
 	ret = &cq->ibcq;
 
+	trace_rvt_create_cq(cq, attr);
 	goto done;
 
 bail_ip:
@@ -300,7 +309,7 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
 	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
 	struct rvt_dev_info *rdi = cq->rdi;
 
-	kthread_flush_work(&cq->comptask);
+	flush_work(&cq->comptask);
 	spin_lock_irq(&rdi->n_cqs_lock);
 	rdi->n_cqs_allocated--;
 	spin_unlock_irq(&rdi->n_cqs_lock);
@@ -510,24 +519,13 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
  *
  * Return: 0 on success
  */
-int rvt_driver_cq_init(struct rvt_dev_info *rdi)
+int rvt_driver_cq_init(void)
 {
-	int cpu;
-	struct kthread_worker *worker;
-
-	if (rcu_access_pointer(rdi->worker))
-		return 0;
-
-	spin_lock_init(&rdi->n_cqs_lock);
-
-	cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
-	worker = kthread_create_worker_on_cpu(cpu, 0,
-					      "%s", rdi->dparms.cq_name);
-	if (IS_ERR(worker))
-		return PTR_ERR(worker);
+	comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
+					 0, "rdmavt_cq");
+	if (!comp_vector_wq)
+		return -ENOMEM;
 
-	set_user_nice(worker->task, MIN_NICE);
-	RCU_INIT_POINTER(rdi->worker, worker);
 	return 0;
 }
 
@@ -535,23 +533,8 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
  * rvt_cq_exit - tear down cq reources
  * @rdi: rvt dev structure
  */
-void rvt_cq_exit(struct rvt_dev_info *rdi)
+void rvt_cq_exit(void)
 {
-	struct kthread_worker *worker;
-
-	if (!rcu_access_pointer(rdi->worker))
-		return;
-
-	spin_lock(&rdi->n_cqs_lock);
-	worker = rcu_dereference_protected(rdi->worker,
-					   lockdep_is_held(&rdi->n_cqs_lock));
-	if (!worker) {
-		spin_unlock(&rdi->n_cqs_lock);
-		return;
-	}
-	RCU_INIT_POINTER(rdi->worker, NULL);
-	spin_unlock(&rdi->n_cqs_lock);
-	synchronize_rcu();
-
-	kthread_destroy_worker(worker);
+	destroy_workqueue(comp_vector_wq);
+	comp_vector_wq = NULL;
 }
```
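With the rdi argument dropped, rvt_driver_cq_init() and rvt_cq_exit() now manage a module-lifetime resource rather than a per-device one; WQ_HIGHPRI places the work on high-priority worker pools, and WQ_CPU_INTENSIVE exempts the CPU-bound completion processing from the workqueue's concurrency management. A sketch of how a caller would pair the two (the module hook names are assumed; the full patch updates the real callers outside cq.c):

```c
#include <linux/module.h>

static int __init rdmavt_example_init(void)
{
	/* Create the global completion-vector workqueue once per module load. */
	return rvt_driver_cq_init();
}

static void __exit rdmavt_example_exit(void)
{
	/* Destroy the workqueue; every CQ must already be gone. */
	rvt_cq_exit();
}

module_init(rdmavt_example_init);
module_exit(rdmavt_example_exit);
```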