Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c                | 22
-rw-r--r--  drivers/infiniband/core/cma.c               | 27
-rw-r--r--  drivers/infiniband/core/fmr_pool.c          |  3
-rw-r--r--  drivers/infiniband/core/sa_query.c          | 18
-rw-r--r--  drivers/infiniband/core/ucm.c               | 16
-rw-r--r--  drivers/infiniband/core/ucma.c              | 32
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c        | 17
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_qp.c      | 19
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.h          | 24
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h      | 27
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c        | 27
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c        | 34
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c  | 16
-rw-r--r--  drivers/infiniband/hw/mlx4/cm.c             | 32
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c  | 14
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c        | 21
16 files changed, 136 insertions(+), 213 deletions(-)
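
The diff below converts drivers/infiniband from the old idr_pre_get() + idr_get_new*() interface, which had to be retried whenever idr_get_new*() returned -EAGAIN, to idr_preload() + idr_alloc(). As a rough sketch of the pattern being applied (illustrative only; my_idr, my_lock and struct my_obj are invented names, not identifiers from this patch), an ID allocation performed under a spinlock now looks roughly like this:

	#include <linux/idr.h>
	#include <linux/spinlock.h>
	#include <linux/gfp.h>

	/*
	 * Minimal sketch of the idr_preload()/idr_alloc() pattern used
	 * throughout this patch; all names are placeholders.  idr_preload()
	 * fills a per-cpu backup buffer while sleeping is still allowed, so
	 * the idr_alloc() call itself can run under a spinlock with
	 * GFP_NOWAIT and needs no -EAGAIN retry loop; it returns the new
	 * ID (>= 0) or a negative errno.
	 */
	static DEFINE_IDR(my_idr);
	static DEFINE_SPINLOCK(my_lock);

	struct my_obj {
		int id;
	};

	static int my_alloc_id(struct my_obj *obj)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* may sleep; called unlocked */
		spin_lock(&my_lock);

		/* end == 0 means "no upper bound" */
		id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
		if (id >= 0)
			obj->id = id;

		spin_unlock(&my_lock);
		idr_preload_end();

		return id < 0 ? id : 0;
	}

Each driver below is converted along these lines; callers that need a specific ID instead pass the one-entry range (id, id + 1), and -ENOSPC from idr_alloc() then replaces the old manual range checks.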
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 394fea2ba1bc..784b97cb05b0 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -382,20 +382,21 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
unsigned long flags;
- int ret, id;
+ int id;
static int next_id;
- do {
- spin_lock_irqsave(&cm.lock, flags);
- ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
- next_id, &id);
- if (!ret)
- next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
- spin_unlock_irqrestore(&cm.lock, flags);
- } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
+ idr_preload(GFP_KERNEL);
+ spin_lock_irqsave(&cm.lock, flags);
+
+ id = idr_alloc(&cm.local_id_table, cm_id_priv, next_id, 0, GFP_NOWAIT);
+ if (id >= 0)
+ next_id = max(id + 1, 0);
+
+ spin_unlock_irqrestore(&cm.lock, flags);
+ idr_preload_end();
cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
- return ret;
+ return id < 0 ? id : 0;
}
static void cm_free_id(__be32 local_id)
@@ -3844,7 +3845,6 @@ static int __init ib_cm_init(void)
cm.remote_sidr_table = RB_ROOT;
idr_init(&cm.local_id_table);
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
- idr_pre_get(&cm.local_id_table, GFP_KERNEL);
INIT_LIST_HEAD(&cm.timewait_list);
ret = class_register(&cm_class);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d789eea32168..71c2c7116802 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2143,33 +2143,23 @@ static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
unsigned short snum)
{
struct rdma_bind_list *bind_list;
- int port, ret;
+ int ret;
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
- do {
- ret = idr_get_new_above(ps, bind_list, snum, &port);
- } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
-
- if (ret)
- goto err1;
-
- if (port != snum) {
- ret = -EADDRNOTAVAIL;
- goto err2;
- }
+ ret = idr_alloc(ps, bind_list, snum, snum + 1, GFP_KERNEL);
+ if (ret < 0)
+ goto err;
bind_list->ps = ps;
- bind_list->port = (unsigned short) port;
+ bind_list->port = (unsigned short)ret;
cma_bind_port(bind_list, id_priv);
return 0;
-err2:
- idr_remove(ps, port);
-err1:
+err:
kfree(bind_list);
- return ret;
+ return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
}
static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
@@ -2214,10 +2204,9 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
{
struct rdma_id_private *cur_id;
struct sockaddr *addr, *cur_addr;
- struct hlist_node *node;
addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
- hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
+ hlist_for_each_entry(cur_id, &bind_list->owners, node) {
if (id_priv == cur_id)
continue;
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index 176c8f90f2bb..9f5ad7cc33c8 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -118,14 +118,13 @@ static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
{
struct hlist_head *bucket;
struct ib_pool_fmr *fmr;
- struct hlist_node *pos;
if (!pool->cache_bucket)
return NULL;
bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
- hlist_for_each_entry(fmr, pos, bucket, cache_node)
+ hlist_for_each_entry(fmr, bucket, cache_node)
if (io_virtual_address == fmr->io_virtual_address &&
page_list_len == fmr->page_list_len &&
!memcmp(page_list, fmr->page_list,
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a8905abc56e4..934f45e79e5e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -611,19 +611,21 @@ static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
+ bool preload = gfp_mask & __GFP_WAIT;
unsigned long flags;
int ret, id;
-retry:
- if (!idr_pre_get(&query_idr, gfp_mask))
- return -ENOMEM;
+ if (preload)
+ idr_preload(gfp_mask);
spin_lock_irqsave(&idr_lock, flags);
- ret = idr_get_new(&query_idr, query, &id);
+
+ id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
+
spin_unlock_irqrestore(&idr_lock, flags);
- if (ret == -EAGAIN)
- goto retry;
- if (ret)
- return ret;
+ if (preload)
+ idr_preload_end();
+ if (id < 0)
+ return id;
query->mad_buf->timeout_ms = timeout_ms;
query->mad_buf->context[0] = query;
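
send_mad() can be called with either a blocking or an atomic gfp_mask, so the hunk above only wraps the allocation in idr_preload()/idr_preload_end() when __GFP_WAIT is set; atomic callers fall through to a bare GFP_NOWAIT idr_alloc(), which may fail under memory pressure. A hedged sketch of that conditional-preload shape, with invented names (example_idr, example_lock), not code from this patch:

	#include <linux/idr.h>
	#include <linux/spinlock.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(example_idr);
	static DEFINE_SPINLOCK(example_lock);

	static int example_alloc(void *ptr, gfp_t gfp_mask)
	{
		bool preload = gfp_mask & __GFP_WAIT;	/* may we sleep? */
		unsigned long flags;
		int id;

		if (preload)
			idr_preload(gfp_mask);
		spin_lock_irqsave(&example_lock, flags);

		id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);

		spin_unlock_irqrestore(&example_lock, flags);
		if (preload)
			idr_preload_end();

		return id;	/* new ID on success, negative errno on failure */
	}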
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 49b15ac1987e..f2f63933e8a9 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -176,7 +176,6 @@ static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx)
static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
{
struct ib_ucm_context *ctx;
- int result;
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
@@ -187,17 +186,10 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
ctx->file = file;
INIT_LIST_HEAD(&ctx->events);
- do {
- result = idr_pre_get(&ctx_id_table, GFP_KERNEL);
- if (!result)
- goto error;
-
- mutex_lock(&ctx_id_mutex);
- result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
- mutex_unlock(&ctx_id_mutex);
- } while (result == -EAGAIN);
-
- if (result)
+ mutex_lock(&ctx_id_mutex);
+ ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL);
+ mutex_unlock(&ctx_id_mutex);
+ if (ctx->id < 0)
goto error;
list_add_tail(&ctx->file_list, &file->ctxs);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2709ff581392..5ca44cd9b00c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -145,7 +145,6 @@ static void ucma_put_ctx(struct ucma_context *ctx)
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
struct ucma_context *ctx;
- int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -156,17 +155,10 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
INIT_LIST_HEAD(&ctx->mc_list);
ctx->file = file;
- do {
- ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
- if (!ret)
- goto error;
-
- mutex_lock(&mut);
- ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
- mutex_unlock(&mut);
- } while (ret == -EAGAIN);
-
- if (ret)
+ mutex_lock(&mut);
+ ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
+ mutex_unlock(&mut);
+ if (ctx->id < 0)
goto error;
list_add_tail(&ctx->list, &file->ctx_list);
@@ -180,23 +172,15 @@ error:
static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
struct ucma_multicast *mc;
- int ret;
mc = kzalloc(sizeof(*mc), GFP_KERNEL);
if (!mc)
return NULL;
- do {
- ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
- if (!ret)
- goto error;
-
- mutex_lock(&mut);
- ret = idr_get_new(&multicast_idr, mc, &mc->id);
- mutex_unlock(&mut);
- } while (ret == -EAGAIN);
-
- if (ret)
+ mutex_lock(&mut);
+ mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
+ mutex_unlock(&mut);
+ if (mc->id < 0)
goto error;
mc->ctx = ctx;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e71d834c922a..a7d00f6b3bc1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -125,18 +125,17 @@ static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
int ret;
-retry:
- if (!idr_pre_get(idr, GFP_KERNEL))
- return -ENOMEM;
-
+ idr_preload(GFP_KERNEL);
spin_lock(&ib_uverbs_idr_lock);
- ret = idr_get_new(idr, uobj, &uobj->id);
- spin_unlock(&ib_uverbs_idr_lock);
- if (ret == -EAGAIN)
- goto retry;
+ ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ uobj->id = ret;
- return ret;
+ spin_unlock(&ib_uverbs_idr_lock);
+ idr_preload_end();
+
+ return ret < 0 ? ret : 0;
}
void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 28cd5cb51859..0ab826b280b2 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -382,14 +382,17 @@ static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
{
int ret;
- do {
- spin_lock_irq(&c2dev->qp_table.lock);
- ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
- c2dev->qp_table.last++, &qp->qpn);
- spin_unlock_irq(&c2dev->qp_table.lock);
- } while ((ret == -EAGAIN) &&
- idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
- return ret;
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&c2dev->qp_table.lock);
+
+ ret = idr_alloc(&c2dev->qp_table.idr, qp, c2dev->qp_table.last++, 0,
+ GFP_NOWAIT);
+ if (ret >= 0)
+ qp->qpn = ret;
+
+ spin_unlock_irq(&c2dev->qp_table.lock);
+ idr_preload_end();
+ return ret < 0 ? ret : 0;
}
static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index a1c44578e039..837862287a29 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -153,19 +153,17 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
void *handle, u32 id)
{
int ret;
- int newid;
-
- do {
- if (!idr_pre_get(idr, GFP_KERNEL)) {
- return -ENOMEM;
- }
- spin_lock_irq(&rhp->lock);
- ret = idr_get_new_above(idr, handle, id, &newid);
- BUG_ON(newid != id);
- spin_unlock_irq(&rhp->lock);
- } while (ret == -EAGAIN);
-
- return ret;
+
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&rhp->lock);
+
+ ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
+
+ spin_unlock_irq(&rhp->lock);
+ idr_preload_end();
+
+ BUG_ON(ret == -ENOSPC);
+ return ret < 0 ? ret : 0;
}
static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4c07fc069766..7eec5e13fa8c 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -260,20 +260,21 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
void *handle, u32 id, int lock)
{
int ret;
- int newid;
- do {
- if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
- return -ENOMEM;
- if (lock)
- spin_lock_irq(&rhp->lock);
- ret = idr_get_new_above(idr, handle, id, &newid);
- BUG_ON(!ret && newid != id);
- if (lock)
- spin_unlock_irq(&rhp->lock);
- } while (ret == -EAGAIN);
-
- return ret;
+ if (lock) {
+ idr_preload(GFP_KERNEL);
+ spin_lock_irq(&rhp->lock);
+ }
+
+ ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);
+
+ if (lock) {
+ spin_unlock_irq(&rhp->lock);
+ idr_preload_end();
+ }
+
+ BUG_ON(ret == -ENOSPC);
+ return ret < 0 ? ret : 0;
}
static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
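
Unlike the callers above, the cxgb3/cxgb4 insert_handle() helpers want a specific, caller-chosen ID, so idr_alloc() is given the one-entry range [id, id + 1); -ENOSPC then means that exact ID is already in use (the old code asserted newid == id instead). A minimal sketch of that fixed-ID form, with placeholder names rather than identifiers from this patch:

	#include <linux/idr.h>
	#include <linux/spinlock.h>
	#include <linux/gfp.h>

	/* Sketch only; some_idr and some_lock are placeholders. */
	static int example_insert_handle(struct idr *some_idr, spinlock_t *some_lock,
					 void *handle, u32 id)
	{
		int ret;

		idr_preload(GFP_KERNEL);
		spin_lock_irq(some_lock);

		/* [id, id + 1) allows exactly one value; -ENOSPC == "id taken" */
		ret = idr_alloc(some_idr, handle, id, id + 1, GFP_NOWAIT);

		spin_unlock_irq(some_lock);
		idr_preload_end();

		return ret < 0 ? ret : 0;
	}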
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 8f5290147e8a..212150c25ea0 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -128,7 +128,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
void *vpage;
u32 counter;
u64 rpage, cqx_fec, h_ret;
- int ipz_rc, ret, i;
+ int ipz_rc, i;
unsigned long flags;
if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
@@ -163,32 +163,19 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
adapter_handle = shca->ipz_hca_handle;
param.eq_handle = shca->eq.ipz_eq_handle;
- do {
- if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) {
- cq = ERR_PTR(-ENOMEM);
- ehca_err(device, "Can't reserve idr nr. device=%p",
- device);
- goto create_cq_exit1;
- }
-
- write_lock_irqsave(&ehca_cq_idr_lock, flags);
- ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token);
- write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
- } while (ret == -EAGAIN);
+ idr_preload(GFP_KERNEL);
+ write_lock_irqsave(&ehca_cq_idr_lock, flags);
+ my_cq->token = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
+ write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
+ idr_preload_end();
- if (ret) {
+ if (my_cq->token < 0) {
cq = ERR_PTR(-ENOMEM);
ehca_err(device, "Can't allocate new idr entry. device=%p",
device);
goto create_cq_exit1;
}
- if (my_cq->token > 0x1FFFFFF) {
- cq = ERR_PTR(-ENOMEM);
- ehca_err(device, "Invalid number of cq. device=%p", device);
- goto create_cq_exit2;
- }
-
/*
* CQs maximum depth is 4GB-64, but we need additional 20 as buffer
* for receiving errors CQEs.
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 149393915ae5..00d6861a6a18 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -636,30 +636,26 @@ static struct ehca_qp *internal_create_qp(
my_qp->send_cq =
container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
- do {
- if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
- ret = -ENOMEM;
- ehca_err(pd->device, "Can't reserve idr resources.");
- goto create_qp_exit0;
- }
+ idr_preload(GFP_KERNEL);
+ write_lock_irqsave(&ehca_qp_idr_lock, flags);
- write_lock_irqsave(&ehca_qp_idr_lock, flags);
- ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
- write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
- } while (ret == -EAGAIN);
+ ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
+ if (ret >= 0)
+ my_qp->token = ret;
- if (ret) {
- ret = -ENOMEM;
- ehca_err(pd->device, "Can't allocate new idr entry.");
+ write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
+ idr_preload_end();
+ if (ret < 0) {
+ if (ret == -ENOSPC) {
+ ret = -EINVAL;
+ ehca_err(pd->device, "Invalid number of qp");
+ } else {
+ ret = -ENOMEM;
+ ehca_err(pd->device, "Can't allocate new idr entry.");
+ }
goto create_qp_exit0;
}
- if (my_qp->token > 0x1FFFFFF) {
- ret = -EINVAL;
- ehca_err(pd->device, "Invalid number of qp");
- goto create_qp_exit1;
- }
-
if (has_srq)
parms.srq_token = my_qp->token;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 7b371f545ece..bd0caedafe99 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -194,11 +194,6 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
struct ipath_devdata *dd;
int ret;
- if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
- dd = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
dd = vzalloc(sizeof(*dd));
if (!dd) {
dd = ERR_PTR(-ENOMEM);
@@ -206,9 +201,10 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
}
dd->ipath_unit = -1;
+ idr_preload(GFP_KERNEL);
spin_lock_irqsave(&ipath_devs_lock, flags);
- ret = idr_get_new(&unit_table, dd, &dd->ipath_unit);
+ ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
if (ret < 0) {
printk(KERN_ERR IPATH_DRV_NAME
": Could not allocate unit ID: error %d\n", -ret);
@@ -216,6 +212,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
dd = ERR_PTR(ret);
goto bail_unlock;
}
+ dd->ipath_unit = ret;
dd->pcidev = pdev;
pci_set_drvdata(pdev, dd);
@@ -224,7 +221,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
bail_unlock:
spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
+ idr_preload_end();
bail:
return dd;
}
@@ -2503,11 +2500,6 @@ static int __init infinipath_init(void)
* the PCI subsystem.
*/
idr_init(&unit_table);
- if (!idr_pre_get(&unit_table, GFP_KERNEL)) {
- printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n");
- ret = -ENOMEM;
- goto bail;
- }
ret = pci_register_driver(&ipath_driver);
if (ret < 0) {
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index dbc99d41605c..e0d79b2395e4 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -203,7 +203,7 @@ static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)
static struct id_map_entry *
id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
{
- int ret, id;
+ int ret;
static int next_id;
struct id_map_entry *ent;
struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov;
@@ -220,25 +220,23 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)
ent->dev = to_mdev(ibdev);
INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout);
- do {
- spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
- ret = idr_get_new_above(&sriov->pv_id_table, ent,
- next_id, &id);
- if (!ret) {
- next_id = ((unsigned) id + 1) & MAX_IDR_MASK;
- ent->pv_cm_id = (u32)id;
- sl_id_map_add(ibdev, ent);
- }
+ idr_preload(GFP_KERNEL);
+ spin_lock(&to_mdev(ibdev)->sriov.id_map_lock);
- spin_unlock(&sriov->id_map_lock);
- } while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL));
- /*the function idr_get_new_above can return -ENOSPC, so don't insert in that case.*/
- if (!ret) {
- spin_lock(&sriov->id_map_lock);
+ ret = idr_alloc(&sriov->pv_id_table, ent, next_id, 0, GFP_NOWAIT);
+ if (ret >= 0) {
+ next_id = max(ret + 1, 0);
+ ent->pv_cm_id = (u32)ret;
+ sl_id_map_add(ibdev, ent);
list_add_tail(&ent->list, &sriov->cm_list);
- spin_unlock(&sriov->id_map_lock);
- return ent;
}
+
+ spin_unlock(&sriov->id_map_lock);
+ idr_preload_end();
+
+ if (ret >= 0)
+ return ent;
+
/*error flow*/
kfree(ent);
mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index c4e0131f1b57..48928c8e7774 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -51,18 +51,6 @@ static DEFINE_IDR(ocrdma_dev_id);
static union ib_gid ocrdma_zero_sgid;
-static int ocrdma_get_instance(void)
-{
- int instance = 0;
-
- /* Assign an unused number */
- if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL))
- return -1;
- if (idr_get_new(&ocrdma_dev_id, NULL, &instance))
- return -1;
- return instance;
-}
-
void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
{
u8 mac_addr[6];
@@ -416,7 +404,7 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
goto idr_err;
memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
- dev->id = ocrdma_get_instance();
+ dev->id = idr_alloc(&ocrdma_dev_id, NULL, 0, 0, GFP_KERNEL);
if (dev->id < 0)
goto idr_err;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index ddf066d9abb6..50e33aa0b4e3 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1060,22 +1060,23 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
struct qib_devdata *dd;
int ret;
- if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
- dd = ERR_PTR(-ENOMEM);
- goto bail;
- }
-
dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);
if (!dd) {
dd = ERR_PTR(-ENOMEM);
goto bail;
}
+ idr_preload(GFP_KERNEL);
spin_lock_irqsave(&qib_devs_lock, flags);
- ret = idr_get_new(&qib_unit_table, dd, &dd->unit);
- if (ret >= 0)
+
+ ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT);
+ if (ret >= 0) {
+ dd->unit = ret;
list_add(&dd->list, &qib_dev_list);
+ }
+
spin_unlock_irqrestore(&qib_devs_lock, flags);
+ idr_preload_end();
if (ret < 0) {
qib_early_err(&pdev->dev,
@@ -1180,11 +1181,6 @@ static int __init qlogic_ib_init(void)
* the PCI subsystem.
*/
idr_init(&qib_unit_table);
- if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
- pr_err("idr_pre_get() failed\n");
- ret = -ENOMEM;
- goto bail_cq_wq;
- }
ret = pci_register_driver(&qib_driver);
if (ret < 0) {
@@ -1199,7 +1195,6 @@ static int __init qlogic_ib_init(void)
bail_unit:
idr_destroy(&qib_unit_table);
-bail_cq_wq:
destroy_workqueue(qib_cq_wq);
bail_dev:
qib_dev_cleanup();