author     Stefan Roscher <ossrosch@linux.vnet.ibm.com>  2008-04-30 00:46:53 +0400
committer  Roland Dreier <rolandd@cisco.com>  2008-04-30 00:46:53 +0400
commit     d227fa7288adebe5ba37fa8e4a589c977d4e4a34 (patch)
tree       8e0f080fc78f0342b5fd2531615ea525333d8d25 /drivers/infiniband/hw/ehca/ehca_qp.c
parent     f56bcd8013566d4ad4759ae5fc85a6660e4655c7 (diff)
download   linux-d227fa7288adebe5ba37fa8e4a589c977d4e4a34.tar.xz
IB/ehca: Allocate event queue size depending on max number of CQs and QPs
If a lot of QPs fall into Error state at once and the EQ of the respective HCA is too small, it might overrun, causing the eHCA driver to stop processing completion events and calling the application's completion handlers, effectively causing traffic to stop.

Fix this by limiting available QPs and CQs to a customizable max count, and determining EQ size based on these counts and a worst-case assumption.

Signed-off-by: Stefan Roscher <stefan.roscher@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_qp.c')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c  26
1 file changed, 24 insertions(+), 2 deletions(-)
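
For context, the core pattern of this patch is a reservation counter: atomically take a slot before doing any work, refuse creation once the configured limit is reached, and give the slot back on every early-return error path. The following is a minimal userspace C sketch of that pattern, not the driver code itself; the names add_unless, num_qps, qp_limit and create_qp are illustrative stand-ins for atomic_add_unless(), shca->num_qps and the ehca_max_qp module parameter.

/*
 * Userspace sketch of the "reserve a slot or fail" guard used in the patch.
 * Illustrative only; the real driver uses the kernel's atomic_add_unless().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int num_qps;     /* counts live QPs, like shca->num_qps   */
static int qp_limit = 4;       /* stands in for the module parameter    */

/* Add 1 to *v unless it already equals limit; mirrors atomic_add_unless(). */
static bool add_unless(atomic_int *v, int limit)
{
    int old = atomic_load(v);

    while (old != limit) {
        if (atomic_compare_exchange_weak(v, &old, old + 1))
            return true;       /* slot reserved */
        /* old was refreshed by the failed CAS; retry */
    }
    return false;              /* limit reached, counter unchanged */
}

static int create_qp(bool bad_attrs)
{
    if (!add_unless(&num_qps, qp_limit)) {
        fprintf(stderr, "max number of %d QPs reached\n", qp_limit);
        return -1;             /* -ENOSPC in the driver */
    }

    if (bad_attrs) {           /* any later failure must undo the reservation */
        atomic_fetch_sub(&num_qps, 1);
        return -1;             /* -EINVAL in the driver */
    }

    return 0;                  /* QP "created"; the destroy path decrements */
}

int main(void)
{
    for (int i = 0; i < 6; i++)
        printf("create_qp #%d -> %d (num_qps=%d)\n",
               i, create_qp(false), atomic_load(&num_qps));
    return 0;
}

Because the counter is incremented before any validation, every error path after the reservation has to release it again, which is exactly why the diff below adds an atomic_dec(&shca->num_qps) in front of each ERR_PTR() return and in internal_destroy_qp().
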
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 57bef1152cc2..18fba92fa7ae 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -421,8 +421,18 @@ static struct ehca_qp *internal_create_qp(
u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
unsigned long flags;
- if (init_attr->create_flags)
+ if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) {
+ ehca_err(pd->device, "Unable to create QP, max number of %i "
+ "QPs reached.", ehca_max_qp);
+ ehca_err(pd->device, "To increase the maximum number of QPs "
+ "use the number_of_qps module parameter.\n");
+ return ERR_PTR(-ENOSPC);
+ }
+
+ if (init_attr->create_flags) {
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
+ }
memset(&parms, 0, sizeof(parms));
qp_type = init_attr->qp_type;
@@ -431,6 +441,7 @@ static struct ehca_qp *internal_create_qp(
init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
init_attr->sq_sig_type);
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
@@ -455,6 +466,7 @@ static struct ehca_qp *internal_create_qp(
if (is_llqp && has_srq) {
ehca_err(pd->device, "LLQPs can't have an SRQ");
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
@@ -466,6 +478,7 @@ static struct ehca_qp *internal_create_qp(
ehca_err(pd->device, "no more than three SGEs "
"supported for SRQ pd=%p max_sge=%x",
pd, init_attr->cap.max_recv_sge);
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
}
@@ -477,6 +490,7 @@ static struct ehca_qp *internal_create_qp(
qp_type != IB_QPT_SMI &&
qp_type != IB_QPT_GSI) {
ehca_err(pd->device, "wrong QP Type=%x", qp_type);
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
@@ -490,6 +504,7 @@ static struct ehca_qp *internal_create_qp(
"or max_rq_wr=%x for RC LLQP",
init_attr->cap.max_send_wr,
init_attr->cap.max_recv_wr);
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
break;
@@ -497,6 +512,7 @@ static struct ehca_qp *internal_create_qp(
if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
ehca_err(pd->device, "UD LLQP not supported "
"by this adapter");
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-ENOSYS);
}
if (!(init_attr->cap.max_send_sge <= 5
@@ -508,20 +524,22 @@ static struct ehca_qp *internal_create_qp(
"or max_recv_sge=%x for UD LLQP",
init_attr->cap.max_send_sge,
init_attr->cap.max_recv_sge);
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
} else if (init_attr->cap.max_send_wr > 255) {
ehca_err(pd->device,
"Invalid Number of "
"max_send_wr=%x for UD QP_TYPE=%x",
init_attr->cap.max_send_wr, qp_type);
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
break;
default:
ehca_err(pd->device, "unsupported LL QP Type=%x",
qp_type);
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
- break;
}
} else {
int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
@@ -533,6 +551,7 @@ static struct ehca_qp *internal_create_qp(
"send_sge=%x recv_sge=%x max_sge=%x",
init_attr->cap.max_send_sge,
init_attr->cap.max_recv_sge, max_sge);
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-EINVAL);
}
}
@@ -543,6 +562,7 @@ static struct ehca_qp *internal_create_qp(
my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
if (!my_qp) {
ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
+ atomic_dec(&shca->num_qps);
return ERR_PTR(-ENOMEM);
}
@@ -823,6 +843,7 @@ create_qp_exit1:
create_qp_exit0:
kmem_cache_free(qp_cache, my_qp);
+ atomic_dec(&shca->num_qps);
return ERR_PTR(ret);
}
@@ -1948,6 +1969,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
if (HAS_SQ(my_qp))
ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
kmem_cache_free(qp_cache, my_qp);
+ atomic_dec(&shca->num_qps);
return 0;
}