Diffstat (limited to 'drivers/infiniband/hw/cxgb4/qp.c')
-rw-r--r--    drivers/infiniband/hw/cxgb4/qp.c    217
1 file changed, 113 insertions(+), 104 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 582936708e6e..086f62f5dc9e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -212,13 +212,23 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
wq->db = rdev->lldi.db_reg;
wq->gts = rdev->lldi.gts_reg;
- if (user) {
- wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
- (wq->sq.qid << rdev->qpshift);
- wq->sq.udb &= PAGE_MASK;
- wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
- (wq->rq.qid << rdev->qpshift);
- wq->rq.udb &= PAGE_MASK;
+ if (user || is_t5(rdev->lldi.adapter_type)) {
+ u32 off;
+
+ off = (wq->sq.qid << rdev->qpshift) & PAGE_MASK;
+ if (user) {
+ wq->sq.udb = (u64 __iomem *)(rdev->bar2_pa + off);
+ } else {
+ off += 128 * (wq->sq.qid & rdev->qpmask) + 8;
+ wq->sq.udb = (u64 __iomem *)(rdev->bar2_kva + off);
+ }
+ off = (wq->rq.qid << rdev->qpshift) & PAGE_MASK;
+ if (user) {
+ wq->rq.udb = (u64 __iomem *)(rdev->bar2_pa + off);
+ } else {
+ off += 128 * (wq->rq.qid & rdev->qpmask) + 8;
+ wq->rq.udb = (u64 __iomem *)(rdev->bar2_kva + off);
+ }
}
wq->rdev = rdev;
wq->rq.msn = 1;
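
The hunk above computes the BAR2 doorbell address for both user and kernel queues: the page-aligned part of the offset comes from qid << qpshift, and kernel mappings additionally step 128 bytes per queue within the page, plus an 8-byte displacement to land on the write-combining doorbell word. A minimal sketch of that math using the field names from the diff (the standalone helper itself is hypothetical, for illustration only):

static u64 __iomem *qid_to_bar2_db(struct c4iw_rdev *rdev, u32 qid, int user)
{
	/* Page-aligned doorbell offset for this QID. */
	u32 off = (qid << rdev->qpshift) & PAGE_MASK;

	if (user)
		return (u64 __iomem *)(rdev->bar2_pa + off);

	/* Kernel QPs: 128 bytes per queue within the page, +8 to hit
	 * the write-combining doorbell word. */
	off += 128 * (qid & rdev->qpmask) + 8;
	return (u64 __iomem *)(rdev->bar2_kva + off);
}
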
@@ -299,9 +309,10 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
if (ret)
goto free_dma;
- PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
+ PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%lx rqudb 0x%lx\n",
__func__, wq->sq.qid, wq->rq.qid, wq->db,
- (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);
+ (__force unsigned long) wq->sq.udb,
+ (__force unsigned long) wq->rq.udb);
return 0;
free_dma:
@@ -425,6 +436,8 @@ static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
default:
return -EINVAL;
}
+ wqe->send.r3 = 0;
+ wqe->send.r4 = 0;
plen = 0;
if (wr->num_sge) {
@@ -555,7 +568,8 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
int rem;
- if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
+ if (wr->wr.fast_reg.page_list_len >
+ t4_max_fr_depth(use_dsgl))
return -EINVAL;
wqe->fr.qpbinde_to_dcacpu = 0;
@@ -638,6 +652,48 @@ void c4iw_qp_rem_ref(struct ib_qp *qp)
wake_up(&(to_c4iw_qp(qp)->wait));
}
+static void add_to_fc_list(struct list_head *head, struct list_head *entry)
+{
+ if (list_empty(entry))
+ list_add_tail(entry, head);
+}
+
+static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qhp->rhp->lock, flags);
+ spin_lock(&qhp->lock);
+ if (qhp->rhp->db_state == NORMAL)
+ t4_ring_sq_db(&qhp->wq, inc,
+ is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
+ else {
+ add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+ qhp->wq.sq.wq_pidx_inc += inc;
+ }
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+ return 0;
+}
+
+static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qhp->rhp->lock, flags);
+ spin_lock(&qhp->lock);
+ if (qhp->rhp->db_state == NORMAL)
+ t4_ring_rq_db(&qhp->wq, inc,
+ is_t5(qhp->rhp->rdev.lldi.adapter_type), NULL);
+ else {
+ add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
+ qhp->wq.rq.wq_pidx_inc += inc;
+ }
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&qhp->rhp->lock, flags);
+ return 0;
+}
+
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
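
The new ring_kernel_sq_db()/ring_kernel_rq_db() helpers above defer doorbell rings onto db_fc_list whenever the device is not in the NORMAL doorbell state. add_to_fc_list() can test membership with list_empty() on the entry itself only because the entry stays self-linked while unqueued; a minimal sketch of that invariant (hypothetical names):

#include <linux/list.h>

struct fc_node {
	struct list_head entry;
};

static void fc_node_init(struct fc_node *n)
{
	/* Self-linked, so list_empty(&n->entry) reads as "not queued". */
	INIT_LIST_HEAD(&n->entry);
}

static void fc_node_unlink(struct fc_node *n)
{
	/* list_del_init() re-self-links the entry, so a later
	 * add_to_fc_list()-style list_empty() check still works. */
	list_del_init(&n->entry);
}

The diff maintains the same invariant for qhp->db_fc_entry: INIT_LIST_HEAD() in c4iw_create_qp() and list_del_init() in c4iw_destroy_qp() below.
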
@@ -646,7 +702,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
enum fw_wr_opcodes fw_opcode = 0;
enum fw_ri_wr_flags fw_flags;
struct c4iw_qp *qhp;
- union t4_wr *wqe;
+ union t4_wr *wqe = NULL;
u32 num_wrs;
struct t4_swsqe *swsqe;
unsigned long flag;
@@ -675,7 +731,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
fw_flags = 0;
if (wr->send_flags & IB_SEND_SOLICITED)
fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
- if (wr->send_flags & IB_SEND_SIGNALED)
+ if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
fw_flags |= FW_RI_COMPLETION_FLAG;
swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
switch (wr->opcode) {
@@ -736,7 +792,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
swsqe->idx = qhp->wq.sq.pidx;
swsqe->complete = 0;
- swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
+ swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
+ qhp->sq_sig_all;
swsqe->flushed = 0;
swsqe->wr_id = wr->wr_id;
@@ -750,9 +807,14 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
t4_sq_produce(&qhp->wq, len16);
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
}
- if (t4_wq_db_enabled(&qhp->wq))
- t4_ring_sq_db(&qhp->wq, idx);
- spin_unlock_irqrestore(&qhp->lock, flag);
+ if (!qhp->rhp->rdev.status_page->db_off) {
+ t4_ring_sq_db(&qhp->wq, idx,
+ is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ } else {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ring_kernel_sq_db(qhp, idx);
+ }
return err;
}
@@ -761,7 +823,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
{
int err = 0;
struct c4iw_qp *qhp;
- union t4_recv_wr *wqe;
+ union t4_recv_wr *wqe = NULL;
u32 num_wrs;
u8 len16 = 0;
unsigned long flag;
@@ -812,9 +874,14 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
wr = wr->next;
num_wrs--;
}
- if (t4_wq_db_enabled(&qhp->wq))
- t4_ring_rq_db(&qhp->wq, idx);
- spin_unlock_irqrestore(&qhp->lock, flag);
+ if (!qhp->rhp->rdev.status_page->db_off) {
+ t4_ring_rq_db(&qhp->wq, idx,
+ is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ } else {
+ spin_unlock_irqrestore(&qhp->lock, flag);
+ ring_kernel_rq_db(qhp, idx);
+ }
return err;
}
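
Both post paths now branch on the status page's db_off flag, and the two arms deliberately unlock in different orders. A sketch of the shape, shown for the send side (not the driver's literal code):

	if (!qhp->rhp->rdev.status_page->db_off) {
		/* Doorbells on: ring while still holding qhp->lock. */
		t4_ring_sq_db(&qhp->wq, idx,
			      is_t5(qhp->rhp->rdev.lldi.adapter_type), wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		/* Doorbells off: drop qhp->lock first. The helper
		 * re-takes qhp->lock under rhp->lock, so calling it
		 * here while holding qhp->lock would self-deadlock
		 * and invert the rhp->lock -> qhp->lock ordering. */
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_sq_db(qhp, idx);
	}
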
@@ -1200,35 +1267,6 @@ out:
return ret;
}
-/*
- * Called by the library when the qp has user dbs disabled due to
- * a DB_FULL condition. This function will single-thread all user
- * DB rings to avoid overflowing the hw db-fifo.
- */
-static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
-{
- int delay = db_delay_usecs;
-
- mutex_lock(&qhp->rhp->db_mutex);
- do {
-
- /*
- * The interrupt threshold is dbfifo_int_thresh << 6. So
- * make sure we don't cross that and generate an interrupt.
- */
- if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
- (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
- writel(QID(qid) | PIDX(inc), qhp->wq.db);
- break;
- }
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(usecs_to_jiffies(delay));
- delay = min(delay << 1, 2000);
- } while (1);
- mutex_unlock(&qhp->rhp->db_mutex);
- return 0;
-}
-
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
enum c4iw_qp_attr_mask mask,
struct c4iw_qp_attributes *attrs,
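
The removed ring_kernel_db() single-threaded recovery rings through db_mutex and polled the hardware doorbell FIFO before each write. Note the headroom in the removed check: the interrupt fires at dbfifo_int_thresh << 6, but the write was only issued while the count stayed below dbfifo_int_thresh << 5, half the interrupt threshold. The backoff doubled per iteration and capped at 2 ms; with db_delay_usecs at, say, 100 (an illustrative value), the waits run 100, 200, 400, 800, 1600, 2000, 2000, ... microseconds:

	delay = min(delay << 1, 2000);	/* 100 -> 200 -> ... -> 2000 cap */

Its replacements, ring_kernel_sq_db() and ring_kernel_rq_db() above, drop the polling entirely and instead queue the QP on db_fc_list for deferred ringing.
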
@@ -1278,11 +1316,11 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
if (mask & C4IW_QP_ATTR_SQ_DB) {
- ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
+ ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
goto out;
}
if (mask & C4IW_QP_ATTR_RQ_DB) {
- ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
+ ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
goto out;
}
@@ -1332,6 +1370,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
switch (attrs->next_state) {
case C4IW_QP_STATE_CLOSING:
BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
+ t4_set_wq_in_error(&qhp->wq);
set_state(qhp, C4IW_QP_STATE_CLOSING);
ep = qhp->ep;
if (!internal) {
@@ -1339,30 +1378,30 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
disconnect = 1;
c4iw_get_ep(&qhp->ep->com);
}
- t4_set_wq_in_error(&qhp->wq);
ret = rdma_fini(rhp, qhp, ep);
if (ret)
goto err;
break;
case C4IW_QP_STATE_TERMINATE:
+ t4_set_wq_in_error(&qhp->wq);
set_state(qhp, C4IW_QP_STATE_TERMINATE);
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
- t4_set_wq_in_error(&qhp->wq);
ep = qhp->ep;
- disconnect = 1;
- if (!internal)
+ if (!internal) {
+ c4iw_get_ep(&qhp->ep->com);
terminate = 1;
- else {
+ disconnect = 1;
+ } else {
+ terminate = qhp->attr.send_term;
ret = rdma_fini(rhp, qhp, ep);
if (ret)
goto err;
}
- c4iw_get_ep(&qhp->ep->com);
break;
case C4IW_QP_STATE_ERROR:
- set_state(qhp, C4IW_QP_STATE_ERROR);
t4_set_wq_in_error(&qhp->wq);
+ set_state(qhp, C4IW_QP_STATE_ERROR);
if (!internal) {
abort = 1;
disconnect = 1;
@@ -1465,14 +1504,6 @@ out:
return ret;
}
-static int enable_qp_db(int id, void *p, void *data)
-{
- struct c4iw_qp *qp = p;
-
- t4_enable_wq_db(&qp->wq);
- return 0;
-}
-
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
struct c4iw_dev *rhp;
@@ -1490,22 +1521,15 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
wait_event(qhp->wait, !qhp->ep);
- spin_lock_irq(&rhp->lock);
- remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
- rhp->qpcnt--;
- BUG_ON(rhp->qpcnt < 0);
- if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
- rhp->rdev.stats.db_state_transitions++;
- rhp->db_state = NORMAL;
- idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
- }
- if (db_coalescing_threshold >= 0)
- if (rhp->qpcnt <= db_coalescing_threshold)
- cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
- spin_unlock_irq(&rhp->lock);
+ remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
atomic_dec(&qhp->refcnt);
wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
+ spin_lock_irq(&rhp->lock);
+ if (!list_empty(&qhp->db_fc_entry))
+ list_del_init(&qhp->db_fc_entry);
+ spin_unlock_irq(&rhp->lock);
+
ucontext = ib_qp->uobject ?
to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
destroy_qp(&rhp->rdev, &qhp->wq,
@@ -1516,14 +1540,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
return 0;
}
-static int disable_qp_db(int id, void *p, void *data)
-{
- struct c4iw_qp *qp = p;
-
- t4_disable_wq_db(&qp->wq);
- return 0;
-}
-
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct ib_udata *udata)
{
@@ -1533,7 +1549,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
struct c4iw_cq *schp;
struct c4iw_cq *rchp;
struct c4iw_create_qp_resp uresp;
- int sqsize, rqsize;
+ unsigned int sqsize, rqsize;
struct c4iw_ucontext *ucontext;
int ret;
struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
@@ -1605,25 +1621,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.enable_bind = 1;
qhp->attr.max_ord = 1;
qhp->attr.max_ird = 1;
+ qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
spin_lock_init(&qhp->lock);
mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
atomic_set(&qhp->refcnt, 1);
- spin_lock_irq(&rhp->lock);
- if (rhp->db_state != NORMAL)
- t4_disable_wq_db(&qhp->wq);
- rhp->qpcnt++;
- if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
- rhp->rdev.stats.db_state_transitions++;
- rhp->db_state = FLOW_CONTROL;
- idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
- }
- if (db_coalescing_threshold >= 0)
- if (rhp->qpcnt > db_coalescing_threshold)
- cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
- ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
- spin_unlock_irq(&rhp->lock);
+ ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret)
goto err2;
@@ -1692,11 +1696,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
insert_mmap(ucontext, mm2);
mm3->key = uresp.sq_db_gts_key;
- mm3->addr = qhp->wq.sq.udb;
+ mm3->addr = (__force unsigned long) qhp->wq.sq.udb;
mm3->len = PAGE_SIZE;
insert_mmap(ucontext, mm3);
mm4->key = uresp.rq_db_gts_key;
- mm4->addr = qhp->wq.rq.udb;
+ mm4->addr = (__force unsigned long) qhp->wq.rq.udb;
mm4->len = PAGE_SIZE;
insert_mmap(ucontext, mm4);
if (mm5) {
@@ -1709,6 +1713,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
+ INIT_LIST_HEAD(&qhp->db_fc_entry);
PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
__func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
qhp->wq.sq.qid);
@@ -1772,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
/*
* Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
* ringing the queue db when we're in DB_FULL mode.
+ * Only allow this on T4 devices.
*/
attrs.sq_db_inc = attr->sq_psn;
attrs.rq_db_inc = attr->rq_psn;
mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+ if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+ (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+ return -EINVAL;
return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}
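
As the comment in this last hunk notes, the verbs SQ_PSN/RQ_PSN attributes are overloaded by the userspace library to pass doorbell index increments while recovering from DB_FULL, and the new check rejects that path on T5, whose BAR2 doorbells make kernel-mediated recovery unnecessary. A sketch of how a user library would invoke it (libcxgb4's actual recovery code may differ):

#include <infiniband/verbs.h>

static int ring_db_via_kernel(struct ibv_qp *qp, uint16_t sq_inc,
			      uint16_t rq_inc)
{
	struct ibv_qp_attr attr = {
		.sq_psn = sq_inc,	/* reused as SQ doorbell IDX_INC */
		.rq_psn = rq_inc,	/* reused as RQ doorbell IDX_INC */
	};

	/* Maps to C4IW_QP_ATTR_SQ_DB/RQ_DB in c4iw_ib_modify_qp();
	 * after this change it fails with EINVAL on T5 devices. */
	return ibv_modify_qp(qp, &attr, IBV_QP_SQ_PSN | IBV_QP_RQ_PSN);
}
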