author      Steve Wise <swise@opengridcomputing.com>    2013-08-06 19:34:35 +0400
committer   Roland Dreier <roland@purestorage.com>      2013-08-13 22:55:45 +0400
commit      1cf24dcef4e1dd0c34d8c39b09a9ce9a01accc72 (patch)
tree        31fc9b6019990693eac87713c15a97b5571932ee /drivers/infiniband/hw/cxgb4/t4.h
parent      97d7ec0c410e89ece852e768b8bfd42d4d5822fd (diff)
download    linux-1cf24dcef4e1dd0c34d8c39b09a9ce9a01accc72.tar.xz
RDMA/cxgb4: Fix QP flush logic
This patch makes the following fixes in the QP flush logic:
- correctly flushes unsignaled WRs followed by a signaled WR
- supports flushing a CQ bound to multiple QPs
- resets flush_cidx if an active queue starts getting HW CQEs again
- marks the WQ in error when we leave RTS. This was only being done for
  user queues, but we need it for kernel queues too so that
  post_send/post_recv start returning the appropriate error
  synchronously
- eats unsignaled read response CQEs. The HW always inserts CQEs, so we
  must silently discard them if the read work request was unsignaled.
- handles QP flushes with pending SW CQEs. The flush and out-of-order
  completion logic had a bug where, if out-of-order completions were
  flushed but not yet polled by the consumer and the QP was then
  flushed, we ended up inserting duplicate completions.
- c4iw_flush_sq() should only flush WRs that have not already been
  flushed. Since we already track where in the SQ we've flushed via
  sq.flush_cidx, just start at that point and flush any remaining WRs.
  This bug only caused a problem in the presence of unsignaled work
  requests (a sketch of this idea follows this list).
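
To illustrate the last point, here is a minimal sketch of flushing only
the not-yet-flushed tail of the SQ, starting at sq.flush_cidx. This is
not the actual c4iw_flush_sq() from drivers/infiniband/hw/cxgb4/cq.c;
insert_flushed_cqe() is a hypothetical helper standing in for the code
that stages a flushed completion in the software CQ:

/* hypothetical helper: stage a flushed completion in the SW CQ */
static void insert_flushed_cqe(struct t4_wq *wq, struct t4_cq *cq,
			       struct t4_swsqe *swsqe);

static int example_flush_sq(struct t4_wq *wq, struct t4_cq *cq)
{
	int flushed = 0;
	int idx;

	/* flush_cidx == -1 means nothing has been flushed yet */
	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;

	/* walk only the WRs that have not already been flushed */
	for (idx = wq->sq.flush_cidx; idx != wq->sq.pidx;) {
		struct t4_swsqe *swsqe = &wq->sq.sw_sq[idx];

		swsqe->flushed = 1;
		insert_flushed_cqe(wq, cq, swsqe);
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}

	/* remember how far we have flushed */
	wq->sq.flush_cidx = wq->sq.pidx;
	return flushed;
}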
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Vipul Pandya <vipul@chelsio.com>
[ Fixed sparse warning due to htonl/ntohl confusion. - Roland ]
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/t4.h')
-rw-r--r--   drivers/infiniband/hw/cxgb4/t4.h   25
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index ebcb03bd1b72..3a6a289b9d3e 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -36,9 +36,9 @@
 #include "t4_msg.h"
 #include "t4fw_ri_api.h"
 
-#define T4_MAX_NUM_QP (1<<16)
-#define T4_MAX_NUM_CQ (1<<15)
-#define T4_MAX_NUM_PD (1<<15)
+#define T4_MAX_NUM_QP 65536
+#define T4_MAX_NUM_CQ 65536
+#define T4_MAX_NUM_PD 65536
 #define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
 #define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
 #define T4_MAX_IQ_SIZE (65520 - 1)
@@ -269,6 +269,7 @@ struct t4_swsqe {
 	int			complete;
 	int			signaled;
 	u16			idx;
+	int			flushed;
 };
 
 static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
@@ -300,6 +301,7 @@ struct t4_sq {
 	u16 pidx;
 	u16 wq_pidx;
 	u16 flags;
+	short flush_cidx;
 };
 
 struct t4_swrqe {
@@ -330,6 +332,7 @@ struct t4_wq {
 	void __iomem *db;
 	void __iomem *gts;
 	struct c4iw_rdev *rdev;
+	int flushed;
 };
 
 static inline int t4_rqes_posted(struct t4_wq *wq)
@@ -412,6 +415,9 @@ static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
 
 static inline void t4_sq_consume(struct t4_wq *wq)
 {
+	BUG_ON(wq->sq.in_use < 1);
+	if (wq->sq.cidx == wq->sq.flush_cidx)
+		wq->sq.flush_cidx = -1;
 	wq->sq.in_use--;
 	if (++wq->sq.cidx == wq->sq.size)
 		wq->sq.cidx = 0;
@@ -505,12 +511,18 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
 static inline void t4_swcq_produce(struct t4_cq *cq)
 {
 	cq->sw_in_use++;
+	if (cq->sw_in_use == cq->size) {
+		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+		cq->error = 1;
+		BUG_ON(1);
+	}
 	if (++cq->sw_pidx == cq->size)
 		cq->sw_pidx = 0;
 }
 
 static inline void t4_swcq_consume(struct t4_cq *cq)
 {
+	BUG_ON(cq->sw_in_use < 1);
 	cq->sw_in_use--;
 	if (++cq->sw_cidx == cq->size)
 		cq->sw_cidx = 0;
@@ -552,6 +564,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 		ret = -EOVERFLOW;
 		cq->error = 1;
 		printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+		BUG_ON(1);
 	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
 		*cqe = &cq->queue[cq->cidx];
 		ret = 0;
@@ -562,6 +575,12 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 
 static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
 {
+	if (cq->sw_in_use == cq->size) {
+		PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+		cq->error = 1;
+		BUG_ON(1);
+		return NULL;
+	}
 	if (cq->sw_in_use)
 		return &cq->sw_queue[cq->sw_cidx];
 	return NULL;
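
For context on how the new checks are exercised, here is a minimal
sketch of a consumer poll loop, assuming the t4.h helpers shown above
plus t4_hwcq_consume() from the same header; it is not the actual
c4iw_poll_cq() logic from cq.c. Staged SW (flushed) CQEs are drained
before the HW CQ is touched, which is what makes the sw_in_use
accounting above meaningful:

static int example_poll_one(struct t4_cq *cq, struct t4_cqe *out)
{
	struct t4_cqe *cqe;

	/* drain staged SW (flushed) CQEs before touching the HW queue */
	cqe = t4_next_sw_cqe(cq);
	if (cqe) {
		*out = *cqe;
		t4_swcq_consume(cq);	/* underflow trips the BUG_ON() above */
		return 0;
	}

	/* then look at the HW CQ; nonzero means empty or overflowed */
	if (t4_next_hw_cqe(cq, &cqe))
		return -EAGAIN;
	*out = *cqe;
	t4_hwcq_consume(cq);
	return 0;
}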