path: root/include/rdma/rdmavt_qp.h
Diffstat (limited to 'include/rdma/rdmavt_qp.h')
-rw-r--r--  include/rdma/rdmavt_qp.h  312
1 file changed, 251 insertions(+), 61 deletions(-)
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 68e38c20afc0..0eeea520a853 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -2,7 +2,7 @@
#define DEF_RDMAVT_INCQP_H
/*
- * Copyright(c) 2016 - 2018 Intel Corporation.
+ * Copyright(c) 2016 - 2019 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -52,6 +52,7 @@
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
+#include <rdma/rvt-abi.h>
/*
* Atomic bit definitions for r_aflags.
*/
@@ -156,6 +157,22 @@
#define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1)
+/**
+ * rvt_ud_wr - IB UD work plus AH cache
+ * @wr: valid IB work request
+ * @attr: pointer to an allocated AH attribute
+ *
+ * Special case the UD WR so we can keep track of the AH attributes.
+ *
+ * NOTE: This data structure is strictly ordered wr then attr, i.e. the attr
+ * MUST come after wr. The ib_ud_wr is sized and copied in rvt_post_one_wr.
+ * The copy assumes that wr is first.
+ */
+struct rvt_ud_wr {
+ struct ib_ud_wr wr;
+ struct rdma_ah_attr *attr;
+};
+
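
For context, a minimal sketch of the kind of copy the NOTE above is protecting (the helper name is hypothetical and this is not the in-tree rvt_post_one_wr()): only sizeof(struct ib_ud_wr) bytes are copied into the swqe, so the cached attr pointer that follows wr is left untouched and filled in separately.

	static void example_copy_ud_wr(struct rvt_swqe *swqe,
				       const struct ib_ud_wr *ud_wr)
	{
		/* valid only because ud_wr.wr is the first member of rvt_ud_wr */
		memcpy(&swqe->ud_wr, ud_wr, sizeof(*ud_wr));
		/* swqe->ud_wr.attr is filled in later from a cloned AH attribute */
	}
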
/*
* Send work request queue entry.
* The size of the sg_list is determined when the QP is created and stored
@@ -164,7 +181,7 @@
struct rvt_swqe {
union {
struct ib_send_wr wr; /* don't use wr.sg_list */
- struct ib_ud_wr ud_wr;
+ struct rvt_ud_wr ud_wr;
struct ib_reg_wr reg_wr;
struct ib_rdma_wr rdma_wr;
struct ib_atomic_wr atomic_wr;
@@ -177,33 +194,84 @@ struct rvt_swqe {
struct rvt_sge sg_list[0];
};
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP (or SRQ) is created
- * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
+/**
+ * struct rvt_krwq - kernel receive work request queue
+ * @p_lock: lock to protect producer of the kernel buffer
+ * @head: index of next entry to fill
+ * @c_lock: lock to protect consumer of the kernel buffer
+ * @tail: index of next entry to pull
+ * @count: approximate count of total receive entries posted
+ * @wq: array of receive work request queue entries
+ *
+ * This structure is used to contain the head pointer,
+ * tail pointer and receive work queue entries for kernel
+ * mode users.
*/
-struct rvt_rwqe {
- u64 wr_id;
- u8 num_sge;
- struct ib_sge sg_list[0];
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
- */
-struct rvt_rwq {
+struct rvt_krwq {
+ spinlock_t p_lock; /* protect producer */
u32 head; /* new work requests posted to the head */
+
+ /* protect consumer */
+ spinlock_t c_lock ____cacheline_aligned_in_smp;
u32 tail; /* receives pull requests from here. */
- struct rvt_rwqe wq[0];
+ u32 count; /* approx count of receive entries posted */
+ struct rvt_rwqe *curr_wq;
+ struct rvt_rwqe wq[];
};
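
As an illustration of the producer/consumer split described above, a hedged sketch (hypothetical helper, not the in-tree rvt_post_recv()) of posting one receive entry under p_lock while the completion path drains entries under c_lock:

	static int example_post_one_rwqe(struct rvt_rq *rq, u64 wr_id)
	{
		struct rvt_krwq *kwq = rq->kwq;
		struct rvt_rwqe *wqe;
		unsigned long flags;
		u32 next;

		spin_lock_irqsave(&kwq->p_lock, flags);
		next = kwq->head + 1;
		if (next >= rq->size)
			next = 0;
		if (next == READ_ONCE(kwq->tail)) {	/* ring full */
			spin_unlock_irqrestore(&kwq->p_lock, flags);
			return -ENOMEM;
		}
		wqe = rvt_get_rwqe_ptr(rq, kwq->head);
		wqe->wr_id = wr_id;
		wqe->num_sge = 0;			/* sg_list copy omitted */
		/* publish the entry; the consumer reads head under c_lock */
		smp_store_release(&kwq->head, next);
		spin_unlock_irqrestore(&kwq->p_lock, flags);
		return 0;
	}
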
+/**
+ * rvt_get_swqe_ah - Return the pointer to the struct rvt_ah
+ * @swqe: valid Send WQE
+ *
+ */
+static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
+{
+ return ibah_to_rvtah(swqe->ud_wr.wr.ah);
+}
+
+/**
+ * rvt_get_swqe_ah_attr - Return the cached ah attribute information
+ * @swqe: valid Send WQE
+ *
+ */
+static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
+{
+ return swqe->ud_wr.attr;
+}
+
+/**
+ * rvt_get_swqe_remote_qpn - Access the remote QPN value
+ * @swqe: valid Send WQE
+ *
+ */
+static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
+{
+ return swqe->ud_wr.wr.remote_qpn;
+}
+
+/**
+ * rvt_get_swqe_remote_qkey - Access the remote qkey value
+ * @swqe: valid Send WQE
+ *
+ */
+static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
+{
+ return swqe->ud_wr.wr.remote_qkey;
+}
+
+/**
+ * rvt_get_swqe_pkey_index - Access the pkey index
+ * @swqe: valid Send WQE
+ *
+ */
+static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
+{
+ return swqe->ud_wr.wr.pkey_index;
+}
+
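
A brief, hypothetical usage sketch of the accessors above, roughly how a driver's UD send path might pull the destination information out of a swqe without reaching into ud_wr directly (the header-building step is elided):

	static void example_build_ud_headers(struct rvt_swqe *wqe)
	{
		struct rdma_ah_attr *attr = rvt_get_swqe_ah_attr(wqe);
		u32 dqpn = rvt_get_swqe_remote_qpn(wqe);
		u32 qkey = rvt_get_swqe_remote_qkey(wqe);
		u16 pkey_idx = rvt_get_swqe_pkey_index(wqe);

		/* ... build BTH/DETH from attr, dqpn, qkey and pkey_idx ... */
		(void)attr; (void)dqpn; (void)qkey; (void)pkey_idx;
	}
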
struct rvt_rq {
struct rvt_rwq *wq;
+ struct rvt_krwq *kwq;
u32 size; /* size of RWQE array */
u8 max_sge;
/* protect changes in this struct */
@@ -472,7 +540,7 @@ static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
return (struct rvt_rwqe *)
- ((char *)rq->wq->wq +
+ ((char *)rq->kwq->curr_wq +
(sizeof(struct rvt_rwqe) +
rq->max_sge * sizeof(struct ib_sge)) * n);
}
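
The pointer arithmetic above exists because each ring element is variable sized: a fixed rvt_rwqe header followed by max_sge scatter/gather entries, so plain wq[n] indexing would land in the wrong place. A small sketch of the stride (hypothetical helper):

	static inline size_t example_rwqe_stride(const struct rvt_rq *rq)
	{
		/* header plus this queue's fixed number of SGEs per entry */
		return sizeof(struct rvt_rwqe) + rq->max_sge * sizeof(struct ib_sge);
	}
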
@@ -565,42 +633,6 @@ static inline void rvt_qp_wqe_unreserve(
extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
-/**
- * rvt_qp_swqe_complete() - insert send completion
- * @qp - the qp
- * @wqe - the send wqe
- * @status - completion status
- *
- * Insert a send completion into the completion
- * queue if the qp indicates it should be done.
- *
- * See IBTA 10.7.3.1 for info on completion
- * control.
- */
-static inline void rvt_qp_swqe_complete(
- struct rvt_qp *qp,
- struct rvt_swqe *wqe,
- enum ib_wc_opcode opcode,
- enum ib_wc_status status)
-{
- if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
- return;
- if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
- (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
- status != IB_WC_SUCCESS) {
- struct ib_wc wc;
-
- memset(&wc, 0, sizeof(wc));
- wc.wr_id = wqe->wr.wr_id;
- wc.status = status;
- wc.opcode = opcode;
- wc.qp = &qp->ibqp;
- wc.byte_len = wqe->length;
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
- status != IB_WC_SUCCESS);
- }
-}
-
/*
* Compare the lower 24 bits of the msn values.
* Returns an integer <, ==, or > than zero.
@@ -734,7 +766,119 @@ static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
rvt_put_swqe(wqe);
if (qp->allowed_ops == IB_OPCODE_UD)
- atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
+ rdma_destroy_ah_attr(wqe->ud_wr.attr);
+}
+
+/**
+ * rvt_qp_swqe_incr - increment ring index
+ * @qp: the qp
+ * @val: the starting value
+ *
+ * Return: the new value wrapping as appropriate
+ */
+static inline u32
+rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
+{
+ if (++val >= qp->s_size)
+ val = 0;
+ return val;
+}
+
+int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
+
+/**
+ * rvt_recv_cq - add a new entry to the completion queue
+ * on behalf of the receive queue
+ * @qp: the qp owning the receive completion queue
+ * @wc: work completion entry to add
+ * @solicited: true if @wc is solicited
+ *
+ * This is a wrapper around rvt_cq_enter() for the receive queue.
+ * If rvt_cq_enter() returns false, the cq is full and the qp is
+ * put into the error state.
+ */
+static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
+ bool solicited)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);
+
+ if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
+ rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
+}
+
+/**
+ * rvt_send_cq - add a new entry to the completion queue
+ * on behalf of the send queue
+ * @qp: the qp owning the send completion queue
+ * @wc: work completion entry to add
+ * @solicited: true if @wc is solicited
+ *
+ * This is a wrapper around rvt_cq_enter() for the send queue.
+ * If rvt_cq_enter() returns false, the cq is full and the qp is
+ * put into the error state.
+ */
+static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
+ bool solicited)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);
+
+ if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
+ rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
+}
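
A hedged usage sketch (hypothetical helper) of the receive-side wrapper above: the caller builds an ib_wc and hands it off; if the CQ overflows, rvt_recv_cq() itself moves the QP to the error state, so there is no return value to check.

	static void example_complete_recv(struct rvt_qp *qp, u64 wr_id, u32 len)
	{
		struct ib_wc wc = {
			.wr_id = wr_id,
			.status = IB_WC_SUCCESS,
			.opcode = IB_WC_RECV,
			.qp = &qp->ibqp,
			.byte_len = len,
		};

		rvt_recv_cq(qp, &wc, false);	/* not solicited */
	}
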
+
+/**
+ * rvt_qp_complete_swqe - insert send completion
+ * @qp - the qp
+ * @wqe - the send wqe
+ * @opcode - wc operation (driver dependent)
+ * @status - completion status
+ *
+ * Update the s_last information, and then insert a send
+ * completion into the completion
+ * queue if the qp indicates it should be done.
+ *
+ * See IBTA 10.7.3.1 for info on completion
+ * control.
+ *
+ * Return: new last
+ */
+static inline u32
+rvt_qp_complete_swqe(struct rvt_qp *qp,
+ struct rvt_swqe *wqe,
+ enum ib_wc_opcode opcode,
+ enum ib_wc_status status)
+{
+ bool need_completion;
+ u64 wr_id;
+ u32 byte_len, last;
+ int flags = wqe->wr.send_flags;
+
+ rvt_put_qp_swqe(qp, wqe);
+
+ need_completion =
+ !(flags & RVT_SEND_RESERVE_USED) &&
+ (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
+ (flags & IB_SEND_SIGNALED) ||
+ status != IB_WC_SUCCESS);
+ if (need_completion) {
+ wr_id = wqe->wr.wr_id;
+ byte_len = wqe->length;
+ /* above fields required before writing s_last */
+ }
+ last = rvt_qp_swqe_incr(qp, qp->s_last);
+ /* see rvt_qp_is_avail() */
+ smp_store_release(&qp->s_last, last);
+ if (need_completion) {
+ struct ib_wc w = {
+ .wr_id = wr_id,
+ .status = status,
+ .opcode = opcode,
+ .qp = &qp->ibqp,
+ .byte_len = byte_len,
+ };
+ rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
+ }
+ return last;
}
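
For orientation, a hypothetical caller sketch of how a driver's send-completion path might retire the WQE at s_last with the new helper (in-tree callers differ in detail): rvt_qp_complete_swqe() drops the swqe references, advances s_last with a release store, and only then queues the completion.

	static void example_retire_send_wqe(struct rvt_qp *qp,
					    enum ib_wc_status status)
	{
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		u32 new_last;

		new_last = rvt_qp_complete_swqe(qp, wqe,
						ib_rvt_wc_opcode[wqe->wr.opcode],
						status);
		/* new_last is qp->s_last after the smp_store_release() above */
		(void)new_last;
	}
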
extern const int ib_rvt_state_ops[];
@@ -742,7 +886,6 @@ extern const int ib_rvt_state_ops[];
struct rvt_dev_info;
int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
void rvt_comm_est(struct rvt_qp *qp);
-int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
unsigned long rvt_rnr_tbl_to_usec(u32 index);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
@@ -784,6 +927,53 @@ struct rvt_qp_iter {
int n;
};
+/**
+ * ib_cq_tail - Return tail index of cq buffer
+ * @send_cq - The cq for send
+ *
+ * This is called in qp_iter_print() to get the tail
+ * of the cq buffer.
+ */
+static inline u32 ib_cq_tail(struct ib_cq *send_cq)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
+
+ return cq->ip ?
+ RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
+ cq->kqueue->tail;
+}
+
+/**
+ * ib_cq_head - Return head index of cq buffer
+ * @send_cq - The cq for send
+ *
+ * This is called in qp_iter_print() to get the head
+ * of the cq buffer.
+ */
+static inline u32 ib_cq_head(struct ib_cq *send_cq)
+{
+ struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
+
+ return cq->ip ?
+ RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
+ cq->kqueue->head;
+}
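
A small, hypothetical sketch of how a qp_iter_print()-style debug path might combine the two accessors above to report approximate CQ occupancy; cq_size is assumed to be the allocated ring size:

	static u32 example_cq_depth(struct ib_cq *send_cq, u32 cq_size)
	{
		u32 head = ib_cq_head(send_cq);
		u32 tail = ib_cq_tail(send_cq);

		return head >= tail ? head - tail : cq_size - (tail - head);
	}
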
+
+/**
+ * rvt_free_rq - free memory allocated for rvt_rq struct
+ * @rq: receive queue data structure
+ *
+ * This function should only be called if rvt_mmap_info()
+ * has not succeeded.
+ */
+static inline void rvt_free_rq(struct rvt_rq *rq)
+{
+ kvfree(rq->kwq);
+ rq->kwq = NULL;
+ vfree(rq->wq);
+ rq->wq = NULL;
+}
+
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
u64 v,
void (*cb)(struct rvt_qp *qp, u64 v));