Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c |   2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c             | 112
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c             |  14
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c         | 192
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c             |  55
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h       |  40
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c       |  14
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c             | 113
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h             |  19
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h    |   1
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c              |  48
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c             |   6
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c            | 295
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c             |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h         |  14
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c              |  48
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c              |  88
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c             |  26
18 files changed, 677 insertions(+), 412 deletions(-)
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 8af33cf1fc4e..2d5cbf4363e4 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -734,7 +734,7 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
/* change ethxxx to iwxxx */
strcpy(name, "iw");
strcat(name, &c2dev->netdev->name[3]);
- netdev = alloc_netdev(0, name, setup);
+ netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, setup);
if (!netdev) {
printk(KERN_ERR PFX "%s - etherdev alloc failed",
__func__);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 768a0fb67dd6..c2fb71c182a8 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -79,9 +79,10 @@ static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
-int c4iw_max_read_depth = 8;
+uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
-MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+MODULE_PARM_DESC(c4iw_max_read_depth,
+ "Per-connection max ORD/IRD (default=32)");
static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
@@ -474,7 +475,8 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
16)) | FW_WR_FLOWID(ep->hwtid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
- flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
+ flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
+ (ep->com.dev->rdev.lldi.pf));
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
@@ -821,6 +823,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
if (mpa_rev_to_use == 2) {
mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
sizeof (struct mpa_v2_conn_params));
+ PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+ ep->ord);
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
@@ -1190,8 +1194,8 @@ static int connect_request_upcall(struct c4iw_ep *ep)
sizeof(struct mpa_v2_conn_params);
} else {
/* this means MPA_v1 is used. Send max supported */
- event.ord = c4iw_max_read_depth;
- event.ird = c4iw_max_read_depth;
+ event.ord = cur_max_read_depth(ep->com.dev);
+ event.ird = cur_max_read_depth(ep->com.dev);
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
}
@@ -1255,6 +1259,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
return credits;
}
+#define RELAXED_IRD_NEGOTIATION 1
+
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
struct mpa_message *mpa;
@@ -1366,17 +1372,33 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
resp_ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
+ PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
+ __func__, resp_ird, resp_ord, ep->ird, ep->ord);
/*
* This is a double-check. Ideally, below checks are
* not required since ird/ord stuff has been taken
* care of in c4iw_accept_cr
*/
- if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
+ if (ep->ird < resp_ord) {
+ if (RELAXED_IRD_NEGOTIATION && resp_ord <=
+ ep->com.dev->rdev.lldi.max_ordird_qp)
+ ep->ird = resp_ord;
+ else
+ insuff_ird = 1;
+ } else if (ep->ird > resp_ord) {
+ ep->ird = resp_ord;
+ }
+ if (ep->ord > resp_ird) {
+ if (RELAXED_IRD_NEGOTIATION)
+ ep->ord = resp_ird;
+ else
+ insuff_ird = 1;
+ }
+ if (insuff_ird) {
err = -ENOMEM;
ep->ird = resp_ord;
ep->ord = resp_ird;
- insuff_ird = 1;
}
if (ntohs(mpa_v2_params->ird) &
@@ -1579,6 +1601,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
MPA_V2_IRD_ORD_MASK;
ep->ord = ntohs(mpa_v2_params->ord) &
MPA_V2_IRD_ORD_MASK;
+ PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+ ep->ord);
if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
if (peer2peer) {
if (ntohs(mpa_v2_params->ord) &
@@ -1798,6 +1822,20 @@ static int is_neg_adv(unsigned int status)
status == CPL_ERR_KEEPALV_NEG_ADVICE;
}
+static char *neg_adv_str(unsigned int status)
+{
+ switch (status) {
+ case CPL_ERR_RTX_NEG_ADVICE:
+ return "Retransmit timeout";
+ case CPL_ERR_PERSIST_NEG_ADVICE:
+ return "Persist timeout";
+ case CPL_ERR_KEEPALV_NEG_ADVICE:
+ return "Keepalive timeout";
+ default:
+ return "Unknown";
+ }
+}
+
static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
{
ep->snd_win = snd_win;
@@ -1996,8 +2034,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
status, status2errno(status));
if (is_neg_adv(status)) {
- printk(KERN_WARNING MOD "Connection problems for atid %u\n",
- atid);
+ dev_warn(&dev->rdev.lldi.pdev->dev,
+ "Connection problems for atid %u status %u (%s)\n",
+ atid, status, neg_adv_str(status));
return 0;
}
@@ -2472,8 +2511,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_tid(t, tid);
if (is_neg_adv(req->status)) {
- PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
- ep->hwtid);
+ dev_warn(&dev->rdev.lldi.pdev->dev,
+ "Negative advice on abort - tid %u status %d (%s)\n",
+ ep->hwtid, req->status, neg_adv_str(req->status));
return 0;
}
PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -2731,8 +2771,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
BUG_ON(!qp);
set_bit(ULP_ACCEPT, &ep->com.history);
- if ((conn_param->ord > c4iw_max_read_depth) ||
- (conn_param->ird > c4iw_max_read_depth)) {
+ if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
+ (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
abort_connection(ep, NULL, GFP_KERNEL);
err = -EINVAL;
goto err;
@@ -2740,31 +2780,41 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
if (conn_param->ord > ep->ird) {
- ep->ird = conn_param->ird;
- ep->ord = conn_param->ord;
- send_mpa_reject(ep, conn_param->private_data,
- conn_param->private_data_len);
- abort_connection(ep, NULL, GFP_KERNEL);
- err = -ENOMEM;
- goto err;
+ if (RELAXED_IRD_NEGOTIATION) {
+ ep->ord = ep->ird;
+ } else {
+ ep->ird = conn_param->ird;
+ ep->ord = conn_param->ord;
+ send_mpa_reject(ep, conn_param->private_data,
+ conn_param->private_data_len);
+ abort_connection(ep, NULL, GFP_KERNEL);
+ err = -ENOMEM;
+ goto err;
+ }
}
- if (conn_param->ird > ep->ord) {
- if (!ep->ord)
- conn_param->ird = 1;
- else {
+ if (conn_param->ird < ep->ord) {
+ if (RELAXED_IRD_NEGOTIATION &&
+ ep->ord <= h->rdev.lldi.max_ordird_qp) {
+ conn_param->ird = ep->ord;
+ } else {
abort_connection(ep, NULL, GFP_KERNEL);
err = -ENOMEM;
goto err;
}
}
-
}
ep->ird = conn_param->ird;
ep->ord = conn_param->ord;
- if (ep->mpa_attr.version != 2)
+ if (ep->mpa_attr.version == 1) {
if (peer2peer && ep->ird == 0)
ep->ird = 1;
+ } else {
+ if (peer2peer &&
+ (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
+ (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
+ ep->ird = 1;
+ }
PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
@@ -2803,6 +2853,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
return 0;
err1:
ep->com.cm_id = NULL;
+ abort_connection(ep, NULL, GFP_KERNEL);
cm_id->rem_ref(cm_id);
err:
mutex_unlock(&ep->com.mutex);
@@ -2886,8 +2937,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
int iptype;
int iwpm_err = 0;
- if ((conn_param->ord > c4iw_max_read_depth) ||
- (conn_param->ird > c4iw_max_read_depth)) {
+ if ((conn_param->ord > cur_max_read_depth(dev)) ||
+ (conn_param->ird > cur_max_read_depth(dev))) {
err = -EINVAL;
goto out;
}
@@ -3867,8 +3918,9 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
if (is_neg_adv(req->status)) {
- PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
- ep->hwtid);
+ dev_warn(&dev->rdev.lldi.pdev->dev,
+ "Negative advice on abort - tid %u status %d (%s)\n",
+ ep->hwtid, req->status, neg_adv_str(req->status));
kfree_skb(skb);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c04292c950f1..0f773e78e080 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -633,11 +633,15 @@ proc_cqe:
wq->sq.cidx = (uint16_t)idx;
PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
+ if (c4iw_wr_log)
+ c4iw_log_wr_stats(wq, hw_cqe);
t4_sq_consume(wq);
} else {
PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
BUG_ON(t4_rq_empty(wq));
+ if (c4iw_wr_log)
+ c4iw_log_wr_stats(wq, hw_cqe);
t4_rq_consume(wq);
goto skip_cqe;
}
@@ -895,7 +899,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
/*
* Make actual HW queue 2x to avoid cdix_inc overflows.
*/
- hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
+ hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);
/*
* Make HW queue at least 64 entries so GTS updates aren't too
@@ -909,14 +913,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
/*
* memsize must be a multiple of the page size if its a user cq.
*/
- if (ucontext) {
+ if (ucontext)
memsize = roundup(memsize, PAGE_SIZE);
- hwentries = memsize / sizeof *chp->cq.queue;
- while (hwentries > T4_MAX_IQ_SIZE) {
- memsize -= PAGE_SIZE;
- hwentries = memsize / sizeof *chp->cq.queue;
- }
- }
chp->cq.size = hwentries;
chp->cq.memsize = memsize;
chp->cq.vector = vector;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 7db82b24302b..f25df5276c22 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -33,6 +33,7 @@
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
+#include <linux/math64.h>
#include <rdma/ib_verbs.h>
@@ -55,6 +56,15 @@ module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
"Allow DB Coalescing on T5 (default = 0)");
+int c4iw_wr_log = 0;
+module_param(c4iw_wr_log, int, 0444);
+MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");
+
+int c4iw_wr_log_size_order = 12;
+module_param(c4iw_wr_log_size_order, int, 0444);
+MODULE_PARM_DESC(c4iw_wr_log_size_order,
+ "Number of entries (log2) in the work request timing log.");
+
struct uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
@@ -103,6 +113,117 @@ static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}
+void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
+{
+ struct wr_log_entry le;
+ int idx;
+
+ if (!wq->rdev->wr_log)
+ return;
+
+ idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
+ (wq->rdev->wr_log_size - 1);
+ le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
+ getnstimeofday(&le.poll_host_ts);
+ le.valid = 1;
+ le.cqe_sge_ts = CQE_TS(cqe);
+ if (SQ_TYPE(cqe)) {
+ le.qid = wq->sq.qid;
+ le.opcode = CQE_OPCODE(cqe);
+ le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
+ le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
+ le.wr_id = CQE_WRID_SQ_IDX(cqe);
+ } else {
+ le.qid = wq->rq.qid;
+ le.opcode = FW_RI_RECEIVE;
+ le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
+ le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
+ le.wr_id = CQE_WRID_MSN(cqe);
+ }
+ wq->rdev->wr_log[idx] = le;
+}
+
+static int wr_log_show(struct seq_file *seq, void *v)
+{
+ struct c4iw_dev *dev = seq->private;
+ struct timespec prev_ts = {0, 0};
+ struct wr_log_entry *lep;
+ int prev_ts_set = 0;
+ int idx, end;
+
+#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)
+
+ idx = atomic_read(&dev->rdev.wr_log_idx) &
+ (dev->rdev.wr_log_size - 1);
+ end = idx - 1;
+ if (end < 0)
+ end = dev->rdev.wr_log_size - 1;
+ lep = &dev->rdev.wr_log[idx];
+ while (idx != end) {
+ if (lep->valid) {
+ if (!prev_ts_set) {
+ prev_ts_set = 1;
+ prev_ts = lep->poll_host_ts;
+ }
+ seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
+ "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
+ "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
+ "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
+ "cqe_poll_delta_ns %llu\n",
+ idx,
+ timespec_sub(lep->poll_host_ts,
+ prev_ts).tv_sec,
+ timespec_sub(lep->poll_host_ts,
+ prev_ts).tv_nsec,
+ lep->qid, lep->opcode,
+ lep->opcode == FW_RI_RECEIVE ?
+ "msn" : "wrid",
+ lep->wr_id,
+ timespec_sub(lep->poll_host_ts,
+ lep->post_host_ts).tv_sec,
+ timespec_sub(lep->poll_host_ts,
+ lep->post_host_ts).tv_nsec,
+ lep->post_sge_ts, lep->cqe_sge_ts,
+ lep->poll_sge_ts,
+ ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
+ ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
+ prev_ts = lep->poll_host_ts;
+ }
+ idx++;
+ if (idx > (dev->rdev.wr_log_size - 1))
+ idx = 0;
+ lep = &dev->rdev.wr_log[idx];
+ }
+#undef ts2ns
+ return 0;
+}
+
+static int wr_log_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wr_log_show, inode->i_private);
+}
+
+static ssize_t wr_log_clear(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
+ int i;
+
+ if (dev->rdev.wr_log)
+ for (i = 0; i < dev->rdev.wr_log_size; i++)
+ dev->rdev.wr_log[i].valid = 0;
+ return count;
+}
+
+static const struct file_operations wr_log_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = wr_log_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = wr_log_clear,
+};
+
static int dump_qp(int id, void *p, void *data)
{
struct c4iw_qp *qp = p;
@@ -241,12 +362,32 @@ static int dump_stag(int id, void *p, void *data)
struct c4iw_debugfs_data *stagd = data;
int space;
int cc;
+ struct fw_ri_tpte tpte;
+ int ret;
space = stagd->bufsize - stagd->pos - 1;
if (space == 0)
return 1;
- cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
+ ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
+ (__be32 *)&tpte);
+ if (ret) {
+ dev_err(&stagd->devp->rdev.lldi.pdev->dev,
+ "%s cxgb4_read_tpte err %d\n", __func__, ret);
+ return ret;
+ }
+ cc = snprintf(stagd->buf + stagd->pos, space,
+ "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
+ "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+ (u32)id<<8,
+ G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
+ G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+ ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+ ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
if (cc < space)
stagd->pos += cc;
return 0;
@@ -259,7 +400,7 @@ static int stag_release(struct inode *inode, struct file *file)
printk(KERN_INFO "%s null stagd?\n", __func__);
return 0;
}
- kfree(stagd->buf);
+ vfree(stagd->buf);
kfree(stagd);
return 0;
}
@@ -282,8 +423,8 @@ static int stag_open(struct inode *inode, struct file *file)
idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
spin_unlock_irq(&stagd->devp->lock);
- stagd->bufsize = count * sizeof("0x12345678\n");
- stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
+ stagd->bufsize = count * 256;
+ stagd->buf = vmalloc(stagd->bufsize);
if (!stagd->buf) {
ret = -ENOMEM;
goto err1;
@@ -348,6 +489,7 @@ static int stats_show(struct seq_file *seq, void *v)
dev->rdev.stats.act_ofld_conn_fails);
seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
dev->rdev.stats.pas_ofld_conn_fails);
+ seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
return 0;
}
@@ -583,6 +725,12 @@ static int setup_debugfs(struct c4iw_dev *devp)
if (de && de->d_inode)
de->d_inode->i_size = 4096;
+ if (c4iw_wr_log) {
+ de = debugfs_create_file("wr_log", S_IWUSR, devp->debugfs_root,
+ (void *)devp, &wr_log_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = 4096;
+ }
return 0;
}
@@ -696,7 +844,20 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
pr_err(MOD "error allocating status page\n");
goto err4;
}
+
+ if (c4iw_wr_log) {
+ rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
+ sizeof(*rdev->wr_log), GFP_KERNEL);
+ if (rdev->wr_log) {
+ rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
+ atomic_set(&rdev->wr_log_idx, 0);
+ } else {
+ pr_err(MOD "error allocating wr_log. Logging disabled\n");
+ }
+ }
+
rdev->status_page->db_off = 0;
+
return 0;
err4:
c4iw_rqtpool_destroy(rdev);
@@ -710,6 +871,7 @@ err1:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
+ kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
@@ -768,6 +930,27 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
}
devp->rdev.lldi = *infop;
+ /* init various hw-queue params based on lld info */
+ PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+ __func__, devp->rdev.lldi.sge_ingpadboundary,
+ devp->rdev.lldi.sge_egrstatuspagesize);
+
+ devp->rdev.hw_queue.t4_eq_status_entries =
+ devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+ devp->rdev.hw_queue.t4_max_eq_size = 65520;
+ devp->rdev.hw_queue.t4_max_iq_size = 65520;
+ devp->rdev.hw_queue.t4_max_rq_size = 8192 -
+ devp->rdev.hw_queue.t4_eq_status_entries - 1;
+ devp->rdev.hw_queue.t4_max_sq_size =
+ devp->rdev.hw_queue.t4_max_eq_size -
+ devp->rdev.hw_queue.t4_eq_status_entries - 1;
+ devp->rdev.hw_queue.t4_max_qp_depth =
+ devp->rdev.hw_queue.t4_max_rq_size;
+ devp->rdev.hw_queue.t4_max_cq_depth =
+ devp->rdev.hw_queue.t4_max_iq_size - 2;
+ devp->rdev.hw_queue.t4_stat_len =
+ devp->rdev.lldi.sge_egrstatuspagesize;
+
/*
* For T5 devices, we map all of BAR2 with WC.
* For T4 devices with onchip qp mem, we map only that part
@@ -818,6 +1001,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
mutex_init(&devp->rdev.stats.lock);
mutex_init(&devp->db_mutex);
INIT_LIST_HEAD(&devp->db_fc_list);
+ devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
if (c4iw_debugfs_root) {
devp->debugfs_root = debugfs_create_dir(
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index d61d0a18f784..fbe6051af254 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -35,6 +35,55 @@
#include "iw_cxgb4.h"
+static void print_tpte(struct c4iw_dev *dev, u32 stag)
+{
+ int ret;
+ struct fw_ri_tpte tpte;
+
+ ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,
+ (__be32 *)&tpte);
+ if (ret) {
+ dev_err(&dev->rdev.lldi.pdev->dev,
+ "%s cxgb4_read_tpte err %d\n", __func__, ret);
+ return;
+ }
+ PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
+ "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+ stag & 0xffffff00,
+ G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
+ G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
+ G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+ ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+ ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
+}
+
+static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
+{
+ __be64 *p = (void *)err_cqe;
+
+ dev_err(&dev->rdev.lldi.pdev->dev,
+ "AE qpid %d opcode %d status 0x%x "
+ "type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n",
+ CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+ CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
+ CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+
+ PDBG("%016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
+ be64_to_cpu(p[3]));
+
+ /*
+ * Ingress WRITE and READ_RESP errors provide
+ * the offending stag, so parse and log it.
+ */
+ if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE ||
+ CQE_OPCODE(err_cqe) == FW_RI_READ_RESP))
+ print_tpte(dev, CQE_WRID_STAG(err_cqe));
+}
+
static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
struct c4iw_qp *qhp,
struct t4_cqe *err_cqe,
@@ -44,11 +93,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
struct c4iw_qp_attributes attrs;
unsigned long flag;
- printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
- "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
- CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
- CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
- CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+ dump_err_cqe(dev, err_cqe);
if (qhp->attr.state == C4IW_QP_STATE_RTS) {
attrs.next_state = C4IW_QP_STATE_TERMINATE;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 361fff7a0742..b5678ac97393 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -139,6 +139,29 @@ struct c4iw_stats {
u64 pas_ofld_conn_fails;
};
+struct c4iw_hw_queue {
+ int t4_eq_status_entries;
+ int t4_max_eq_size;
+ int t4_max_iq_size;
+ int t4_max_rq_size;
+ int t4_max_sq_size;
+ int t4_max_qp_depth;
+ int t4_max_cq_depth;
+ int t4_stat_len;
+};
+
+struct wr_log_entry {
+ struct timespec post_host_ts;
+ struct timespec poll_host_ts;
+ u64 post_sge_ts;
+ u64 cqe_sge_ts;
+ u64 poll_sge_ts;
+ u16 qid;
+ u16 wr_id;
+ u8 opcode;
+ u8 valid;
+};
+
struct c4iw_rdev {
struct c4iw_resource resource;
unsigned long qpshift;
@@ -156,7 +179,11 @@ struct c4iw_rdev {
unsigned long oc_mw_pa;
void __iomem *oc_mw_kva;
struct c4iw_stats stats;
+ struct c4iw_hw_queue hw_queue;
struct t4_dev_status_page *status_page;
+ atomic_t wr_log_idx;
+ struct wr_log_entry *wr_log;
+ int wr_log_size;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -166,7 +193,7 @@ static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
- return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
+ return (int)(rdev->lldi.vr->stag.size >> 5);
}
#define C4IW_WR_TO (30*HZ)
@@ -237,6 +264,7 @@ struct c4iw_dev {
struct idr atid_idr;
struct idr stid_idr;
struct list_head db_fc_list;
+ u32 avail_ird;
};
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -318,6 +346,13 @@ static inline void remove_handle_nolock(struct c4iw_dev *rhp,
_remove_handle(rhp, idr, id, 0);
}
+extern uint c4iw_max_read_depth;
+
+static inline int cur_max_read_depth(struct c4iw_dev *dev)
+{
+ return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
+}
+
struct c4iw_pd {
struct ib_pd ibpd;
u32 pdid;
@@ -991,7 +1026,8 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
-extern int c4iw_max_read_depth;
+extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
+extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index b1d305338de6..72e3b69d1b76 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -318,14 +318,16 @@ static int c4iw_query_device(struct ib_device *ibdev,
props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
props->max_mr_size = T4_MAX_MR_SIZE;
- props->max_qp = T4_MAX_NUM_QP;
- props->max_qp_wr = T4_MAX_QP_DEPTH;
+ props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
+ props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
props->max_sge = T4_MAX_RECV_SGE;
props->max_sge_rd = 1;
- props->max_qp_rd_atom = c4iw_max_read_depth;
- props->max_qp_init_rd_atom = c4iw_max_read_depth;
- props->max_cq = T4_MAX_NUM_CQ;
- props->max_cqe = T4_MAX_CQ_DEPTH;
+ props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
+ props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
+ c4iw_max_read_depth);
+ props->max_qp_init_rd_atom = props->max_qp_rd_atom;
+ props->max_cq = dev->rdev.lldi.vr->qp.size;
+ props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
props->max_mr = c4iw_num_stags(&dev->rdev);
props->max_pd = T4_MAX_NUM_PD;
props->local_ca_ack_delay = 0;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 086f62f5dc9e..c158fcc02bca 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -58,6 +58,31 @@ static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
+static int alloc_ird(struct c4iw_dev *dev, u32 ird)
+{
+ int ret = 0;
+
+ spin_lock_irq(&dev->lock);
+ if (ird <= dev->avail_ird)
+ dev->avail_ird -= ird;
+ else
+ ret = -ENOMEM;
+ spin_unlock_irq(&dev->lock);
+
+ if (ret)
+ dev_warn(&dev->rdev.lldi.pdev->dev,
+ "device IRD resources exhausted\n");
+
+ return ret;
+}
+
+static void free_ird(struct c4iw_dev *dev, int ird)
+{
+ spin_lock_irq(&dev->lock);
+ dev->avail_ird += ird;
+ spin_unlock_irq(&dev->lock);
+}
+
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
unsigned long flag;
@@ -180,9 +205,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
}
/*
- * RQT must be a power of 2.
+ * RQT must be a power of 2 and at least 16 deep.
*/
- wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
+ wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
if (!wq->rq.rqt_hwaddr) {
ret = -ENOMEM;
@@ -258,7 +283,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/*
* eqsize is the number of 64B entries plus the status page size.
*/
- eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+ eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
+ rdev->hw_queue.t4_eq_status_entries;
res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
@@ -283,7 +309,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/*
* eqsize is the number of 64B entries plus the status page size.
*/
- eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+ eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
+ rdev->hw_queue.t4_eq_status_entries;
res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */
V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */
@@ -796,6 +823,11 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
qhp->sq_sig_all;
swsqe->flushed = 0;
swsqe->wr_id = wr->wr_id;
+ if (c4iw_wr_log) {
+ swsqe->sge_ts = cxgb4_read_sge_timestamp(
+ qhp->rhp->rdev.lldi.ports[0]);
+ getnstimeofday(&swsqe->host_ts);
+ }
init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
@@ -859,6 +891,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
+ if (c4iw_wr_log) {
+ qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
+ cxgb4_read_sge_timestamp(
+ qhp->rhp->rdev.lldi.ports[0]);
+ getnstimeofday(
+ &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
+ }
wqe->recv.opcode = FW_RI_RECV_WR;
wqe->recv.r1 = 0;
@@ -1202,12 +1241,20 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
int ret;
struct sk_buff *skb;
- PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
- qhp->ep->hwtid);
+ PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+ qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = alloc_ird(rhp, qhp->attr.max_ird);
+ if (ret) {
+ qhp->attr.max_ird = 0;
+ kfree_skb(skb);
+ goto out;
+ }
set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -1258,10 +1305,14 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
ret = c4iw_ofld_send(&rhp->rdev, skb);
if (ret)
- goto out;
+ goto err1;
ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+ if (!ret)
+ goto out;
+err1:
+ free_ird(rhp, qhp->attr.max_ird);
out:
PDBG("%s ret %d\n", __func__, ret);
return ret;
@@ -1306,7 +1357,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
newattr.max_ord = attrs->max_ord;
}
if (mask & C4IW_QP_ATTR_MAX_IRD) {
- if (attrs->max_ird > c4iw_max_read_depth) {
+ if (attrs->max_ird > cur_max_read_depth(rhp)) {
ret = -EINVAL;
goto out;
}
@@ -1529,6 +1580,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
if (!list_empty(&qhp->db_fc_entry))
list_del_init(&qhp->db_fc_entry);
spin_unlock_irq(&rhp->lock);
+ free_ird(rhp, qhp->attr.max_ird);
ucontext = ib_qp->uobject ?
to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
@@ -1569,13 +1621,17 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
return ERR_PTR(-EINVAL);
- rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
- if (rqsize > T4_MAX_RQ_SIZE)
+ if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
return ERR_PTR(-E2BIG);
+ rqsize = attrs->cap.max_recv_wr + 1;
+ if (rqsize < 8)
+ rqsize = 8;
- sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
- if (sqsize > T4_MAX_SQ_SIZE)
+ if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
return ERR_PTR(-E2BIG);
+ sqsize = attrs->cap.max_send_wr + 1;
+ if (sqsize < 8)
+ sqsize = 8;
ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
@@ -1583,19 +1639,20 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (!qhp)
return ERR_PTR(-ENOMEM);
qhp->wq.sq.size = sqsize;
- qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
+ qhp->wq.sq.memsize =
+ (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+ sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
qhp->wq.sq.flush_cidx = -1;
qhp->wq.rq.size = rqsize;
- qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
+ qhp->wq.rq.memsize =
+ (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+ sizeof(*qhp->wq.rq.queue);
if (ucontext) {
qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
}
- PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
- __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
-
ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
if (ret)
@@ -1619,8 +1676,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.enable_rdma_read = 1;
qhp->attr.enable_rdma_write = 1;
qhp->attr.enable_bind = 1;
- qhp->attr.max_ord = 1;
- qhp->attr.max_ird = 1;
+ qhp->attr.max_ord = 0;
+ qhp->attr.max_ird = 0;
qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
spin_lock_init(&qhp->lock);
mutex_init(&qhp->mutex);
@@ -1714,9 +1771,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
INIT_LIST_HEAD(&qhp->db_fc_entry);
- PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
- __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
- qhp->wq.sq.qid);
+ PDBG("%s sq id %u size %u memsize %zu num_entries %u "
+ "rq id %u size %u memsize %zu num_entries %u\n", __func__,
+ qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
+ attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
+ qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
return &qhp->ibqp;
err8:
kfree(mm5);
@@ -1804,5 +1863,11 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
memset(attr, 0, sizeof *attr);
memset(init_attr, 0, sizeof *init_attr);
attr->qp_state = to_ib_qp_state(qhp->attr.state);
+ init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
+ init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
+ init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
+ init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+ init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
+ init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 68b0a6bf4eb0..df5edfa31a8f 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -36,22 +36,11 @@
#include "t4_msg.h"
#include "t4fw_ri_api.h"
-#define T4_MAX_NUM_QP 65536
-#define T4_MAX_NUM_CQ 65536
#define T4_MAX_NUM_PD 65536
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
-#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_IQ_SIZE (65520 - 1)
-#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
-#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
-#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
-#define T4_MAX_NUM_STAG (1<<15)
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
#define A_PCIE_MA_SYNC 0x30b4
struct t4_status_page {
@@ -244,8 +233,8 @@ struct t4_cqe {
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
/* generic accessor macros */
-#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
-#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
+#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
+#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
/* macros for flit 3 of the cqe */
#define S_CQE_GENBIT 63
@@ -277,6 +266,8 @@ struct t4_swsqe {
int signaled;
u16 idx;
int flushed;
+ struct timespec host_ts;
+ u64 sge_ts;
};
static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
@@ -314,6 +305,8 @@ struct t4_sq {
struct t4_swrqe {
u64 wr_id;
+ struct timespec host_ts;
+ u64 sge_ts;
};
struct t4_rq {
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 91289a051af9..5709e77faf7c 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -849,6 +849,5 @@ enum { /* TCP congestion control algorithms */
#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
#define CONG_CNTRL_VALID (1 << 18)
-#define T5_OPT_2_VALID (1 << 31)
#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 8ae4f896cb41..e4056279166d 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -180,7 +180,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
struct mlx5_core_srq *msrq = NULL;
if (qp->ibqp.xrcd) {
- msrq = mlx5_core_get_srq(&dev->mdev,
+ msrq = mlx5_core_get_srq(dev->mdev,
be32_to_cpu(cqe->srqn));
srq = to_mibsrq(msrq);
} else {
@@ -348,7 +348,7 @@ static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
u16 tail, u16 head)
{
- int idx;
+ u16 idx;
do {
idx = tail & (qp->sq.wqe_cnt - 1);
@@ -364,7 +364,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
- mlx5_buf_free(&dev->mdev, &buf->buf);
+ mlx5_buf_free(dev->mdev, &buf->buf);
}
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -450,7 +450,7 @@ repoll:
* because CQs will be locked while QPs are removed
* from the table.
*/
- mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
+ mqp = __mlx5_qp_lookup(dev->mdev, qpn);
if (unlikely(!mqp)) {
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
cq->mcq.cqn, qpn);
@@ -514,11 +514,11 @@ repoll:
case MLX5_CQE_SIG_ERR:
sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
- read_lock(&dev->mdev.priv.mr_table.lock);
- mmr = __mlx5_mr_lookup(&dev->mdev,
+ read_lock(&dev->mdev->priv.mr_table.lock);
+ mmr = __mlx5_mr_lookup(dev->mdev,
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
if (unlikely(!mmr)) {
- read_unlock(&dev->mdev.priv.mr_table.lock);
+ read_unlock(&dev->mdev->priv.mr_table.lock);
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
return -EINVAL;
@@ -536,7 +536,7 @@ repoll:
mr->sig->err_item.expected,
mr->sig->err_item.actual);
- read_unlock(&dev->mdev.priv.mr_table.lock);
+ read_unlock(&dev->mdev->priv.mr_table.lock);
goto repoll;
}
@@ -575,8 +575,8 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
mlx5_cq_arm(&to_mcq(ibcq)->mcq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
- to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
- MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
+ to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
+ MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));
return 0;
}
@@ -586,7 +586,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
{
int err;
- err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
+ err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
PAGE_SIZE * 2, &buf->buf);
if (err)
return err;
@@ -691,7 +691,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
{
int err;
- err = mlx5_db_alloc(&dev->mdev, &cq->db);
+ err = mlx5_db_alloc(dev->mdev, &cq->db);
if (err)
return err;
@@ -716,7 +716,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
- *index = dev->mdev.priv.uuari.uars[0].index;
+ *index = dev->mdev->priv.uuari.uars[0].index;
return 0;
@@ -724,14 +724,14 @@ err_buf:
free_cq_buf(dev, &cq->buf);
err_db:
- mlx5_db_free(&dev->mdev, &cq->db);
+ mlx5_db_free(dev->mdev, &cq->db);
return err;
}
static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
free_cq_buf(dev, &cq->buf);
- mlx5_db_free(&dev->mdev, &cq->db);
+ mlx5_db_free(dev->mdev, &cq->db);
}
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
return ERR_PTR(-EINVAL);
entries = roundup_pow_of_two(entries + 1);
- if (entries > dev->mdev.caps.max_cqes)
+ if (entries > dev->mdev->caps.max_cqes)
return ERR_PTR(-EINVAL);
cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -789,7 +789,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
cqb->ctx.c_eqn = cpu_to_be16(eqn);
cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
- err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
+ err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
if (err)
goto err_cqb;
@@ -809,7 +809,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
return &cq->ibcq;
err_cmd:
- mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
+ mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
err_cqb:
mlx5_vfree(cqb);
@@ -834,7 +834,7 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
if (cq->uobject)
context = cq->uobject->context;
- mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
+ mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
if (context)
destroy_cq_user(mcq, context);
else
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
int err;
u32 fsel;
- if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+ if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
return -ENOSYS;
in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -931,7 +931,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
in->ctx.cq_period = cpu_to_be16(cq_period);
in->ctx.cq_max_count = cpu_to_be16(cq_count);
in->field_select = cpu_to_be32(fsel);
- err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
+ err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
kfree(in);
if (err)
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
int uninitialized_var(cqe_size);
unsigned long flags;
- if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+ if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
pr_info("Firmware does not support resize CQ\n");
return -ENOSYS;
}
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
return -EINVAL;
entries = roundup_pow_of_two(entries + 1);
- if (entries > dev->mdev.caps.max_cqes + 1)
+ if (entries > dev->mdev->caps.max_cqes + 1)
return -EINVAL;
if (entries == ibcq->cqe + 1)
@@ -1128,7 +1128,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
in->cqn = cpu_to_be32(cq->mcq.cqn);
- err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+ err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
if (err)
goto ex_alloc;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 5c8938be0e08..b514bbb5610f 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -41,7 +41,7 @@ enum {
};
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
- int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+ u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad)
{
u8 op_modifier = 0;
@@ -54,7 +54,7 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
if (ignore_bkey || !in_wc)
op_modifier |= 0x2;
- return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port);
+ return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
packet_error = be16_to_cpu(out_mad->status);
- dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
+ dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
out:
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 364d4b6937f5..d8907b20522a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -54,96 +54,17 @@ MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
-static int prof_sel = 2;
-module_param_named(prof_sel, prof_sel, int, 0444);
-MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+static int deprecated_prof_sel = 2;
+module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
+MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");
static char mlx5_version[] =
DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
-static struct mlx5_profile profile[] = {
- [0] = {
- .mask = 0,
- },
- [1] = {
- .mask = MLX5_PROF_MASK_QP_SIZE,
- .log_max_qp = 12,
- },
- [2] = {
- .mask = MLX5_PROF_MASK_QP_SIZE |
- MLX5_PROF_MASK_MR_CACHE,
- .log_max_qp = 17,
- .mr_cache[0] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[1] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[2] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[3] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[4] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[5] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[6] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[7] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[8] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[9] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[10] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[11] = {
- .size = 500,
- .limit = 250
- },
- .mr_cache[12] = {
- .size = 64,
- .limit = 32
- },
- .mr_cache[13] = {
- .size = 32,
- .limit = 16
- },
- .mr_cache[14] = {
- .size = 16,
- .limit = 8
- },
- .mr_cache[15] = {
- .size = 8,
- .limit = 4
- },
- },
-};
-
int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
{
- struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+ struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
struct mlx5_eq *eq, *n;
int err = -ENOENT;
@@ -163,7 +84,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
{
- struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+ struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
char name[MLX5_MAX_EQ_NAME];
struct mlx5_eq *eq, *n;
int ncomp_vec;
@@ -182,9 +103,9 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
}
snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
- err = mlx5_create_map_eq(&dev->mdev, eq,
+ err = mlx5_create_map_eq(dev->mdev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
- name, &dev->mdev.priv.uuari.uars[0]);
+ name, &dev->mdev->priv.uuari.uars[0]);
if (err) {
kfree(eq);
goto clean;
@@ -204,7 +125,7 @@ clean:
list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
list_del(&eq->list);
spin_unlock(&table->lock);
- if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
+ if (mlx5_destroy_unmap_eq(dev->mdev, eq))
mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
kfree(eq);
spin_lock(&table->lock);
@@ -215,14 +136,14 @@ clean:
static void free_comp_eqs(struct mlx5_ib_dev *dev)
{
- struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+ struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
struct mlx5_eq *eq, *n;
spin_lock(&table->lock);
list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
list_del(&eq->list);
spin_unlock(&table->lock);
- if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
+ if (mlx5_destroy_unmap_eq(dev->mdev, eq))
mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
kfree(eq);
spin_lock(&table->lock);
@@ -255,14 +176,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
memset(props, 0, sizeof(*props));
- props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) |
- (fw_rev_min(&dev->mdev) << 16) |
- fw_rev_sub(&dev->mdev);
+ props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
+ (fw_rev_min(dev->mdev) << 16) |
+ fw_rev_sub(dev->mdev);
props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_RC_RNR_NAK_GEN;
- flags = dev->mdev.caps.flags;
+ flags = dev->mdev->caps.flags;
if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@@ -292,30 +213,30 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
props->max_mr_size = ~0ull;
- props->page_size_cap = dev->mdev.caps.min_page_sz;
- props->max_qp = 1 << dev->mdev.caps.log_max_qp;
- props->max_qp_wr = dev->mdev.caps.max_wqes;
- max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
- max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
+ props->page_size_cap = dev->mdev->caps.min_page_sz;
+ props->max_qp = 1 << dev->mdev->caps.log_max_qp;
+ props->max_qp_wr = dev->mdev->caps.max_wqes;
+ max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
+ max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
- props->max_cq = 1 << dev->mdev.caps.log_max_cq;
- props->max_cqe = dev->mdev.caps.max_cqes - 1;
- props->max_mr = 1 << dev->mdev.caps.log_max_mkey;
- props->max_pd = 1 << dev->mdev.caps.log_max_pd;
- props->max_qp_rd_atom = dev->mdev.caps.max_ra_req_qp;
- props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp;
+ props->max_cq = 1 << dev->mdev->caps.log_max_cq;
+ props->max_cqe = dev->mdev->caps.max_cqes - 1;
+ props->max_mr = 1 << dev->mdev->caps.log_max_mkey;
+ props->max_pd = 1 << dev->mdev->caps.log_max_pd;
+ props->max_qp_rd_atom = dev->mdev->caps.max_ra_req_qp;
+ props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp;
props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
- props->max_srq = 1 << dev->mdev.caps.log_max_srq;
- props->max_srq_wr = dev->mdev.caps.max_srq_wqes - 1;
+ props->max_srq = 1 << dev->mdev->caps.log_max_srq;
+ props->max_srq_wr = dev->mdev->caps.max_srq_wqes - 1;
props->max_srq_sge = max_rq_sg - 1;
props->max_fast_reg_page_list_len = (unsigned int)-1;
- props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
+ props->local_ca_ack_delay = dev->mdev->caps.local_ca_ack_delay;
props->atomic_cap = IB_ATOMIC_NONE;
props->masked_atomic_cap = IB_ATOMIC_NONE;
props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
- props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
- props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
+ props->max_mcast_grp = 1 << dev->mdev->caps.log_max_mcg;
+ props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg;
props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
props->max_mcast_grp;
props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
@@ -336,7 +257,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
int ext_active_speed;
int err = -ENOMEM;
- if (port < 1 || port > dev->mdev.caps.num_ports) {
+ if (port < 1 || port > dev->mdev->caps.num_ports) {
mlx5_ib_warn(dev, "invalid port number %d\n", port);
return -EINVAL;
}
@@ -367,8 +288,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
props->phys_state = out_mad->data[33] >> 4;
props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
props->gid_tbl_len = out_mad->data[50];
- props->max_msg_sz = 1 << to_mdev(ibdev)->mdev.caps.log_max_msg;
- props->pkey_tbl_len = to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len;
+ props->max_msg_sz = 1 << to_mdev(ibdev)->mdev->caps.log_max_msg;
+ props->pkey_tbl_len = to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len;
props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
props->active_width = out_mad->data[31] & 0xf;
@@ -395,7 +316,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == 4) {
- if (dev->mdev.caps.ext_port_cap[port - 1] &
+ if (dev->mdev->caps.ext_port_cap[port - 1] &
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -508,7 +429,7 @@ static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
* a 144 trap. If cmd fails, just ignore.
*/
memcpy(&in, props->node_desc, 64);
- err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out,
+ err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
if (err)
return err;
@@ -535,7 +456,7 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
~props->clr_port_cap_mask;
- err = mlx5_set_port_caps(&dev->mdev, port, tmp);
+ err = mlx5_set_port_caps(dev->mdev, port, tmp);
out:
mutex_unlock(&dev->cap_mask_mutex);
@@ -557,7 +478,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
int uuarn;
int err;
int i;
- int reqlen;
+ size_t reqlen;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
@@ -591,14 +512,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
- resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp;
- resp.bf_reg_size = dev->mdev.caps.bf_reg_size;
+ resp.qp_tab_size = 1 << dev->mdev->caps.log_max_qp;
+ resp.bf_reg_size = dev->mdev->caps.bf_reg_size;
resp.cache_line_size = L1_CACHE_BYTES;
- resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz;
- resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz;
- resp.max_send_wqebb = dev->mdev.caps.max_wqes;
- resp.max_recv_wr = dev->mdev.caps.max_wqes;
- resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes;
+ resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz;
+ resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz;
+ resp.max_send_wqebb = dev->mdev->caps.max_wqes;
+ resp.max_recv_wr = dev->mdev->caps.max_wqes;
+ resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
@@ -635,7 +556,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
}
for (i = 0; i < num_uars; i++) {
- err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index);
+ err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
if (err)
goto out_count;
}
@@ -644,7 +565,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
mutex_init(&context->db_page_mutex);
resp.tot_uuars = req.total_num_uuars;
- resp.num_ports = dev->mdev.caps.num_ports;
+ resp.num_ports = dev->mdev->caps.num_ports;
err = ib_copy_to_udata(udata, &resp,
sizeof(resp) - sizeof(resp.reserved));
if (err)
@@ -658,7 +579,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
out_uars:
for (i--; i >= 0; i--)
- mlx5_cmd_free_uar(&dev->mdev, uars[i].index);
+ mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
kfree(uuari->count);
@@ -681,7 +602,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
int i;
for (i = 0; i < uuari->num_uars; i++) {
- if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index))
+ if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
}
@@ -695,7 +616,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
- return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index;
+ return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
}
static int get_command(unsigned long offset)
@@ -773,7 +694,7 @@ static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
seg->start_addr = 0;
- err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in),
+ err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
NULL, NULL, NULL);
if (err) {
mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
@@ -798,7 +719,7 @@ static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
memset(&mr, 0, sizeof(mr));
mr.key = key;
- err = mlx5_core_destroy_mkey(&dev->mdev, &mr);
+ err = mlx5_core_destroy_mkey(dev->mdev, &mr);
if (err)
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
}
@@ -815,7 +736,7 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
if (!pd)
return ERR_PTR(-ENOMEM);
- err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn);
+ err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
if (err) {
kfree(pd);
return ERR_PTR(err);
@@ -824,14 +745,14 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
if (context) {
resp.pdn = pd->pdn;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
+ mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
kfree(pd);
return ERR_PTR(-EFAULT);
}
} else {
err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
if (err) {
- mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
+ mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
kfree(pd);
return ERR_PTR(err);
}
@@ -848,7 +769,7 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
if (!pd->uobject)
free_pa_mkey(mdev, mpd->pa_lkey);
- mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn);
+ mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
kfree(mpd);
return 0;
@@ -859,7 +780,7 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
int err;
- err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num);
+ err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
if (err)
mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
ibqp->qp_num, gid->raw);
@@ -872,7 +793,7 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
int err;
- err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num);
+ err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
if (err)
mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
ibqp->qp_num, gid->raw);
@@ -906,7 +827,7 @@ static int init_node_data(struct mlx5_ib_dev *dev)
if (err)
goto out;
- dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
+ dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
out:
@@ -921,7 +842,7 @@ static ssize_t show_fw_pages(struct device *device, struct device_attribute *att
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages);
+ return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}
static ssize_t show_reg_pages(struct device *device,
@@ -930,7 +851,7 @@ static ssize_t show_reg_pages(struct device *device,
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages);
+ return sprintf(buf, "%d\n", dev->mdev->priv.reg_pages);
}
static ssize_t show_hca(struct device *device, struct device_attribute *attr,
@@ -938,7 +859,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "MT%d\n", dev->mdev.pdev->device);
+ return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@@ -946,8 +867,8 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev),
- fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev));
+ return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+ fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
@@ -955,7 +876,7 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
- return sprintf(buf, "%x\n", dev->mdev.rev_id);
+ return sprintf(buf, "%x\n", dev->mdev->rev_id);
}
static ssize_t show_board(struct device *device, struct device_attribute *attr,
@@ -964,7 +885,7 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr,
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
- dev->mdev.board_id);
+ dev->mdev->board_id);
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -983,11 +904,12 @@ static struct device_attribute *mlx5_class_attributes[] = {
&dev_attr_reg_pages,
};
-static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
- void *data)
+static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+ enum mlx5_dev_event event, unsigned long param)
{
- struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev);
+ struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
struct ib_event ibev;
+
u8 port = 0;
switch (event) {
@@ -998,12 +920,12 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
case MLX5_DEV_EVENT_PORT_UP:
ibev.event = IB_EVENT_PORT_ACTIVE;
- port = *(u8 *)data;
+ port = (u8)param;
break;
case MLX5_DEV_EVENT_PORT_DOWN:
ibev.event = IB_EVENT_PORT_ERR;
- port = *(u8 *)data;
+ port = (u8)param;
break;
case MLX5_DEV_EVENT_PORT_INITIALIZED:
@@ -1012,22 +934,22 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
case MLX5_DEV_EVENT_LID_CHANGE:
ibev.event = IB_EVENT_LID_CHANGE;
- port = *(u8 *)data;
+ port = (u8)param;
break;
case MLX5_DEV_EVENT_PKEY_CHANGE:
ibev.event = IB_EVENT_PKEY_CHANGE;
- port = *(u8 *)data;
+ port = (u8)param;
break;
case MLX5_DEV_EVENT_GUID_CHANGE:
ibev.event = IB_EVENT_GID_CHANGE;
- port = *(u8 *)data;
+ port = (u8)param;
break;
case MLX5_DEV_EVENT_CLIENT_REREG:
ibev.event = IB_EVENT_CLIENT_REREGISTER;
- port = *(u8 *)data;
+ port = (u8)param;
break;
}
@@ -1047,7 +969,7 @@ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
int port;
- for (port = 1; port <= dev->mdev.caps.num_ports; port++)
+ for (port = 1; port <= dev->mdev->caps.num_ports; port++)
mlx5_query_ext_port_caps(dev, port);
}
@@ -1072,14 +994,14 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
goto out;
}
- for (port = 1; port <= dev->mdev.caps.num_ports; port++) {
+ for (port = 1; port <= dev->mdev->caps.num_ports; port++) {
err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
if (err) {
mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
break;
}
- dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
- dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
+ dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
+ dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
dprops->max_pkeys, pprops->gid_tbl_len);
}
@@ -1328,10 +1250,8 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
mlx5_ib_dealloc_pd(devr->p0);
}
-static int init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
- struct mlx5_core_dev *mdev;
struct mlx5_ib_dev *dev;
int err;
int i;
@@ -1340,28 +1260,19 @@ static int init_one(struct pci_dev *pdev,
dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
if (!dev)
- return -ENOMEM;
+ return NULL;
- mdev = &dev->mdev;
- mdev->event = mlx5_ib_event;
- if (prof_sel >= ARRAY_SIZE(profile)) {
- pr_warn("selected pofile out of range, selceting default\n");
- prof_sel = 0;
- }
- mdev->profile = &profile[prof_sel];
- err = mlx5_dev_init(mdev, pdev);
- if (err)
- goto err_free;
+ dev->mdev = mdev;
err = get_port_caps(dev);
if (err)
- goto err_cleanup;
+ goto err_dealloc;
get_ext_port_caps(dev);
err = alloc_comp_eqs(dev);
if (err)
- goto err_cleanup;
+ goto err_dealloc;
MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
@@ -1480,7 +1391,7 @@ static int init_one(struct pci_dev *pdev,
dev->ib_active = true;
- return 0;
+ return dev;
err_umrc:
destroy_umrc_res(dev);
@@ -1494,49 +1405,39 @@ err_rsrc:
err_eqs:
free_comp_eqs(dev);
-err_cleanup:
- mlx5_dev_cleanup(mdev);
-
-err_free:
+err_dealloc:
ib_dealloc_device((struct ib_device *)dev);
- return err;
+ return NULL;
}
-static void remove_one(struct pci_dev *pdev)
+static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
- struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev);
-
+ struct mlx5_ib_dev *dev = context;
destroy_umrc_res(dev);
ib_unregister_device(&dev->ib_dev);
destroy_dev_resources(&dev->devr);
free_comp_eqs(dev);
- mlx5_dev_cleanup(&dev->mdev);
ib_dealloc_device(&dev->ib_dev);
}
-static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = {
- { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
- { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table);
-
-static struct pci_driver mlx5_ib_driver = {
- .name = DRIVER_NAME,
- .id_table = mlx5_ib_pci_table,
- .probe = init_one,
- .remove = remove_one
+static struct mlx5_interface mlx5_ib_interface = {
+ .add = mlx5_ib_add,
+ .remove = mlx5_ib_remove,
+ .event = mlx5_ib_event,
};
static int __init mlx5_ib_init(void)
{
- return pci_register_driver(&mlx5_ib_driver);
+ if (deprecated_prof_sel != 2)
+ pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
+
+ return mlx5_register_interface(&mlx5_ib_interface);
}
static void __exit mlx5_ib_cleanup(void)
{
- pci_unregister_driver(&mlx5_ib_driver);
+ mlx5_unregister_interface(&mlx5_ib_interface);
}
module_init(mlx5_ib_init);
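
For orientation, the hunks above stop registering mlx5_ib as a PCI driver and instead attach it to mlx5_core through the interface API. Below is a minimal, self-contained sketch of such a client; it is not part of this patch, and the demo_* names are made up, but the callback signatures follow the mlx5_ib_add/mlx5_ib_remove/mlx5_ib_event prototypes introduced here.

	#include <linux/module.h>
	#include <linux/mlx5/driver.h>

	/* per-device hookup: the return value becomes the context handed back later */
	static void *demo_add(struct mlx5_core_dev *mdev)
	{
		/* a real client would allocate its per-device state here */
		return mdev;
	}

	static void demo_remove(struct mlx5_core_dev *mdev, void *context)
	{
		/* undo whatever demo_add() set up for this core device */
	}

	static void demo_event(struct mlx5_core_dev *mdev, void *context,
			       enum mlx5_dev_event event, unsigned long param)
	{
		/* port/LID/PKEY/GUID notifications arrive here, as in mlx5_ib_event() */
	}

	static struct mlx5_interface demo_interface = {
		.add    = demo_add,
		.remove = demo_remove,
		.event  = demo_event,
	};

	static int __init demo_init(void)
	{
		return mlx5_register_interface(&demo_interface);
	}

	static void __exit demo_exit(void)
	{
		mlx5_unregister_interface(&demo_interface);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

The core driver now owns the PCI device and invokes .add/.remove as devices come and go, which is why the IB driver no longer needs its own pci_driver, device table or profile handling.
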
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 8499aec94db6..a3e81444c825 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -148,7 +148,7 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
u64 off_mask;
u64 buf_off;
- page_size = 1 << page_shift;
+ page_size = (u64)1 << page_shift;
page_mask = page_size - 1;
buf_off = addr & page_mask;
off_size = page_size >> 6;
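
Aside on the one-line mem.c change above: with a 32-bit int, shifting 1 left by 31 or more is undefined, and the truncated result would then be widened into the u64 page_size. Casting the constant first keeps the whole shift in 64 bits. A standalone illustration (hypothetical helper names, not driver code):

	#include <linux/types.h>

	static inline u64 page_size_buggy(int page_shift)
	{
		return 1 << page_shift;		/* shift performed in int, can overflow */
	}

	static inline u64 page_size_fixed(int page_shift)
	{
		return (u64)1 << page_shift;	/* shift performed in u64 */
	}
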
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index f2ccf1a5a291..386780f0d1e1 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -360,7 +360,7 @@ struct mlx5_ib_resources {
struct mlx5_ib_dev {
struct ib_device ib_dev;
- struct mlx5_core_dev mdev;
+ struct mlx5_core_dev *mdev;
MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
struct list_head eqs_list;
int num_ports;
@@ -454,16 +454,6 @@ static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
return container_of(ibah, struct mlx5_ib_ah, ibah);
}
-static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev)
-{
- return container_of(dev, struct mlx5_ib_dev, mdev);
-}
-
-static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev)
-{
- return mlx5_core2ibdev(pci2mlx5_core_dev(pdev));
-}
-
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
@@ -471,7 +461,7 @@ void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
- int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+ u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
void *in_mad, void *response_mad);
struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
struct mlx5_ib_ah *ah);
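
A note on the header change above: with struct mlx5_core_dev now referenced by pointer rather than embedded, the removed container_of()-based helpers can no longer recover the IB device from a core device, which is why the interface callbacks carry an explicit context instead. A toy sketch of the distinction (made-up structure names, not driver code):

	#include <linux/kernel.h>

	struct core { int id; };

	struct ibdev_embedded {
		struct core mdev;	/* old layout: core device embedded in the ib device */
	};

	struct ibdev_pointer {
		struct core *mdev;	/* new layout: core device allocated and owned elsewhere */
	};

	/* Valid only for the embedded layout; with a pointer member there is
	 * no way back from *mdev alone, hence the explicit context passed to
	 * the mlx5_interface callbacks.
	 */
	static inline struct ibdev_embedded *core_to_ibdev(struct core *c)
	{
		return container_of(c, struct ibdev_embedded, mdev);
	}
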
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index afa873bd028e..80b3c63eab5d 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -73,7 +73,7 @@ static void reg_mr_callback(int status, void *context)
struct mlx5_cache_ent *ent = &cache->ent[c];
u8 key;
unsigned long flags;
- struct mlx5_mr_table *table = &dev->mdev.priv.mr_table;
+ struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
int err;
spin_lock_irqsave(&ent->lock, flags);
@@ -97,9 +97,9 @@ static void reg_mr_callback(int status, void *context)
return;
}
- spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags);
- key = dev->mdev.priv.mkey_key++;
- spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags);
+ spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
+ key = dev->mdev->priv.mkey_key++;
+ spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
cache->last_add = jiffies;
@@ -155,7 +155,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
spin_lock_irq(&ent->lock);
ent->pending++;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
sizeof(*in), reg_mr_callback,
mr, &mr->out);
if (err) {
@@ -188,7 +188,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
ent->cur--;
ent->size--;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+ err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
else
@@ -479,7 +479,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
ent->cur--;
ent->size--;
spin_unlock_irq(&ent->lock);
- err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+ err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
if (err)
mlx5_ib_warn(dev, "failed destroy mkey\n");
else
@@ -496,7 +496,7 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
if (!mlx5_debugfs_root)
return 0;
- cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root);
+ cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
if (!cache->root)
return -ENOMEM;
@@ -571,8 +571,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->order = i + 2;
ent->dev = dev;
- if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE)
- limit = dev->mdev.profile->mr_cache[i].limit;
+ if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
+ limit = dev->mdev->profile->mr_cache[i].limit;
else
limit = 0;
@@ -610,7 +610,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_core_dev *mdev = &dev->mdev;
+ struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_mkey_seg *seg;
struct mlx5_ib_mr *mr;
@@ -846,7 +846,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
1 << page_shift));
- err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL,
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
NULL, NULL);
if (err) {
mlx5_ib_warn(dev, "create mkey failed\n");
@@ -923,7 +923,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr->umem = umem;
mr->npages = npages;
spin_lock(&dev->mr_lock);
- dev->mdev.priv.reg_pages += npages;
+ dev->mdev->priv.reg_pages += npages;
spin_unlock(&dev->mr_lock);
mr->ibmr.lkey = mr->mmr.key;
mr->ibmr.rkey = mr->mmr.key;
@@ -978,7 +978,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
int err;
if (!umred) {
- err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+ err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
mr->mmr.key, err);
@@ -996,7 +996,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
if (umem) {
ib_umem_release(umem);
spin_lock(&dev->mr_lock);
- dev->mdev.priv.reg_pages -= npages;
+ dev->mdev->priv.reg_pages -= npages;
spin_unlock(&dev->mr_lock);
}
@@ -1044,7 +1044,7 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
}
/* create mem & wire PSVs */
- err = mlx5_core_create_psv(&dev->mdev, to_mpd(pd)->pdn,
+ err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
2, psv_index);
if (err)
goto err_free_sig;
@@ -1060,7 +1060,7 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
}
in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
- err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in),
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
NULL, NULL, NULL);
if (err)
goto err_destroy_psv;
@@ -1074,11 +1074,11 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
err_destroy_psv:
if (mr->sig) {
- if (mlx5_core_destroy_psv(&dev->mdev,
+ if (mlx5_core_destroy_psv(dev->mdev,
mr->sig->psv_memory.psv_idx))
mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
mr->sig->psv_memory.psv_idx);
- if (mlx5_core_destroy_psv(&dev->mdev,
+ if (mlx5_core_destroy_psv(dev->mdev,
mr->sig->psv_wire.psv_idx))
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
mr->sig->psv_wire.psv_idx);
@@ -1099,18 +1099,18 @@ int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
int err;
if (mr->sig) {
- if (mlx5_core_destroy_psv(&dev->mdev,
+ if (mlx5_core_destroy_psv(dev->mdev,
mr->sig->psv_memory.psv_idx))
mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
mr->sig->psv_memory.psv_idx);
- if (mlx5_core_destroy_psv(&dev->mdev,
+ if (mlx5_core_destroy_psv(dev->mdev,
mr->sig->psv_wire.psv_idx))
mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
mr->sig->psv_wire.psv_idx);
kfree(mr->sig);
}
- err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+ err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
mr->mmr.key, err);
@@ -1149,7 +1149,7 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
* TBD not needed - issue 197292 */
in->seg.log2_page_size = PAGE_SHIFT;
- err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
+ err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
NULL, NULL);
kfree(in);
if (err)
@@ -1202,7 +1202,7 @@ void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
struct mlx5_ib_dev *dev = to_mdev(page_list->device);
int size = page_list->max_page_list_len * sizeof(u64);
- dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list,
+ dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
mfrpl->map);
kfree(mfrpl->ibfrpl.page_list);
kfree(mfrpl);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index bbbcf389272c..7efe6e3f3542 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -162,7 +162,7 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
int wq_size;
/* Sanity check RQ size before proceeding */
- if (cap->max_recv_wr > dev->mdev.caps.max_wqes)
+ if (cap->max_recv_wr > dev->mdev->caps.max_wqes)
return -EINVAL;
if (!has_rq) {
@@ -182,10 +182,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
qp->rq.wqe_cnt = wq_size / wqe_size;
- if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
+ if (wqe_size > dev->mdev->caps.max_rq_desc_sz) {
mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
wqe_size,
- dev->mdev.caps.max_rq_desc_sz);
+ dev->mdev->caps.max_rq_desc_sz);
return -EINVAL;
}
qp->rq.wqe_shift = ilog2(wqe_size);
@@ -277,9 +277,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
if (wqe_size < 0)
return wqe_size;
- if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
+ if (wqe_size > dev->mdev->caps.max_sq_desc_sz) {
mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
- wqe_size, dev->mdev.caps.max_sq_desc_sz);
+ wqe_size, dev->mdev->caps.max_sq_desc_sz);
return -EINVAL;
}
@@ -292,9 +292,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
- if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+ if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
- qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+ qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
return -ENOMEM;
}
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -311,9 +311,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
{
int desc_sz = 1 << qp->sq.wqe_shift;
- if (desc_sz > dev->mdev.caps.max_sq_desc_sz) {
+ if (desc_sz > dev->mdev->caps.max_sq_desc_sz) {
mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
- desc_sz, dev->mdev.caps.max_sq_desc_sz);
+ desc_sz, dev->mdev->caps.max_sq_desc_sz);
return -EINVAL;
}
@@ -325,9 +325,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
qp->sq.wqe_cnt = ucmd->sq_wqe_count;
- if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+ if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
- qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+ qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
return -EINVAL;
}
@@ -674,7 +674,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int uuarn;
int err;
- uuari = &dev->mdev.priv.uuari;
+ uuari = &dev->mdev->priv.uuari;
if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
return -EINVAL;
@@ -700,7 +700,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
- err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
+ err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
goto err_uuar;
@@ -722,7 +722,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
mlx5_fill_page_array(&qp->buf, (*in)->pas);
- err = mlx5_db_alloc(&dev->mdev, &qp->db);
+ err = mlx5_db_alloc(dev->mdev, &qp->db);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
goto err_free;
@@ -747,7 +747,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
return 0;
err_wrid:
- mlx5_db_free(&dev->mdev, &qp->db);
+ mlx5_db_free(dev->mdev, &qp->db);
kfree(qp->sq.wqe_head);
kfree(qp->sq.w_list);
kfree(qp->sq.wrid);
@@ -758,23 +758,23 @@ err_free:
mlx5_vfree(*in);
err_buf:
- mlx5_buf_free(&dev->mdev, &qp->buf);
+ mlx5_buf_free(dev->mdev, &qp->buf);
err_uuar:
- free_uuar(&dev->mdev.priv.uuari, uuarn);
+ free_uuar(&dev->mdev->priv.uuari, uuarn);
return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
- mlx5_db_free(&dev->mdev, &qp->db);
+ mlx5_db_free(dev->mdev, &qp->db);
kfree(qp->sq.wqe_head);
kfree(qp->sq.w_list);
kfree(qp->sq.wrid);
kfree(qp->sq.wr_data);
kfree(qp->rq.wrid);
- mlx5_buf_free(&dev->mdev, &qp->buf);
- free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn);
+ mlx5_buf_free(dev->mdev, &qp->buf);
+ free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
@@ -812,7 +812,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
spin_lock_init(&qp->rq.lock);
if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
- if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+ if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
return -EINVAL;
} else {
@@ -851,9 +851,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
mlx5_ib_dbg(dev, "invalid rq params\n");
return -EINVAL;
}
- if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) {
+ if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) {
mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
- ucmd.sq_wqe_count, dev->mdev.caps.max_wqes);
+ ucmd.sq_wqe_count, dev->mdev->caps.max_wqes);
return -EINVAL;
}
err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -957,7 +957,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
- err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen);
+ err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
if (err) {
mlx5_ib_dbg(dev, "create qp failed\n");
goto err_create;
@@ -1081,7 +1081,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
if (!in)
return;
if (qp->state != IB_QPS_RESET)
- if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state),
+ if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
qp->mqp.qpn);
@@ -1097,7 +1097,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
mlx5_ib_unlock_cqs(send_cq, recv_cq);
}
- err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp);
+ err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
if (err)
mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
kfree(in);
@@ -1165,7 +1165,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
case IB_QPT_XRC_INI:
- if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
+ if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
mlx5_ib_dbg(dev, "XRC not supported\n");
return ERR_PTR(-ENOSYS);
}
@@ -1279,7 +1279,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
} else {
while (rate != IB_RATE_2_5_GBPS &&
!(1 << (rate + MLX5_STAT_RATE_OFFSET) &
- dev->mdev.caps.stat_rate_support))
+ dev->mdev->caps.stat_rate_support))
--rate;
}
@@ -1318,9 +1318,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
path->port = port;
if (ah->ah_flags & IB_AH_GRH) {
- if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
+ if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) {
pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
- ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len);
+ ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len);
return -EINVAL;
}
@@ -1539,7 +1539,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
err = -EINVAL;
goto out;
}
- context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg;
+ context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg;
}
if (attr_mask & IB_QP_DEST_QPN)
@@ -1637,7 +1637,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
optpar = ib_mask_to_mlx5_opt(attr_mask);
optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
in->optparam = cpu_to_be32(optpar);
- err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state),
+ err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
to_mlx5_state(new_state), in, sqd_event,
&qp->mqp);
if (err)
@@ -1699,21 +1699,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
if ((attr_mask & IB_QP_PORT) &&
- (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports))
+ (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports))
goto out;
if (attr_mask & IB_QP_PKEY_INDEX) {
port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len)
+ if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len)
goto out;
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
- attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp)
+ attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp)
goto out;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
- attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp)
+ attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp)
goto out;
if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -2479,7 +2479,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
{
struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
- struct mlx5_core_dev *mdev = &dev->mdev;
+ struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_mr *mr;
struct mlx5_wqe_data_seg *dpseg;
@@ -2539,7 +2539,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_RDMA_WRITE_WITH_IMM:
set_raddr_seg(seg, wr->wr.rdma.remote_addr,
wr->wr.rdma.rkey);
- seg += sizeof(struct mlx5_wqe_raddr_seg);
+ seg += sizeof(struct mlx5_wqe_raddr_seg);
size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
break;
@@ -2668,7 +2668,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_QPT_SMI:
case IB_QPT_GSI:
set_datagram_seg(seg, wr);
- seg += sizeof(struct mlx5_wqe_datagram_seg);
+ seg += sizeof(struct mlx5_wqe_datagram_seg);
size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
if (unlikely((seg == qend)))
seg = mlx5_get_send_wqe(qp, 0);
@@ -2888,7 +2888,7 @@ static int to_ib_qp_access_flags(int mlx5_flags)
static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
struct mlx5_qp_path *path)
{
- struct mlx5_core_dev *dev = &ibdev->mdev;
+ struct mlx5_core_dev *dev = ibdev->mdev;
memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
ib_ah_attr->port_num = path->port;
@@ -2931,7 +2931,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
goto out;
}
context = &outb->ctx;
- err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb));
+ err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
if (err)
goto out_free;
@@ -3014,14 +3014,14 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct mlx5_ib_xrcd *xrcd;
int err;
- if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
+ if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC))
return ERR_PTR(-ENOSYS);
xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
if (!xrcd)
return ERR_PTR(-ENOMEM);
- err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
+ err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
if (err) {
kfree(xrcd);
return ERR_PTR(-ENOMEM);
@@ -3036,7 +3036,7 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
int err;
- err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
+ err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
if (err) {
mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
return err;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 384af6dec5eb..70bd131ba646 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -159,7 +159,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
int page_shift;
int npages;
- err = mlx5_db_alloc(&dev->mdev, &srq->db);
+ err = mlx5_db_alloc(dev->mdev, &srq->db);
if (err) {
mlx5_ib_warn(dev, "alloc dbell rec failed\n");
return err;
@@ -167,7 +167,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
*srq->db.db = 0;
- if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+ if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
mlx5_ib_dbg(dev, "buf alloc failed\n");
err = -ENOMEM;
goto err_db;
@@ -212,10 +212,10 @@ err_in:
mlx5_vfree(*in);
err_buf:
- mlx5_buf_free(&dev->mdev, &srq->buf);
+ mlx5_buf_free(dev->mdev, &srq->buf);
err_db:
- mlx5_db_free(&dev->mdev, &srq->db);
+ mlx5_db_free(dev->mdev, &srq->db);
return err;
}
@@ -229,8 +229,8 @@ static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
{
kfree(srq->wrid);
- mlx5_buf_free(&dev->mdev, &srq->buf);
- mlx5_db_free(&dev->mdev, &srq->db);
+ mlx5_buf_free(dev->mdev, &srq->buf);
+ mlx5_db_free(dev->mdev, &srq->db);
}
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
@@ -248,10 +248,10 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
u32 flgs, xrcdn;
/* Sanity check SRQ size before proceeding */
- if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) {
+ if (init_attr->attr.max_wr >= dev->mdev->caps.max_srq_wqes) {
mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
init_attr->attr.max_wr,
- dev->mdev.caps.max_srq_wqes);
+ dev->mdev->caps.max_srq_wqes);
return ERR_PTR(-EINVAL);
}
@@ -303,7 +303,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
in->ctx.db_record = cpu_to_be64(srq->db.dma);
- err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen);
+ err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
mlx5_vfree(in);
if (err) {
mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
@@ -327,7 +327,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
return &srq->ibsrq;
err_core:
- mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+ mlx5_core_destroy_srq(dev->mdev, &srq->msrq);
err_usr_kern_srq:
if (pd->uobject)
@@ -357,7 +357,7 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
return -EINVAL;
mutex_lock(&srq->mutex);
- ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1);
+ ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
mutex_unlock(&srq->mutex);
if (ret)
@@ -378,7 +378,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
if (!out)
return -ENOMEM;
- ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out);
+ ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
if (ret)
goto out_box;
@@ -396,7 +396,7 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq)
struct mlx5_ib_dev *dev = to_mdev(srq->device);
struct mlx5_ib_srq *msrq = to_msrq(srq);
- mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq);
+ mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);
if (srq->uobject) {
mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);