Diffstat (limited to 'drivers/infiniband/hw/qedr')
-rw-r--r--  drivers/infiniband/hw/qedr/main.c       93
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h       23
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c    22
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.h     2
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_hsi.h   56
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c     243
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.h       2
7 files changed, 252 insertions, 189 deletions
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index b9b47e5cc8b3..ef11e770f822 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -340,43 +340,58 @@ static void qedr_remove_sysfiles(struct qedr_dev *dev)
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
struct pci_dev *bridge;
- u32 val;
-
- dev->atomic_cap = IB_ATOMIC_NONE;
+ u32 ctl2, cap2;
+ u16 flags;
+ int rc;
bridge = pdev->bus->self;
if (!bridge)
- return;
-
- /* Check whether we are connected directly or via a switch */
- while (bridge && bridge->bus->parent) {
- DP_DEBUG(dev, QEDR_MSG_INIT,
- "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
- bridge->bus->number, bridge->bus->primary);
- /* Need to check Atomic Op Routing Supported all the way to
- * root complex.
- */
- pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
- if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
- pcie_capability_clear_word(pdev,
- PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_ATOMIC_REQ);
- return;
- }
+ goto disable;
+
+ /* Check atomic routing support all the way to root complex */
+ while (bridge->bus->parent) {
+ rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
+ if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
+ goto disable;
+
+ rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
+ if (rc)
+ goto disable;
+
+ rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2);
+ if (rc)
+ goto disable;
+
+ if (!(cap2 & PCI_EXP_DEVCAP2_ATOMIC_ROUTE) ||
+ (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK))
+ goto disable;
bridge = bridge->bus->parent->self;
}
- bridge = pdev->bus->self;
- /* according to bridge capability */
- pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
- if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
- pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_ATOMIC_REQ);
- dev->atomic_cap = IB_ATOMIC_GLOB;
- } else {
- pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
- PCI_EXP_DEVCTL2_ATOMIC_REQ);
- }
+ rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
+ if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
+ goto disable;
+
+ rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
+ if (rc || !(cap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64))
+ goto disable;
+
+ /* Set atomic operations */
+ pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ dev->atomic_cap = IB_ATOMIC_GLOB;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
+
+ return;
+
+disable:
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_ATOMIC_REQ);
+ dev->atomic_cap = IB_ATOMIC_NONE;
+
+ DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
+
}
static const struct qed_rdma_ops *qed_ops;
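The loop above must find AtomicOp routing at every hop and 64-bit AtomicOp completion at the root port before enabling requester mode. As an aside, a minimal sketch of the same policy expressed through the generic PCI core helper, assuming a kernel that provides pci_enable_atomic_ops_to_root() (the helper, not this open-coded walk, performs the hierarchy checks):

/* Illustrative sketch only -- not part of this patch. */
static void qedr_pci_set_atomic_sketch(struct qedr_dev *dev,
				       struct pci_dev *pdev)
{
	/* Verifies AtomicOp routing up to the root complex plus 64-bit
	 * completer support there, and sets PCI_EXP_DEVCTL2_ATOMIC_REQ
	 * only when the whole path qualifies.
	 */
	if (!pci_enable_atomic_ops_to_root(pdev,
					   PCI_EXP_DEVCAP2_ATOMIC_COMP64))
		dev->atomic_cap = IB_ATOMIC_GLOB;
	else
		dev->atomic_cap = IB_ATOMIC_NONE;
}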
@@ -423,14 +438,21 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle)
cq->arm_flags = 0;
- if (cq->ibcq.comp_handler)
+ if (!cq->destroyed && cq->ibcq.comp_handler)
(*cq->ibcq.comp_handler)
(&cq->ibcq, cq->ibcq.cq_context);
+ /* The CQ's CNQ notification counter is checked before
+ * destroying the CQ in a busy-wait loop that waits for all of
+ * the CQ's CNQ interrupts to be processed. It is increased
+ * here, only after the completion handler, to ensure that the
+ * handler is not running when the CQ is destroyed.
+ */
+ cq->cnq_notif++;
+
sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
cnq->n_comp++;
-
}
qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
@@ -587,9 +609,8 @@ void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
#define EVENT_TYPE_CQ 1
#define EVENT_TYPE_QP 2
struct qedr_dev *dev = (struct qedr_dev *)context;
- union event_ring_data *data = fw_handle;
- u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
- data->roce_handle.lo;
+ struct regpair *async_handle = (struct regpair *)fw_handle;
+ u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
u8 event_type = EVENT_TYPE_NOT_DEFINED;
struct ib_event event;
struct ib_cq *ibcq;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index bb32e4792ec9..aa08c76a4245 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -38,7 +38,8 @@
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_roce_if.h>
#include <linux/qed/qede_roce.h>
-#include "qedr_hsi.h"
+#include <linux/qed/roce_common.h>
+#include "qedr_hsi_rdma.h"
#define QEDR_MODULE_VERSION "8.10.10.0"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
@@ -271,6 +272,8 @@ struct qedr_cq {
u32 cq_cons;
struct qedr_userq q;
+ u8 destroyed;
+ u16 cnq_notif;
};
struct qedr_pd {
@@ -389,7 +392,7 @@ struct qedr_qp {
struct qedr_ah {
struct ib_ah ibah;
- struct ib_ah_attr attr;
+ struct rdma_ah_attr attr;
};
enum qedr_mr_type {
@@ -428,7 +431,8 @@ struct qedr_mr {
RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
#define QEDR_RESP_RDMA (RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
-#define QEDR_RESP_RDMA_IMM (QEDR_RESP_IMM | QEDR_RESP_RDMA)
+#define QEDR_RESP_INV (RDMA_CQE_RESPONDER_INV_FLG_MASK << \
+ RDMA_CQE_RESPONDER_INV_FLG_SHIFT)
static inline void qedr_inc_sw_cons(struct qedr_qp_hwq_info *info)
{
@@ -442,19 +446,24 @@ static inline void qedr_inc_sw_prod(struct qedr_qp_hwq_info *info)
}
static inline int qedr_get_dmac(struct qedr_dev *dev,
- struct ib_ah_attr *ah_attr, u8 *mac_addr)
+ struct rdma_ah_attr *ah_attr, u8 *mac_addr)
{
union ib_gid zero_sgid = { { 0 } };
struct in6_addr in6;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ u8 *dmac;
- if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
+ if (!memcmp(&grh->dgid, &zero_sgid, sizeof(union ib_gid))) {
DP_ERR(dev, "Local port GID not supported\n");
eth_zero_addr(mac_addr);
return -EINVAL;
}
- memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
- ether_addr_copy(mac_addr, ah_attr->dmac);
+ memcpy(&in6, grh->dgid.raw, sizeof(in6));
+ dmac = rdma_ah_retrieve_dmac(ah_attr);
+ if (!dmac)
+ return -EINVAL;
+ ether_addr_copy(mac_addr, dmac);
return 0;
}
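rdma_ah_read_grh() and rdma_ah_retrieve_dmac() are the core's accessors for the now-opaque rdma_ah_attr; a minimal sketch of reading an attribute through them (the function name is illustrative):

/* Illustrative: consume an rdma_ah_attr via accessors only. */
static void example_dump_ah(struct rdma_ah_attr *ah_attr)
{
	const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
	u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);	/* NULL unless RoCE */

	if (dmac)
		pr_debug("port=%u sgid_index=%u hop_limit=%u dmac=%pM\n",
			 rdma_ah_get_port_num(ah_attr), grh->sgid_index,
			 grh->hop_limit, dmac);
}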
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 699632893dd9..3d7705cec770 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -43,14 +43,11 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
-#include "qedr_hsi.h"
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_roce_if.h>
#include "qedr.h"
-#include "qedr_hsi.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>
-#include "qedr_hsi.h"
#include "qedr_cm.h"
void qedr_inc_sw_gsi_cons(struct qedr_qp_hwq_info *info)
@@ -246,8 +243,8 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
int *roce_mode)
{
bool has_vlan = false, has_grh_ipv6 = true;
- struct ib_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
- struct ib_global_route *grh = &ah_attr->grh;
+ struct rdma_ah_attr *ah_attr = &get_qedr_ah(ud_wr(swr)->ah)->attr;
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
union ib_gid sgid;
int send_size = 0;
u16 vlan_id = 0;
@@ -263,12 +260,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
for (i = 0; i < swr->num_sge; ++i)
send_size += swr->sg_list[i].length;
- rc = ib_get_cached_gid(qp->ibqp.device, ah_attr->port_num,
+ rc = ib_get_cached_gid(qp->ibqp.device, rdma_ah_get_port_num(ah_attr),
grh->sgid_index, &sgid, &sgid_attr);
if (rc) {
DP_ERR(dev,
"gsi post send: failed to get cached GID (port=%d, ix=%d)\n",
- ah_attr->port_num, grh->sgid_index);
+ rdma_ah_get_port_num(ah_attr),
+ grh->sgid_index);
return rc;
}
@@ -280,7 +278,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
DP_ERR(dev, "gsi post send: GID not found GID index %d\n",
- ah_attr->grh.sgid_index);
+ grh->sgid_index);
return -ENOENT;
}
@@ -310,7 +308,7 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
}
/* ENET + VLAN headers */
- ether_addr_copy(udh->eth.dmac_h, ah_attr->dmac);
+ ether_addr_copy(udh->eth.dmac_h, ah_attr->roce.dmac);
ether_addr_copy(udh->eth.smac_h, dev->ndev->dev_addr);
if (has_vlan) {
udh->eth.type = htons(ETH_P_8021Q);
@@ -344,13 +342,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
u32 ipv4_addr;
udh->ip4.protocol = IPPROTO_UDP;
- udh->ip4.tos = htonl(ah_attr->grh.flow_label);
+ udh->ip4.tos = htonl(grh->flow_label);
udh->ip4.frag_off = htons(IP_DF);
- udh->ip4.ttl = ah_attr->grh.hop_limit;
+ udh->ip4.ttl = grh->hop_limit;
ipv4_addr = qedr_get_ipv4_from_gid(sgid.raw);
udh->ip4.saddr = ipv4_addr;
- ipv4_addr = qedr_get_ipv4_from_gid(ah_attr->grh.dgid.raw);
+ ipv4_addr = qedr_get_ipv4_from_gid(grh->dgid.raw);
udh->ip4.daddr = ipv4_addr;
/* note: checksum is calculated by the device */
}
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.h b/drivers/infiniband/hw/qedr/qedr_cm.h
index 78efb1b056d1..a55916323ea9 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.h
+++ b/drivers/infiniband/hw/qedr/qedr_cm.h
@@ -39,7 +39,7 @@
#define QEDR_ROCE_V2_UDP_SPORT (0000)
-static inline u32 qedr_get_ipv4_from_gid(u8 *gid)
+static inline u32 qedr_get_ipv4_from_gid(const u8 *gid)
{
return *(u32 *)(void *)&gid[12];
}
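The cast works because a RoCEv2 IPv4 GID is the IPv4-mapped IPv6 address ::ffff:a.b.c.d, so the address occupies bytes 12..15 of the 16-byte GID in network order. A worked example with an illustrative address:

/* GID for 192.0.2.1 as an IPv4-mapped IPv6 address:
 *   00 00 00 00 00 00 00 00 00 00 ff ff c0 00 02 01
 */
static const u8 example_gid[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
				    0, 0, 0xff, 0xff, 192, 0, 2, 1 };
u32 daddr = qedr_get_ipv4_from_gid(example_gid);  /* == htonl(0xc0000201) */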
diff --git a/drivers/infiniband/hw/qedr/qedr_hsi.h b/drivers/infiniband/hw/qedr/qedr_hsi.h
deleted file mode 100644
index 66d27521373f..000000000000
--- a/drivers/infiniband/hw/qedr/qedr_hsi.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* QLogic qedr NIC Driver
- * Copyright (c) 2015-2016 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and /or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __QED_HSI_ROCE__
-#define __QED_HSI_ROCE__
-
-#include <linux/qed/common_hsi.h>
-#include <linux/qed/roce_common.h>
-#include "qedr_hsi_rdma.h"
-
-/* Affiliated asynchronous events / errors enumeration */
-enum roce_async_events_type {
- ROCE_ASYNC_EVENT_NONE = 0,
- ROCE_ASYNC_EVENT_COMM_EST = 1,
- ROCE_ASYNC_EVENT_SQ_DRAINED,
- ROCE_ASYNC_EVENT_SRQ_LIMIT,
- ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
- ROCE_ASYNC_EVENT_CQ_ERR,
- ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
- ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
- ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
- ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
- ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
- ROCE_ASYNC_EVENT_SRQ_EMPTY,
- MAX_ROCE_ASYNC_EVENTS_TYPE
-};
-
-#endif /* __QED_HSI_ROCE__ */
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 6b3bb32803bd..17685cfea6a2 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -43,7 +43,8 @@
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
-#include "qedr_hsi.h"
+#include <linux/qed/common_hsi.h>
+#include "qedr_hsi_rdma.h"
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
@@ -680,16 +681,16 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
pbe_cnt = 0;
- shift = ilog2(umem->page_size);
+ shift = umem->page_shift;
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
pages = sg_dma_len(sg) >> shift;
for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
/* store the page address in pbe */
pbe->lo = cpu_to_le32(sg_dma_address(sg) +
- umem->page_size * pg_cnt);
+ (pg_cnt << shift));
addr = upper_32_bits(sg_dma_address(sg) +
- umem->page_size * pg_cnt);
+ (pg_cnt << shift));
pbe->hi = cpu_to_le32(addr);
pbe_cnt++;
total_num_pbes++;
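The substitution is behavior-preserving because the umem page size is a power of two: with shift = umem->page_shift, page_size * pg_cnt == pg_cnt << shift. For example:

/* 4 KiB pages: page_shift = 12, so page #3 of an SG entry starts at
 * sg_dma_address(sg) + (3 << 12) = +12288 bytes -- the same offset the
 * old 4096 * 3 multiplication produced.
 */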
@@ -821,6 +822,17 @@ int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct qedr_cq *cq = get_qedr_cq(ibcq);
unsigned long sflags;
+ struct qedr_dev *dev;
+
+ dev = get_qedr_dev(ibcq->device);
+
+ if (cq->destroyed) {
+ DP_ERR(dev,
+ "warning: arm was invoked after destroy for cq %p (icid=%d)\n",
+ cq, cq->icid);
+ return -EINVAL;
+ }
+
if (cq->cq_type == QEDR_CQ_TYPE_GSI)
return 0;
@@ -986,35 +998,82 @@ int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
return 0;
}
+#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
+#define QEDR_DESTROY_CQ_ITER_DURATION (10)
+
int qedr_destroy_cq(struct ib_cq *ibcq)
{
struct qedr_dev *dev = get_qedr_dev(ibcq->device);
struct qed_rdma_destroy_cq_out_params oparams;
struct qed_rdma_destroy_cq_in_params iparams;
struct qedr_cq *cq = get_qedr_cq(ibcq);
+ int iter;
+ int rc;
- DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d", cq->icid);
+ DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq %p (icid=%d)\n", cq, cq->icid);
+
+ cq->destroyed = 1;
/* GSIs CQs are handled by driver, so they don't exist in the FW */
- if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
- int rc;
+ if (cq->cq_type == QEDR_CQ_TYPE_GSI)
+ goto done;
- iparams.icid = cq->icid;
- rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams,
- &oparams);
- if (rc)
- return rc;
- dev->ops->common->chain_free(dev->cdev, &cq->pbl);
- }
+ iparams.icid = cq->icid;
+ rc = dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
+ if (rc)
+ return rc;
+
+ dev->ops->common->chain_free(dev->cdev, &cq->pbl);
if (ibcq->uobject && ibcq->uobject->context) {
qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
ib_umem_release(cq->q.umem);
}
+ /* We don't want the IRQ handler to handle a non-existing CQ so we
+ * wait until all CNQ interrupts, if any, are received. This will always
+ * happen and will always happen very fast. If not, then a serious error
+ * has occurred. That is why we can use a long delay.
+ * We spin for a short time so we don't lose time on context switching
+ * in case all the completions are handled in that span. Otherwise
+ * we sleep for a while and check again. Since the CNQ may be
+ * associated with (only) the current CPU we use msleep to allow the
+ * current CPU to be freed.
+ * The CNQ notification is increased in qedr_irq_handler().
+ */
+ iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
+ while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
+ udelay(QEDR_DESTROY_CQ_ITER_DURATION);
+ iter--;
+ }
+
+ iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;
+ while (oparams.num_cq_notif != READ_ONCE(cq->cnq_notif) && iter) {
+ msleep(QEDR_DESTROY_CQ_ITER_DURATION);
+ iter--;
+ }
+
+ if (oparams.num_cq_notif != cq->cnq_notif)
+ goto err;
+
+ /* Note that we don't need to have explicit code to wait for the
+ * completion of the event handler because it is invoked from the EQ.
+ * Since the destroy CQ ramrod has also been received on the EQ we can
+ * be certain that there's no event handler in progress.
+ */
+done:
+ cq->sig = ~cq->sig;
+
kfree(cq);
return 0;
+
+err:
+ DP_ERR(dev,
+ "CQ %p (icid=%d) not freed, expecting %d ints but got %d ints\n",
+ cq, cq->icid, oparams.num_cq_notif, cq->cnq_notif);
+
+ return -EINVAL;
}
static inline int get_gid_info_from_table(struct ib_qp *ibqp,
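The spin-then-sleep wait in qedr_destroy_cq() pairs with the cnq_notif increment in qedr_irq_handler(); a condensed sketch of the pattern (the helper name and "expected" parameter are illustrative):

/* Returns true once the IRQ handler has published all notifications. */
static bool example_wait_cnq_quiesce(struct qedr_cq *cq, u16 expected)
{
	int iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;

	/* Spin first: completions normally land within microseconds. */
	while (expected != READ_ONCE(cq->cnq_notif) && iter--)
		udelay(QEDR_DESTROY_CQ_ITER_DURATION);

	iter = QEDR_DESTROY_CQ_MAX_ITERATIONS;

	/* Then sleep, freeing the CPU the CNQ may be affined to. */
	while (expected != READ_ONCE(cq->cnq_notif) && iter--)
		msleep(QEDR_DESTROY_CQ_ITER_DURATION);

	return expected == READ_ONCE(cq->cnq_notif);
}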
@@ -1025,13 +1084,15 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
{
enum rdma_network_type nw_type;
struct ib_gid_attr gid_attr;
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
union ib_gid gid;
u32 ipv4_addr;
int rc = 0;
int i;
- rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
- attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
+ rc = ib_get_cached_gid(ibqp->device,
+ rdma_ah_get_port_num(&attr->ah_attr),
+ grh->sgid_index, &gid, &gid_attr);
if (rc)
return rc;
@@ -1048,7 +1109,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
sizeof(qp_params->sgid));
memcpy(&qp_params->dgid.bytes[0],
- &attr->ah_attr.grh.dgid,
+ &grh->dgid,
sizeof(qp_params->dgid));
qp_params->roce_mode = ROCE_V2_IPV6;
SET_FIELD(qp_params->modify_flags,
@@ -1058,7 +1119,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
sizeof(qp_params->sgid));
memcpy(&qp_params->dgid.bytes[0],
- &attr->ah_attr.grh.dgid,
+ &grh->dgid,
sizeof(qp_params->dgid));
qp_params->roce_mode = ROCE_V1;
break;
@@ -1068,7 +1129,7 @@ static inline int get_gid_info_from_table(struct ib_qp *ibqp,
ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
qp_params->sgid.ipv4_addr = ipv4_addr;
ipv4_addr =
- qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
+ qedr_get_ipv4_from_gid(grh->dgid.raw);
qp_params->dgid.ipv4_addr = ipv4_addr;
SET_FIELD(qp_params->modify_flags,
QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
@@ -1690,6 +1751,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
struct qedr_qp *qp = get_qedr_qp(ibqp);
struct qed_rdma_modify_qp_in_params qp_params = { 0 };
struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
enum ib_qp_state old_qp_state, new_qp_state;
int rc = 0;
@@ -1772,17 +1834,17 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
SET_FIELD(qp_params.modify_flags,
QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
- qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
- qp_params.flow_label = attr->ah_attr.grh.flow_label;
- qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
+ qp_params.traffic_class_tos = grh->traffic_class;
+ qp_params.flow_label = grh->flow_label;
+ qp_params.hop_limit_ttl = grh->hop_limit;
- qp->sgid_idx = attr->ah_attr.grh.sgid_index;
+ qp->sgid_idx = grh->sgid_index;
rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
if (rc) {
DP_ERR(dev,
"modify qp: problems with GID index %d (rc=%d)\n",
- attr->ah_attr.grh.sgid_index, rc);
+ grh->sgid_index, rc);
return rc;
}
@@ -1967,25 +2029,21 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
qp_init_attr->cap = qp_attr->cap;
- memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
- sizeof(qp_attr->ah_attr.grh.dgid.raw));
-
- qp_attr->ah_attr.grh.flow_label = params.flow_label;
- qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
- qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
- qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
-
- qp_attr->ah_attr.ah_flags = IB_AH_GRH;
- qp_attr->ah_attr.port_num = 1;
- qp_attr->ah_attr.sl = 0;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
+ params.flow_label, qp->sgid_idx,
+ params.hop_limit_ttl, params.traffic_class_tos);
+ rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid.bytes[0]);
+ rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
+ rdma_ah_set_sl(&qp_attr->ah_attr, 0);
qp_attr->timeout = params.timeout;
qp_attr->rnr_retry = params.rnr_retry;
qp_attr->retry_cnt = params.retry_cnt;
qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
qp_attr->pkey_index = params.pkey_index;
qp_attr->port_num = 1;
- qp_attr->ah_attr.src_path_bits = 0;
- qp_attr->ah_attr.static_rate = 0;
+ rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
+ rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
qp_attr->alt_pkey_index = 0;
qp_attr->alt_port_num = 0;
qp_attr->alt_timeout = 0;
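The setters write the same fields the removed assignments touched directly; a minimal sketch of filling a RoCE rdma_ah_attr this way (all values and the dgid buffer are placeholders):

/* Illustrative: build a RoCE AH attribute with the accessor API. */
static void example_fill_ah(struct rdma_ah_attr *attr, const u8 *raw_dgid)
{
	attr->type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(attr, 1);
	rdma_ah_set_sl(attr, 0);
	rdma_ah_set_grh(attr, NULL, 0 /* flow_label */, 0 /* sgid_index */,
			64 /* hop_limit */, 0 /* traffic_class */);
	rdma_ah_set_dgid_raw(attr, raw_dgid);
}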
@@ -2053,7 +2111,7 @@ int qedr_destroy_qp(struct ib_qp *ibqp)
return rc;
}
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
struct ib_udata *udata)
{
struct qedr_ah *ah;
@@ -2189,7 +2247,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
- mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
+ mr->hw_mr.page_size_log = mr->umem->page_shift;
mr->hw_mr.fbo = ib_umem_offset(mr->umem);
mr->hw_mr.length = len;
mr->hw_mr.vaddr = usr_addr;
@@ -2624,6 +2682,8 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
fwqe1->l_key = wr->key;
+ fwqe2->access_ctrl = 0;
+
SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
!!(wr->access & IB_ACCESS_REMOTE_READ));
SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
@@ -3270,57 +3330,81 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
return cnt;
}
-static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
- struct qedr_cq *cq, struct ib_wc *wc,
- struct rdma_cqe_responder *resp, u64 wr_id)
+static inline int qedr_cqe_resp_status_to_ib(u8 status)
{
- enum ib_wc_status wc_status = IB_WC_SUCCESS;
- u8 flags;
-
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = 0;
-
- switch (resp->status) {
+ switch (status) {
case RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR:
- wc_status = IB_WC_LOC_ACCESS_ERR;
- break;
+ return IB_WC_LOC_ACCESS_ERR;
case RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR:
- wc_status = IB_WC_LOC_LEN_ERR;
- break;
+ return IB_WC_LOC_LEN_ERR;
case RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR:
- wc_status = IB_WC_LOC_QP_OP_ERR;
- break;
+ return IB_WC_LOC_QP_OP_ERR;
case RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR:
- wc_status = IB_WC_LOC_PROT_ERR;
- break;
+ return IB_WC_LOC_PROT_ERR;
case RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR:
- wc_status = IB_WC_MW_BIND_ERR;
- break;
+ return IB_WC_MW_BIND_ERR;
case RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR:
- wc_status = IB_WC_REM_INV_RD_REQ_ERR;
- break;
+ return IB_WC_REM_INV_RD_REQ_ERR;
case RDMA_CQE_RESP_STS_OK:
- wc_status = IB_WC_SUCCESS;
- wc->byte_len = le32_to_cpu(resp->length);
+ return IB_WC_SUCCESS;
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
- flags = resp->flags & QEDR_RESP_RDMA_IMM;
+static inline int qedr_set_ok_cqe_resp_wc(struct rdma_cqe_responder *resp,
+ struct ib_wc *wc)
+{
+ wc->status = IB_WC_SUCCESS;
+ wc->byte_len = le32_to_cpu(resp->length);
- if (flags == QEDR_RESP_RDMA_IMM)
+ if (resp->flags & QEDR_RESP_IMM) {
+ wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
+ wc->wc_flags |= IB_WC_WITH_IMM;
+
+ if (resp->flags & QEDR_RESP_RDMA)
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
- if (flags == QEDR_RESP_RDMA_IMM || flags == QEDR_RESP_IMM) {
- wc->ex.imm_data =
- le32_to_cpu(resp->imm_data_or_inv_r_Key);
- wc->wc_flags |= IB_WC_WITH_IMM;
- }
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- DP_ERR(dev, "Invalid CQE status detected\n");
+ if (resp->flags & QEDR_RESP_INV)
+ return -EINVAL;
+
+ } else if (resp->flags & QEDR_RESP_INV) {
+ wc->ex.imm_data = le32_to_cpu(resp->imm_data_or_inv_r_Key);
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+
+ if (resp->flags & QEDR_RESP_RDMA)
+ return -EINVAL;
+
+ } else if (resp->flags & QEDR_RESP_RDMA) {
+ return -EINVAL;
}
- /* fill WC */
- wc->status = wc_status;
+ return 0;
+}
+
+static void __process_resp_one(struct qedr_dev *dev, struct qedr_qp *qp,
+ struct qedr_cq *cq, struct ib_wc *wc,
+ struct rdma_cqe_responder *resp, u64 wr_id)
+{
+ /* Must fill fields before qedr_set_ok_cqe_resp_wc() */
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = 0;
+
+ if (likely(resp->status == RDMA_CQE_RESP_STS_OK)) {
+ if (qedr_set_ok_cqe_resp_wc(resp, wc))
+ DP_ERR(dev,
+ "CQ %p (icid=%d) has invalid CQE responder flags=0x%x\n",
+ cq, cq->icid, resp->flags);
+
+ } else {
+ wc->status = qedr_cqe_resp_status_to_ib(resp->status);
+ if (wc->status == IB_WC_GENERAL_ERR)
+ DP_ERR(dev,
+ "CQ %p (icid=%d) contains an invalid CQE status %d\n",
+ cq, cq->icid, resp->status);
+ }
+
+ /* Fill the rest of the WC */
wc->vendor_err = 0;
wc->src_qp = qp->id;
wc->qp = &qp->ibqp;
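For reference, the responder-flag combinations qedr_set_ok_cqe_resp_wc() accepts, summarized from the checks above:

/* resp->flags       wc->opcode                  extra wc_flags
 * IMM               IB_WC_RECV                  IB_WC_WITH_IMM
 * IMM | RDMA        IB_WC_RECV_RDMA_WITH_IMM    IB_WC_WITH_IMM
 * INV               IB_WC_RECV                  IB_WC_WITH_INVALIDATE
 * IMM | INV, INV | RDMA, or RDMA alone: invalid (-EINVAL, logged)
 */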
@@ -3415,6 +3499,13 @@ int qedr_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int update = 0;
int done = 0;
+ if (cq->destroyed) {
+ DP_ERR(dev,
+ "warning: poll was invoked after destroy for cq %p (icid=%d)\n",
+ cq, cq->icid);
+ return 0;
+ }
+
if (cq->cq_type == QEDR_CQ_TYPE_GSI)
return qedr_gsi_poll_cq(ibcq, num_entries, wc);
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 070677ca4d19..0f8ab49d5a1a 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -70,7 +70,7 @@ int qedr_query_qp(struct ib_qp *, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *);
int qedr_destroy_qp(struct ib_qp *ibqp);
-struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr,
+struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct rdma_ah_attr *attr,
struct ib_udata *udata);
int qedr_destroy_ah(struct ib_ah *ibah);