Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2.c | 6
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c | 7
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 37
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h | 11
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c | 14
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_user_pages.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 209
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h | 5
-rw-r--r--  drivers/infiniband/hw/mlx4/mr.c | 91
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 68
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c | 2
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c | 12
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 5
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h | 26
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c | 49
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 227
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.h | 2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c | 83
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h | 295
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 42
-rw-r--r--  drivers/infiniband/hw/qib/qib_debugfs.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_init.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c | 2
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c | 8
-rw-r--r--  drivers/infiniband/hw/qib/qib_user_pages.c | 6
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c | 2
29 files changed, 976 insertions, 249 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 00400c352c1a..766a71ccefed 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -604,16 +604,14 @@ static int c2_up(struct net_device *netdev)
tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
c2_port->mem_size = tx_size + rx_size;
- c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
- &c2_port->dma);
+ c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size,
+ &c2_port->dma);
if (c2_port->mem == NULL) {
pr_debug("Unable to allocate memory for "
"host descriptor rings\n");
return -ENOMEM;
}
- memset(c2_port->mem, 0, c2_port->mem_size);
-
/* Create the Rx host descriptor ring */
if ((ret =
c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
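
Note: the c2.c hunk above is the recurring conversion in this series: a pci_alloc_consistent() + memset() pair collapses into one zeroing allocation. A minimal sketch of the same pattern, assuming a hypothetical ring allocator; pci_zalloc_consistent() provided the zeroing at the time, and in current kernels dma_alloc_coherent() itself returns zeroed memory, which is why the explicit memset() can go away:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch (not from the patch): one zeroing allocation replaces the
 * alloc + memset pair for DMA-coherent ring memory. */
static void *alloc_ring(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}
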
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 49e0e8533f74..1b63185b4ad4 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -260,11 +260,14 @@ static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
mq->msg_pool.host, dma_unmap_addr(mq, mapping));
}
-static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, int q_size,
- int msg_size)
+static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
+ size_t q_size, size_t msg_size)
{
u8 *pool_start;
+ if (q_size > SIZE_MAX / msg_size)
+ return -EINVAL;
+
pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
&mq->host_dma, GFP_KERNEL);
if (!pool_start)
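
The new size_t parameters and the guard above keep q_size * msg_size from wrapping before it reaches the allocator. A standalone sketch of the same overflow check (hypothetical userspace helper; the driver can skip the msg_size != 0 test because msg_size is a fixed struct size):

#include <stdint.h>
#include <stdlib.h>

/* Overflow-safe pool sizing, mirroring the q_size * msg_size guard. */
static void *alloc_pool(size_t q_size, size_t msg_size)
{
	if (msg_size != 0 && q_size > SIZE_MAX / msg_size)
		return NULL;             /* the product would wrap */
	return calloc(q_size, msg_size); /* calloc re-checks internally */
}
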
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index fbe6051af254..c9df0549f51d 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -227,6 +227,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
chp = get_chp(dev, qid);
if (chp) {
+ t4_clear_cq_armed(&chp->cq);
spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index c158fcc02bca..41cd6882b648 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1105,7 +1105,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
struct c4iw_cq *schp)
{
int count;
- int flushed;
+ int rq_flushed, sq_flushed;
unsigned long flag;
PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
@@ -1123,27 +1123,40 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
c4iw_flush_hw_cq(rchp);
c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
- flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+ rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&rchp->lock, flag);
- if (flushed) {
- spin_lock_irqsave(&rchp->comp_handler_lock, flag);
- (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
- spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
- }
/* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&schp->lock, flag);
spin_lock(&qhp->lock);
if (schp != rchp)
c4iw_flush_hw_cq(schp);
- flushed = c4iw_flush_sq(qhp);
+ sq_flushed = c4iw_flush_sq(qhp);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&schp->lock, flag);
- if (flushed) {
- spin_lock_irqsave(&schp->comp_handler_lock, flag);
- (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
- spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+
+ if (schp == rchp) {
+ if (t4_clear_cq_armed(&rchp->cq) &&
+ (rq_flushed || sq_flushed)) {
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+ rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ }
+ } else {
+ if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+ rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ }
+ if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+ }
}
}
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index df5edfa31a8f..c04e5134b30c 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -524,6 +524,10 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq)
return !wq->rq.queue[wq->rq.size].status.db_off;
}
+enum t4_cq_flags {
+ CQ_ARMED = 1,
+};
+
struct t4_cq {
struct t4_cqe *queue;
dma_addr_t dma_addr;
@@ -544,12 +548,19 @@ struct t4_cq {
u16 cidx_inc;
u8 gen;
u8 error;
+ unsigned long flags;
};
+static inline int t4_clear_cq_armed(struct t4_cq *cq)
+{
+ return test_and_clear_bit(CQ_ARMED, &cq->flags);
+}
+
static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
u32 val;
+ set_bit(CQ_ARMED, &cq->flags);
while (cq->cidx_inc > CIDXINC_MASK) {
val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
INGRESSQID(cq->cqid);
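
The t4.h change gives each CQ an atomic CQ_ARMED bit: t4_arm_cq() sets it and t4_clear_cq_armed() consumes it with test_and_clear_bit(), so the event and flush paths above invoke the completion handler at most once per arm. A minimal userspace sketch of the same exchange-to-clear hand-off, using C11 atomics in place of the kernel bitops:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool cq_armed;

static void arm_cq(void)          /* mirrors set_bit(CQ_ARMED, ...) */
{
	atomic_store(&cq_armed, true);
}

static bool clear_cq_armed(void)  /* mirrors test_and_clear_bit() */
{
	/* Exactly one caller observes "true" per arm, so the
	 * completion upcall fires at most once per notification. */
	return atomic_exchange(&cq_armed, false);
}
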
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index 43f2d0424d4f..e890e5ba0e01 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -726,7 +726,7 @@ bail:
* @dd: the infinipath device
* @pkeys: the PKEY table
*/
-static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
+static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port)
{
struct ipath_portdata *pd;
int i;
@@ -759,6 +759,7 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
}
if (changed) {
u64 pkey;
+ struct ib_event event;
pkey = (u64) dd->ipath_pkeys[0] |
((u64) dd->ipath_pkeys[1] << 16) |
@@ -768,12 +769,17 @@ static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys)
(unsigned long long) pkey);
ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
pkey);
+
+ event.event = IB_EVENT_PKEY_CHANGE;
+ event.device = &dd->verbs_dev->ibdev;
+ event.element.port_num = port;
+ ib_dispatch_event(&event);
}
return 0;
}
static int recv_subn_set_pkeytable(struct ib_smp *smp,
- struct ib_device *ibdev)
+ struct ib_device *ibdev, u8 port)
{
u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
__be16 *p = (__be16 *) smp->data;
@@ -784,7 +790,7 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
for (i = 0; i < n; i++)
q[i] = be16_to_cpu(p[i]);
- if (startpx != 0 || set_pkeys(dev->dd, q) != 0)
+ if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0)
smp->status |= IB_SMP_INVALID_FIELD;
return recv_subn_get_pkeytable(smp, ibdev);
@@ -1342,7 +1348,7 @@ static int process_subn(struct ib_device *ibdev, int mad_flags,
ret = recv_subn_set_portinfo(smp, ibdev, port_num);
goto bail;
case IB_SMP_ATTR_PKEY_TABLE:
- ret = recv_subn_set_pkeytable(smp, ibdev);
+ ret = recv_subn_set_pkeytable(smp, ibdev, port_num);
goto bail;
case IB_SMP_ATTR_SM_INFO:
if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
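
set_pkeys() now tells the stack that the P_Key table changed instead of updating it silently. A stripped-down sketch of the dispatch pattern used above (device and port are whatever the caller holds):

#include <rdma/ib_verbs.h>

/* Raise IB_EVENT_PKEY_CHANGE so consumers re-read the P_Key table. */
static void notify_pkey_change(struct ib_device *ibdev, u8 port)
{
	struct ib_event event = {
		.device           = ibdev,
		.event            = IB_EVENT_PKEY_CHANGE,
		.element.port_num = port,
	};

	ib_dispatch_event(&event);
}
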
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index dc66c4506916..1da1252dcdb3 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -54,7 +54,7 @@ static void __ipath_release_user_pages(struct page **p, size_t num_pages,
/* call with current->mm->mmap_sem held */
static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p, struct vm_area_struct **vma)
+ struct page **p)
{
unsigned long lock_limit;
size_t got;
@@ -74,7 +74,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
num_pages - got, 1, 1,
- p + got, vma);
+ p + got, NULL);
if (ret < 0)
goto bail_release;
}
@@ -165,7 +165,7 @@ int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
down_write(&current->mm->mmap_sem);
- ret = __ipath_get_user_pages(start_page, num_pages, p, NULL);
+ ret = __ipath_get_user_pages(start_page, num_pages, p);
up_write(&current->mm->mmap_sem);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 287ad0564acd..82a7dd87089b 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -891,7 +891,7 @@ int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
q ? IB_QPT_GSI : IB_QPT_SMI,
NULL, 0, send_handler,
- NULL, NULL);
+ NULL, NULL, 0);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
goto err;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0f7027e7db13..bda5994ceb68 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -59,6 +59,7 @@
#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
+#define MLX4_IB_CARD_REV_A0 0xA0
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
@@ -119,6 +120,17 @@ static int check_flow_steering_support(struct mlx4_dev *dev)
return dmfs;
}
+static int num_ib_ports(struct mlx4_dev *dev)
+{
+ int ib_ports = 0;
+ int i;
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_ports++;
+
+ return ib_ports;
+}
+
static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
@@ -126,6 +138,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
+ int have_ib_ports;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -142,6 +155,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
memset(props, 0, sizeof *props);
+ have_ib_ports = num_ib_ports(dev->dev);
+
props->fw_ver = dev->dev->caps.fw_ver;
props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
IB_DEVICE_PORT_ACTIVE_EVENT |
@@ -152,13 +167,15 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
- if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
+ if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
- if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
+ if (dev->dev->caps.max_gso_sz &&
+ (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
+ (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
props->device_cap_flags |= IB_DEVICE_UD_TSO;
if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
@@ -357,7 +374,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->state = IB_PORT_DOWN;
props->phys_state = state_to_phys_state(props->state);
props->active_mtu = IB_MTU_256;
- spin_lock(&iboe->lock);
+ spin_lock_bh(&iboe->lock);
ndev = iboe->netdevs[port - 1];
if (!ndev)
goto out_unlock;
@@ -369,7 +386,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
IB_PORT_ACTIVE : IB_PORT_DOWN;
props->phys_state = state_to_phys_state(props->state);
out_unlock:
- spin_unlock(&iboe->lock);
+ spin_unlock_bh(&iboe->lock);
out:
mlx4_free_cmd_mailbox(mdev->dev, mailbox);
return err;
@@ -811,11 +828,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
if (!mqp->port)
return 0;
- spin_lock(&mdev->iboe.lock);
+ spin_lock_bh(&mdev->iboe.lock);
ndev = mdev->iboe.netdevs[mqp->port - 1];
if (ndev)
dev_hold(ndev);
- spin_unlock(&mdev->iboe.lock);
+ spin_unlock_bh(&mdev->iboe.lock);
if (ndev) {
ret = 1;
@@ -910,8 +927,7 @@ static int __mlx4_ib_default_rules_match(struct ib_qp *qp,
const struct default_rules *pdefault_rules = default_table;
u8 link_layer = rdma_port_get_link_layer(qp->device, flow_attr->port);
- for (i = 0; i < sizeof(default_table)/sizeof(default_table[0]); i++,
- pdefault_rules++) {
+ for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) {
__u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS];
memset(&field_types, 0, sizeof(field_types));
@@ -965,8 +981,7 @@ static int __mlx4_ib_create_default_rules(
int size = 0;
int i;
- for (i = 0; i < sizeof(pdefault_rules->rules_create_list)/
- sizeof(pdefault_rules->rules_create_list[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
int ret;
union ib_flow_spec ib_spec;
switch (pdefault_rules->rules_create_list[i]) {
@@ -1091,6 +1106,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
return err;
}
+static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+ u64 *reg_id)
+{
+ void *ib_flow;
+ union ib_flow_spec *ib_spec;
+ struct mlx4_dev *dev = to_mdev(qp->device)->dev;
+ int err = 0;
+
+ if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ return 0; /* do nothing */
+
+ ib_flow = flow_attr + 1;
+ ib_spec = (union ib_flow_spec *)ib_flow;
+
+ if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1)
+ return 0; /* do nothing */
+
+ err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
+ flow_attr->port, qp->qp_num,
+ MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
+ reg_id);
+ return err;
+}
+
static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain)
@@ -1138,6 +1177,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
i++;
}
+ if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
+ err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
+ if (err)
+ goto err_free;
+ }
+
return &mflow->ibflow;
err_free:
@@ -1264,11 +1309,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
mutex_lock(&mqp->mutex);
ge = find_gid_entry(mqp, gid->raw);
if (ge) {
- spin_lock(&mdev->iboe.lock);
+ spin_lock_bh(&mdev->iboe.lock);
ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
if (ndev)
dev_hold(ndev);
- spin_unlock(&mdev->iboe.lock);
+ spin_unlock_bh(&mdev->iboe.lock);
if (ndev)
dev_put(ndev);
list_del(&ge->list);
@@ -1389,6 +1434,9 @@ static void update_gids_task(struct work_struct *work)
int err;
struct mlx4_dev *dev = gw->dev->dev;
+ if (!gw->dev->ib_active)
+ return;
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
pr_warn("update gid table failed %ld\n", PTR_ERR(mailbox));
@@ -1419,6 +1467,9 @@ static void reset_gids_task(struct work_struct *work)
int err;
struct mlx4_dev *dev = gw->dev->dev;
+ if (!gw->dev->ib_active)
+ return;
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
pr_warn("reset gid table failed\n");
@@ -1553,7 +1604,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
return 0;
iboe = &ibdev->iboe;
- spin_lock(&iboe->lock);
+ spin_lock_bh(&iboe->lock);
for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
if ((netif_is_bond_master(real_dev) &&
@@ -1563,7 +1614,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
update_gid_table(ibdev, port, gid,
event == NETDEV_DOWN, 0);
- spin_unlock(&iboe->lock);
+ spin_unlock_bh(&iboe->lock);
return 0;
}
@@ -1636,13 +1687,21 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
new_smac = mlx4_mac_to_u64(dev->dev_addr);
read_unlock(&dev_base_lock);
+ atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
+
+ /* no need for update QP1 and mac registration in non-SRIOV */
+ if (!mlx4_is_mfunc(ibdev->dev))
+ return;
+
mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
qp = ibdev->qp1_proxy[port - 1];
if (qp) {
int new_smac_index;
- u64 old_smac = qp->pri.smac;
+ u64 old_smac;
struct mlx4_update_qp_params update_params;
+ mutex_lock(&qp->mutex);
+ old_smac = qp->pri.smac;
if (new_smac == old_smac)
goto unlock;
@@ -1652,22 +1711,25 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
goto unlock;
update_params.smac_index = new_smac_index;
- if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC,
+ if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
&update_params)) {
release_mac = new_smac;
goto unlock;
}
-
+ /* if old port was zero, no mac was yet registered for this QP */
+ if (qp->pri.smac_port)
+ release_mac = old_smac;
qp->pri.smac = new_smac;
+ qp->pri.smac_port = port;
qp->pri.smac_index = new_smac_index;
-
- release_mac = old_smac;
}
unlock:
- mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
if (release_mac != MLX4_IB_INVALID_MAC)
mlx4_unregister_mac(ibdev->dev, port, release_mac);
+ if (qp)
+ mutex_unlock(&qp->mutex);
+ mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
}
static void mlx4_ib_get_dev_addr(struct net_device *dev,
@@ -1678,6 +1740,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
struct inet6_dev *in6_dev;
union ib_gid *pgid;
struct inet6_ifaddr *ifp;
+ union ib_gid default_gid;
#endif
union ib_gid gid;
@@ -1698,12 +1761,15 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
in_dev_put(in_dev);
}
#if IS_ENABLED(CONFIG_IPV6)
+ mlx4_make_default_gid(dev, &default_gid);
/* IPv6 gids */
in6_dev = in6_dev_get(dev);
if (in6_dev) {
read_lock_bh(&in6_dev->lock);
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
pgid = (union ib_gid *)&ifp->addr;
+ if (!memcmp(pgid, &default_gid, sizeof(*pgid)))
+ continue;
update_gid_table(ibdev, port, pgid, 0, 0);
}
read_unlock_bh(&in6_dev->lock);
@@ -1725,24 +1791,33 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
struct net_device *dev;
struct mlx4_ib_iboe *iboe = &ibdev->iboe;
int i;
+ int err = 0;
- for (i = 1; i <= ibdev->num_ports; ++i)
- if (reset_gid_table(ibdev, i))
- return -1;
+ for (i = 1; i <= ibdev->num_ports; ++i) {
+ if (rdma_port_get_link_layer(&ibdev->ib_dev, i) ==
+ IB_LINK_LAYER_ETHERNET) {
+ err = reset_gid_table(ibdev, i);
+ if (err)
+ goto out;
+ }
+ }
read_lock(&dev_base_lock);
- spin_lock(&iboe->lock);
+ spin_lock_bh(&iboe->lock);
for_each_netdev(&init_net, dev) {
u8 port = mlx4_ib_get_dev_port(dev, ibdev);
- if (port)
+ /* port will be non-zero only for ETH ports */
+ if (port) {
+ mlx4_ib_set_default_gid(ibdev, dev, port);
mlx4_ib_get_dev_addr(dev, ibdev, port);
+ }
}
- spin_unlock(&iboe->lock);
+ spin_unlock_bh(&iboe->lock);
read_unlock(&dev_base_lock);
-
- return 0;
+out:
+ return err;
}
static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
@@ -1756,7 +1831,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
iboe = &ibdev->iboe;
- spin_lock(&iboe->lock);
+ spin_lock_bh(&iboe->lock);
mlx4_foreach_ib_transport_port(port, ibdev->dev) {
enum ib_port_state port_state = IB_PORT_NOP;
struct net_device *old_master = iboe->masters[port - 1];
@@ -1788,35 +1863,47 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
IB_PORT_ACTIVE : IB_PORT_DOWN;
mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
- } else {
- reset_gid_table(ibdev, port);
- }
- /* if using bonding/team and a slave port is down, we don't the bond IP
- * based gids in the table since flows that select port by gid may get
- * the down port.
- */
- if (curr_master && (port_state == IB_PORT_DOWN)) {
- reset_gid_table(ibdev, port);
- mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
- }
- /* if bonding is used it is possible that we add it to masters
- * only after IP address is assigned to the net bonding
- * interface.
- */
- if (curr_master && (old_master != curr_master)) {
- reset_gid_table(ibdev, port);
- mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
- mlx4_ib_get_dev_addr(curr_master, ibdev, port);
- }
+ if (curr_master) {
+ /* if using bonding/team and a slave port is down, we
+ * don't want the bond IP based gids in the table since
+ * flows that select port by gid may get the down port.
+ */
+ if (port_state == IB_PORT_DOWN) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev,
+ curr_netdev,
+ port);
+ } else {
+ /* gids from the upper dev (bond/team)
+ * should appear in port's gid table
+ */
+ mlx4_ib_get_dev_addr(curr_master,
+ ibdev, port);
+ }
+ }
+ /* if bonding is used it is possible that we add it to
+ * masters only after IP address is assigned to the
+ * net bonding interface.
+ */
+ if (curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev,
+ curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+ }
- if (!curr_master && (old_master != curr_master)) {
+ if (!curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev,
+ curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+ }
+ } else {
reset_gid_table(ibdev, port);
- mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
- mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
}
}
- spin_unlock(&iboe->lock);
+ spin_unlock_bh(&iboe->lock);
if (update_qps_port > 0)
mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2007,6 +2094,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
(1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_REREG_MR) |
(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
@@ -2059,6 +2147,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
+ ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
@@ -2156,6 +2245,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_steer_free_bitmap;
}
+ for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
+ atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
+
if (ib_register_device(&ibdev->ib_dev, NULL))
goto err_steer_free_bitmap;
@@ -2192,12 +2284,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
}
#endif
- for (i = 1 ; i <= ibdev->num_ports ; ++i)
- reset_gid_table(ibdev, i);
- rtnl_lock();
- mlx4_ib_scan_netdevs(ibdev, NULL, 0);
- rtnl_unlock();
- mlx4_ib_init_gid_table(ibdev);
+ if (mlx4_ib_init_gid_table(ibdev))
+ goto err_notif;
}
for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
@@ -2345,6 +2433,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
struct mlx4_ib_dev *ibdev = ibdev_ptr;
int p;
+ ibdev->ib_active = false;
+ flush_workqueue(wq);
+
mlx4_ib_close_sriov(ibdev);
mlx4_ib_mad_cleanup(ibdev);
ib_unregister_device(&ibdev->ib_dev);
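
A theme of the main.c changes: the per-port MAC moves into an atomic64_t so hot paths (QP1 proxy updates, UD header builds) read a torn-free snapshot without taking iboe->lock. Reduced to its core, assuming a single hypothetical port:

#include <linux/atomic.h>

static atomic64_t port_mac;

/* Writer (netdev event path): publish the new MAC in one shot. */
static void publish_mac(u64 mac)
{
	atomic64_set(&port_mac, mac);
}

/* Reader (post-send path): lock-free snapshot; atomic64_read()
 * never returns a half-updated 64-bit value. */
static u64 snapshot_mac(void)
{
	return atomic64_read(&port_mac);
}
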
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 369da3ca5d64..6eb743f65f6f 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -451,6 +451,7 @@ struct mlx4_ib_iboe {
spinlock_t lock;
struct net_device *netdevs[MLX4_MAX_PORTS];
struct net_device *masters[MLX4_MAX_PORTS];
+ atomic64_t mac[MLX4_MAX_PORTS];
struct notifier_block nb;
struct notifier_block nb_inet;
struct notifier_block nb_inet6;
@@ -788,5 +789,9 @@ int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
int is_attach);
+int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
+ u64 start, u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata);
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index cb2a8727f3fb..8f9325cfc85d 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -144,8 +144,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
+ /* Force registering the memory as writable. */
+ /* Used for memory re-registration. HCA protects the access */
mr->umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags, 0);
+ access_flags | IB_ACCESS_LOCAL_WRITE, 0);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@@ -183,6 +185,93 @@ err_free:
return ERR_PTR(err);
}
+int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
+ u64 start, u64 length, u64 virt_addr,
+ int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata)
+{
+ struct mlx4_ib_dev *dev = to_mdev(mr->device);
+ struct mlx4_ib_mr *mmr = to_mmr(mr);
+ struct mlx4_mpt_entry *mpt_entry;
+ struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
+ int err;
+
+ /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
+ * we assume that the calls can't run concurrently. Otherwise, a
+ * race exists.
+ */
+ err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);
+
+ if (err)
+ return err;
+
+ if (flags & IB_MR_REREG_PD) {
+ err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
+ to_mpd(pd)->pdn);
+
+ if (err)
+ goto release_mpt_entry;
+ }
+
+ if (flags & IB_MR_REREG_ACCESS) {
+ err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
+ convert_access(mr_access_flags));
+
+ if (err)
+ goto release_mpt_entry;
+ }
+
+ if (flags & IB_MR_REREG_TRANS) {
+ int shift;
+ int err;
+ int n;
+
+ mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
+ ib_umem_release(mmr->umem);
+ mmr->umem = ib_umem_get(mr->uobject->context, start, length,
+ mr_access_flags |
+ IB_ACCESS_LOCAL_WRITE,
+ 0);
+ if (IS_ERR(mmr->umem)) {
+ err = PTR_ERR(mmr->umem);
+ /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
+ mmr->umem = NULL;
+ goto release_mpt_entry;
+ }
+ n = ib_umem_page_count(mmr->umem);
+ shift = ilog2(mmr->umem->page_size);
+
+ err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
+ virt_addr, length, n, shift,
+ *pmpt_entry);
+ if (err) {
+ ib_umem_release(mmr->umem);
+ goto release_mpt_entry;
+ }
+ mmr->mmr.iova = virt_addr;
+ mmr->mmr.size = length;
+
+ err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
+ if (err) {
+ mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
+ ib_umem_release(mmr->umem);
+ goto release_mpt_entry;
+ }
+ }
+
+ /* If we couldn't transfer the MR to the HCA, just remember to
+ * return a failure. But dereg_mr will free the resources.
+ */
+ err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
+ if (!err && flags & IB_MR_REREG_ACCESS)
+ mmr->mmr.access = mr_access_flags;
+
+release_mpt_entry:
+ mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);
+
+ return err;
+}
+
int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
struct mlx4_ib_mr *mr = to_mmr(ibmr);
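
The kernel side above backs the user-visible rereg verb; libibverbs exposes it as ibv_rereg_mr(). A hedged userspace sketch of exercising the translation-change path (mr, pd, buf and len are assumed to already exist):

#include <infiniband/verbs.h>

/* Re-register an existing MR over a new buffer instead of
 * deregistering and registering from scratch. Note that pd is
 * only consulted if IBV_REREG_MR_CHANGE_PD is also set. */
static int move_mr(struct ibv_mr *mr, struct ibv_pd *pd,
		   void *buf, size_t len)
{
	return ibv_rereg_mr(mr, IBV_REREG_MR_CHANGE_TRANSLATION,
			    pd, buf, len,
			    IBV_ACCESS_LOCAL_WRITE |
			    IBV_ACCESS_REMOTE_READ);
}
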
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 67780452f0cf..9c5150c3cb31 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -964,9 +964,10 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
pr_warn("modify QP %06x to RESET failed.\n",
qp->mqp.qpn);
- if (qp->pri.smac) {
+ if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
qp->pri.smac = 0;
+ qp->pri.smac_port = 0;
}
if (qp->alt.smac) {
mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1325,7 +1326,8 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
* If one was already assigned, but the new mac differs,
* unregister the old one and register the new one.
*/
- if (!smac_info->smac || smac_info->smac != smac) {
+ if ((!smac_info->smac && !smac_info->smac_port) ||
+ smac_info->smac != smac) {
/* register candidate now, unreg if needed, after success */
smac_index = mlx4_register_mac(dev->dev, port, smac);
if (smac_index >= 0) {
@@ -1390,21 +1392,13 @@ static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
static int handle_eth_ud_smac_index(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp, u8 *smac,
struct mlx4_qp_context *context)
{
- struct net_device *ndev;
u64 u64_mac;
int smac_index;
-
- ndev = dev->iboe.netdevs[qp->port - 1];
- if (ndev) {
- smac = ndev->dev_addr;
- u64_mac = mlx4_mac_to_u64(smac);
- } else {
- u64_mac = dev->dev->caps.def_mac[qp->port];
- }
+ u64_mac = atomic64_read(&dev->iboe.mac[qp->port - 1]);
context->pri_path.sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE | ((qp->port - 1) << 6);
- if (!qp->pri.smac) {
+ if (!qp->pri.smac && !qp->pri.smac_port) {
smac_index = mlx4_register_mac(dev->dev, qp->port, u64_mac);
if (smac_index >= 0) {
qp->pri.candidate_smac_index = smac_index;
@@ -1432,6 +1426,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
int steer_qp = 0;
int err = -EINVAL;
+ /* APM is not supported under RoCE */
+ if (attr_mask & IB_QP_ALT_PATH &&
+ rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
+ IB_LINK_LAYER_ETHERNET)
+ return -ENOTSUPP;
+
context = kzalloc(sizeof *context, GFP_KERNEL);
if (!context)
return -ENOMEM;
@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET)
+ if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
MLX4_IB_LINK_TYPE_ETH;
+ if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ /* set QP to receive both tunneled & non-tunneled packets */
+ if (!(context->flags & cpu_to_be32(1 << MLX4_RSS_QPC_FLAG_OFFSET)))
+ context->srqn = cpu_to_be32(7 << 28);
+ }
+ }
if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
int is_eth = rdma_port_get_link_layer(
@@ -1780,9 +1786,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
if (qp->flags & MLX4_IB_QP_NETIF)
mlx4_ib_steer_qp_reg(dev, qp, 0);
}
- if (qp->pri.smac) {
+ if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port)) {
mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
qp->pri.smac = 0;
+ qp->pri.smac_port = 0;
}
if (qp->alt.smac) {
mlx4_unregister_mac(dev->dev, qp->alt.smac_port, qp->alt.smac);
@@ -1806,11 +1813,12 @@ out:
if (err && steer_qp)
mlx4_ib_steer_qp_reg(dev, qp, 0);
kfree(context);
- if (qp->pri.candidate_smac) {
+ if (qp->pri.candidate_smac ||
+ (!qp->pri.candidate_smac && qp->pri.candidate_smac_port)) {
if (err) {
mlx4_unregister_mac(dev->dev, qp->pri.candidate_smac_port, qp->pri.candidate_smac);
} else {
- if (qp->pri.smac)
+ if (qp->pri.smac || (!qp->pri.smac && qp->pri.smac_port))
mlx4_unregister_mac(dev->dev, qp->pri.smac_port, qp->pri.smac);
qp->pri.smac = qp->pri.candidate_smac;
qp->pri.smac_index = qp->pri.candidate_smac_index;
@@ -2083,6 +2091,16 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
return 0;
}
+static void mlx4_u64_to_smac(u8 *dst_mac, u64 src_mac)
+{
+ int i;
+
+ for (i = ETH_ALEN; i; i--) {
+ dst_mac[i - 1] = src_mac & 0xff;
+ src_mac >>= 8;
+ }
+}
+
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
@@ -2197,7 +2215,6 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
}
if (is_eth) {
- u8 *smac;
struct in6_addr in6;
u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
@@ -2210,12 +2227,17 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
memcpy(&ctrl->imm, ah->av.eth.mac + 2, 4);
memcpy(&in6, sgid.raw, sizeof(in6));
- if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev))
- smac = to_mdev(sqp->qp.ibqp.device)->
- iboe.netdevs[sqp->qp.port - 1]->dev_addr;
- else /* use the src mac of the tunnel */
- smac = ah->av.eth.s_mac;
- memcpy(sqp->ud_header.eth.smac_h, smac, 6);
+ if (!mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
+ u64 mac = atomic64_read(&to_mdev(ib_dev)->iboe.mac[sqp->qp.port - 1]);
+ u8 smac[ETH_ALEN];
+
+ mlx4_u64_to_smac(smac, mac);
+ memcpy(sqp->ud_header.eth.smac_h, smac, ETH_ALEN);
+ } else {
+ /* use the src mac of the tunnel */
+ memcpy(sqp->ud_header.eth.smac_h, ah->av.eth.s_mac, ETH_ALEN);
+ }
+
if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6))
mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
if (!is_vlan) {
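
mlx4_u64_to_smac() unpacks the cached atomic64 MAC back into wire order, the inverse of mlx4_mac_to_u64(). A standalone round-trip sketch (plain C, hypothetical names):

#include <stdint.h>

#define ETH_ALEN 6

/* Low 48 bits of src into dst[0..5], most significant byte first. */
static void u64_to_mac(uint8_t *dst, uint64_t src)
{
	for (int i = ETH_ALEN; i; i--) {
		dst[i - 1] = src & 0xff;
		src >>= 8;
	}
}

/* Inverse: fold the six bytes back into one integer. */
static uint64_t mac_to_u64(const uint8_t *mac)
{
	uint64_t v = 0;

	for (int i = 0; i < ETH_ALEN; i++)
		v = (v << 8) | mac[i];
	return v;
}
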
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7efe6e3f3542..8c574b63d77b 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -2501,7 +2501,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
for (nreq = 0; wr; nreq++, wr = wr->next) {
- if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) {
+ if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
mlx5_ib_warn(dev, "\n");
err = -EINVAL;
*bad_wr = wr;
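
The mlx5 hunk is a pure idiom swap: ARRAY_SIZE() over the open-coded sizeof division. Besides brevity, the kernel macro fails to compile when its argument has decayed to a pointer. A tiny sketch with an illustrative table:

#include <linux/kernel.h>   /* ARRAY_SIZE() */
#include <linux/types.h>    /* bool, u8 */

static const u8 opcode_tbl[] = { 0x0a, 0x08, 0x05 }; /* illustrative */

static bool opcode_valid(unsigned int op)
{
	return op < ARRAY_SIZE(opcode_tbl);
}
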
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index b6f7f457fc55..8881fa376e06 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -294,7 +294,7 @@ int mthca_create_agents(struct mthca_dev *dev)
agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
q ? IB_QPT_GSI : IB_QPT_SMI,
NULL, 0, send_handler,
- NULL, NULL);
+ NULL, NULL, 0);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
goto err;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 90200245c5eb..02120d340d50 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1003,13 +1003,13 @@ int nes_init_cqp(struct nes_device *nesdev)
(sizeof(struct nes_hw_aeqe) * nesadapter->max_qp) +
sizeof(struct nes_hw_cqp_qp_context);
- nesdev->cqp_vbase = pci_alloc_consistent(nesdev->pcidev, nesdev->cqp_mem_size,
- &nesdev->cqp_pbase);
+ nesdev->cqp_vbase = pci_zalloc_consistent(nesdev->pcidev,
+ nesdev->cqp_mem_size,
+ &nesdev->cqp_pbase);
if (!nesdev->cqp_vbase) {
nes_debug(NES_DBG_INIT, "Unable to allocate memory for host descriptor rings\n");
return -ENOMEM;
}
- memset(nesdev->cqp_vbase, 0, nesdev->cqp_mem_size);
/* Allocate a twice the number of CQP requests as the SQ size */
nesdev->nes_cqp_requests = kzalloc(sizeof(struct nes_cqp_request) *
@@ -1691,13 +1691,13 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
(NES_NIC_WQ_SIZE * 2 * sizeof(struct nes_hw_nic_cqe)) +
sizeof(struct nes_hw_nic_qp_context);
- nesvnic->nic_vbase = pci_alloc_consistent(nesdev->pcidev, nesvnic->nic_mem_size,
- &nesvnic->nic_pbase);
+ nesvnic->nic_vbase = pci_zalloc_consistent(nesdev->pcidev,
+ nesvnic->nic_mem_size,
+ &nesvnic->nic_pbase);
if (!nesvnic->nic_vbase) {
nes_debug(NES_DBG_INIT, "Unable to allocate memory for NIC host descriptor rings\n");
return -ENOMEM;
}
- memset(nesvnic->nic_vbase, 0, nesvnic->nic_mem_size);
nes_debug(NES_DBG_INIT, "Allocated NIC QP structures at %p (phys = %016lX), size = %u.\n",
nesvnic->nic_vbase, (unsigned long)nesvnic->nic_pbase, nesvnic->nic_mem_size);
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 218dd3574285..fef067c959fc 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1616,8 +1616,8 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
entries, nescq->cq_mem_size, nescq->hw_cq.cq_number);
/* allocate the physical buffer space */
- mem = pci_alloc_consistent(nesdev->pcidev, nescq->cq_mem_size,
- &nescq->hw_cq.cq_pbase);
+ mem = pci_zalloc_consistent(nesdev->pcidev, nescq->cq_mem_size,
+ &nescq->hw_cq.cq_pbase);
if (!mem) {
printk(KERN_ERR PFX "Unable to allocate pci memory for cq\n");
nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
@@ -1625,7 +1625,6 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
return ERR_PTR(-ENOMEM);
}
- memset(mem, 0, nescq->cq_mem_size);
nescq->hw_cq.cq_vbase = mem;
nescq->hw_cq.cq_head = 0;
nes_debug(NES_DBG_CQ, "CQ%u virtual address @ %p, phys = 0x%08X\n",
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 19011dbb930f..b43456ae124b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -40,7 +40,7 @@
#include <be_roce.h>
#include "ocrdma_sli.h"
-#define OCRDMA_ROCE_DRV_VERSION "10.2.145.0u"
+#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u"
#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -137,6 +137,7 @@ struct mqe_ctx {
u16 cqe_status;
u16 ext_status;
bool cmd_done;
+ bool fw_error_state;
};
struct ocrdma_hw_mr {
@@ -235,7 +236,10 @@ struct ocrdma_dev {
struct list_head entry;
struct rcu_head rcu;
int id;
- u64 stag_arr[OCRDMA_MAX_STAG];
+ u64 *stag_arr;
+ u8 sl; /* service level */
+ bool pfc_state;
+ atomic_t update_sl;
u16 pvid;
u32 asic_id;
@@ -518,4 +522,22 @@ static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
}
+static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio)
+{
+ return *(pfc + prio);
+}
+
+static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio)
+{
+ return *(app_prio + prio);
+}
+
+static inline u8 ocrdma_is_enabled_and_synced(u32 state)
+{ /* May also be used to interpret TC-state, QCN-state
+ * Appl-state and Logical-link-state in future.
+ */
+ return (state & OCRDMA_STATE_FLAG_ENABLED) &&
+ (state & OCRDMA_STATE_FLAG_SYNC);
+}
+
#endif
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index d4cc01f10c01..ac02ce4e8040 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -35,8 +35,10 @@
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
+#define OCRDMA_VID_PCP_SHIFT 0xD
+
static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
- struct ib_ah_attr *attr, int pdid)
+ struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
{
int status = 0;
u16 vlan_tag; bool vlan_enabled = false;
@@ -47,15 +49,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
memset(&eth, 0, sizeof(eth));
memset(&grh, 0, sizeof(grh));
- ah->sgid_index = attr->grh.sgid_index;
-
+ /* VLAN */
vlan_tag = attr->vlan_id;
if (!vlan_tag || (vlan_tag > 0xFFF))
vlan_tag = dev->pvid;
if (vlan_tag && (vlan_tag < 0x1000)) {
eth.eth_type = cpu_to_be16(0x8100);
eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
- vlan_tag |= (attr->sl & 7) << 13;
+ vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
eth.vlan_tag = cpu_to_be16(vlan_tag);
eth_sz = sizeof(struct ocrdma_eth_vlan);
vlan_enabled = true;
@@ -63,15 +64,14 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
eth_sz = sizeof(struct ocrdma_eth_basic);
}
+ /* MAC */
memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
- memcpy(&eth.dmac[0], attr->dmac, ETH_ALEN);
status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
if (status)
return status;
- status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index,
- (union ib_gid *)&grh.sgid[0]);
- if (status)
- return status;
+ ah->sgid_index = attr->grh.sgid_index;
+ memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+ memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
grh.tclass_flow = cpu_to_be32((6 << 28) |
(attr->grh.traffic_class << 24) |
@@ -79,8 +79,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
/* 0x1b is next header value in GRH */
grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
(0x1b << 8) | attr->grh.hop_limit);
-
- memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
+ /* Eth HDR */
memcpy(&ah->av->eth_hdr, &eth, eth_sz);
memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
if (vlan_enabled)
@@ -96,10 +95,14 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
struct ocrdma_ah *ah;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ union ib_gid sgid;
+ u8 zmac[ETH_ALEN];
if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
+ if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+ ocrdma_init_service_level(dev);
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
@@ -107,7 +110,27 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
status = ocrdma_alloc_av(dev, ah);
if (status)
goto av_err;
- status = set_av_attr(dev, ah, attr, pd->id);
+
+ status = ocrdma_query_gid(&dev->ibdev, 1, attr->grh.sgid_index, &sgid);
+ if (status) {
+ pr_err("%s(): Failed to query sgid, status = %d\n",
+ __func__, status);
+ goto av_conf_err;
+ }
+
+ memset(&zmac, 0, ETH_ALEN);
+ if (pd->uctx &&
+ memcmp(attr->dmac, &zmac, ETH_ALEN)) {
+ status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
+ attr->dmac, &attr->vlan_id);
+ if (status) {
+ pr_err("%s(): Failed to resolve dmac from gid."
+ "status = %d\n", __func__, status);
+ goto av_conf_err;
+ }
+ }
+
+ status = set_av_attr(dev, ah, attr, &sgid, pd->id);
if (status)
goto av_conf_err;
@@ -141,7 +164,7 @@ int ocrdma_query_ah(struct ib_ah *ibah, struct ib_ah_attr *attr)
struct ocrdma_av *av = ah->av;
struct ocrdma_grh *grh;
attr->ah_flags |= IB_AH_GRH;
- if (ah->av->valid & Bit(1)) {
+ if (ah->av->valid & OCRDMA_AV_VALID) {
grh = (struct ocrdma_grh *)((u8 *)ah->av +
sizeof(struct ocrdma_eth_vlan));
attr->sl = be16_to_cpu(av->eth_hdr.vlan_tag) >> 13;
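
The ocrdma_ah.c change stops trusting attr->sl for the 802.1Q priority and uses the DCBX-derived dev->sl instead, shifted into the PCP field (bits 15:13 of the TCI). A sketch of the tag layout, with hypothetical names:

#include <stdint.h>

#define VID_PCP_SHIFT 13   /* PCP lives in bits 15:13 of the TCI */

/* Combine a 12-bit VLAN ID with a 3-bit priority (service level). */
static uint16_t build_vlan_tci(uint16_t vid, uint8_t prio)
{
	return (uint16_t)((vid & 0x0fff) |
			  ((uint16_t)(prio & 0x07) << VID_PCP_SHIFT));
}
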
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 3bbf2010a821..dd35ae558ae1 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -525,7 +525,7 @@ static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
cmd->eqn = eq->id;
- cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe);
+ cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);
ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
cq->dma, PAGE_SIZE_4K);
@@ -661,7 +661,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
{
struct ocrdma_qp *qp = NULL;
struct ocrdma_cq *cq = NULL;
- struct ib_event ib_evt = { 0 };
+ struct ib_event ib_evt;
int cq_event = 0;
int qp_event = 1;
int srq_event = 0;
@@ -674,6 +674,8 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];
+ memset(&ib_evt, 0, sizeof(ib_evt));
+
ib_evt.device = &dev->ibdev;
switch (type) {
@@ -771,6 +773,10 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
break;
+
+ case OCRDMA_ASYNC_EVENT_COS_VALUE:
+ atomic_set(&dev->update_sl, 1);
+ break;
default:
/* Not interested evts. */
break;
@@ -962,8 +968,12 @@ static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
msecs_to_jiffies(30000));
if (status)
return 0;
- else
+ else {
+ dev->mqe_ctx.fw_error_state = true;
+ pr_err("%s(%d) mailbox timeout: fw not responding\n",
+ __func__, dev->id);
return -1;
+ }
}
/* issue a mailbox command on the MQ */
@@ -975,6 +985,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
struct ocrdma_mbx_rsp *rsp = NULL;
mutex_lock(&dev->mqe_ctx.lock);
+ if (dev->mqe_ctx.fw_error_state)
+ goto mbx_err;
ocrdma_post_mqe(dev, mqe);
status = ocrdma_wait_mqe_cmpl(dev);
if (status)
@@ -1078,7 +1090,8 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
attr->max_mw = rsp->max_mw;
attr->max_mr = rsp->max_mr;
- attr->max_mr_size = ~0ull;
+ attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
+ rsp->max_mr_size_lo;
attr->max_fmr = 0;
attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
@@ -1252,7 +1265,9 @@ static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
- dev->hba_port_num = hba_attribs->phy_port;
+ dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
+ OCRDMA_HBA_ATTRB_PTNUM_MASK)
+ >> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
strncpy(dev->model_number,
hba_attribs->controller_model_number, 31);
}
@@ -1302,7 +1317,8 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
goto mbx_err;
rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
- *lnk_speed = rsp->phys_port_speed;
+ *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
+ >> OCRDMA_PHY_PS_SHIFT;
mbx_err:
kfree(cmd);
@@ -1328,11 +1344,16 @@ static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
goto mbx_err;
rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
- dev->phy.phy_type = le16_to_cpu(rsp->phy_type);
+ dev->phy.phy_type =
+ (rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
+ dev->phy.interface_type =
+ (rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
+ >> OCRDMA_IF_TYPE_SHIFT;
dev->phy.auto_speeds_supported =
- le16_to_cpu(rsp->auto_speeds_supported);
+ (rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
dev->phy.fixed_speeds_supported =
- le16_to_cpu(rsp->fixed_speeds_supported);
+ (rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
+ >> OCRDMA_FSPEED_SUPP_SHIFT;
mbx_err:
kfree(cmd);
return status;
@@ -1457,8 +1478,8 @@ static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
- pbes[i].pa_lo = (u32) (pa & 0xffffffff);
- pbes[i].pa_hi = (u32) upper_32_bits(pa);
+ pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
+ pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
pa += PAGE_SIZE;
}
cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
@@ -1501,6 +1522,7 @@ static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
dev->av_tbl.pa);
+ dev->av_tbl.va = NULL;
dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
dev->av_tbl.pbl.pa);
kfree(cmd);
@@ -1624,14 +1646,16 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
OCRDMA_CREATE_CQ_TYPE_SHIFT;
cq->phase_change = false;
- cmd->cmd.cqe_count = (cq->len / cqe_size);
+ cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
} else {
- cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
+ cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
cq->phase_change = true;
}
- cmd->cmd.pd_id = pd_id; /* valid only for v3 */
+ /* pd_id valid only for v3 */
+ cmd->cmd.pdid_cqecnt |= (pd_id <<
+ OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
@@ -2206,7 +2230,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
qp->rq_cq = cq;
- if (pd->dpp_enabled && pd->num_dpp_qp) {
+ if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
+ (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
dpp_cq_id);
}
@@ -2264,6 +2289,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
return -EINVAL;
+ if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
+ ocrdma_init_service_level(qp->dev);
cmd->params.tclass_sq_psn |=
(ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
cmd->params.rnt_rc_sl_fl |=
@@ -2297,6 +2324,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
cmd->params.vlan_dmac_b4_to_b5 |=
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
+ cmd->params.rnt_rc_sl_fl |=
+ (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
}
return 0;
}
@@ -2604,6 +2633,168 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
return status;
}
+static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
+ struct ocrdma_dcbx_cfg *dcbxcfg)
+{
+ int status = 0;
+ dma_addr_t pa;
+ struct ocrdma_mqe cmd;
+
+ struct ocrdma_get_dcbx_cfg_req *req = NULL;
+ struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;
+
+ memset(&cmd, 0, sizeof(struct ocrdma_mqe));
+ cmd.hdr.pyld_len = max_t(u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
+ sizeof(struct ocrdma_get_dcbx_cfg_req));
+ req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
+ if (!req) {
+ status = -ENOMEM;
+ goto mem_err;
+ }
+
+ cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+ OCRDMA_MQE_HDR_SGE_CNT_MASK;
+ mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
+ mqe_sge->pa_hi = (u32) upper_32_bits(pa);
+ mqe_sge->len = cmd.hdr.pyld_len;
+
+ memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req));
+ ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
+ OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
+ req->param_type = ptype;
+
+ status = ocrdma_mbx_cmd(dev, &cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
+ ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
+ memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));
+
+mbx_err:
+ dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
+mem_err:
+ return status;
+}
+
+#define OCRDMA_MAX_SERVICE_LEVEL_INDEX 0x08
+#define OCRDMA_DEFAULT_SERVICE_LEVEL 0x05
+
+static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
+ struct ocrdma_dcbx_cfg *dcbxcfg,
+ u8 *srvc_lvl)
+{
+ int status = -EINVAL, indx, slindx;
+ int ventry_cnt;
+ struct ocrdma_app_parameter *app_param;
+ u8 valid, proto_sel;
+ u8 app_prio, pfc_prio;
+ u16 proto;
+
+ if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
+ pr_info("%s ocrdma%d DCBX is disabled\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ goto out;
+ }
+
+ if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
+ pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id,
+ (ptype > 0 ? "operational" : "admin"),
+ (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
+ "enabled" : "disabled",
+ (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
+ "" : ", not sync'ed");
+ goto out;
+ } else {
+ pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ }
+
+ ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
+ OCRDMA_DCBX_APP_ENTRY_SHIFT)
+ & OCRDMA_DCBX_STATE_MASK;
+
+ for (indx = 0; indx < ventry_cnt; indx++) {
+ app_param = &dcbxcfg->app_param[indx];
+ valid = (app_param->valid_proto_app >>
+ OCRDMA_APP_PARAM_VALID_SHIFT)
+ & OCRDMA_APP_PARAM_VALID_MASK;
+ proto_sel = (app_param->valid_proto_app
+ >> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT)
+ & OCRDMA_APP_PARAM_PROTO_SEL_MASK;
+ proto = app_param->valid_proto_app &
+ OCRDMA_APP_PARAM_APP_PROTO_MASK;
+
+ if (valid && proto == OCRDMA_APP_PROTO_ROCE &&
+ proto_sel == OCRDMA_PROTO_SELECT_L2) {
+ for (slindx = 0; slindx <
+ OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
+ app_prio = ocrdma_get_app_prio(
+ (u8 *)app_param->app_prio,
+ slindx);
+ pfc_prio = ocrdma_get_pfc_prio(
+ (u8 *)dcbxcfg->pfc_prio,
+ slindx);
+
+ if (app_prio && pfc_prio) {
+ *srvc_lvl = slindx;
+ status = 0;
+ goto out;
+ }
+ }
+ if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
+ pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
+ dev_name(&dev->nic_info.pdev->dev),
+ dev->id, proto);
+ }
+ }
+ }
+
+out:
+ return status;
+}
+
+void ocrdma_init_service_level(struct ocrdma_dev *dev)
+{
+ int status = 0, indx;
+ struct ocrdma_dcbx_cfg dcbxcfg;
+ u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
+ int ptype = OCRDMA_PARAMETER_TYPE_OPER;
+
+ for (indx = 0; indx < 2; indx++) {
+ status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
+ if (status) {
+ pr_err("%s(): status=%d\n", __func__, status);
+ ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
+ continue;
+ }
+
+ status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
+ &dcbxcfg, &srvc_lvl);
+ if (status) {
+ ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
+ continue;
+ }
+
+ break;
+ }
+
+ if (status)
+ pr_info("%s ocrdma%d service level default\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ else
+ pr_info("%s ocrdma%d service level %d\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id,
+ srvc_lvl);
+
+ dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
+ dev->sl = srvc_lvl;
+}
+
int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
int i;
@@ -2709,13 +2900,15 @@ int ocrdma_init_hw(struct ocrdma_dev *dev)
goto conf_err;
status = ocrdma_mbx_get_phy_info(dev);
if (status)
- goto conf_err;
+ goto info_attrb_err;
status = ocrdma_mbx_get_ctrl_attribs(dev);
if (status)
- goto conf_err;
+ goto info_attrb_err;
return 0;
+info_attrb_err:
+ ocrdma_mbx_delete_ah_tbl(dev);
conf_err:
ocrdma_destroy_mq(dev);
mq_err:
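
Much of the ocrdma_hw.c diff replaces direct struct members with packed response words (pdid_cqecnt, ityp_ptyp, fspeed_aspeed) decoded by mask/shift pairs, matching newer firmware layouts. The generic pattern, with placeholder mask and shift values:

#include <stdint.h>

#define FIELD_MASK  0x0000ff00u
#define FIELD_SHIFT 8

/* Extract a sub-field from a packed 32-bit word. */
static uint32_t get_field(uint32_t word)
{
	return (word & FIELD_MASK) >> FIELD_SHIFT;
}

/* Insert a sub-field, leaving the other bits untouched. */
static uint32_t set_field(uint32_t word, uint32_t val)
{
	return (word & ~FIELD_MASK) |
	       ((val << FIELD_SHIFT) & FIELD_MASK);
}
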
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index e513f7293142..6eed8f191322 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -135,4 +135,6 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
char *port_speed_string(struct ocrdma_dev *dev);
+void ocrdma_init_service_level(struct ocrdma_dev *);
+
#endif /* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 7c504e079744..256a06bc0b68 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -324,6 +324,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
if (!dev->qp_tbl)
goto alloc_err;
}
+
+ dev->stag_arr = kzalloc(sizeof(u64) * OCRDMA_MAX_STAG, GFP_KERNEL);
+ if (dev->stag_arr == NULL)
+ goto alloc_err;
+
spin_lock_init(&dev->av_tbl.lock);
spin_lock_init(&dev->flush_q_lock);
return 0;
@@ -334,6 +339,7 @@ alloc_err:
static void ocrdma_free_resources(struct ocrdma_dev *dev)
{
+ kfree(dev->stag_arr);
kfree(dev->qp_tbl);
kfree(dev->cq_tbl);
kfree(dev->sgid_tbl);
@@ -353,15 +359,25 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
{
struct ocrdma_dev *dev = dev_get_drvdata(device);
- return scnprintf(buf, PAGE_SIZE, "%s", &dev->attr.fw_ver[0]);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->attr.fw_ver[0]);
+}
+
+static ssize_t show_hca_type(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
static struct device_attribute *ocrdma_attributes[] = {
&dev_attr_hw_rev,
- &dev_attr_fw_ver
+ &dev_attr_fw_ver,
+ &dev_attr_hca_type
};
static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
@@ -372,6 +388,58 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
}
+static void ocrdma_init_ipv4_gids(struct ocrdma_dev *dev,
+ struct net_device *net)
+{
+ struct in_device *in_dev;
+ union ib_gid gid;
+ in_dev = in_dev_get(net);
+ if (in_dev) {
+ for_ifa(in_dev) {
+ ipv6_addr_set_v4mapped(ifa->ifa_address,
+ (struct in6_addr *)&gid);
+ ocrdma_add_sgid(dev, &gid);
+ }
+ endfor_ifa(in_dev);
+ in_dev_put(in_dev);
+ }
+}
+
+static void ocrdma_init_ipv6_gids(struct ocrdma_dev *dev,
+ struct net_device *net)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_dev *in6_dev;
+ union ib_gid *pgid;
+ struct inet6_ifaddr *ifp;
+ in6_dev = in6_dev_get(net);
+ if (in6_dev) {
+ read_lock_bh(&in6_dev->lock);
+ list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
+ pgid = (union ib_gid *)&ifp->addr;
+ ocrdma_add_sgid(dev, pgid);
+ }
+ read_unlock_bh(&in6_dev->lock);
+ in6_dev_put(in6_dev);
+ }
+#endif
+}
+
+static void ocrdma_init_gid_table(struct ocrdma_dev *dev)
+{
+ struct net_device *net_dev;
+
+ for_each_netdev(&init_net, net_dev) {
+ struct net_device *real_dev = rdma_vlan_dev_real_dev(net_dev) ?
+ rdma_vlan_dev_real_dev(net_dev) : net_dev;
+
+ if (real_dev == dev->nic_info.netdev) {
+ ocrdma_init_ipv4_gids(dev, net_dev);
+ ocrdma_init_ipv6_gids(dev, net_dev);
+ }
+ }
+}
+
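
ocrdma_init_ipv4_gids() relies on ipv6_addr_set_v4mapped() to store each IPv4 interface address as an IPv4-mapped IPv6 GID (::ffff:a.b.c.d), the layout RoCE uses for IPv4 addresses. A standalone sketch of that byte layout:

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	/* Build the ::ffff:a.b.c.d GID that ipv6_addr_set_v4mapped()
	 * produces for an IPv4 interface address. */
	static void v4mapped_gid(const uint8_t ipv4[4], uint8_t gid[16])
	{
		memset(gid, 0, 10);		/* 80 zero bits   */
		gid[10] = 0xff;			/* 16 one bits    */
		gid[11] = 0xff;
		memcpy(&gid[12], ipv4, 4);	/* IPv4 address   */
	}

	int main(void)
	{
		const uint8_t addr[4] = { 192, 168, 1, 1 };
		uint8_t gid[16];
		int i;

		v4mapped_gid(addr, gid);
		for (i = 0; i < 16; i++)
			printf("%02x%s", gid[i], i == 15 ? "\n" : ":");
		return 0;
	}
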
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
int status = 0, i;
@@ -399,6 +467,8 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status)
goto alloc_err;
+ ocrdma_init_service_level(dev);
+ ocrdma_init_gid_table(dev);
status = ocrdma_register_device(dev);
if (status)
goto alloc_err;
@@ -508,6 +578,12 @@ static int ocrdma_close(struct ocrdma_dev *dev)
return 0;
}
+static void ocrdma_shutdown(struct ocrdma_dev *dev)
+{
+ ocrdma_close(dev);
+ ocrdma_remove(dev);
+}
+
/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the
 * event to the stack.
@@ -521,6 +597,9 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
case BE_DEV_DOWN:
ocrdma_close(dev);
break;
+ case BE_DEV_SHUTDOWN:
+ ocrdma_shutdown(dev);
+ break;
}
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 96c9ee602ba4..904989ec5eaa 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -44,35 +44,39 @@ enum {
#define OCRDMA_SUBSYS_ROCE 10
enum {
OCRDMA_CMD_QUERY_CONFIG = 1,
- OCRDMA_CMD_ALLOC_PD,
- OCRDMA_CMD_DEALLOC_PD,
-
- OCRDMA_CMD_CREATE_AH_TBL,
- OCRDMA_CMD_DELETE_AH_TBL,
-
- OCRDMA_CMD_CREATE_QP,
- OCRDMA_CMD_QUERY_QP,
- OCRDMA_CMD_MODIFY_QP,
- OCRDMA_CMD_DELETE_QP,
-
- OCRDMA_CMD_RSVD1,
- OCRDMA_CMD_ALLOC_LKEY,
- OCRDMA_CMD_DEALLOC_LKEY,
- OCRDMA_CMD_REGISTER_NSMR,
- OCRDMA_CMD_REREGISTER_NSMR,
- OCRDMA_CMD_REGISTER_NSMR_CONT,
- OCRDMA_CMD_QUERY_NSMR,
- OCRDMA_CMD_ALLOC_MW,
- OCRDMA_CMD_QUERY_MW,
-
- OCRDMA_CMD_CREATE_SRQ,
- OCRDMA_CMD_QUERY_SRQ,
- OCRDMA_CMD_MODIFY_SRQ,
- OCRDMA_CMD_DELETE_SRQ,
-
- OCRDMA_CMD_ATTACH_MCAST,
- OCRDMA_CMD_DETACH_MCAST,
- OCRDMA_CMD_GET_RDMA_STATS,
+ OCRDMA_CMD_ALLOC_PD = 2,
+ OCRDMA_CMD_DEALLOC_PD = 3,
+
+ OCRDMA_CMD_CREATE_AH_TBL = 4,
+ OCRDMA_CMD_DELETE_AH_TBL = 5,
+
+ OCRDMA_CMD_CREATE_QP = 6,
+ OCRDMA_CMD_QUERY_QP = 7,
+ OCRDMA_CMD_MODIFY_QP = 8,
+ OCRDMA_CMD_DELETE_QP = 9,
+
+ OCRDMA_CMD_RSVD1 = 10,
+ OCRDMA_CMD_ALLOC_LKEY = 11,
+ OCRDMA_CMD_DEALLOC_LKEY = 12,
+ OCRDMA_CMD_REGISTER_NSMR = 13,
+ OCRDMA_CMD_REREGISTER_NSMR = 14,
+ OCRDMA_CMD_REGISTER_NSMR_CONT = 15,
+ OCRDMA_CMD_QUERY_NSMR = 16,
+ OCRDMA_CMD_ALLOC_MW = 17,
+ OCRDMA_CMD_QUERY_MW = 18,
+
+ OCRDMA_CMD_CREATE_SRQ = 19,
+ OCRDMA_CMD_QUERY_SRQ = 20,
+ OCRDMA_CMD_MODIFY_SRQ = 21,
+ OCRDMA_CMD_DELETE_SRQ = 22,
+
+ OCRDMA_CMD_ATTACH_MCAST = 23,
+ OCRDMA_CMD_DETACH_MCAST = 24,
+
+ OCRDMA_CMD_CREATE_RBQ = 25,
+ OCRDMA_CMD_DESTROY_RBQ = 26,
+
+ OCRDMA_CMD_GET_RDMA_STATS = 27,
OCRDMA_CMD_MAX
};
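
Spelling out the opcode values is what allows CREATE_RBQ/DESTROY_RBQ to be inserted without silently renumbering GET_RDMA_STATS and breaking the firmware ABI. Where values are load-bearing like this, a compile-time check documents the contract (a sketch, not code from this patch):

	enum {
		CMD_QUERY_CONFIG = 1,
		/* ... */
		CMD_CREATE_RBQ = 25,
		CMD_DESTROY_RBQ = 26,
		CMD_GET_RDMA_STATS = 27,
	};

	/* C11 form; the in-kernel equivalent would be BUILD_BUG_ON() */
	_Static_assert(CMD_GET_RDMA_STATS == 27,
		       "mailbox opcodes are a firmware ABI");

	int main(void) { return 0; }
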
@@ -103,7 +107,7 @@ enum {
#define OCRDMA_MAX_QP 2048
#define OCRDMA_MAX_CQ 2048
-#define OCRDMA_MAX_STAG 8192
+#define OCRDMA_MAX_STAG 16384
enum {
OCRDMA_DB_RQ_OFFSET = 0xE0,
@@ -422,7 +426,12 @@ struct ocrdma_ae_qp_mcqe {
#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
-#define OCRDMA_ASYNC_EVENT_PVID_STATE 0x3
+
+enum ocrdma_async_grp5_events {
+ OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,
+ OCRDMA_ASYNC_EVENT_COS_VALUE = 0x02,
+ OCRDMA_ASYNC_EVENT_PVID_STATE = 0x03
+};
enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_CQ_ERROR = 0x00,
@@ -525,8 +534,8 @@ struct ocrdma_mbx_query_config {
u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord;
u32 max_mr;
- u32 max_mr_size_lo;
u32 max_mr_size_hi;
+ u32 max_mr_size_lo;
u32 max_num_mr_pbl;
u32 max_mw;
u32 max_fmr;
@@ -580,17 +589,26 @@ enum {
OCRDMA_FN_MODE_RDMA = 0x4
};
+enum {
+ OCRDMA_IF_TYPE_MASK = 0xFFFF0000,
+ OCRDMA_IF_TYPE_SHIFT = 0x10,
+ OCRDMA_PHY_TYPE_MASK = 0x0000FFFF,
+ OCRDMA_FUTURE_DETAILS_MASK = 0xFFFF0000,
+ OCRDMA_FUTURE_DETAILS_SHIFT = 0x10,
+ OCRDMA_EX_PHY_DETAILS_MASK = 0x0000FFFF,
+ OCRDMA_FSPEED_SUPP_MASK = 0xFFFF0000,
+ OCRDMA_FSPEED_SUPP_SHIFT = 0x10,
+ OCRDMA_ASPEED_SUPP_MASK = 0x0000FFFF
+};
+
struct ocrdma_get_phy_info_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
- u16 phy_type;
- u16 interface_type;
+ u32 ityp_ptyp;
u32 misc_params;
- u16 ext_phy_details;
- u16 rsvd;
- u16 auto_speeds_supported;
- u16 fixed_speeds_supported;
+ u32 ftrdtl_exphydtl;
+ u32 fspeed_aspeed;
u32 future_use[2];
};
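
The response structs here trade u16/u8 members for whole u32 words decoded with the mask/shift enums above, avoiding any reliance on compiler packing of sub-word fields. A standalone sketch of the decode for ityp_ptyp:

	#include <stdio.h>
	#include <stdint.h>

	#define IF_TYPE_MASK	0xFFFF0000u	/* OCRDMA_IF_TYPE_MASK  */
	#define IF_TYPE_SHIFT	0x10		/* OCRDMA_IF_TYPE_SHIFT */
	#define PHY_TYPE_MASK	0x0000FFFFu	/* OCRDMA_PHY_TYPE_MASK */

	int main(void)
	{
		uint32_t ityp_ptyp = 0x00020005;	/* sample response word */
		unsigned if_type = (ityp_ptyp & IF_TYPE_MASK) >> IF_TYPE_SHIFT;
		unsigned phy_type = ityp_ptyp & PHY_TYPE_MASK;

		printf("interface_type=%u phy_type=%u\n", if_type, phy_type);
		return 0;
	}
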
@@ -603,19 +621,34 @@ enum {
OCRDMA_PHY_SPEED_40GBPS = 0x20
};
+enum {
+ OCRDMA_PORT_NUM_MASK = 0x3F,
+ OCRDMA_PT_MASK = 0xC0,
+ OCRDMA_PT_SHIFT = 0x6,
+ OCRDMA_LINK_DUP_MASK = 0x0000FF00,
+ OCRDMA_LINK_DUP_SHIFT = 0x8,
+ OCRDMA_PHY_PS_MASK = 0x00FF0000,
+ OCRDMA_PHY_PS_SHIFT = 0x10,
+ OCRDMA_PHY_PFLT_MASK = 0xFF000000,
+ OCRDMA_PHY_PFLT_SHIFT = 0x18,
+ OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000,
+ OCRDMA_QOS_LNKSP_SHIFT = 0x10,
+ OCRDMA_LLST_MASK = 0xFF,
+ OCRDMA_PLFC_MASK = 0x00000400,
+ OCRDMA_PLFC_SHIFT = 0x8,
+ OCRDMA_PLRFC_MASK = 0x00000200,
+ OCRDMA_PLRFC_SHIFT = 0x8,
+ OCRDMA_PLTFC_MASK = 0x00000100,
+ OCRDMA_PLTFC_SHIFT = 0x8
+};
struct ocrdma_get_link_speed_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
- u8 pt_port_num;
- u8 link_duplex;
- u8 phys_port_speed;
- u8 phys_port_fault;
- u16 rsvd1;
- u16 qos_lnk_speed;
- u8 logical_lnk_status;
- u8 rsvd2[3];
+ u32 pflt_pps_ld_pnum;
+ u32 qos_lsp;
+ u32 res_lls;
};
enum {
@@ -666,8 +699,7 @@ struct ocrdma_create_cq_cmd {
u32 pgsz_pgcnt;
u32 ev_cnt_flags;
u32 eqn;
- u16 cqe_count;
- u16 pd_id;
+ u32 pdid_cqecnt;
u32 rsvd6;
struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES];
};
@@ -678,6 +710,10 @@ struct ocrdma_create_cq {
};
enum {
+ OCRDMA_CREATE_CQ_CMD_PDID_SHIFT = 0x10
+};
+
+enum {
OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF
};
@@ -1231,7 +1267,6 @@ struct ocrdma_destroy_srq {
enum {
OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16),
- OCRDMA_PD_MAX_DPP_ENABLED_QP = 8,
OCRDMA_DPP_PAGE_SIZE = 4096
};
@@ -1896,12 +1931,62 @@ struct ocrdma_rdma_stats_resp {
struct ocrdma_rx_dbg_stats rx_dbg_stats;
} __packed;
+enum {
+ OCRDMA_HBA_ATTRB_EPROM_VER_LO_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_EPROM_VER_HI_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_EPROM_VER_HI_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_CDBLEN_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_ASIC_REV_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_ASIC_REV_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_GUID0_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_GUID0_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_GUID13_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_GUID14_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_GUID14_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_GUID15_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_GUID15_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PCNT_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_PCNT_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_LDTOUT_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_ISCSI_VER_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_ISCSI_VER_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_MFUNC_DEV_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_MFUNC_DEV_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_CV_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_HBA_ST_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_HBA_ST_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_MAX_DOMS_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_MAX_DOMS_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PTNUM_MASK = 0x3F000000,
+ OCRDMA_HBA_ATTRB_PTNUM_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_PT_MASK = 0xC0000000,
+ OCRDMA_HBA_ATTRB_PT_SHIFT = 0x1E,
+ OCRDMA_HBA_ATTRB_ISCSI_FET_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_ASIC_GEN_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_ASIC_GEN_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_PCI_VID_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_PCI_DID_MASK = 0xFFFF0000,
+ OCRDMA_HBA_ATTRB_PCI_DID_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PCI_SVID_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_PCI_SSID_MASK = 0xFFFF0000,
+ OCRDMA_HBA_ATTRB_PCI_SSID_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PCI_BUSNUM_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_PCI_DEVNUM_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_PCI_DEVNUM_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_PCI_FUNCNUM_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_PCI_FUNCNUM_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_IF_TYPE_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_IF_TYPE_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_NETFIL_MASK = 0xFF
+};
struct mgmt_hba_attribs {
u8 flashrom_version_string[32];
u8 manufacturer_name[32];
u32 supported_modes;
- u32 rsvd0[3];
+ u32 rsvd_eprom_verhi_verlo;
+ u32 mbx_ds_ver;
+ u32 epfw_ds_ver;
u8 ncsi_ver_string[12];
u32 default_extended_timeout;
u8 controller_model_number[32];
@@ -1914,34 +1999,26 @@ struct mgmt_hba_attribs {
u8 driver_version_string[32];
u8 fw_on_flash_version_string[32];
u32 functionalities_supported;
- u16 max_cdblength;
- u8 asic_revision;
- u8 generational_guid[16];
- u8 hba_port_count;
- u16 default_link_down_timeout;
- u8 iscsi_ver_min_max;
- u8 multifunction_device;
- u8 cache_valid;
- u8 hba_status;
- u8 max_domains_supported;
- u8 phy_port;
+ u32 guid0_asicrev_cdblen;
+ u8 generational_guid[12];
+ u32 portcnt_guid15;
+ u32 mfuncdev_iscsi_ldtout;
+ u32 ptpnum_maxdoms_hbast_cv;
u32 firmware_post_status;
u32 hba_mtu[8];
- u32 rsvd1[4];
+ u32 res_asicgen_iscsi_feaures;
+ u32 rsvd1[3];
};
struct mgmt_controller_attrib {
struct mgmt_hba_attribs hba_attribs;
- u16 pci_vendor_id;
- u16 pci_device_id;
- u16 pci_sub_vendor_id;
- u16 pci_sub_system_id;
- u8 pci_bus_number;
- u8 pci_device_number;
- u8 pci_function_number;
- u8 interface_type;
- u64 unique_identifier;
- u32 rsvd0[5];
+ u32 pci_did_vid;
+ u32 pci_ssid_svid;
+ u32 ityp_fnum_devnum_bnum;
+ u32 uid_hi;
+ u32 uid_lo;
+ u32 res_nnetfil;
+ u32 rsvd0[4];
};
struct ocrdma_get_ctrl_attribs_rsp {
@@ -1949,5 +2026,79 @@ struct ocrdma_get_ctrl_attribs_rsp {
struct mgmt_controller_attrib ctrl_attribs;
};
+#define OCRDMA_SUBSYS_DCBX 0x10
+
+enum OCRDMA_DCBX_OPCODE {
+ OCRDMA_CMD_GET_DCBX_CONFIG = 0x01
+};
+
+enum OCRDMA_DCBX_PARAM_TYPE {
+ OCRDMA_PARAMETER_TYPE_ADMIN = 0x00,
+ OCRDMA_PARAMETER_TYPE_OPER = 0x01,
+ OCRDMA_PARAMETER_TYPE_PEER = 0x02
+};
+
+enum OCRDMA_DCBX_APP_PROTO {
+ OCRDMA_APP_PROTO_ROCE = 0x8915
+};
+
+enum OCRDMA_DCBX_PROTO {
+ OCRDMA_PROTO_SELECT_L2 = 0x00,
+ OCRDMA_PROTO_SELECT_L4 = 0x01
+};
+
+enum OCRDMA_DCBX_APP_PARAM {
+ OCRDMA_APP_PARAM_APP_PROTO_MASK = 0xFFFF,
+ OCRDMA_APP_PARAM_PROTO_SEL_MASK = 0xFF,
+ OCRDMA_APP_PARAM_PROTO_SEL_SHIFT = 0x10,
+ OCRDMA_APP_PARAM_VALID_MASK = 0xFF,
+ OCRDMA_APP_PARAM_VALID_SHIFT = 0x18
+};
+
+enum OCRDMA_DCBX_STATE_FLAGS {
+ OCRDMA_STATE_FLAG_ENABLED = 0x01,
+ OCRDMA_STATE_FLAG_ADDVERTISED = 0x02,
+ OCRDMA_STATE_FLAG_WILLING = 0x04,
+ OCRDMA_STATE_FLAG_SYNC = 0x08,
+ OCRDMA_STATE_FLAG_UNSUPPORTED = 0x40000000,
+ OCRDMA_STATE_FLAG_NEG_FAILD = 0x80000000
+};
+
+enum OCRDMA_TCV_AEV_OPV_ST {
+ OCRDMA_DCBX_TC_SUPPORT_MASK = 0xFF,
+ OCRDMA_DCBX_TC_SUPPORT_SHIFT = 0x18,
+ OCRDMA_DCBX_APP_ENTRY_SHIFT = 0x10,
+ OCRDMA_DCBX_OP_PARAM_SHIFT = 0x08,
+ OCRDMA_DCBX_STATE_MASK = 0xFF
+};
+
+struct ocrdma_app_parameter {
+ u32 valid_proto_app;
+ u32 oui;
+ u32 app_prio[2];
+};
+
+struct ocrdma_dcbx_cfg {
+ u32 tcv_aev_opv_st;
+ u32 tc_state;
+ u32 pfc_state;
+ u32 qcn_state;
+ u32 appl_state;
+ u32 ll_state;
+ u32 tc_bw[2];
+ u32 tc_prio[8];
+ u32 pfc_prio[2];
+ struct ocrdma_app_parameter app_param[15];
+};
+
+struct ocrdma_get_dcbx_cfg_req {
+ struct ocrdma_mbx_hdr hdr;
+ u32 param_type;
+} __packed;
+
+struct ocrdma_get_dcbx_cfg_rsp {
+ struct ocrdma_mbx_rsp hdr;
+ struct ocrdma_dcbx_cfg cfg;
+} __packed;
#endif /* __OCRDMA_SLI_H__ */
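
The app_param[] table is where a parser such as ocrdma_parse_dcbxcfg_rsp() (not shown in this diff) would look for the RoCE ethertype 0x8915 and read off a priority. The sketch below is an assumption-laden illustration of that scan: the valid/protocol bit layout follows the OCRDMA_APP_PARAM_* enums above, but the app_prio interpretation is guessed.

	#include <stdio.h>
	#include <stdint.h>

	#define APP_PROTO_ROCE	0x8915	/* OCRDMA_APP_PROTO_ROCE            */
	#define APP_PROTO_MASK	0xFFFFu	/* OCRDMA_APP_PARAM_APP_PROTO_MASK  */
	#define APP_VALID_SHIFT	0x18	/* OCRDMA_APP_PARAM_VALID_SHIFT     */
	#define APP_VALID_MASK	0xFFu	/* OCRDMA_APP_PARAM_VALID_MASK      */

	struct app_parameter {
		uint32_t valid_proto_app;
		uint32_t oui;
		uint32_t app_prio[2];
	};

	/* Return the lowest priority advertised for RoCE, or -1 (the
	 * app_prio bitmap interpretation is an assumption). */
	static int roce_service_level(const struct app_parameter *app, int n)
	{
		int i, prio;

		for (i = 0; i < n; i++) {
			if (!((app[i].valid_proto_app >> APP_VALID_SHIFT) &
			      APP_VALID_MASK))
				continue;
			if ((app[i].valid_proto_app & APP_PROTO_MASK) !=
			    APP_PROTO_ROCE)
				continue;
			for (prio = 0; prio < 8; prio++)
				if (app[i].app_prio[0] & (1u << prio))
					return prio;
		}
		return -1;
	}

	int main(void)
	{
		struct app_parameter app = {
			.valid_proto_app = (1u << APP_VALID_SHIFT) | APP_PROTO_ROCE,
			.oui = 0,
			.app_prio = { 1u << 5, 0 },	/* advertise priority 5 */
		};

		printf("service level %d\n", roce_service_level(&app, 1));
		return 0;
	}
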
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 8bae8718fa53..e8b8569788c0 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -69,11 +69,11 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
- attr->max_mr_size = ~0ull;
+ attr->max_mr_size = dev->attr.max_mr_size;
attr->page_size_cap = 0xffff000;
attr->vendor_id = dev->nic_info.pdev->vendor;
attr->vendor_part_id = dev->nic_info.pdev->device;
- attr->hw_ver = 0;
+ attr->hw_ver = dev->asic_id;
attr->max_qp = dev->attr.max_qp;
attr->max_ah = OCRDMA_MAX_AH;
attr->max_qp_wr = dev->attr.max_wqe;
@@ -101,7 +101,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
attr->max_srq_sge = dev->attr.max_srq_sge;
attr->max_srq_wr = dev->attr.max_rqe;
attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
- attr->max_fast_reg_page_list_len = 0;
+ attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
attr->max_pkeys = 1;
return 0;
}
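
These attributes surface to applications through libibverbs; with this change max_mr_size, hw_ver and max_fast_reg_page_list_len report real device limits instead of placeholders. A small consumer-side check using standard libibverbs calls (link with -libverbs):

	#include <stdio.h>
	#include <infiniband/verbs.h>

	int main(void)
	{
		int n;
		struct ibv_device **list = ibv_get_device_list(&n);
		struct ibv_context *ctx;
		struct ibv_device_attr attr;

		if (!list)
			return 1;
		ctx = n ? ibv_open_device(list[0]) : NULL;
		if (ctx && ibv_query_device(ctx, &attr) == 0)
			printf("max_mr_size=%llu max_frmr_pages=%d\n",
			       (unsigned long long)attr.max_mr_size,
			       attr.max_fast_reg_page_list_len);
		if (ctx)
			ibv_close_device(ctx);
		ibv_free_device_list(list);
		return 0;
	}
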
@@ -268,7 +268,8 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
pd->dpp_enabled =
ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
pd->num_dpp_qp =
- pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
+ pd->dpp_enabled ? (dev->nic_info.db_page_size /
+ dev->attr.wqe_size) : 0;
}
retry:
@@ -328,7 +329,10 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
struct ocrdma_pd *pd = uctx->cntxt_pd;
struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
- BUG_ON(uctx->pd_in_use);
+ if (uctx->pd_in_use) {
+ pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
+ __func__, dev->id, pd->id);
+ }
uctx->cntxt_pd = NULL;
status = _ocrdma_dealloc_pd(dev, pd);
return status;
@@ -843,6 +847,13 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
if (mr->umem)
ib_umem_release(mr->umem);
kfree(mr);
+
+ /* Don't stop cleanup, in case FW is unresponsive */
+ if (dev->mqe_ctx.fw_error_state) {
+ status = 0;
+ pr_err("%s(%d) fw not responding.\n",
+ __func__, dev->id);
+ }
return status;
}
@@ -2054,6 +2065,13 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
while (wr) {
+ if (qp->qp_type == IB_QPT_UD &&
+ (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM)) {
+ *bad_wr = wr;
+ status = -EINVAL;
+ break;
+ }
if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
wr->num_sge > qp->sq.max_sges) {
*bad_wr = wr;
@@ -2488,6 +2506,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
*stop = true;
expand = false;
}
+ } else if (is_hw_sq_empty(qp)) {
+ /* Do nothing */
+ expand = false;
+ *polled = false;
+ *stop = false;
} else {
*polled = true;
expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
@@ -2593,6 +2616,11 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
*stop = true;
expand = false;
}
+ } else if (is_hw_rq_empty(qp)) {
+ /* Do nothing */
+ expand = false;
+ *polled = false;
+ *stop = false;
} else {
*polled = true;
expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
@@ -2818,11 +2846,9 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
if (cq->first_arm) {
ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
cq->first_arm = false;
- goto skip_defer;
}
- cq->deferred_arm = true;
-skip_defer:
+ cq->deferred_arm = true;
cq->deferred_sol = sol_needed;
spin_unlock_irqrestore(&cq->cq_lock, flags);
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 799a0c3bffc4..6abd3ed3cd51 100644
--- a/drivers/infiniband/hw/qib/qib_debugfs.c
+++ b/drivers/infiniband/hw/qib/qib_debugfs.c
@@ -193,6 +193,7 @@ static void *_qp_stats_seq_start(struct seq_file *s, loff_t *pos)
struct qib_qp_iter *iter;
loff_t n = *pos;
+ rcu_read_lock();
iter = qib_qp_iter_init(s->private);
if (!iter)
return NULL;
@@ -224,7 +225,7 @@ static void *_qp_stats_seq_next(struct seq_file *s, void *iter_ptr,
static void _qp_stats_seq_stop(struct seq_file *s, void *iter_ptr)
{
- /* nothing for now */
+ rcu_read_unlock();
}
static int _qp_stats_seq_show(struct seq_file *s, void *iter_ptr)
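
With rcu_read_lock() moved into seq_start and the unlock into seq_stop, the read-side critical section now brackets the whole iteration, which is what lets qib_qp_iter_next() below drop its per-step lock/refcount juggling (the seq_file core guarantees stop is called for every start). The resulting shape, as a sketch with hypothetical first_qp()/next_qp() helpers:

	/* sketch only, not the qib code */
	static void *stats_seq_start(struct seq_file *s, loff_t *pos)
	{
		rcu_read_lock();		/* held across the whole walk */
		return first_qp(s->private, *pos);
	}

	static void *stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
	{
		++*pos;
		return next_qp(v);		/* rcu_dereference() inside */
	}

	static void stats_seq_stop(struct seq_file *s, void *v)
	{
		rcu_read_unlock();		/* pairs with start() */
	}
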
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 8d3c78ddc906..729da39c49ed 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1222,7 +1222,7 @@ static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
#define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
-static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
+static const struct pci_device_id qib_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
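
DEFINE_PCI_DEVICE_TABLE was deprecated in favor of plain const arrays like the one above. A device table in that style looks like this (the IDs below are placeholders, not this driver's):

	#include <linux/module.h>
	#include <linux/pci.h>

	static const struct pci_device_id example_pci_tbl[] = {
		{ PCI_DEVICE(0x1077, 0x7220) },	/* placeholder vendor/device */
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);
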
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 22c720e5740d..636be117b578 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -2476,7 +2476,7 @@ int qib_create_agents(struct qib_ibdev *dev)
ibp = &dd->pport[p].ibport_data;
agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI,
NULL, 0, send_handler,
- NULL, NULL);
+ NULL, NULL, 0);
if (IS_ERR(agent)) {
ret = PTR_ERR(agent);
goto err;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 7fcc150d603c..6ddc0264aad2 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -1325,7 +1325,6 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
struct qib_qp *pqp = iter->qp;
struct qib_qp *qp;
- rcu_read_lock();
for (; n < dev->qp_table_size; n++) {
if (pqp)
qp = rcu_dereference(pqp->next);
@@ -1333,18 +1332,11 @@ int qib_qp_iter_next(struct qib_qp_iter *iter)
qp = rcu_dereference(dev->qp_table[n]);
pqp = qp;
if (qp) {
- if (iter->qp)
- atomic_dec(&iter->qp->refcount);
- atomic_inc(&qp->refcount);
- rcu_read_unlock();
iter->qp = qp;
iter->n = n;
return 0;
}
}
- rcu_read_unlock();
- if (iter->qp)
- atomic_dec(&iter->qp->refcount);
return ret;
}
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 2bc1d2b96298..74f90b2619f6 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -52,7 +52,7 @@ static void __qib_release_user_pages(struct page **p, size_t num_pages,
* Call with current->mm->mmap_sem held.
*/
static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
- struct page **p, struct vm_area_struct **vma)
+ struct page **p)
{
unsigned long lock_limit;
size_t got;
@@ -69,7 +69,7 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
num_pages - got, 1, 1,
- p + got, vma);
+ p + got, NULL);
if (ret < 0)
goto bail_release;
}
@@ -136,7 +136,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
down_write(&current->mm->mmap_sem);
- ret = __qib_get_user_pages(start_page, num_pages, p, NULL);
+ ret = __qib_get_user_pages(start_page, num_pages, p);
up_write(&current->mm->mmap_sem);
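
The pinning path above runs under mmap_sem and checks RLIMIT_MEMLOCK (the lock_limit local) before calling get_user_pages(); dropping the unused vma argument changes nothing functionally. The limit being enforced is the same one visible from userspace:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit rl;
		long page = sysconf(_SC_PAGESIZE);

		if (getrlimit(RLIMIT_MEMLOCK, &rl))
			return 1;
		printf("RLIMIT_MEMLOCK: %llu bytes (%ld pages)\n",
		       (unsigned long long)rl.rlim_cur,
		       (long)(rl.rlim_cur / page));
		return 0;
	}
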
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index fb6d026f92cd..0d0f98695d53 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -490,7 +490,7 @@ out:
/* Start of PCI section */
-static DEFINE_PCI_DEVICE_TABLE(usnic_ib_pci_ids) = {
+static const struct pci_device_id usnic_ib_pci_ids[] = {
{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
{0,}
};