Diffstat (limited to 'drivers/infiniband/hw/mana')
 drivers/infiniband/hw/mana/counters.c |  78
 drivers/infiniband/hw/mana/counters.h |  18
 drivers/infiniband/hw/mana/cq.c       |   4
 drivers/infiniband/hw/mana/device.c   | 232
 drivers/infiniband/hw/mana/main.c     | 103
 drivers/infiniband/hw/mana/mana_ib.h  |  37
 drivers/infiniband/hw/mana/mr.c       |  37
 drivers/infiniband/hw/mana/qp.c       |   7
 8 files changed, 360 insertions(+), 156 deletions(-)
diff --git a/drivers/infiniband/hw/mana/counters.c b/drivers/infiniband/hw/mana/counters.c
index e533ce21013d..e964e74be48d 100644
--- a/drivers/infiniband/hw/mana/counters.c
+++ b/drivers/infiniband/hw/mana/counters.c
@@ -32,8 +32,32 @@ static const struct rdma_stat_desc mana_ib_port_stats_desc[] = {
[MANA_IB_RATE_INC_EVENTS].name = "rate_inc_events",
[MANA_IB_NUM_QPS_RECOVERED].name = "num_qps_recovered",
[MANA_IB_CURRENT_RATE].name = "current_rate",
+ [MANA_IB_DUP_RX_REQ].name = "dup_rx_requests",
+ [MANA_IB_TX_BYTES].name = "tx_bytes",
+ [MANA_IB_RX_BYTES].name = "rx_bytes",
+ [MANA_IB_RX_SEND_REQ].name = "rx_send_requests",
+ [MANA_IB_RX_WRITE_REQ].name = "rx_write_requests",
+ [MANA_IB_RX_READ_REQ].name = "rx_read_requests",
+ [MANA_IB_TX_PKT].name = "tx_packets",
+ [MANA_IB_RX_PKT].name = "rx_packets",
};
+static const struct rdma_stat_desc mana_ib_device_stats_desc[] = {
+ [MANA_IB_SENT_CNPS].name = "sent_cnps",
+ [MANA_IB_RECEIVED_ECNS].name = "received_ecns",
+ [MANA_IB_RECEIVED_CNP_COUNT].name = "received_cnp_count",
+ [MANA_IB_QP_CONGESTED_EVENTS].name = "qp_congested_events",
+ [MANA_IB_QP_RECOVERED_EVENTS].name = "qp_recovered_events",
+ [MANA_IB_DEV_RATE_INC_EVENTS].name = "rate_inc_events",
+};
+
+struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev)
+{
+ return rdma_alloc_hw_stats_struct(mana_ib_device_stats_desc,
+ ARRAY_SIZE(mana_ib_device_stats_desc),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
@@ -42,8 +66,39 @@ struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
-int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
- u32 port_num, int index)
+static int mana_ib_get_hw_device_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats)
+{
+ struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
+ ib_dev);
+ struct mana_rnic_query_device_cntrs_resp resp = {};
+ struct mana_rnic_query_device_cntrs_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_DEVICE_COUNTERS,
+ sizeof(req), sizeof(resp));
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
+ req.adapter = mdev->adapter_handle;
+
+ err = mana_gd_send_request(mdev_to_gc(mdev), sizeof(req), &req,
+ sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&mdev->ib_dev, "Failed to query device counters err %d",
+ err);
+ return err;
+ }
+
+ stats->value[MANA_IB_SENT_CNPS] = resp.sent_cnps;
+ stats->value[MANA_IB_RECEIVED_ECNS] = resp.received_ecns;
+ stats->value[MANA_IB_RECEIVED_CNP_COUNT] = resp.received_cnp_count;
+ stats->value[MANA_IB_QP_CONGESTED_EVENTS] = resp.qp_congested_events;
+ stats->value[MANA_IB_QP_RECOVERED_EVENTS] = resp.qp_recovered_events;
+ stats->value[MANA_IB_DEV_RATE_INC_EVENTS] = resp.rate_inc_events;
+
+ return ARRAY_SIZE(mana_ib_device_stats_desc);
+}
+
+static int mana_ib_get_hw_port_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev,
ib_dev);
@@ -53,6 +108,7 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
mana_gd_init_req_hdr(&req.hdr, MANA_IB_QUERY_VF_COUNTERS,
sizeof(req), sizeof(resp));
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
@@ -101,5 +157,23 @@ int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
stats->value[MANA_IB_NUM_QPS_RECOVERED] = resp.num_qps_recovered;
stats->value[MANA_IB_CURRENT_RATE] = resp.current_rate;
+ stats->value[MANA_IB_DUP_RX_REQ] = resp.dup_rx_req;
+ stats->value[MANA_IB_TX_BYTES] = resp.tx_bytes;
+ stats->value[MANA_IB_RX_BYTES] = resp.rx_bytes;
+ stats->value[MANA_IB_RX_SEND_REQ] = resp.rx_send_req;
+ stats->value[MANA_IB_RX_WRITE_REQ] = resp.rx_write_req;
+ stats->value[MANA_IB_RX_READ_REQ] = resp.rx_read_req;
+ stats->value[MANA_IB_TX_PKT] = resp.tx_pkt;
+ stats->value[MANA_IB_RX_PKT] = resp.rx_pkt;
+
return ARRAY_SIZE(mana_ib_port_stats_desc);
}
+
+int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ if (!port_num)
+ return mana_ib_get_hw_device_stats(ibdev, stats);
+ else
+ return mana_ib_get_hw_port_stats(ibdev, stats, port_num);
+}
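
Note on the dispatch above: the RDMA core queries stats allocated via .alloc_hw_device_stats with port_num == 0 and per-port stats with 1-based port numbers, which is what the port_num test in mana_ib_get_hw_stats() relies on. Assuming the core's standard sysfs plumbing (not part of this patch), the new counters should surface along these lines:

    /sys/class/infiniband/mana_0/hw_counters/sent_cnps          # device-wide
    /sys/class/infiniband/mana_0/ports/1/hw_counters/tx_bytes   # per-port
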
diff --git a/drivers/infiniband/hw/mana/counters.h b/drivers/infiniband/hw/mana/counters.h
index 7ff92d27f6c3..f68e776bb41d 100644
--- a/drivers/infiniband/hw/mana/counters.h
+++ b/drivers/infiniband/hw/mana/counters.h
@@ -35,10 +35,28 @@ enum mana_ib_port_counters {
MANA_IB_RATE_INC_EVENTS,
MANA_IB_NUM_QPS_RECOVERED,
MANA_IB_CURRENT_RATE,
+ MANA_IB_DUP_RX_REQ,
+ MANA_IB_TX_BYTES,
+ MANA_IB_RX_BYTES,
+ MANA_IB_RX_SEND_REQ,
+ MANA_IB_RX_WRITE_REQ,
+ MANA_IB_RX_READ_REQ,
+ MANA_IB_TX_PKT,
+ MANA_IB_RX_PKT,
+};
+
+enum mana_ib_device_counters {
+ MANA_IB_SENT_CNPS,
+ MANA_IB_RECEIVED_ECNS,
+ MANA_IB_RECEIVED_CNP_COUNT,
+ MANA_IB_QP_CONGESTED_EVENTS,
+ MANA_IB_QP_RECOVERED_EVENTS,
+ MANA_IB_DEV_RATE_INC_EVENTS,
};
struct rdma_hw_stats *mana_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num);
+struct rdma_hw_stats *mana_ib_alloc_hw_device_stats(struct ib_device *ibdev);
int mana_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
u32 port_num, int index);
#endif /* _COUNTERS_H_ */
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 0fc4e2679218..28e154bbb50f 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -15,14 +15,12 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_device *ibdev = ibcq->device;
struct mana_ib_create_cq ucmd = {};
struct mana_ib_dev *mdev;
- struct gdma_context *gc;
bool is_rnic_cq;
u32 doorbell;
u32 buf_size;
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev_to_gc(mdev);
cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
cq->cq_handle = INVALID_MANA_HANDLE;
@@ -65,7 +63,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
ibdev_dbg(ibdev, "Failed to create kernel queue for create cq, %d\n", err);
return err;
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
}
if (is_rnic_cq) {
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index b31089320aa5..fa60872f169f 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -65,6 +65,10 @@ static const struct ib_device_ops mana_ib_stats_ops = {
.get_hw_stats = mana_ib_get_hw_stats,
};
+static const struct ib_device_ops mana_ib_device_stats_ops = {
+ .alloc_hw_device_stats = mana_ib_alloc_hw_device_stats,
+};
+
static int mana_ib_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -73,131 +77,131 @@ static int mana_ib_netdev_event(struct notifier_block *this,
struct gdma_context *gc = dev->gdma_dev->gdma_context;
struct mana_context *mc = gc->mana.driver_data;
struct net_device *ndev;
+ int i;
/* Only process events from our parent device */
- if (event_dev != mc->ports[0])
- return NOTIFY_DONE;
-
- switch (event) {
- case NETDEV_CHANGEUPPER:
- ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
- /*
- * RDMA core will setup GID based on updated netdev.
- * It's not possible to race with the core as rtnl lock is being
- * held.
- */
- ib_device_set_netdev(&dev->ib_dev, ndev, 1);
-
- /* mana_get_primary_netdev() returns ndev with refcount held */
- netdev_put(ndev, &dev->dev_tracker);
-
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
+ for (i = 0; i < dev->ib_dev.phys_port_cnt; i++)
+ if (event_dev == mc->ports[i]) {
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
+ /*
+ * RDMA core will setup GID based on updated netdev.
+ * It's not possible to race with the core as rtnl lock is being
+ * held.
+ */
+ ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);
+
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ if (ndev)
+ netdev_put(ndev, &dev->dev_tracker);
+
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+ }
+ return NOTIFY_DONE;
}
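
One detail worth noting in the rewritten notifier: ib_device_set_netdev() accepts a NULL ndev, so the unguarded call doubles as "clear the GID source for this port" when mana_get_primary_netdev() finds nothing; only the refcount drop needs the NULL check. The pattern, in short:

    ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker); /* may be NULL */
    ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);          /* NULL unsets */
    if (ndev)
            netdev_put(ndev, &dev->dev_tracker);
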
static int mana_ib_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mana_adev *madev = container_of(adev, struct mana_adev, adev);
+ struct gdma_context *gc = madev->mdev->gdma_context;
+ struct mana_context *mc = gc->mana.driver_data;
struct gdma_dev *mdev = madev->mdev;
struct net_device *ndev;
- struct mana_context *mc;
struct mana_ib_dev *dev;
u8 mac_addr[ETH_ALEN];
- int ret;
-
- mc = mdev->driver_data;
+ int ret, i;
dev = ib_alloc_device(mana_ib_dev, ib_dev);
if (!dev)
return -ENOMEM;
ib_set_device_ops(&dev->ib_dev, &mana_ib_dev_ops);
-
- dev->ib_dev.phys_port_cnt = mc->num_ports;
-
- ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
- mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
-
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
-
- /*
- * num_comp_vectors needs to set to the max MSIX index
- * when interrupts and event queues are implemented
- */
- dev->ib_dev.num_comp_vectors = mdev->gdma_context->max_num_queues;
- dev->ib_dev.dev.parent = mdev->gdma_context->dev;
-
- ndev = mana_get_primary_netdev(mc, 0, &dev->dev_tracker);
- if (!ndev) {
- ret = -ENODEV;
- ibdev_err(&dev->ib_dev, "Failed to get netdev for IB port 1");
- goto free_ib_device;
- }
- ether_addr_copy(mac_addr, ndev->dev_addr);
- addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, ndev->dev_addr);
- ret = ib_device_set_netdev(&dev->ib_dev, ndev, 1);
- /* mana_get_primary_netdev() returns ndev with refcount held */
- netdev_put(ndev, &dev->dev_tracker);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
- goto free_ib_device;
- }
-
- ret = mana_gd_register_device(&mdev->gdma_context->mana_ib);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register device, ret %d",
- ret);
- goto free_ib_device;
- }
- dev->gdma_dev = &mdev->gdma_context->mana_ib;
-
- dev->nb.notifier_call = mana_ib_netdev_event;
- ret = register_netdevice_notifier(&dev->nb);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d",
- ret);
- goto deregister_device;
- }
-
- ret = mana_ib_gd_query_adapter_caps(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d",
- ret);
- goto deregister_net_notifier;
- }
-
- ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
-
- ret = mana_ib_create_eqs(dev);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
- goto deregister_net_notifier;
- }
-
- ret = mana_ib_gd_create_rnic_adapter(dev);
- if (ret)
- goto destroy_eqs;
-
+ dev->ib_dev.num_comp_vectors = gc->max_num_queues;
+ dev->ib_dev.dev.parent = gc->dev;
+ dev->gdma_dev = mdev;
xa_init_flags(&dev->qp_table_wq, XA_FLAGS_LOCK_IRQ);
- ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
- if (ret) {
- ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d",
- ret);
- goto destroy_rnic;
+
+ if (mana_ib_is_rnic(dev)) {
+ dev->ib_dev.phys_port_cnt = 1;
+ addrconf_addr_eui48((u8 *)&dev->ib_dev.node_guid, mc->ports[0]->dev_addr);
+ ret = mana_ib_gd_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query device caps, ret %d", ret);
+ goto free_ib_device;
+ }
+
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_stats_ops);
+ if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT)
+ ib_set_device_ops(&dev->ib_dev, &mana_ib_device_stats_ops);
+
+ ret = mana_ib_create_eqs(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to create EQs, ret %d", ret);
+ goto free_ib_device;
+ }
+
+ ret = mana_ib_gd_create_rnic_adapter(dev);
+ if (ret)
+ goto destroy_eqs;
+
+ if (dev->adapter_caps.feature_flags & MANA_IB_FEATURE_MULTI_PORTS_SUPPORT)
+ dev->ib_dev.phys_port_cnt = mc->num_ports;
+
+ for (i = 0; i < dev->ib_dev.phys_port_cnt; i++) {
+ ndev = mana_get_primary_netdev(mc, i, &dev->dev_tracker);
+ if (!ndev) {
+ ret = -ENODEV;
+ ibdev_err(&dev->ib_dev,
+ "Failed to get netdev for IB port %d", i + 1);
+ goto destroy_rnic;
+ }
+ ether_addr_copy(mac_addr, ndev->dev_addr);
+ ret = ib_device_set_netdev(&dev->ib_dev, ndev, i + 1);
+ /* mana_get_primary_netdev() returns ndev with refcount held */
+ netdev_put(ndev, &dev->dev_tracker);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to set ib netdev, ret %d", ret);
+ goto destroy_rnic;
+ }
+ ret = mana_ib_gd_config_mac(dev, ADDR_OP_ADD, mac_addr);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to add Mac address, ret %d", ret);
+ goto destroy_rnic;
+ }
+ }
+ dev->nb.notifier_call = mana_ib_netdev_event;
+ ret = register_netdevice_notifier(&dev->nb);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to register net notifier, %d", ret);
+ goto destroy_rnic;
+ }
+ } else {
+ dev->ib_dev.phys_port_cnt = mc->num_ports;
+ ret = mana_eth_query_adapter_caps(dev);
+ if (ret) {
+ ibdev_err(&dev->ib_dev, "Failed to query ETH device caps, ret %d", ret);
+ goto free_ib_device;
+ }
}
- dev->av_pool = dma_pool_create("mana_ib_av", mdev->gdma_context->dev,
- MANA_AV_BUFFER_SIZE, MANA_AV_BUFFER_SIZE, 0);
+ dev->av_pool = dma_pool_create("mana_ib_av", gc->dev, MANA_AV_BUFFER_SIZE,
+ MANA_AV_BUFFER_SIZE, 0);
if (!dev->av_pool) {
ret = -ENOMEM;
- goto destroy_rnic;
+ goto deregister_net_notifier;
}
- ret = ib_register_device(&dev->ib_dev, "mana_%d",
- mdev->gdma_context->dev);
+ ibdev_dbg(&dev->ib_dev, "mdev=%p id=%d num_ports=%d\n", mdev,
+ mdev->dev_id.as_uint32, dev->ib_dev.phys_port_cnt);
+
+ ret = ib_register_device(&dev->ib_dev, mana_ib_is_rnic(dev) ? "mana_%d" : "manae_%d",
+ gc->dev);
if (ret)
goto deallocate_pool;
@@ -207,16 +211,17 @@ static int mana_ib_probe(struct auxiliary_device *adev,
deallocate_pool:
dma_pool_destroy(dev->av_pool);
+deregister_net_notifier:
+ if (mana_ib_is_rnic(dev))
+ unregister_netdevice_notifier(&dev->nb);
destroy_rnic:
- xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_gd_destroy_rnic_adapter(dev);
destroy_eqs:
- mana_ib_destroy_eqs(dev);
-deregister_net_notifier:
- unregister_netdevice_notifier(&dev->nb);
-deregister_device:
- mana_gd_deregister_device(dev->gdma_dev);
+ if (mana_ib_is_rnic(dev))
+ mana_ib_destroy_eqs(dev);
free_ib_device:
+ xa_destroy(&dev->qp_table_wq);
ib_dealloc_device(&dev->ib_dev);
return ret;
}
@@ -227,25 +232,24 @@ static void mana_ib_remove(struct auxiliary_device *adev)
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
+ if (mana_ib_is_rnic(dev)) {
+ unregister_netdevice_notifier(&dev->nb);
+ mana_ib_gd_destroy_rnic_adapter(dev);
+ mana_ib_destroy_eqs(dev);
+ }
xa_destroy(&dev->qp_table_wq);
- mana_ib_gd_destroy_rnic_adapter(dev);
- mana_ib_destroy_eqs(dev);
- unregister_netdevice_notifier(&dev->nb);
- mana_gd_deregister_device(dev->gdma_dev);
ib_dealloc_device(&dev->ib_dev);
}
static const struct auxiliary_device_id mana_id_table[] = {
- {
- .name = "mana.rdma",
- },
+ { .name = "mana.rdma", },
+ { .name = "mana.eth", },
{},
};
MODULE_DEVICE_TABLE(auxiliary, mana_id_table);
static struct auxiliary_driver mana_driver = {
- .name = "rdma",
.probe = mana_ib_probe,
.remove = mana_ib_remove,
.id_table = mana_id_table,
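
Dropping .name from mana_driver is what lets one auxiliary driver serve both IDs: the auxiliary bus derives the driver name from the module when .name is NULL, and binding is keyed off the id_table entries. Probe then branches on the bound device type rather than assuming the RNIC function; a compressed sketch of the resulting split:

    if (mana_ib_is_rnic(dev)) {
            /* RoCEv2 path: adapter caps, EQs, RNIC adapter, per-port
             * netdevs/MACs, netdev notifier; registers as "mana_%d". */
    } else {
            /* Ethernet path: caps via GDMA_QUERY_MAX_RESOURCES only;
             * registers as "manae_%d" with raw-packet ports. */
    }
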
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index eda9c5b971de..6a2471f2e804 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -4,6 +4,7 @@
*/
#include "mana_ib.h"
+#include "linux/pci.h"
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port)
@@ -243,7 +244,6 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_queue_type type,
struct mana_ib_queue *queue)
{
- struct gdma_context *gc = mdev_to_gc(mdev);
struct gdma_queue_spec spec = {};
int err;
@@ -252,7 +252,7 @@ int mana_ib_create_kernel_queue(struct mana_ib_dev *mdev, u32 size, enum gdma_qu
spec.type = type;
spec.monitor_avl_buf = false;
spec.queue_size = size;
- err = mana_gd_create_mana_wq_cq(&gc->mana_ib, &spec, &queue->kmem);
+ err = mana_gd_create_mana_wq_cq(mdev->gdma_dev, &spec, &queue->kmem);
if (err)
return err;
/* take ownership into mana_ib from mana */
@@ -479,7 +479,7 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
{
unsigned long page_sz;
- page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+ page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -494,7 +494,7 @@ int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_ume
unsigned long page_sz;
/* Hardware requires dma region to align to chosen page size */
- page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+ page_sz = ib_umem_find_best_pgoff(umem, dev->adapter_caps.page_size_cap, 0);
if (!page_sz) {
ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
return -EINVAL;
@@ -551,6 +551,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct ib_port_attr attr;
int err;
@@ -560,10 +561,18 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
immutable->pkey_tbl_len = attr.pkey_tbl_len;
immutable->gid_tbl_len = attr.gid_tbl_len;
- immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
- if (port_num == 1) {
- immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ if (mana_ib_is_rnic(dev)) {
+ if (port_num == 1) {
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ } else {
+ immutable->core_cap_flags = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP
+ | RDMA_CORE_CAP_ETH_AH;
+ immutable->max_mad_size = 0;
+ }
+ } else {
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
}
return 0;
@@ -572,12 +581,14 @@ int mana_ib_get_port_immutable(struct ib_device *ibdev, u32 port_num,
int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
- struct mana_ib_dev *dev = container_of(ibdev,
- struct mana_ib_dev, ib_dev);
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct pci_dev *pdev = to_pci_dev(mdev_to_gc(dev)->dev);
memset(props, 0, sizeof(*props));
+ props->vendor_id = pdev->vendor;
+ props->vendor_part_id = dev->gdma_dev->dev_id.type;
props->max_mr_size = MANA_IB_MAX_MR_SIZE;
- props->page_size_cap = PAGE_SZ_BM;
+ props->page_size_cap = dev->adapter_caps.page_size_cap;
props->max_qp = dev->adapter_caps.max_qp_count;
props->max_qp_wr = dev->adapter_caps.max_qp_wr;
props->device_cap_flags = IB_DEVICE_RC_RNR_NAK_GEN;
@@ -596,6 +607,8 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
props->max_ah = INT_MAX;
props->max_pkeys = 1;
props->local_ca_ack_delay = MANA_CA_ACK_DELAY;
+ if (!mana_ib_is_rnic(dev))
+ props->raw_packet_caps = IB_RAW_PACKET_CAP_IP_CSUM;
return 0;
}
@@ -603,6 +616,7 @@ int mana_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
int mana_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props)
{
+ struct mana_ib_dev *dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
struct net_device *ndev = mana_ib_get_netdev(ibdev, port);
if (!ndev)
@@ -623,10 +637,11 @@ int mana_ib_query_port(struct ib_device *ibdev, u32 port,
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_EDR;
props->pkey_tbl_len = 1;
- if (port == 1) {
+ if (mana_ib_is_rnic(dev)) {
props->gid_tbl_len = 16;
- props->port_cap_flags = IB_PORT_CM_SUP;
props->ip_gids = true;
+ if (port == 1)
+ props->port_cap_flags = IB_PORT_CM_SUP;
}
return 0;
@@ -696,6 +711,41 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
caps->max_recv_sge_count = resp.max_recv_sge_count;
caps->feature_flags = resp.feature_flags;
+ caps->page_size_cap = PAGE_SZ_BM;
+ if (mdev_to_gc(dev)->pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_GDMA_PAGES_4MB_1GB_2GB)
+ caps->page_size_cap |= (SZ_4M | SZ_1G | SZ_2G);
+
+ return 0;
+}
+
+int mana_eth_query_adapter_caps(struct mana_ib_dev *dev)
+{
+ struct mana_ib_adapter_caps *caps = &dev->adapter_caps;
+ struct gdma_query_max_resources_resp resp = {};
+ struct gdma_general_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, GDMA_QUERY_MAX_RESOURCES,
+ sizeof(req), sizeof(resp));
+
+ err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req), &req, sizeof(resp), &resp);
+ if (err) {
+ ibdev_err(&dev->ib_dev,
+ "Failed to query adapter caps err %d", err);
+ return err;
+ }
+
+ caps->max_qp_count = min_t(u32, resp.max_sq, resp.max_rq);
+ caps->max_cq_count = resp.max_cq;
+ caps->max_mr_count = resp.max_mst;
+ caps->max_pd_count = 0x6000;
+ caps->max_qp_wr = min_t(u32,
+ 0x100000 / GDMA_MAX_SQE_SIZE,
+ 0x100000 / GDMA_MAX_RQE_SIZE);
+ caps->max_send_sge_count = 30;
+ caps->max_recv_sge_count = 15;
+ caps->page_size_cap = PAGE_SZ_BM;
+
return 0;
}
@@ -740,7 +790,7 @@ int mana_ib_create_eqs(struct mana_ib_dev *mdev)
spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
spec.eq.msix_index = 0;
- err = mana_gd_create_mana_eq(&gc->mana_ib, &spec, &mdev->fatal_err_eq);
+ err = mana_gd_create_mana_eq(mdev->gdma_dev, &spec, &mdev->fatal_err_eq);
if (err)
return err;
@@ -791,7 +841,7 @@ int mana_ib_gd_create_rnic_adapter(struct mana_ib_dev *mdev)
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_ADAPTER, sizeof(req), sizeof(resp));
req.hdr.req.msg_version = GDMA_MESSAGE_V2;
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.notify_eq_id = mdev->fatal_err_eq->id;
if (mdev->adapter_caps.feature_flags & MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT)
@@ -816,7 +866,7 @@ int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
gc = mdev_to_gc(mdev);
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -843,7 +893,7 @@ int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_ADD;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -873,7 +923,7 @@ int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
}
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_IP_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = ADDR_OP_REMOVE;
req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
@@ -896,7 +946,7 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.op = op;
copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
@@ -917,8 +967,11 @@ int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 do
struct mana_rnic_create_cq_req req = {};
int err;
+ if (!mdev->eqs)
+ return -EINVAL;
+
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.gdma_region = cq->queue.gdma_region;
req.eq_id = mdev->eqs[cq->comp_vector]->id;
@@ -950,7 +1003,7 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
return 0;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_CQ, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.cq_handle = cq->cq_handle;
@@ -976,7 +1029,7 @@ int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1012,7 +1065,7 @@ int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.rc_qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
@@ -1035,7 +1088,7 @@ int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
int err, i;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_CREATE_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.pd_handle = pd->pd_handle;
req.send_cq_handle = send_cq->cq_handle;
@@ -1070,7 +1123,7 @@ int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
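
The page_size_cap plumbing above feeds straight into MR registration: ib_umem_find_best_pgsz() returns the largest size in the bitmap that tiles the umem at the given IOVA, so advertising SZ_4M/SZ_1G/SZ_2G lets big registrations consume far fewer PTEs. Illustratively (values assumed, helper as used in main.c):

    /* With page_size_cap = PAGE_SZ_BM | SZ_4M | SZ_1G | SZ_2G, a 1GB-aligned
     * 2GB umem resolves to SZ_1G rather than 4KB pages: */
    page_sz = ib_umem_find_best_pgsz(umem, dev->adapter_caps.page_size_cap, virt);
    if (!page_sz)
            return -EINVAL;
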
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6903946677e5..5d31034ac7fb 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -60,6 +60,7 @@ struct mana_ib_adapter_caps {
u32 max_recv_sge_count;
u32 max_inline_data_size;
u64 feature_flags;
+ u64 page_size_cap;
};
struct mana_ib_queue {
@@ -209,6 +210,7 @@ enum mana_ib_command_code {
MANA_IB_DESTROY_RC_QP = 0x3000b,
MANA_IB_SET_QP_STATE = 0x3000d,
MANA_IB_QUERY_VF_COUNTERS = 0x30022,
+ MANA_IB_QUERY_DEVICE_COUNTERS = 0x30023,
};
struct mana_ib_query_adapter_caps_req {
@@ -217,6 +219,8 @@ struct mana_ib_query_adapter_caps_req {
enum mana_ib_adapter_features {
MANA_IB_FEATURE_CLIENT_ERROR_CQE_SUPPORT = BIT(4),
+ MANA_IB_FEATURE_DEV_COUNTERS_SUPPORT = BIT(5),
+ MANA_IB_FEATURE_MULTI_PORTS_SUPPORT = BIT(6),
};
struct mana_ib_query_adapter_caps_resp {
@@ -513,6 +517,31 @@ struct mana_rnic_query_vf_cntrs_resp {
u64 rate_inc_events;
u64 num_qps_recovered;
u64 current_rate;
+ u64 dup_rx_req;
+ u64 tx_bytes;
+ u64 rx_bytes;
+ u64 rx_send_req;
+ u64 rx_write_req;
+ u64 rx_read_req;
+ u64 tx_pkt;
+ u64 rx_pkt;
+}; /* HW Data */
+
+struct mana_rnic_query_device_cntrs_req {
+ struct gdma_req_hdr hdr;
+ mana_handle_t adapter;
+}; /* HW Data */
+
+struct mana_rnic_query_device_cntrs_resp {
+ struct gdma_resp_hdr hdr;
+ u32 sent_cnps;
+ u32 received_ecns;
+ u32 reserved1;
+ u32 received_cnp_count;
+ u32 qp_congested_events;
+ u32 qp_recovered_events;
+ u32 rate_inc_events;
+ u32 reserved2;
}; /* HW Data */
static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
@@ -543,6 +572,11 @@ static inline void mana_put_qp_ref(struct mana_ib_qp *qp)
complete(&qp->free);
}
+static inline bool mana_ib_is_rnic(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->dev_id.type == GDMA_DEVICE_MANA_IB;
+}
+
static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
{
struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
@@ -599,6 +633,7 @@ struct ib_mr *mana_ib_get_dma_mr(struct ib_pd *ibpd, int access_flags);
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 iova, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
@@ -642,6 +677,7 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
+int mana_eth_query_adapter_caps(struct mana_ib_dev *mdev);
int mana_ib_create_eqs(struct mana_ib_dev *mdev);
@@ -687,5 +723,6 @@ int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int fd, int mr_access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
#endif
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index f99557ec7767..55701046ffba 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -5,8 +5,8 @@
#include "mana_ib.h"
-#define VALID_MR_FLAGS \
- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)
+#define VALID_MR_FLAGS (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |\
+ IB_ACCESS_REMOTE_ATOMIC | IB_ZERO_BASED)
#define VALID_DMA_MR_FLAGS (IB_ACCESS_LOCAL_WRITE)
@@ -24,6 +24,9 @@ mana_ib_verbs_to_gdma_access_flags(int access_flags)
if (access_flags & IB_ACCESS_REMOTE_READ)
flags |= GDMA_ACCESS_FLAG_REMOTE_READ;
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= GDMA_ACCESS_FLAG_REMOTE_ATOMIC;
+
return flags;
}
@@ -48,7 +51,10 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
req.gva.virtual_address = mr_params->gva.virtual_address;
req.gva.access_flags = mr_params->gva.access_flags;
break;
-
+ case GDMA_MR_TYPE_ZBVA:
+ req.zbva.dma_region_handle = mr_params->zbva.dma_region_handle;
+ req.zbva.access_flags = mr_params->zbva.access_flags;
+ break;
default:
ibdev_dbg(&dev->ib_dev,
"invalid param (GDMA_MR_TYPE) passed, type %d\n",
@@ -100,6 +106,7 @@ static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata)
{
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
@@ -110,6 +117,9 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 dma_region_handle;
int err;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
ibdev_dbg(ibdev,
@@ -144,11 +154,18 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
- mr_params.mr_type = GDMA_MR_TYPE_GVA;
- mr_params.gva.dma_region_handle = dma_region_handle;
- mr_params.gva.virtual_address = iova;
- mr_params.gva.access_flags =
- mana_ib_verbs_to_gdma_access_flags(access_flags);
+ if (access_flags & IB_ZERO_BASED) {
+ mr_params.mr_type = GDMA_MR_TYPE_ZBVA;
+ mr_params.zbva.dma_region_handle = dma_region_handle;
+ mr_params.zbva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ } else {
+ mr_params.mr_type = GDMA_MR_TYPE_GVA;
+ mr_params.gva.dma_region_handle = dma_region_handle;
+ mr_params.gva.virtual_address = iova;
+ mr_params.gva.access_flags =
+ mana_ib_verbs_to_gdma_access_flags(access_flags);
+ }
err = mana_ib_gd_create_mr(dev, mr, &mr_params);
if (err)
@@ -175,6 +192,7 @@ err_free:
struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 length,
u64 iova, int fd, int access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs)
{
struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
@@ -186,6 +204,9 @@ struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 leng
u64 dma_region_handle;
int err;
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
access_flags &= ~IB_ACCESS_OPTIONAL;
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index c928af58f38b..a6bf4d539e67 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -635,7 +635,6 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
{
struct mana_ib_dev *mdev = container_of(ibpd->device, struct mana_ib_dev, ib_dev);
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
- struct gdma_context *gc = mdev_to_gc(mdev);
u32 doorbell, queue_size;
int i, err;
@@ -654,7 +653,7 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
goto destroy_queues;
}
}
- doorbell = gc->mana_ib.doorbell;
+ doorbell = mdev->gdma_dev->doorbell;
err = create_shadow_queue(&qp->shadow_rq, attr->cap.max_recv_wr,
sizeof(struct ud_rq_shadow_wqe));
@@ -736,7 +735,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
- req.hdr.dev_id = gc->mana_ib.dev_id;
+ req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
req.qp_state = attr->qp_state;
@@ -773,7 +772,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
req.ah_attr.dest_port = ROCE_V2_UDP_DPORT;
req.ah_attr.src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
ibqp->qp_num, attr->dest_qp_num);
- req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class;
+ req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
}
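
The final hunk's >>2 is the usual DSCP extraction: the GRH traffic_class octet packs DSCP in its upper six bits and ECN in the lower two, and the firmware field evidently expects a raw DSCP value rather than the full octet:

    /* traffic_class = DSCP(6 bits) << 2 | ECN(2 bits) */
    req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;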