Diffstat (limited to 'drivers/infiniband/hw/mlx4')
-rw-r--r--  drivers/infiniband/hw/mlx4/Kconfig    |   1
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c      |  20
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c     | 182
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c      |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h  |   5
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c       |   8
-rw-r--r--  drivers/infiniband/hw/mlx4/sysfs.c    |   6
7 files changed, 58 insertions(+), 166 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index db4aa13ebae0..d1de3285fd88 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,7 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
depends on NETDEVICES && ETHERNET && PCI && INET
+ depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
depends on MAY_USE_DEVLINK
select NET_VENDOR_MELLANOX
select MLX4_CORE
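[Note] The added line uses the usual Kconfig idiom: "depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS" is a no-op when that symbol is y or n, but when it is m it caps MLX4_INFINIBAND at m as well, so a built-in mlx4_ib cannot reference symbols that only exist in the modular user-access code (presumably needed here because the driver now calls rdma_user_mmap_io(), see the main.c hunk below).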
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index e5466d786bb1..8942f5f7f04d 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -807,15 +807,17 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int err;
struct ib_port_attr pattr;
- if (in_wc && in_wc->qp->qp_num) {
- pr_debug("received MAD: slid:%d sqpn:%d "
- "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
- in_wc->slid, in_wc->src_qp,
- in_wc->dlid_path_bits,
- in_wc->qp->qp_num,
- in_wc->wc_flags,
- in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
- be16_to_cpu(in_mad->mad_hdr.attr_id));
+ if (in_wc && in_wc->qp) {
+ pr_debug("received MAD: port:%d slid:%d sqpn:%d "
+ "dlid_bits:%d dqpn:%d wc_flags:0x%x tid:%016llx cls:%x mtd:%x atr:%x\n",
+ port_num,
+ in_wc->slid, in_wc->src_qp,
+ in_wc->dlid_path_bits,
+ in_wc->qp->qp_num,
+ in_wc->wc_flags,
+ be64_to_cpu(in_mad->mad_hdr.tid),
+ in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
+ be16_to_cpu(in_mad->mad_hdr.attr_id));
if (in_wc->wc_flags & IB_WC_GRH) {
pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
be64_to_cpu(in_grh->sgid.global.subnet_prefix),
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0bbeaaae47e0..0def2323459c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1140,144 +1140,50 @@ static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
return 0;
}
-static void mlx4_ib_vma_open(struct vm_area_struct *area)
-{
- /* vma_open is called when a new VMA is created on top of our VMA.
- * This is done through either mremap flow or split_vma (usually due
- * to mlock, madvise, munmap, etc.). We do not support a clone of the
- * vma, as this VMA is strongly hardware related. Therefore we set the
- * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
- * calling us again and trying to do incorrect actions. We assume that
- * the original vma size is exactly a single page that there will be no
- * "splitting" operations on.
- */
- area->vm_ops = NULL;
-}
-
-static void mlx4_ib_vma_close(struct vm_area_struct *area)
-{
- struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
-
- /* It's guaranteed that all VMAs opened on a FD are closed before the
- * file itself is closed, therefore no sync is needed with the regular
- * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
- * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
- * The close operation is usually called under mm->mmap_sem except when
- * process is exiting. The exiting case is handled explicitly as part
- * of mlx4_ib_disassociate_ucontext.
- */
- mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
- area->vm_private_data;
-
- /* set the vma context pointer to null in the mlx4_ib driver's private
- * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
- */
- mlx4_ib_vma_priv_data->vma = NULL;
-}
-
-static const struct vm_operations_struct mlx4_ib_vm_ops = {
- .open = mlx4_ib_vma_open,
- .close = mlx4_ib_vma_close
-};
-
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- int i;
- struct vm_area_struct *vma;
- struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
-
- /* need to protect from a race on closing the vma as part of
- * mlx4_ib_vma_close().
- */
- for (i = 0; i < HW_BAR_COUNT; i++) {
- vma = context->hw_bar_info[i].vma;
- if (!vma)
- continue;
-
- zap_vma_ptes(context->hw_bar_info[i].vma,
- context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);
-
- context->hw_bar_info[i].vma->vm_flags &=
- ~(VM_SHARED | VM_MAYSHARE);
- /* context going to be destroyed, should not access ops any more */
- context->hw_bar_info[i].vma->vm_ops = NULL;
- }
-}
-
-static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
- struct mlx4_ib_vma_private_data *vma_private_data)
-{
- vma_private_data->vma = vma;
- vma->vm_private_data = vma_private_data;
- vma->vm_ops = &mlx4_ib_vm_ops;
}
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct mlx4_ib_dev *dev = to_mdev(context->device);
- struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
- return -EINVAL;
-
- if (vma->vm_pgoff == 0) {
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_DB].vma)
- return -EINVAL;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_mucontext(context)->uar.pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
+ switch (vma->vm_pgoff) {
+ case 0:
+ return rdma_user_mmap_io(context, vma,
+ to_mucontext(context)->uar.pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot));
- } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_BF].vma)
+ case 1:
+ if (dev->dev->caps.bf_reg_size == 0)
return -EINVAL;
+ return rdma_user_mmap_io(
+ context, vma,
+ to_mucontext(context)->uar.pfn +
+ dev->dev->caps.num_uars,
+ PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_mucontext(context)->uar.pfn +
- dev->dev->caps.num_uars,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
-
- } else if (vma->vm_pgoff == 3) {
+ case 3: {
struct mlx4_clock_params params;
int ret;
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
- return -EINVAL;
-
ret = mlx4_get_internal_clock_params(dev->dev, &params);
-
if (ret)
return ret;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start,
- (pci_resource_start(dev->dev->persist->pdev,
- params.bar) +
- params.offset)
- >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma,
- &mucontext->hw_bar_info[HW_BAR_CLOCK]);
- } else {
- return -EINVAL;
+ return rdma_user_mmap_io(
+ context, vma,
+ (pci_resource_start(dev->dev->persist->pdev,
+ params.bar) +
+ params.offset) >>
+ PAGE_SHIFT,
+ PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
}
- return 0;
+ default:
+ return -EINVAL;
+ }
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
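[Note] The large removal in this hunk is the point of the main.c change: the driver used to record every mapped BAR page in hw_bar_info[], install private vm_ops to catch VMA open/close, and zap the PTEs itself in mlx4_ib_disassociate_ucontext(). That bookkeeping now lives in the RDMA core behind rdma_user_mmap_io(), which also performs the single-page length check the driver dropped above. A minimal sketch of the resulting driver-side pattern follows; the helper's prototype is inferred from how it is called here, and example_mmap()/example_bar_pfn() are invented for illustration.

#include <linux/mm.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper returning the PFN of the BAR page to expose. */
unsigned long example_bar_pfn(struct ib_ucontext *ucontext);

/*
 * rdma_user_mmap_io(ucontext, vma, pfn, size, prot) - as called in this
 * patch - validates the VMA length against 'size', performs the
 * io_remap_pfn_range(), and remembers the VMA so the core can zap it when
 * the ucontext is disassociated.
 */
static int example_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
{
	/* Only one page, exported at page offset 0 in this sketch. */
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	return rdma_user_mmap_io(ucontext, vma, example_bar_pfn(ucontext),
				 PAGE_SIZE,
				 pgprot_noncached(vma->vm_page_prot));
}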
@@ -2133,39 +2039,43 @@ out:
return err;
}
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "%x\n", dev->dev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
dev->dev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *mlx4_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *mlx4_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group mlx4_attr_group = {
+ .attrs = mlx4_class_attributes,
};
struct diag_counter {
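[Note] The sysfs conversion above is the standard pattern: each show_*() helper is renamed to <attribute>_show() so DEVICE_ATTR_RO() can wire it up, the resulting dev_attr_* entries are collected into an attribute_group, and the group is handed to the core (see the rdma_set_device_sysfs_group() call further down) instead of creating each file with device_create_file(). A generic sketch with invented names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "example\n");
}
/* Declares dev_attr_example, read-only, backed by example_show(). */
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};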
@@ -2636,7 +2546,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->dev = dev;
ibdev->bond_next_port = 0;
- strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
ibdev->ib_dev.owner = THIS_MODULE;
ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
@@ -2898,8 +2807,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_ib_alloc_diag_counters(ibdev))
goto err_steer_free_bitmap;
+ rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
ibdev->ib_dev.driver_id = RDMA_DRIVER_MLX4;
- if (ib_register_device(&ibdev->ib_dev, NULL))
+ if (ib_register_device(&ibdev->ib_dev, "mlx4_%d", NULL))
goto err_diag_counters;
if (mlx4_ib_mad_init(ibdev))
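[Note] Two related API changes meet in this hunk: the attribute group built earlier is attached with rdma_set_device_sysfs_group() before registration, and ib_register_device() now takes the printf-style device name ("mlx4_%d") rather than the driver pre-filling ib_dev.name with strlcpy(). A hedged sketch of the call sequence, with the device and group names invented:

	rdma_set_device_sysfs_group(&example_dev->ib_dev, &example_attr_group);

	/* The core resolves "%d" to a free instance number and creates the
	 * sysfs files from the attached group as part of registration.
	 */
	if (ib_register_device(&example_dev->ib_dev, "example_%d", NULL))
		goto err_unwind;	/* error label is illustrative */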
@@ -2922,12 +2832,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_notif;
}
- for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
- if (device_create_file(&ibdev->ib_dev.dev,
- mlx4_class_attributes[j]))
- goto err_notif;
- }
-
ibdev->ib_active = true;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 81ffc007e0a1..d844831179cf 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -673,7 +673,7 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
if (!list_empty(&group->pending_list))
req = list_first_entry(&group->pending_list,
struct mcast_req, group_list);
- if ((method == IB_MGMT_METHOD_GET_RESP)) {
+ if (method == IB_MGMT_METHOD_GET_RESP) {
if (req) {
send_reply_to_slave(req->func, group, &req->sa_mad, status);
--group->func[req->func].num_pend_reqs;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e10dccc7958f..8850dfc3826d 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -80,16 +80,11 @@ enum hw_bar_type {
HW_BAR_COUNT
};
-struct mlx4_ib_vma_private_data {
- struct vm_area_struct *vma;
-};
-
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
struct list_head db_page_list;
struct mutex db_page_mutex;
- struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
struct list_head wqn_ranges_list;
struct mutex wqn_ranges_mutex; /* protect wqn_ranges_list */
};
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 6dd3cd2c2f80..0711ca1dfb8f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2629,7 +2629,6 @@ enum {
static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
- enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
struct mlx4_ib_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
@@ -2639,13 +2638,8 @@ static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (cur_state != new_state || cur_state != IB_QPS_RESET) {
- int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- ll = rdma_port_get_link_layer(&dev->ib_dev, port);
- }
-
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, ll)) {
+ attr_mask)) {
pr_debug("qpn 0x%x: invalid attribute mask specified "
"for transition %d to %d. qp_type %d,"
" attr_mask 0x%x\n",
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index e219093d2764..752bdd536130 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -818,9 +818,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
if (!mlx4_is_master(dev->dev))
return 0;
- dev->iov_parent =
- kobject_create_and_add("iov",
- kobject_get(dev->ib_dev.ports_parent->parent));
+ dev->iov_parent = kobject_create_and_add("iov", &dev->ib_dev.dev.kobj);
if (!dev->iov_parent) {
ret = -ENOMEM;
goto err;
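[Note] The sysfs.c change drops the extra reference dance on the parent: kobject_create_and_add() pins its parent for as long as the child exists, so parenting "iov" directly under the IB device's kobject makes the explicit kobject_get() here, and the matching kobject_put() calls in the error and unregister paths, unnecessary. A minimal sketch of the lifetime, with invented names:

	struct kobject *iov;

	/* The child holds its own reference on the parent while it lives. */
	iov = kobject_create_and_add("iov", &example_dev->ib_dev.dev.kobj);
	if (!iov)
		return -ENOMEM;

	/* ... populate entries under "iov" ... */

	kobject_put(iov);	/* one put releases the child and its parent reference */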
@@ -850,7 +848,6 @@ err_add_entries:
err_ports:
kobject_put(dev->iov_parent);
err:
- kobject_put(dev->ib_dev.ports_parent->parent);
pr_err("mlx4_ib_device_register_sysfs error (%d)\n", ret);
return ret;
}
@@ -886,5 +883,4 @@ void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device)
kobject_put(device->ports_parent);
kobject_put(device->iov_parent);
kobject_put(device->iov_parent);
- kobject_put(device->ib_dev.ports_parent->parent);
}