author    Jason Gunthorpe <jgg@mellanox.com>  2018-07-29 11:34:56 +0300
committer Jason Gunthorpe <jgg@mellanox.com>  2018-08-03 05:27:43 +0300
commit    9f49a5b5c21d58aa84e16cfdc5e99e49faefcb7a (patch)
tree      c2c4b7a369b6c4b2690e31d2a8c1b3c0f8023076 /drivers/net/ethernet/mellanox
parent    eaeb398425089cb3c8edc81a406109db94b2705c (diff)
download  linux-9f49a5b5c21d58aa84e16cfdc5e99e49faefcb7a.tar.xz
RDMA/netdev: Use priv_destructor for netdev cleanup
Now that the unregister_netdev flow for IPoIB no longer relies on external
code, we can introduce the use of priv_destructor and needs_free_netdev.

The rdma_netdev flow is switched to use the common netdev priv_destructor
instead of the special free_rdma_netdev, and the IPoIB ULP is adjusted:

- priv_destructor needs to switch to point to the ULP's destructor,
  which will then call the rdma_ndev's in the right order
- We need to be careful around the error unwind of register_netdev,
  as it sometimes calls priv_destructor on failure
- ULPs need to use ndo_init/uninit to ensure proper ordering of failures
  around register_netdev

Switching to priv_destructor is a necessary pre-requisite to using the
rtnl new_link mechanism.

The VNIC user of rdma_netdev should also be revised, but that is left
for another patch.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Denis Drozdov <denisd@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
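To make the cleanup ordering concrete, below is a minimal sketch (not the actual
IPoIB code from this series; the names ulp_priv, ulp_dev_init and
ulp_priv_destructor are hypothetical) of how a ULP can chain its own
priv_destructor in front of the one installed by the rdma_netdev allocator, and
hook the chaining through ndo_init so it happens in the register_netdev path:

    #include <linux/netdevice.h>

    /* Hypothetical ULP private data; a real ULP would keep an equivalent
     * "next destructor" pointer in its own priv structure. */
    struct ulp_priv {
            void (*next_priv_destructor)(struct net_device *dev);
    };

    /* Called by the core on unregister_netdev() (and on some
     * register_netdev() error paths): run the ULP teardown first, then
     * hand off to the destructor that the rdma_netdev allocator set. */
    static void ulp_priv_destructor(struct net_device *dev)
    {
            struct ulp_priv *upriv = netdev_priv(dev);

            /* ... ULP-specific cleanup goes here ... */

            if (upriv->next_priv_destructor)
                    upriv->next_priv_destructor(dev);
    }

    static int ulp_dev_init(struct net_device *dev)
    {
            struct ulp_priv *upriv = netdev_priv(dev);

            /* Save the destructor installed by the rdma_netdev allocator
             * and put ours in front, so cleanup runs ULP first, rdma last. */
            upriv->next_priv_destructor = dev->priv_destructor;
            dev->priv_destructor = ulp_priv_destructor;
            return 0;
    }

    static const struct net_device_ops ulp_netdev_ops = {
            .ndo_init = ulp_dev_init,
            /* .ndo_uninit, .ndo_open, ... */
    };

Because the allocator below also sets netdev->needs_free_netdev, the core frees
the net_device itself, which is why the new static mlx5_rdma_netdev_free() in
this patch, unlike the removed exported version, no longer calls free_netdev().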
Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 37
1 file changed, 19 insertions, 18 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index af3bb2f7a504..b8d150d2fd72 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -580,6 +580,22 @@ static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
return 0;
}
+static void mlx5_rdma_netdev_free(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ const struct mlx5e_profile *profile = priv->profile;
+
+ mlx5e_detach_netdev(priv);
+ profile->cleanup(priv);
+ destroy_workqueue(priv->wq);
+
+ if (!ipriv->sub_interface) {
+ mlx5i_pkey_qpn_ht_cleanup(netdev);
+ mlx5e_destroy_mdev_resources(priv->mdev);
+ }
+}
+
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
struct ib_device *ibdev,
const char *name,
@@ -653,6 +669,9 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
rn->detach_mcast = mlx5i_detach_mcast;
rn->set_id = mlx5i_set_pkey_index;
+ netdev->priv_destructor = mlx5_rdma_netdev_free;
+ netdev->needs_free_netdev = 1;
+
return netdev;
destroy_ht:
@@ -665,21 +684,3 @@ err_free_netdev:
return NULL;
}
EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
-
-void mlx5_rdma_netdev_free(struct net_device *netdev)
-{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- struct mlx5i_priv *ipriv = priv->ppriv;
- const struct mlx5e_profile *profile = priv->profile;
-
- mlx5e_detach_netdev(priv);
- profile->cleanup(priv);
- destroy_workqueue(priv->wq);
-
- if (!ipriv->sub_interface) {
- mlx5i_pkey_qpn_ht_cleanup(netdev);
- mlx5e_destroy_mdev_resources(priv->mdev);
- }
- free_netdev(netdev);
-}
-EXPORT_SYMBOL(mlx5_rdma_netdev_free);