Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_rep.c')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 137
1 file changed, 110 insertions, 27 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3e7041bd5705..152b62138450 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -374,7 +374,9 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
 				  struct mlx5_eswitch_rep *rep)
 {
 	struct mlx5e_rep_sq *rep_sq, *tmp;
+	struct mlx5e_rep_sq_peer *sq_peer;
 	struct mlx5e_rep_priv *rpriv;
+	unsigned long i;
 
 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
 		return;
@@ -382,31 +384,78 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
 	rpriv = mlx5e_rep_to_rep_priv(rep);
 	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
 		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
-		if (rep_sq->send_to_vport_rule_peer)
-			mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
+		xa_for_each(&rep_sq->sq_peer, i, sq_peer) {
+			if (sq_peer->rule)
+				mlx5_eswitch_del_send_to_vport_rule(sq_peer->rule);
+
+			xa_erase(&rep_sq->sq_peer, i);
+			kfree(sq_peer);
+		}
+
+		xa_destroy(&rep_sq->sq_peer);
 		list_del(&rep_sq->list);
 		kfree(rep_sq);
 	}
 }
 
+static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
+					   struct mlx5_devcom *devcom,
+					   struct mlx5e_rep_sq *rep_sq, int i)
+{
+	struct mlx5_eswitch *peer_esw = NULL;
+	struct mlx5_flow_handle *flow_rule;
+	int tmp;
+
+	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+					peer_esw, tmp) {
+		u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
+		struct mlx5e_rep_sq_peer *sq_peer;
+		int err;
+
+		sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
+		if (!sq_peer)
+			return -ENOMEM;
+
+		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
+								rep, rep_sq->sqn);
+		if (IS_ERR(flow_rule)) {
+			kfree(sq_peer);
+			return PTR_ERR(flow_rule);
+		}
+
+		sq_peer->rule = flow_rule;
+		sq_peer->peer = peer_esw;
+		err = xa_insert(&rep_sq->sq_peer, peer_rule_idx, sq_peer, GFP_KERNEL);
+		if (err) {
+			kfree(sq_peer);
+			mlx5_eswitch_del_send_to_vport_rule(flow_rule);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
 static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 				 struct mlx5_eswitch_rep *rep,
 				 u32 *sqns_array, int sqns_num)
 {
-	struct mlx5_eswitch *peer_esw = NULL;
 	struct mlx5_flow_handle *flow_rule;
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_rep_sq *rep_sq;
+	struct mlx5_devcom *devcom;
+	bool devcom_locked = false;
 	int err;
 	int i;
 
 	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
 		return 0;
 
+	devcom = esw->dev->priv.devcom;
 	rpriv = mlx5e_rep_to_rep_priv(rep);
-	if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
-		peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
-						     MLX5_DEVCOM_ESW_OFFLOADS);
+	if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+	    mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+		devcom_locked = true;
 
 	for (i = 0; i < sqns_num; i++) {
 		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
@@ -426,31 +475,30 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
 		rep_sq->send_to_vport_rule = flow_rule;
 		rep_sq->sqn = sqns_array[i];
 
-		if (peer_esw) {
-			flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
-									rep, sqns_array[i]);
-			if (IS_ERR(flow_rule)) {
-				err = PTR_ERR(flow_rule);
+		xa_init(&rep_sq->sq_peer);
+		if (devcom_locked) {
+			err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
+			if (err) {
 				mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+				xa_destroy(&rep_sq->sq_peer);
 				kfree(rep_sq);
 				goto out_err;
 			}
-			rep_sq->send_to_vport_rule_peer = flow_rule;
 		}
 
 		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
 	}
 
-	if (peer_esw)
-		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	if (devcom_locked)
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 
 	return 0;
 
 out_err:
 	mlx5e_sqs2vport_stop(esw, rep);
 
-	if (peer_esw)
-		mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+	if (devcom_locked)
+		mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 
 	return err;
 }
@@ -1530,17 +1578,24 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
 	return rpriv->netdev;
 }
 
-static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep)
+static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep,
+					 struct mlx5_eswitch *peer_esw)
 {
+	u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_rep_sq *rep_sq;
 
+	WARN_ON_ONCE(!peer_esw);
 	rpriv = mlx5e_rep_to_rep_priv(rep);
 	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
-		if (!rep_sq->send_to_vport_rule_peer)
+		struct mlx5e_rep_sq_peer *sq_peer = xa_load(&rep_sq->sq_peer, i);
+
+		if (!sq_peer || sq_peer->peer != peer_esw)
 			continue;
-		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
-		rep_sq->send_to_vport_rule_peer = NULL;
+
+		mlx5_eswitch_del_send_to_vport_rule(sq_peer->rule);
+		xa_erase(&rep_sq->sq_peer, i);
+		kfree(sq_peer);
 	}
 }
 
@@ -1548,24 +1603,52 @@ static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
 				      struct mlx5_eswitch_rep *rep,
 				      struct mlx5_eswitch *peer_esw)
 {
+	u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
 	struct mlx5_flow_handle *flow_rule;
+	struct mlx5e_rep_sq_peer *sq_peer;
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_rep_sq *rep_sq;
+	int err;
 
 	rpriv = mlx5e_rep_to_rep_priv(rep);
 	list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
-		if (rep_sq->send_to_vport_rule_peer)
+		sq_peer = xa_load(&rep_sq->sq_peer, i);
+
+		if (sq_peer && sq_peer->peer)
 			continue;
-		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);
-		if (IS_ERR(flow_rule))
+
+		flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep,
+								rep_sq->sqn);
+		if (IS_ERR(flow_rule)) {
+			err = PTR_ERR(flow_rule);
 			goto err_out;
-		rep_sq->send_to_vport_rule_peer = flow_rule;
+		}
+
+		if (sq_peer) {
+			sq_peer->rule = flow_rule;
+			sq_peer->peer = peer_esw;
+			continue;
+		}
+		sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
+		if (!sq_peer) {
+			err = -ENOMEM;
+			goto err_sq_alloc;
+		}
+		err = xa_insert(&rep_sq->sq_peer, i, sq_peer, GFP_KERNEL);
+		if (err)
+			goto err_xa;
+		sq_peer->rule = flow_rule;
+		sq_peer->peer = peer_esw;
 	}
 
 	return 0;
 
+err_xa:
+	kfree(sq_peer);
+err_sq_alloc:
+	mlx5_eswitch_del_send_to_vport_rule(flow_rule);
 err_out:
-	mlx5e_vport_rep_event_unpair(rep);
-	return PTR_ERR(flow_rule);
+	mlx5e_vport_rep_event_unpair(rep, peer_esw);
+	return err;
 }
 
@@ -1578,7 +1661,7 @@ static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
 	if (event == MLX5_SWITCHDEV_EVENT_PAIR)
 		err = mlx5e_vport_rep_event_pair(esw, rep, data);
 	else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
-		mlx5e_vport_rep_event_unpair(rep);
+		mlx5e_vport_rep_event_unpair(rep, data);
 
 	return err;
 }
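The core change above replaces the representor SQ's single send_to_vport_rule_peer pointer with an xarray (rep_sq->sq_peer) keyed by each peer eswitch's vhca_id, so one SQ can carry a send-to-vport rule per devcom peer. The standalone kernel-module sketch below is illustrative only and not part of the patch; the demo_* names are hypothetical. It shows the same insert/lookup/teardown pattern on an xarray keyed by a peer id:

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/* Stand-in for struct mlx5e_rep_sq_peer: one entry per peer, keyed by id. */
struct demo_peer {
	int rule;	/* plays the role of the mlx5_flow_handle pointer */
};

static DEFINE_XARRAY(demo_peers);

static int __init demo_init(void)
{
	struct demo_peer *peer;
	int err;

	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
	if (!peer)
		return -ENOMEM;
	peer->rule = 1;

	/* xa_insert() refuses duplicate keys (-EBUSY), so each peer id,
	 * like MLX5_CAP_GEN(peer_esw->dev, vhca_id) in the patch, maps to
	 * at most one entry. Key 7 here is an arbitrary example id.
	 */
	err = xa_insert(&demo_peers, 7, peer, GFP_KERNEL);
	if (err) {
		kfree(peer);
		return err;
	}

	/* Point lookup by peer id, as mlx5e_vport_rep_event_unpair() does
	 * with xa_load(&rep_sq->sq_peer, i).
	 */
	peer = xa_load(&demo_peers, 7);
	pr_info("demo: peer 7 %s\n", peer ? "found" : "missing");
	return 0;
}

static void __exit demo_exit(void)
{
	struct demo_peer *peer;
	unsigned long index;

	/* Teardown mirrors mlx5e_sqs2vport_stop(): walk every entry,
	 * erase it from the xarray, then free it.
	 */
	xa_for_each(&demo_peers, index, peer) {
		xa_erase(&demo_peers, index);
		kfree(peer);
	}
	xa_destroy(&demo_peers);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("xarray keyed-by-peer-id demo");

The patch applies the same three operations per rep_sq: xa_insert() when pairing with a peer, xa_load() to find that peer's rule on unpair, and xa_for_each()/xa_erase() when tearing everything down in mlx5e_sqs2vport_stop().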