diff options
Diffstat (limited to 'drivers/net/macsec.c')
-rw-r--r-- | drivers/net/macsec.c | 133 |
1 file changed, 73 insertions, 60 deletions
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index bf8ac7a3ded7..25616247d7a5 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -528,9 +528,9 @@ static void count_tx(struct net_device *dev, int ret, int len) } } -static void macsec_encrypt_done(struct crypto_async_request *base, int err) +static void macsec_encrypt_done(void *data, int err) { - struct sk_buff *skb = base->data; + struct sk_buff *skb = data; struct net_device *dev = skb->dev; struct macsec_dev *macsec = macsec_priv(dev); struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa; @@ -835,9 +835,9 @@ static void count_rx(struct net_device *dev, int len) u64_stats_update_end(&stats->syncp); } -static void macsec_decrypt_done(struct crypto_async_request *base, int err) +static void macsec_decrypt_done(void *data, int err) { - struct sk_buff *skb = base->data; + struct sk_buff *skb = data; struct net_device *dev = skb->dev; struct macsec_dev *macsec = macsec_priv(dev); struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; @@ -2583,16 +2583,56 @@ static bool macsec_is_configured(struct macsec_dev *macsec) return false; } +static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload) +{ + enum macsec_offload prev_offload; + const struct macsec_ops *ops; + struct macsec_context ctx; + struct macsec_dev *macsec; + int ret = 0; + + macsec = macsec_priv(dev); + + /* Check if the offloading mode is supported by the underlying layers */ + if (offload != MACSEC_OFFLOAD_OFF && + !macsec_check_offload(offload, macsec)) + return -EOPNOTSUPP; + + /* Check if the net device is busy. */ + if (netif_running(dev)) + return -EBUSY; + + /* Check if the device already has rules configured: we do not support + * rules migration. + */ + if (macsec_is_configured(macsec)) + return -EBUSY; + + prev_offload = macsec->offload; + + ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? 
prev_offload : offload, + macsec, &ctx); + if (!ops) + return -EOPNOTSUPP; + + macsec->offload = offload; + + ctx.secy = &macsec->secy; + ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx) + : macsec_offload(ops->mdo_add_secy, &ctx); + if (ret) + macsec->offload = prev_offload; + + return ret; +} + static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1]; - enum macsec_offload offload, prev_offload; - int (*func)(struct macsec_context *ctx); struct nlattr **attrs = info->attrs; - struct net_device *dev; - const struct macsec_ops *ops; - struct macsec_context ctx; + enum macsec_offload offload; struct macsec_dev *macsec; + struct net_device *dev; int ret = 0; if (!attrs[MACSEC_ATTR_IFINDEX]) @@ -2621,55 +2661,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) } offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); - if (macsec->offload == offload) - goto out; - - /* Check if the offloading mode is supported by the underlying layers */ - if (offload != MACSEC_OFFLOAD_OFF && - !macsec_check_offload(offload, macsec)) { - ret = -EOPNOTSUPP; - goto out; - } - - /* Check if the net device is busy. */ - if (netif_running(dev)) { - ret = -EBUSY; - goto out; - } - - prev_offload = macsec->offload; - macsec->offload = offload; - /* Check if the device already has rules configured: we do not support - * rules migration. - */ - if (macsec_is_configured(macsec)) { - ret = -EBUSY; - goto rollback; - } - - ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? 
prev_offload : offload, - macsec, &ctx); - if (!ops) { - ret = -EOPNOTSUPP; - goto rollback; - } - - if (prev_offload == MACSEC_OFFLOAD_OFF) - func = ops->mdo_add_secy; - else - func = ops->mdo_del_secy; - - ctx.secy = &macsec->secy; - ret = macsec_offload(func, &ctx); - if (ret) - goto rollback; - - rtnl_unlock(); - return 0; - -rollback: - macsec->offload = prev_offload; + if (macsec->offload != offload) + ret = macsec_update_offload(dev, offload); out: rtnl_unlock(); return ret; @@ -3817,6 +3811,8 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct macsec_dev *macsec = macsec_priv(dev); + bool macsec_offload_state_change = false; + enum macsec_offload offload; struct macsec_tx_sc tx_sc; struct macsec_secy secy; int ret; @@ -3840,8 +3836,18 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], if (ret) goto cleanup; + if (data[IFLA_MACSEC_OFFLOAD]) { + offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]); + if (macsec->offload != offload) { + macsec_offload_state_change = true; + ret = macsec_update_offload(dev, offload); + if (ret) + goto cleanup; + } + } + /* If h/w offloading is available, propagate to the device */ - if (macsec_is_offloaded(macsec)) { + if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) { const struct macsec_ops *ops; struct macsec_context ctx; @@ -4240,16 +4246,22 @@ static size_t macsec_get_size(const struct net_device *dev) nla_total_size(1) + /* IFLA_MACSEC_SCB */ nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */ nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */ + nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */ 0; } static int macsec_fill_info(struct sk_buff *skb, const struct net_device *dev) { - struct macsec_secy *secy = &macsec_priv(dev)->secy; - struct macsec_tx_sc *tx_sc = &secy->tx_sc; + struct macsec_tx_sc *tx_sc; + struct macsec_dev *macsec; + struct macsec_secy *secy; u64 csid; + macsec = macsec_priv(dev); + secy = &macsec->secy; + 
tx_sc = &secy->tx_sc; + switch (secy->key_len) { case MACSEC_GCM_AES_128_SAK_LEN: csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID; @@ -4274,6 +4286,7 @@ static int macsec_fill_info(struct sk_buff *skb, nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) || nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) || nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) || + nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) || 0) goto nla_put_failure; |