Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_lib.c')
-rw-r--r--	drivers/net/ethernet/intel/idpf/idpf_lib.c	| 313
1 file changed, 262 insertions(+), 51 deletions(-)
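A large part of the diff below splits the device's reserved MSI-X vectors between the LAN data path and the new RDMA auxiliary device: RDMA gets its reserved share (falling back to a minimum when the PCI core grants fewer vectors than requested, or when no RDMA vectors were reserved), and LAN keeps the remainder. As a rough illustration of that split, here is a minimal user-space sketch; the ex_* names and the minimum-vector value are assumptions made for the example, not the driver's API.

	/*
	 * Illustrative only: a stand-alone sketch of the LAN/RDMA MSI-X split
	 * introduced in the diff below. EX_MIN_RDMA_VEC is a stand-in for
	 * IDPF_MIN_RDMA_VEC; error handling is omitted.
	 */
	#include <stdio.h>

	#define EX_MIN_RDMA_VEC 4

	/* Split the 'actual' granted vectors between LAN and RDMA: if fewer
	 * vectors were granted than requested, RDMA falls back to its minimum
	 * share and LAN absorbs the shortfall.
	 */
	static void ex_split_vectors(int requested, int actual, int reserved_rdma,
				     int *lan, int *rdma)
	{
		*rdma = reserved_rdma ? reserved_rdma : EX_MIN_RDMA_VEC;
		if (actual < requested)
			*rdma = EX_MIN_RDMA_VEC;
		*lan = actual - *rdma;
	}

	int main(void)
	{
		int lan, rdma;

		ex_split_vectors(64, 48, 16, &lan, &rdma);
		printf("LAN %d, RDMA %d\n", lan, rdma);	/* LAN 44, RDMA 4 */
		return 0;
	}

With 64 vectors requested but only 48 granted, RDMA drops back to its minimum and the LAN pool keeps the remaining 44, which mirrors what idpf_intr_req() does in the change set that follows.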
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 80382ff4a5fa..8a941f0fb048 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -4,6 +4,8 @@
 #include "idpf.h"
 #include "idpf_virtchnl.h"
 #include "idpf_ptp.h"
+#include "xdp.h"
+#include "xsk.h"
 
 static const struct net_device_ops idpf_netdev_ops;
 
@@ -88,6 +90,8 @@ void idpf_intr_rel(struct idpf_adapter *adapter)
 	idpf_deinit_vector_stack(adapter);
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
+	kfree(adapter->rdma_msix_entries);
+	adapter->rdma_msix_entries = NULL;
 }
 
 /**
@@ -299,13 +303,33 @@ rel_lock:
  */
 int idpf_intr_req(struct idpf_adapter *adapter)
 {
+	u16 num_lan_vecs, min_lan_vecs, num_rdma_vecs = 0, min_rdma_vecs = 0;
 	u16 default_vports = idpf_get_default_vports(adapter);
 	int num_q_vecs, total_vecs, num_vec_ids;
-	int min_vectors, v_actual, err;
+	int min_vectors, actual_vecs, err;
 	unsigned int vector;
 	u16 *vecids;
+	int i;
 
 	total_vecs = idpf_get_reserved_vecs(adapter);
+	num_lan_vecs = total_vecs;
+	if (idpf_is_rdma_cap_ena(adapter)) {
+		num_rdma_vecs = idpf_get_reserved_rdma_vecs(adapter);
+		min_rdma_vecs = IDPF_MIN_RDMA_VEC;
+
+		if (!num_rdma_vecs) {
+			/* If idpf_get_reserved_rdma_vecs is 0, vectors are
+			 * pulled from the LAN pool.
+			 */
+			num_rdma_vecs = min_rdma_vecs;
+		} else if (num_rdma_vecs < min_rdma_vecs) {
+			dev_err(&adapter->pdev->dev,
+				"Not enough vectors reserved for RDMA (min: %u, current: %u)\n",
+				min_rdma_vecs, num_rdma_vecs);
+			return -EINVAL;
+		}
+	}
+
 	num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
 
 	err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
@@ -316,52 +340,76 @@ int idpf_intr_req(struct idpf_adapter *adapter)
 		return -EAGAIN;
 	}
 
-	min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
-	v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
-					 total_vecs, PCI_IRQ_MSIX);
-	if (v_actual < min_vectors) {
-		dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
-			v_actual);
-		err = -EAGAIN;
+	min_lan_vecs = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
+	min_vectors = min_lan_vecs + min_rdma_vecs;
+	actual_vecs = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
+					    total_vecs, PCI_IRQ_MSIX);
+	if (actual_vecs < 0) {
+		dev_err(&adapter->pdev->dev, "Failed to allocate minimum MSIX vectors required: %d\n",
+			min_vectors);
+		err = actual_vecs;
 		goto send_dealloc_vecs;
 	}
 
-	adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
-					GFP_KERNEL);
+	if (idpf_is_rdma_cap_ena(adapter)) {
+		if (actual_vecs < total_vecs) {
+			dev_warn(&adapter->pdev->dev,
+				 "Warning: %d vectors requested, only %d available. Defaulting to minimum (%d) for RDMA and remaining for LAN.\n",
+				 total_vecs, actual_vecs, IDPF_MIN_RDMA_VEC);
+			num_rdma_vecs = IDPF_MIN_RDMA_VEC;
+		}
+		adapter->rdma_msix_entries = kcalloc(num_rdma_vecs,
+						     sizeof(struct msix_entry),
+						     GFP_KERNEL);
+		if (!adapter->rdma_msix_entries) {
+			err = -ENOMEM;
+			goto free_irq;
+		}
+	}
+
+	num_lan_vecs = actual_vecs - num_rdma_vecs;
+	adapter->msix_entries = kcalloc(num_lan_vecs, sizeof(struct msix_entry),
+					GFP_KERNEL);
 	if (!adapter->msix_entries) {
 		err = -ENOMEM;
-		goto free_irq;
+		goto free_rdma_msix;
 	}
 
 	adapter->mb_vector.v_idx = le16_to_cpu(adapter->caps.mailbox_vector_id);
 
-	vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
+	vecids = kcalloc(actual_vecs, sizeof(u16), GFP_KERNEL);
 	if (!vecids) {
 		err = -ENOMEM;
 		goto free_msix;
 	}
 
-	num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
+	num_vec_ids = idpf_get_vec_ids(adapter, vecids, actual_vecs,
 				       &adapter->req_vec_chunks->vchunks);
-	if (num_vec_ids < v_actual) {
+	if (num_vec_ids < actual_vecs) {
 		err = -EINVAL;
 		goto free_vecids;
 	}
 
-	for (vector = 0; vector < v_actual; vector++) {
+	for (vector = 0; vector < num_lan_vecs; vector++) {
 		adapter->msix_entries[vector].entry = vecids[vector];
 		adapter->msix_entries[vector].vector =
 			pci_irq_vector(adapter->pdev, vector);
 	}
+	for (i = 0; i < num_rdma_vecs; vector++, i++) {
+		adapter->rdma_msix_entries[i].entry = vecids[vector];
+		adapter->rdma_msix_entries[i].vector =
			pci_irq_vector(adapter->pdev, vector);
+	}
 
-	adapter->num_req_msix = total_vecs;
-	adapter->num_msix_entries = v_actual;
 	/* 'num_avail_msix' is used to distribute excess vectors to the vports
 	 * after considering the minimum vectors required per each default
 	 * vport
 	 */
-	adapter->num_avail_msix = v_actual - min_vectors;
+	adapter->num_avail_msix = num_lan_vecs - min_lan_vecs;
+	adapter->num_msix_entries = num_lan_vecs;
+	if (idpf_is_rdma_cap_ena(adapter))
+		adapter->num_rdma_msix_entries = num_rdma_vecs;
 
 	/* Fill MSIX vector lifo stack with vector indexes */
 	err = idpf_init_vector_stack(adapter);
@@ -383,6 +431,9 @@ free_vecids:
 free_msix:
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
+free_rdma_msix:
+	kfree(adapter->rdma_msix_entries);
+	adapter->rdma_msix_entries = NULL;
 free_irq:
 	pci_free_irq_vectors(adapter->pdev);
 send_dealloc_vecs:
@@ -727,6 +778,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
 	np->vport_idx = vport->idx;
 	np->vport_id = vport->vport_id;
 	np->max_tx_hdr_size = idpf_get_max_tx_hdr_size(adapter);
+	np->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
 
 	spin_lock_init(&np->stats_lock);
 
@@ -755,6 +807,10 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
 	if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
 		dflt_features |= NETIF_F_RXHASH;
+	if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
+			    VIRTCHNL2_CAP_FLOW_STEER) &&
+	    idpf_vport_is_cap_ena(vport, VIRTCHNL2_VPORT_SIDEBAND_FLOW_STEER))
+		dflt_features |= NETIF_F_NTUPLE;
 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V4))
 		csum_offloads |= NETIF_F_IP_CSUM;
 	if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_TX_CSUM_L4V6))
@@ -781,6 +837,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
 	netdev->hw_features |= netdev->features | other_offloads;
 	netdev->vlan_features |= netdev->features | other_offloads;
 	netdev->hw_enc_features |= dflt_features | other_offloads;
+	idpf_xdp_set_features(vport);
+
 	idpf_set_ethtool_ops(netdev);
 	netif_set_affinity_auto(netdev);
 	SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
@@ -830,14 +888,18 @@ static void idpf_remove_features(struct idpf_vport *vport)
 /**
  * idpf_vport_stop - Disable a vport
  * @vport: vport to disable
+ * @rtnl: whether to take RTNL lock
  */
-static void idpf_vport_stop(struct idpf_vport *vport)
+static void idpf_vport_stop(struct idpf_vport *vport, bool rtnl)
 {
 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
 
 	if (np->state <= __IDPF_VPORT_DOWN)
 		return;
 
+	if (rtnl)
+		rtnl_lock();
+
 	netif_carrier_off(vport->netdev);
 	netif_tx_disable(vport->netdev);
 
@@ -856,9 +918,13 @@ static void idpf_vport_stop(struct idpf_vport *vport)
 	vport->link_up = false;
 	idpf_vport_intr_deinit(vport);
+	idpf_xdp_rxq_info_deinit_all(vport);
 	idpf_vport_queues_rel(vport);
 	idpf_vport_intr_rel(vport);
 	np->state = __IDPF_VPORT_DOWN;
+
+	if (rtnl)
+		rtnl_unlock();
 }
 
 /**
@@ -882,7 +948,7 @@ static int idpf_stop(struct net_device *netdev)
 	idpf_vport_ctrl_lock(netdev);
 	vport = idpf_netdev_to_vport(netdev);
 
-	idpf_vport_stop(vport);
+	idpf_vport_stop(vport, false);
 
 	idpf_vport_ctrl_unlock(netdev);
 
@@ -972,8 +1038,10 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
 	struct idpf_adapter *adapter = vport->adapter;
 	unsigned int i = vport->idx;
 
+	idpf_idc_deinit_vport_aux_device(vport->vdev_info);
+
 	idpf_deinit_mac_addr(vport);
-	idpf_vport_stop(vport);
+	idpf_vport_stop(vport, true);
 
 	if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
 		idpf_decfg_netdev(vport);
@@ -1079,8 +1147,10 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
 	if (!vport)
 		return vport;
 
+	num_max_q = max(max_q->max_txq, max_q->max_rxq) + IDPF_RESERVED_VECS;
 	if (!adapter->vport_config[idx]) {
 		struct idpf_vport_config *vport_config;
+		struct idpf_q_coalesce *q_coal;
 
 		vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
 		if (!vport_config) {
@@ -1089,6 +1159,21 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
 			return NULL;
 		}
 
+		q_coal = kcalloc(num_max_q, sizeof(*q_coal), GFP_KERNEL);
+		if (!q_coal) {
+			kfree(vport_config);
+			kfree(vport);
+
+			return NULL;
+		}
+		for (int i = 0; i < num_max_q; i++) {
+			q_coal[i].tx_intr_mode = IDPF_ITR_DYNAMIC;
+			q_coal[i].tx_coalesce_usecs = IDPF_ITR_TX_DEF;
+			q_coal[i].rx_intr_mode = IDPF_ITR_DYNAMIC;
+			q_coal[i].rx_coalesce_usecs = IDPF_ITR_RX_DEF;
+		}
+		vport_config->user_config.q_coalesce = q_coal;
+
 		adapter->vport_config[idx] = vport_config;
 	}
 
@@ -1098,7 +1183,6 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
 	vport->default_vport = adapter->num_alloc_vports <
 			       idpf_get_default_vports(adapter);
 
-	num_max_q = max(max_q->max_txq, max_q->max_rxq);
 	vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
 	if (!vport->q_vector_idxs)
 		goto free_vport;
@@ -1237,13 +1321,13 @@ static void idpf_restore_features(struct idpf_vport *vport)
  */
 static int idpf_set_real_num_queues(struct idpf_vport *vport)
 {
-	int err;
+	int err, txq = vport->num_txq - vport->num_xdp_txq;
 
 	err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
 	if (err)
 		return err;
 
-	return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq);
+	return netif_set_real_num_tx_queues(vport->netdev, txq);
 }
 
 /**
@@ -1298,8 +1382,9 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
 /**
  * idpf_vport_open - Bring up a vport
  * @vport: vport to bring up
+ * @rtnl: whether to take RTNL lock
  */
-static int idpf_vport_open(struct idpf_vport *vport)
+static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
 {
 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
 	struct idpf_adapter *adapter = vport->adapter;
@@ -1309,6 +1394,9 @@ static int idpf_vport_open(struct idpf_vport *vport)
 	if (np->state != __IDPF_VPORT_DOWN)
 		return -EBUSY;
 
+	if (rtnl)
+		rtnl_lock();
+
 	/* we do not allow interface up just yet */
 	netif_carrier_off(vport->netdev);
 
@@ -1316,7 +1404,7 @@ static int idpf_vport_open(struct idpf_vport *vport)
 	if (err) {
 		dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
 			vport->vport_id, err);
-		return err;
+		goto err_rtnl_unlock;
 	}
 
 	err = idpf_vport_queues_alloc(vport);
@@ -1337,35 +1425,44 @@ static int idpf_vport_open(struct idpf_vport *vport)
 		goto queues_rel;
 	}
 
-	err = idpf_rx_bufs_init_all(vport);
+	err = idpf_queue_reg_init(vport);
 	if (err) {
-		dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
+		dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
 			vport->vport_id, err);
 		goto queues_rel;
 	}
 
-	err = idpf_queue_reg_init(vport);
+	err = idpf_rx_bufs_init_all(vport);
 	if (err) {
-		dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
+		dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
 			vport->vport_id, err);
 		goto queues_rel;
 	}
 
 	idpf_rx_init_buf_tail(vport);
+
+	err = idpf_xdp_rxq_info_init_all(vport);
+	if (err) {
+		netdev_err(vport->netdev,
+			   "Failed to initialize XDP RxQ info for vport %u: %pe\n",
+			   vport->vport_id, ERR_PTR(err));
+		goto intr_deinit;
+	}
+
 	idpf_vport_intr_ena(vport);
 
 	err = idpf_send_config_queues_msg(vport);
 	if (err) {
 		dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
 			vport->vport_id, err);
-		goto intr_deinit;
+		goto rxq_deinit;
 	}
 
 	err = idpf_send_map_unmap_queue_vector_msg(vport, true);
 	if (err) {
 		dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
 			vport->vport_id, err);
-		goto intr_deinit;
+		goto rxq_deinit;
 	}
 
 	err = idpf_send_enable_queues_msg(vport);
@@ -1403,6 +1500,9 @@ static int idpf_vport_open(struct idpf_vport *vport)
 		goto deinit_rss;
 	}
 
+	if (rtnl)
+		rtnl_unlock();
+
 	return 0;
 
 deinit_rss:
@@ -1413,6 +1513,8 @@ disable_queues:
 	idpf_send_disable_queues_msg(vport);
 unmap_queue_vectors:
 	idpf_send_map_unmap_queue_vector_msg(vport, false);
+rxq_deinit:
+	idpf_xdp_rxq_info_deinit_all(vport);
 intr_deinit:
 	idpf_vport_intr_deinit(vport);
 queues_rel:
@@ -1420,6 +1522,10 @@ queues_rel:
 intr_rel:
 	idpf_vport_intr_rel(vport);
 
+err_rtnl_unlock:
+	if (rtnl)
+		rtnl_unlock();
+
 	return err;
 }
 
@@ -1476,11 +1582,10 @@ void idpf_init_task(struct work_struct *work)
 	index = vport->idx;
 	vport_config = adapter->vport_config[index];
 
-	init_waitqueue_head(&vport->sw_marker_wq);
-
 	spin_lock_init(&vport_config->mac_filter_list_lock);
 
 	INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
+	INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
 
 	err = idpf_check_supported_desc_ids(vport);
 	if (err) {
@@ -1499,7 +1604,7 @@ void idpf_init_task(struct work_struct *work)
 	np = netdev_priv(vport->netdev);
 	np->state = __IDPF_VPORT_DOWN;
 	if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
-		idpf_vport_open(vport);
+		idpf_vport_open(vport, true);
 
 	/* Spawn and return 'idpf_init_task' work queue until all the
 	 * default vports are created
@@ -1738,6 +1843,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
 	} else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
 		bool is_reset = idpf_is_reset_detected(adapter);
 
+		idpf_idc_issue_reset_event(adapter->cdev_info);
+
 		idpf_set_vport_state(adapter);
 		idpf_vc_core_deinit(adapter);
 		if (!is_reset)
@@ -1785,6 +1892,10 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
 unlock_mutex:
 	mutex_unlock(&adapter->vport_ctrl_lock);
 
+	/* Wait until all vports are created to init RDMA CORE AUX */
+	if (!err)
+		err = idpf_idc_init(adapter);
+
 	return err;
 }
 
@@ -1868,6 +1979,9 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
 		idpf_vport_calc_num_q_desc(new_vport);
 		break;
 	case IDPF_SR_MTU_CHANGE:
+		idpf_idc_vdev_mtu_event(vport->vdev_info,
+					IIDC_RDMA_EVENT_BEFORE_MTU_CHANGE);
+		break;
 	case IDPF_SR_RSC_CHANGE:
 		break;
 	default:
@@ -1880,7 +1994,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
 		idpf_send_delete_queues_msg(vport);
 	} else {
 		set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
-		idpf_vport_stop(vport);
+		idpf_vport_stop(vport, false);
 	}
 
 	idpf_deinit_rss(vport);
@@ -1910,11 +2024,9 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
 		goto err_open;
 
 	if (current_state == __IDPF_VPORT_UP)
-		err = idpf_vport_open(vport);
+		err = idpf_vport_open(vport, false);
 
-	kfree(new_vport);
-
-	return err;
+	goto free_vport;
 
 err_reset:
 	idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
@@ -1922,11 +2034,15 @@ err_reset:
 
 err_open:
 	if (current_state == __IDPF_VPORT_UP)
-		idpf_vport_open(vport);
+		idpf_vport_open(vport, false);
 
 free_vport:
 	kfree(new_vport);
 
+	if (reset_cause == IDPF_SR_MTU_CHANGE)
+		idpf_idc_vdev_mtu_event(vport->vdev_info,
+					IIDC_RDMA_EVENT_AFTER_MTU_CHANGE);
+
 	return err;
 }
 
@@ -2156,7 +2272,7 @@ static int idpf_open(struct net_device *netdev)
 	if (err)
 		goto unlock;
 
-	err = idpf_vport_open(vport);
+	err = idpf_vport_open(vport, false);
 
 unlock:
 	idpf_vport_ctrl_unlock(netdev);
@@ -2189,6 +2305,92 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
 }
 
 /**
+ * idpf_chk_tso_segment - Check skb is not using too many buffers
+ * @skb: send buffer
+ * @max_bufs: maximum number of buffers
+ *
+ * For TSO we need to count the TSO header and segment payload separately. As
+ * such we need to check cases where we have max_bufs-1 fragments or more as we
+ * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
+ * for the segment payload in the first descriptor, and another max_buf-1 for
+ * the fragments.
+ *
+ * Returns true if the packet needs to be software segmented by core stack.
+ */
+static bool idpf_chk_tso_segment(const struct sk_buff *skb,
+				 unsigned int max_bufs)
+{
+	const struct skb_shared_info *shinfo = skb_shinfo(skb);
+	const skb_frag_t *frag, *stale;
+	int nr_frags, sum;
+
+	/* no need to check if number of frags is less than max_bufs - 1 */
+	nr_frags = shinfo->nr_frags;
+	if (nr_frags < (max_bufs - 1))
+		return false;
+
+	/* We need to walk through the list and validate that each group
+	 * of max_bufs-2 fragments totals at least gso_size.
+	 */
+	nr_frags -= max_bufs - 2;
+	frag = &shinfo->frags[0];
+
+	/* Initialize size to the negative value of gso_size minus 1. We use
+	 * this as the worst case scenario in which the frag ahead of us only
+	 * provides one byte which is why we are limited to max_bufs-2
+	 * descriptors for a single transmit as the header and previous
+	 * fragment are already consuming 2 descriptors.
+	 */
+	sum = 1 - shinfo->gso_size;
+
+	/* Add size of frags 0 through 4 to create our initial sum */
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+	sum += skb_frag_size(frag++);
+
+	/* Walk through fragments adding latest fragment, testing it, and
+	 * then removing stale fragments from the sum.
+	 */
+	for (stale = &shinfo->frags[0];; stale++) {
+		int stale_size = skb_frag_size(stale);
+
+		sum += skb_frag_size(frag++);
+
+		/* The stale fragment may present us with a smaller
+		 * descriptor than the actual fragment size. To account
+		 * for that we need to remove all the data on the front and
+		 * figure out what the remainder would be in the last
+		 * descriptor associated with the fragment.
+		 */
+		if (stale_size > IDPF_TX_MAX_DESC_DATA) {
+			int align_pad = -(skb_frag_off(stale)) &
+					(IDPF_TX_MAX_READ_REQ_SIZE - 1);
+
+			sum -= align_pad;
+			stale_size -= align_pad;
+
+			do {
+				sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+				stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+			} while (stale_size > IDPF_TX_MAX_DESC_DATA);
+		}
+
+		/* if sum is negative we failed to make sufficient progress */
+		if (sum < 0)
+			return true;
+
+		if (!nr_frags--)
+			break;
+
+		sum -= stale_size;
+	}
+
+	return false;
+}
+
+/**
  * idpf_features_check - Validate packet conforms to limits
  * @skb: skb buffer
  * @netdev: This port's netdev
@@ -2209,12 +2411,15 @@ static netdev_features_t idpf_features_check(struct sk_buff *skb,
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return features;
 
-	/* We cannot support GSO if the MSS is going to be less than
-	 * 88 bytes. If it is then we need to drop support for GSO.
-	 */
-	if (skb_is_gso(skb) &&
-	    (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS))
-		features &= ~NETIF_F_GSO_MASK;
+	if (skb_is_gso(skb)) {
+		/* We cannot support GSO if the MSS is going to be less than
+		 * 88 bytes. If it is then we need to drop support for GSO.
+		 */
+		if (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS)
+			features &= ~NETIF_F_GSO_MASK;
+		else if (idpf_chk_tso_segment(skb, np->tx_max_bufs))
+			features &= ~NETIF_F_GSO_MASK;
+	}
 
 	/* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
 	len = skb_network_offset(skb);
@@ -2261,6 +2466,7 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
 	struct idpf_netdev_priv *np = netdev_priv(netdev);
 	struct idpf_vport_config *vport_config;
 	struct sockaddr *addr = p;
+	u8 old_mac_addr[ETH_ALEN];
 	struct idpf_vport *vport;
 	int err = 0;
 
@@ -2284,17 +2490,19 @@ static int idpf_set_mac(struct net_device *netdev, void *p)
 	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
 		goto unlock_mutex;
 
+	ether_addr_copy(old_mac_addr, vport->default_mac_addr);
+	ether_addr_copy(vport->default_mac_addr, addr->sa_data);
 	vport_config = vport->adapter->vport_config[vport->idx];
 	err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
 	if (err) {
 		__idpf_del_mac_filter(vport_config, addr->sa_data);
+		ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
 		goto unlock_mutex;
 	}
 
-	if (is_valid_ether_addr(vport->default_mac_addr))
-		idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);
+	if (is_valid_ether_addr(old_mac_addr))
+		__idpf_del_mac_filter(vport_config, old_mac_addr);
 
-	ether_addr_copy(vport->default_mac_addr, addr->sa_data);
 	eth_hw_addr_set(netdev, addr->sa_data);
 
 unlock_mutex:
@@ -2409,4 +2617,7 @@ static const struct net_device_ops idpf_netdev_ops = {
 	.ndo_tx_timeout = idpf_tx_timeout,
 	.ndo_hwtstamp_get = idpf_hwtstamp_get,
 	.ndo_hwtstamp_set = idpf_hwtstamp_set,
+	.ndo_bpf = idpf_xdp,
+	.ndo_xdp_xmit = idpf_xdp_xmit,
+	.ndo_xsk_wakeup = idpf_xsk_wakeup,
 };
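The new idpf_chk_tso_segment() above effectively walks the skb fragments with a sliding window: every group of max_bufs-2 consecutive fragments has to add up to at least gso_size, otherwise the packet is handed back to the stack for software GSO. The stand-alone sketch below shows the same window check over a plain array of fragment sizes; the ex_* names and the sample values are made up for the example, and the per-descriptor alignment handling (IDPF_TX_MAX_DESC_DATA) is omitted.

	/*
	 * Illustrative only: the sliding-window progress check from
	 * idpf_chk_tso_segment(), redone over a plain array of fragment sizes.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	/* Return true if any window of (max_bufs - 2) consecutive fragments
	 * sums to less than gso_size, i.e. the skb would need software
	 * segmentation.
	 */
	static bool ex_needs_sw_tso(const int *frag, int nr_frags, int max_bufs,
				    int gso_size)
	{
		int win = max_bufs - 2, sum = 0;

		if (nr_frags < max_bufs - 1)
			return false;

		for (int i = 0; i < nr_frags; i++) {
			sum += frag[i];
			if (i >= win)
				sum -= frag[i - win];	/* drop the stale fragment */
			if (i >= win - 1 && sum < gso_size)
				return true;
		}
		return false;
	}

	int main(void)
	{
		int frags[] = { 4096, 64, 64, 64, 64, 64, 64, 4096 };

		/* max_bufs = 8, MSS = 1448: prints 1 (software GSO needed) */
		printf("%d\n", ex_needs_sw_tso(frags, 8, 8, 1448));
		return 0;
	}

In the sample run, the six 64-byte fragments in a row cannot cover a 1448-byte MSS within one window of six fragments, so the check reports that the packet should be segmented in software, which is what the driver now signals by clearing NETIF_F_GSO_MASK in idpf_features_check().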