Diffstat (limited to 'drivers/net')
43 files changed, 264 insertions, 80 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f298b9b3eb77..b9a882f182d2 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3247,7 +3247,7 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
 goto out;
 saddr = &combined->ip6.saddr;
- daddr = &combined->ip6.saddr;
+ daddr = &combined->ip6.daddr;
 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
 __func__, slave->dev->name, bond_slave_state(slave),
diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c
index ed3d0b8989a0..dc7192ecb001 100644
--- a/drivers/net/can/can327.c
+++ b/drivers/net/can/can327.c
@@ -796,9 +796,9 @@ static int can327_netdev_close(struct net_device *dev)
 netif_stop_queue(dev);
- /* Give UART one final chance to flush. */
- clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags);
- flush_work(&elm->tx_work);
+ /* We don't flush the UART TX queue here, as we want final stop
+ * commands (like the above dummy char) to be flushed out.
+ */
 can_rx_offload_disable(&elm->offload);
 elm->can.state = CAN_STATE_STOPPED;
@@ -1069,12 +1069,15 @@ static void can327_ldisc_close(struct tty_struct *tty)
 {
 struct can327 *elm = (struct can327 *)tty->disc_data;
- /* unregister_netdev() calls .ndo_stop() so we don't have to.
- * Our .ndo_stop() also flushes the TTY write wakeup handler,
- * so we can safely set elm->tty = NULL after this.
- */
+ /* unregister_netdev() calls .ndo_stop() so we don't have to. */
 unregister_candev(elm->dev);
+ /* Give UART one final chance to flush.
+ * No need to clear TTY_DO_WRITE_WAKEUP since .write_wakeup() is
+ * serialised against .close() and will not be called once we return.
+ */
+ flush_work(&elm->tx_work);
+
 /* Mark channel as dead */
 spin_lock_bh(&elm->lock);
 tty->disc_data = NULL;
diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c
index fbb34139daa1..f4db77007c13 100644
--- a/drivers/net/can/slcan/slcan-core.c
+++ b/drivers/net/can/slcan/slcan-core.c
@@ -864,12 +864,14 @@ static void slcan_close(struct tty_struct *tty)
 {
 struct slcan *sl = (struct slcan *)tty->disc_data;
- /* unregister_netdev() calls .ndo_stop() so we don't have to.
- * Our .ndo_stop() also flushes the TTY write wakeup handler,
- * so we can safely set sl->tty = NULL after this.
- */
 unregister_candev(sl->dev);
+ /*
+ * The netdev needn't be UP (so .ndo_stop() is not called). Hence make
+ * sure this is not running before freeing it up.
+ */
+ flush_work(&sl->tx_work);
+
 /* Mark channel as dead */
 spin_lock_bh(&sl->lock);
 tty->disc_data = NULL;
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 81b88e9e5bdc..42323f5e6f3a 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -234,6 +234,10 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
 u8 rxerr = msg->msg.rx.data[2];
 u8 txerr = msg->msg.rx.data[3];
+ netdev_dbg(priv->netdev,
+ "CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n",
+ msg->msg.rx.dlc, state, ecc, rxerr, txerr);
+
 skb = alloc_can_err_skb(priv->netdev, &cf);
 if (skb == NULL) {
 stats->rx_dropped++;
@@ -260,6 +264,8 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
 break;
 default:
 priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ txerr = 0;
+ rxerr = 0;
 break;
 }
 } else {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 2479be3a1e35..937cb22cb3d4 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -833,10 +833,13 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
 chip->info->ops->phylink_get_caps(chip, port, config);
- /* Internal ports need GMII for PHYLIB */
- if (mv88e6xxx_phy_is_internal(ds, port))
+ if (mv88e6xxx_phy_is_internal(ds, port)) {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Internal ports with no phy-mode need GMII for PHYLIB */
 __set_bit(PHY_INTERFACE_MODE_GMII,
 config->supported_interfaces);
+ }
 }
 static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 10c6fea1227f..bdbbff2a7909 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -95,6 +95,8 @@ static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
 if (IS_ERR(region)) {
 while (--i >= 0)
 dsa_devlink_region_destroy(priv->regions[i]);
+
+ kfree(priv->regions);
 return PTR_ERR(region);
 }
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 412666111b0c..b70dcf32a26d 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1038,7 +1038,7 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
 policing[bcast].sharindx = port;
 /* Only SJA1110 has multicast policers */
- if (mcast <= table->ops->max_entry_count)
+ if (mcast < table->ops->max_entry_count)
 policing[mcast].sharindx = port;
 }
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index e104fb02817d..aa0d2f3aaeaa 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -258,6 +258,7 @@ static int greth_init_rings(struct greth_private *greth)
 if (dma_mapping_error(greth->dev, dma_addr)) {
 if (netif_msg_ifup(greth))
 dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ dev_kfree_skb(skb);
 goto cleanup;
 }
 greth->rx_skbuff[i] = skb;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 55dfdb34e37b..f4ca0c6c0f51 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -71,13 +71,14 @@ config BCM63XX_ENET
 config BCMGENET
 tristate "Broadcom GENET internal MAC support"
 depends on HAS_IOMEM
+ depends on PTP_1588_CLOCK_OPTIONAL || !ARCH_BCM2835
 select MII
 select PHYLIB
 select FIXED_PHY
 select BCM7XXX_PHY
 select MDIO_BCM_UNIMAC
 select DIMLIB
- select BROADCOM_PHY if (ARCH_BCM2835 && PTP_1588_CLOCK_OPTIONAL)
+ select BROADCOM_PHY if ARCH_BCM2835
 help
 This driver supports the built-in Ethernet MACs found in the
 Broadcom BCM7xxx Set Top Box family chipset.
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 98f3dc460ca7..f2f95493ec89 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2239,7 +2239,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 err = register_netdev(netdev);
 if (err) {
 dev_err(dev, "Failed to register netdevice\n");
- goto err_unregister_interrupts;
+ goto err_destroy_workqueue;
 }
 nic->msg_enable = debug;
@@ -2248,6 +2248,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 return 0;
+err_destroy_workqueue:
+ destroy_workqueue(nic->nicvf_rx_mode_wq);
 err_unregister_interrupts:
 nicvf_unregister_interrupts(nic);
 err_free_netdev:
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index cacd454ac696..c39b866e2582 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -132,6 +132,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
 DMA_TO_DEVICE);
 if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
 dev_err(dev, "DMA mapping failed\n");
+ kfree(cmd_buff);
 return -EFAULT;
 }
@@ -142,6 +143,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block,
 DMA_TO_DEVICE);
 if (err) {
 dev_err(dev, "dpsw_acl_add_entry() failed %d\n", err);
+ kfree(cmd_buff);
 return err;
 }
@@ -172,6 +174,7 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
 DMA_TO_DEVICE);
 if (unlikely(dma_mapping_error(dev, acl_entry_cfg->key_iova))) {
 dev_err(dev, "DMA mapping failed\n");
+ kfree(cmd_buff);
 return -EFAULT;
 }
@@ -182,6 +185,7 @@ dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block,
 DMA_TO_DEVICE);
 if (err) {
 dev_err(dev, "dpsw_acl_remove_entry() failed %d\n", err);
+ kfree(cmd_buff);
 return err;
 }
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2ca2b61b451f..23e1a94b9ce4 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1220,7 +1220,8 @@ fec_restart(struct net_device *ndev)
 writel(0, fep->hwp + FEC_IMASK);
 /* Init the interrupt coalescing */
- fec_enet_itr_coal_set(ndev);
+ if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
+ fec_enet_itr_coal_set(ndev);
 }
 static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 93846bace028..ce2571c16e43 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -283,7 +283,7 @@ static int hisi_femac_rx(struct net_device *dev, int limit)
 skb->protocol = eth_type_trans(skb, dev);
 napi_gro_receive(&priv->napi, skb);
 dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_bytes += len;
 next:
 pos = (pos + 1) % rxq->num;
 if (rx_pkts_num >= limit)
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index ffcf797dfa90..f867e9531117 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -550,7 +550,7 @@ static int hix5hd2_rx(struct net_device *dev, int limit)
 skb->protocol = eth_type_trans(skb, dev);
 napi_gro_receive(&priv->napi, skb);
 dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
+ dev->stats.rx_bytes += len;
 next:
 pos = dma_ring_incr(pos, RX_DESC_NUM);
 }
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 49e926959ad3..55cf2f62bb30 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5936,9 +5936,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 e1000_tx_queue(tx_ring, tx_flags, count);
 /* Make sure there is space in the ring for the next send. */
 e1000_maybe_stop_tx(tx_ring,
- (MAX_SKB_FRAGS *
+ ((MAX_SKB_FRAGS + 1) *
 DIV_ROUND_UP(PAGE_SIZE,
- adapter->tx_fifo_limit) + 2));
+ adapter->tx_fifo_limit) + 4));
 if (!netdev_xmit_more() ||
 netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4a6a6e48c615..f6fa63e4253c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -4464,11 +4464,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
 return -EOPNOTSUPP;
 /* First 4 bytes of L4 header */
- if (usr_ip4_spec->l4_4_bytes == htonl(0xFFFFFFFF))
- new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
- else if (!usr_ip4_spec->l4_4_bytes)
- new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
- else
+ if (usr_ip4_spec->l4_4_bytes)
 return -EOPNOTSUPP;
 /* Filtering on Type of Service is not supported. */
@@ -4507,11 +4503,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
 else
 return -EOPNOTSUPP;
- if (usr_ip6_spec->l4_4_bytes == htonl(0xFFFFFFFF))
- new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
- else if (!usr_ip6_spec->l4_4_bytes)
- new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
- else
+ if (usr_ip6_spec->l4_4_bytes)
 return -EOPNOTSUPP;
 /* Filtering on Traffic class is not supported. */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b3cb587a2032..6416322d7c18 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -10655,6 +10655,21 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
 }
 /**
+ * i40e_clean_xps_state - clean xps state for every tx_ring
+ * @vsi: ptr to the VSI
+ **/
+static void i40e_clean_xps_state(struct i40e_vsi *vsi)
+{
+ int i;
+
+ if (vsi->tx_rings)
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ if (vsi->tx_rings[i])
+ clear_bit(__I40E_TX_XPS_INIT_DONE,
+ vsi->tx_rings[i]->state);
+}
+
+/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
@@ -10678,8 +10693,10 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
 i40e_pf_quiesce_all_vsi(pf);
 for (v = 0; v < pf->num_alloc_vsi; v++) {
- if (pf->vsi[v])
+ if (pf->vsi[v]) {
+ i40e_clean_xps_state(pf->vsi[v]);
 pf->vsi[v]->seid = 0;
+ }
 }
 i40e_shutdown_adminq(&pf->hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 72ddcefc45b1..635f93d60318 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1578,6 +1578,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
 i40e_cleanup_reset_vf(vf);
 i40e_flush(hw);
+ usleep_range(20000, 40000);
 clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states);
 return true;
@@ -1701,6 +1702,7 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr)
 }
 i40e_flush(hw);
+ usleep_range(20000, 40000);
 clear_bit(__I40E_VF_DISABLE, pf->state);
 return true;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e5f3e7680dc6..ff911af16a4b 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1413,6 +1413,8 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 *data = 1;
 return -1;
 }
+ wr32(E1000_IVAR_MISC, E1000_IVAR_VALID << 8);
+ wr32(E1000_EIMS, BIT(0));
 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 shared_int = false;
 if (request_irq(irq,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ff3e361e06e7..5aefaaff0871 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4271,7 +4271,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
 /* Use the cpu associated to the rxq when it is online, in all
 * the other cases, use the cpu 0 which can't be offline.
 */
- if (cpu_online(pp->rxq_def))
+ if (pp->rxq_def < nr_cpu_ids && cpu_online(pp->rxq_def))
 elected_cpu = pp->rxq_def;
 max_cpu = num_present_cpus();
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index e64318c110fd..6a01ab1a6e6f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -1134,7 +1134,12 @@ int otx2_init_tc(struct otx2_nic *nic)
 return err;
 tc->flow_ht_params = tc_flow_ht_params;
- return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
+ err = rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
+ if (err) {
+ kfree(tc->tc_entries_bitmap);
+ tc->tc_entries_bitmap = NULL;
+ }
+ return err;
 }
 EXPORT_SYMBOL(otx2_init_tc);
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 81a8ccca7e5e..5693784eec5b 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -359,7 +359,7 @@ static int regmap_encx24j600_phy_reg_read(void *context, unsigned int reg,
 goto err_out;
 usleep_range(26, 100);
- while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
+ while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
 (mistat & BUSY))
 cpu_relax();
@@ -397,7 +397,7 @@ static int regmap_encx24j600_phy_reg_write(void *context, unsigned int reg,
 goto err_out;
 usleep_range(26, 100);
- while ((ret = regmap_read(ctx->regmap, MISTAT, &mistat) != 0) &&
+ while (((ret = regmap_read(ctx->regmap, MISTAT, &mistat)) == 0) &&
 (mistat & BUSY))
 cpu_relax();
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 66360c8c5a38..141897dfe388 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -317,7 +317,7 @@ int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb)
 next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry);
 db_hw = &next_dcb_hw->db[0];
 if (!(db_hw->status & FDMA_DCB_STATUS_DONE))
- tx->dropped++;
+ return -EINVAL;
 db = list_first_entry(&tx->db_list, struct sparx5_db, list);
 list_move_tail(&db->list, &tx->db_list);
 next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index eeac04b84638..b6bbb3c9bd7a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -887,6 +887,8 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
 cleanup_ports:
 sparx5_cleanup_ports(sparx5);
+ if (sparx5->mact_queue)
+ destroy_workqueue(sparx5->mact_queue);
 cleanup_config:
 kfree(configs);
 cleanup_pnode:
@@ -911,6 +913,7 @@ static int mchp_sparx5_remove(struct platform_device *pdev)
 sparx5_cleanup_ports(sparx5);
 /* Unregister netdevs */
 sparx5_unregister_notifier_blocks(sparx5);
+ destroy_workqueue(sparx5->mact_queue);
 return 0;
 }
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 83c16ca5b30f..6db6ac6a3bbc 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -234,9 +234,8 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
 sparx5_set_port_ifh(ifh, port->portno);
 if (sparx5->ptp &&
 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- ret = sparx5_ptp_txtstamp_request(port, skb);
- if (ret)
- return ret;
+ if (sparx5_ptp_txtstamp_request(port, skb) < 0)
+ return NETDEV_TX_BUSY;
 sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
 sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
@@ -250,23 +249,31 @@ netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
 else
 ret = sparx5_inject(sparx5, ifh, skb, dev);
- if (ret == NETDEV_TX_OK) {
- stats->tx_bytes += skb->len;
- stats->tx_packets++;
+ if (ret == -EBUSY)
+ goto busy;
+ if (ret < 0)
+ goto drop;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
- return ret;
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ sparx5->tx.packets++;
- dev_kfree_skb_any(skb);
- } else {
- stats->tx_dropped++;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ return NETDEV_TX_OK;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
- sparx5_ptp_txtstamp_release(port, skb);
- }
- return ret;
+ dev_consume_skb_any(skb);
+ return NETDEV_TX_OK;
+drop:
+ stats->tx_dropped++;
+ sparx5->tx.dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+busy:
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
+ sparx5_ptp_txtstamp_release(port, skb);
+ return NETDEV_TX_BUSY;
 }
 static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h
index 4a6efe6ada08..65c24ee49efd 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma.h
+++ b/drivers/net/ethernet/microsoft/mana/gdma.h
@@ -498,7 +498,14 @@ enum {
 #define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)
-#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT
+/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
+ * so the driver is able to reliably support features like busy_poll.
+ */
+#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
+
+#define GDMA_DRV_CAP_FLAGS1 \
+ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
+ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)
 #define GDMA_DRV_CAP_FLAGS2 0
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 9259a74eca40..27a0f3af8aab 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1303,10 +1303,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
 xdp_do_flush();
 }
-static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
+static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
 {
 struct mana_cq *cq = context;
 u8 arm_bit;
+ int w;
 WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
@@ -1315,26 +1316,31 @@ static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
 else
 mana_poll_tx_cq(cq);
- if (cq->work_done < cq->budget &&
- napi_complete_done(&cq->napi, cq->work_done)) {
+ w = cq->work_done;
+
+ if (w < cq->budget &&
+ napi_complete_done(&cq->napi, w)) {
 arm_bit = SET_ARM_BIT;
 } else {
 arm_bit = 0;
 }
 mana_gd_ring_cq(gdma_queue, arm_bit);
+
+ return w;
 }
 static int mana_poll(struct napi_struct *napi, int budget)
 {
 struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
+ int w;
 cq->work_done = 0;
 cq->budget = budget;
- mana_cq_handler(cq, cq->gdma_cq);
+ w = mana_cq_handler(cq, cq->gdma_cq);
- return min(cq->work_done, budget);
+ return min(w, budget);
 }
 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index 2b427d8ccb2f..ccacb6ab6c39 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -282,7 +282,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
 dma_len = skb_headlen(skb);
 if (skb_is_gso(skb))
 type = NFDK_DESC_TX_TYPE_TSO;
- else if (!nr_frags && dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ else if (!nr_frags && dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
 type = NFDK_DESC_TX_TYPE_SIMPLE;
 else
 type = NFDK_DESC_TX_TYPE_GATHER;
@@ -927,7 +927,7 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
 dma_len = pkt_len;
 dma_addr = rxbuf->dma_addr + dma_off;
- if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ if (dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
 type = NFDK_DESC_TX_TYPE_SIMPLE;
 else
 type = NFDK_DESC_TX_TYPE_GATHER;
@@ -1325,7 +1325,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
 txbuf = &tx_ring->ktxbufs[wr_idx];
 dma_len = skb_headlen(skb);
- if (dma_len < NFDK_TX_MAX_DATA_PER_HEAD)
+ if (dma_len <= NFDK_TX_MAX_DATA_PER_HEAD)
 type = NFDK_DESC_TX_TYPE_SIMPLE;
 else
 type = NFDK_DESC_TX_TYPE_GATHER;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 6bc923326268..33f723a9f471 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -841,7 +841,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
 napi_gro_receive(&priv->napi[q],
 priv->rx_1st_skb);
 stats->rx_packets++;
- stats->rx_bytes += priv->rx_1st_skb->len;
+ stats->rx_bytes += pkt_len;
 break;
 }
 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 50f6b4a14be4..eb6d9cd8e93f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -108,10 +108,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
 axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
 axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
- axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe");
- axi->axi_fb = of_property_read_bool(np, "snps,axi_fb");
- axi->axi_mb = of_property_read_bool(np, "snps,axi_mb");
- axi->axi_rb = of_property_read_bool(np, "snps,axi_rb");
+ axi->axi_kbbe = of_property_read_bool(np, "snps,kbbe");
+ axi->axi_fb = of_property_read_bool(np, "snps,fb");
+ axi->axi_mb = of_property_read_bool(np, "snps,mb");
+ axi->axi_rb = of_property_read_bool(np, "snps,rb");
 if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
 axi->axi_wr_osr_lmt = 1;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index d04a239c658e..b3b0ba842541 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1454,7 +1454,7 @@ static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy
 if (speed == SPEED_1000)
 mac_control |= CPSW_SL_CTL_GIG;
- if (speed == SPEED_10 && interface == PHY_INTERFACE_MODE_RGMII)
+ if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface))
 /* Can be used with in band mode only */
 mac_control |= CPSW_SL_CTL_EXT_EN;
 if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 450b16ad40a4..e1a569b99e4a 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -885,7 +885,7 @@ static int ca8210_spi_transfer(
 dev_dbg(&spi->dev, "%s called\n", __func__);
- cas_ctl = kmalloc(sizeof(*cas_ctl), GFP_ATOMIC);
+ cas_ctl = kzalloc(sizeof(*cas_ctl), GFP_ATOMIC);
 if (!cas_ctl)
 return -ENOMEM;
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index c69b87d3837d..edc769daad07 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -970,7 +970,7 @@ static int cc2520_hw_init(struct cc2520_private *priv)
 if (timeout-- <= 0) {
 dev_err(&priv->spi->dev, "oscillator start failed!\n");
- return ret;
+ return -ETIMEDOUT;
 }
 udelay(1);
 } while (!(status & CC2520_STATUS_XOSC32M_STABLE));
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index f41f67b583db..2fbac51b9b19 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3698,6 +3698,7 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
 [IFLA_MACSEC_SCB] = { .type = NLA_U8 },
 [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
 [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
+ [IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
 };
 static void macsec_free_netdev(struct net_device *dev)
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index eb344f6d4a7b..b782c35c4ac1 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -98,6 +98,7 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
 */
 rc = phy_device_register(phy);
 if (rc) {
+ device_set_node(&phy->mdio.dev, NULL);
 fwnode_handle_put(child);
 return rc;
 }
@@ -153,7 +154,8 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
 /* All data is now stored in the phy struct, so register it */
 rc = phy_device_register(phy);
 if (rc) {
- fwnode_handle_put(phy->mdio.dev.fwnode);
+ phy->mdio.dev.fwnode = NULL;
+ fwnode_handle_put(child);
 goto clean_phy;
 }
 } else if (is_of_node(child)) {
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 796e9c7857d0..510822d6d0d9 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -68,8 +68,9 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
 /* All data is now stored in the mdiodev struct; register it. */
 rc = mdio_device_register(mdiodev);
 if (rc) {
+ device_set_node(&mdiodev->dev, NULL);
+ fwnode_handle_put(fwnode);
 mdio_device_free(mdiodev);
- of_node_put(child);
 return rc;
 }
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 250742ffdfd9..044828d081d2 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/unistd.h>
+#include <linux/property.h>
 void mdio_device_free(struct mdio_device *mdiodev)
 {
@@ -30,6 +31,7 @@ EXPORT_SYMBOL(mdio_device_free);
 static void mdio_device_release(struct device *dev)
 {
+ fwnode_handle_put(dev->fwnode);
 kfree(to_mdio_device(dev));
 }
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 24bae27eedef..cae24091fb6f 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -9,6 +9,7 @@
 #include <linux/module.h>
 #include <linux/bitfield.h>
 #include <linux/hwmon.h>
+#include <linux/mutex.h>
 #include <linux/phy.h>
 #include <linux/polynomial.h>
 #include <linux/netdevice.h>
@@ -70,6 +71,14 @@
 #define VPSPEC1_TEMP_STA 0x0E
 #define VPSPEC1_TEMP_STA_DATA GENMASK(9, 0)
+/* Mailbox */
+#define VSPEC1_MBOX_DATA 0x5
+#define VSPEC1_MBOX_ADDRLO 0x6
+#define VSPEC1_MBOX_CMD 0x7
+#define VSPEC1_MBOX_CMD_ADDRHI GENMASK(7, 0)
+#define VSPEC1_MBOX_CMD_RD (0 << 8)
+#define VSPEC1_MBOX_CMD_READY BIT(15)
+
 /* WoL */
 #define VPSPEC2_WOL_CTL 0x0E06
 #define VPSPEC2_WOL_AD01 0x0E08
@@ -77,7 +86,13 @@
 #define VPSPEC2_WOL_AD45 0x0E0A
 #define WOL_EN BIT(0)
+/* Internal registers, access via mbox */
+#define REG_GPIO0_OUT 0xd3ce00
+
 struct gpy_priv {
+ /* serialize mailbox acesses */
+ struct mutex mbox_lock;
+
 u8 fw_major;
 u8 fw_minor;
 };
@@ -187,6 +202,45 @@ static int gpy_hwmon_register(struct phy_device *phydev)
 }
 #endif
+static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
+{
+ struct gpy_priv *priv = phydev->priv;
+ int val, ret;
+ u16 cmd;
+
+ mutex_lock(&priv->mbox_lock);
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_MBOX_ADDRLO,
+ addr);
+ if (ret)
+ goto out;
+
+ cmd = VSPEC1_MBOX_CMD_RD;
+ cmd |= FIELD_PREP(VSPEC1_MBOX_CMD_ADDRHI, addr >> 16);
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_MBOX_CMD, cmd);
+ if (ret)
+ goto out;
+
+ /* The mbox read is used in the interrupt workaround. It was observed
+ * that a read might take up to 2.5ms. This is also the time for which
+ * the interrupt line is stuck low. To be on the safe side, poll the
+ * ready bit for 10ms.
+ */
+ ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VSPEC1_MBOX_CMD, val,
+ (val & VSPEC1_MBOX_CMD_READY),
+ 500, 10000, false);
+ if (ret)
+ goto out;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VSPEC1_MBOX_DATA);
+
+out:
+ mutex_unlock(&priv->mbox_lock);
+ return ret;
+}
+
 static int gpy_config_init(struct phy_device *phydev)
 {
 int ret;
@@ -201,6 +255,13 @@ static int gpy_config_init(struct phy_device *phydev)
 return ret < 0 ? ret : 0;
 }
+static bool gpy_has_broken_mdint(struct phy_device *phydev)
+{
+ /* At least these PHYs are known to have broken interrupt handling */
+ return phydev->drv->phy_id == PHY_ID_GPY215B ||
+ phydev->drv->phy_id == PHY_ID_GPY215C;
+}
+
 static int gpy_probe(struct phy_device *phydev)
 {
 struct device *dev = &phydev->mdio.dev;
@@ -218,6 +279,7 @@ static int gpy_probe(struct phy_device *phydev)
 if (!priv)
 return -ENOMEM;
 phydev->priv = priv;
+ mutex_init(&priv->mbox_lock);
 fw_version = phy_read(phydev, PHY_FWV);
 if (fw_version < 0)
@@ -492,6 +554,29 @@ static irqreturn_t gpy_handle_interrupt(struct phy_device *phydev)
 if (!(reg & PHY_IMASK_MASK))
 return IRQ_NONE;
+ /* The PHY might leave the interrupt line asserted even after PHY_ISTAT
+ * is read. To avoid interrupt storms, delay the interrupt handling as
+ * long as the PHY drives the interrupt line. An internal bus read will
+ * stall as long as the interrupt line is asserted, thus just read a
+ * random register here.
+ * Because we cannot access the internal bus at all while the interrupt
+ * is driven by the PHY, there is no way to make the interrupt line
+ * unstuck (e.g. by changing the pinmux to GPIO input) during that time
+ * frame. Therefore, polling is the best we can do and won't do any more
+ * harm.
+ * It was observed that this bug happens on link state and link speed
+ * changes on a GPY215B and GYP215C independent of the firmware version
+ * (which doesn't mean that this list is exhaustive).
+ */
+ if (gpy_has_broken_mdint(phydev) &&
+ (reg & (PHY_IMASK_LSTC | PHY_IMASK_LSPC))) {
+ reg = gpy_mbox_read(phydev, REG_GPIO0_OUT);
+ if (reg < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+ }
+
 phy_trigger_machine(phydev);
 return IRQ_HANDLED;
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index c8791e9b451d..40ce8abe6999 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -450,12 +450,12 @@ plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
 }
 rcv->state = PLIP_PK_DONE;
 if (rcv->skb) {
- kfree_skb(rcv->skb);
+ dev_kfree_skb_irq(rcv->skb);
 rcv->skb = NULL;
 }
 snd->state = PLIP_PK_DONE;
 if (snd->skb) {
- dev_kfree_skb(snd->skb);
+ dev_consume_skb_irq(snd->skb);
 snd->skb = NULL;
 }
 spin_unlock_irq(&nl->lock);
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index a52ee2bf5575..6312f67f260e 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -914,6 +914,7 @@ static int tbnet_open(struct net_device *dev)
 eof_mask, tbnet_start_poll, net);
 if (!ring) {
 netdev_err(dev, "failed to allocate Rx ring\n");
+ tb_xdomain_release_out_hopid(xd, hopid);
 tb_ring_free(net->tx_ring.ring);
 net->tx_ring.ring = NULL;
 return -ENOMEM;
 }
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d3e7b27eb933..6f1e560fb15c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -75,8 +75,14 @@ vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
 for (i = 0; i < adapter->intr.num_intrs; i++)
 vmxnet3_enable_intr(adapter, i);
- adapter->shared->devRead.intrConf.intrCtrl &=
+ if (!VMXNET3_VERSION_GE_6(adapter) ||
+ !adapter->queuesExtEnabled) {
+ adapter->shared->devRead.intrConf.intrCtrl &=
+ cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
+ } else {
+ adapter->shared->devReadExt.intrConfExt.intrCtrl &=
 cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
+ }
 }
@@ -85,8 +91,14 @@ vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
 {
 int i;
- adapter->shared->devRead.intrConf.intrCtrl |=
+ if (!VMXNET3_VERSION_GE_6(adapter) ||
+ !adapter->queuesExtEnabled) {
+ adapter->shared->devRead.intrConf.intrCtrl |=
+ cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ } else {
+ adapter->shared->devReadExt.intrConfExt.intrCtrl |=
 cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
+ }
 for (i = 0; i < adapter->intr.num_intrs; i++)
 vmxnet3_disable_intr(adapter, i);
 }
@@ -1396,6 +1408,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 };
 u32 num_pkts = 0;
 bool skip_page_frags = false;
+ bool encap_lro = false;
 struct Vmxnet3_RxCompDesc *rcd;
 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
 u16 segCnt = 0, mss = 0;
@@ -1556,13 +1569,18 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 if (VMXNET3_VERSION_GE_2(adapter) &&
 rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
 struct Vmxnet3_RxCompDescExt *rcdlro;
+ union Vmxnet3_GenericDesc *gdesc;
+
 rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
+ gdesc = (union Vmxnet3_GenericDesc *)rcd;
 segCnt = rcdlro->segCnt;
 WARN_ON_ONCE(segCnt == 0);
 mss = rcdlro->mss;
 if (unlikely(segCnt <= 1))
 segCnt = 0;
+ encap_lro = (le32_to_cpu(gdesc->dword[0]) &
+ (1UL << VMXNET3_RCD_HDR_INNER_SHIFT));
 } else {
 segCnt = 0;
 }
@@ -1630,7 +1648,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 vmxnet3_rx_csum(adapter, skb,
 (union Vmxnet3_GenericDesc *)rcd);
 skb->protocol = eth_type_trans(skb, adapter->netdev);
- if (!rcd->tcp ||
+ if ((!rcd->tcp && !encap_lro) ||
 !(adapter->netdev->features & NETIF_F_LRO))
 goto not_lro;
@@ -1639,7 +1657,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
 skb_shinfo(skb)->gso_size = mss;
 skb_shinfo(skb)->gso_segs = segCnt;
- } else if (segCnt != 0 || skb->len > mtu) {
+ } else if ((segCnt != 0 || skb->len > mtu) && !encap_lro) {
 u32 hlen;
 hlen = vmxnet3_get_hdr_len(adapter, skb,
@@ -1668,6 +1686,7 @@ not_lro:
 napi_gro_receive(&rq->napi, skb);
 ctx->skb = NULL;
+ encap_lro = false;
 num_pkts++;
 }
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c
index 9c7a9a2a1f25..fc928b298a98 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c
@@ -332,6 +332,7 @@ struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
 if (!ipc_mux->ul_adb.pp_qlt[i]) {
 for (j = i - 1; j >= 0; j--)
 kfree(ipc_mux->ul_adb.pp_qlt[j]);
+ kfree(ipc_mux);
 return NULL;
 }
 }
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 9af2b027c19c..dc404e05970c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1862,6 +1862,12 @@ static int netfront_resume(struct xenbus_device *dev)
 netif_tx_unlock_bh(info->netdev);
 xennet_disconnect_backend(info);
+
+ rtnl_lock();
+ if (info->queues)
+ xennet_destroy_queues(info);
+ rtnl_unlock();
+
 return 0;
 }