Diffstat (limited to 'drivers/net')
33 files changed, 419 insertions, 306 deletions
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 23b45da784cb..b89acaee12d4 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -354,10 +354,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port, /* Locate the first rule available */ if (fs->location == RX_CLS_LOC_ANY) rule_index = find_first_zero_bit(priv->cfp.used, - bcm_sf2_cfp_rule_size(priv)); + priv->num_cfp_rules); else rule_index = fs->location; + if (rule_index > bcm_sf2_cfp_rule_size(priv)) + return -ENOSPC; + layout = &udf_tcpip4_layout; /* We only use one UDF slice for now */ slice_num = bcm_sf2_get_slice_number(layout, 0); @@ -562,19 +565,21 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, * first half because the HW search is by incrementing addresses. */ if (fs->location == RX_CLS_LOC_ANY) - rule_index[0] = find_first_zero_bit(priv->cfp.used, - bcm_sf2_cfp_rule_size(priv)); + rule_index[1] = find_first_zero_bit(priv->cfp.used, + priv->num_cfp_rules); else - rule_index[0] = fs->location; + rule_index[1] = fs->location; + if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) + return -ENOSPC; /* Flag it as used (cleared on error path) such that we can immediately * obtain a second one to chain from. */ - set_bit(rule_index[0], priv->cfp.used); + set_bit(rule_index[1], priv->cfp.used); - rule_index[1] = find_first_zero_bit(priv->cfp.used, - bcm_sf2_cfp_rule_size(priv)); - if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) { + rule_index[0] = find_first_zero_bit(priv->cfp.used, + priv->num_cfp_rules); + if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) { ret = -ENOSPC; goto out_err; } @@ -712,14 +717,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, /* Flag the second half rule as being used now, return it as the * location, and flag it as unique while dumping rules */ - set_bit(rule_index[1], priv->cfp.used); + set_bit(rule_index[0], priv->cfp.used); set_bit(rule_index[1], priv->cfp.unique); fs->location = rule_index[1]; return ret; out_err: - clear_bit(rule_index[0], priv->cfp.used); + clear_bit(rule_index[1], priv->cfp.used); return ret; } @@ -785,10 +790,6 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port, int ret; u32 reg; - /* Refuse deletion of unused rules, and the default reserved rule */ - if (!test_bit(loc, priv->cfp.used) || loc == 0) - return -EINVAL; - /* Indicate which rule we want to read */ bcm_sf2_cfp_rule_addr_set(priv, loc); @@ -826,6 +827,13 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 next_loc = 0; int ret; + /* Refuse deleting unused rules, and those that are not unique since + * that could leave IPv6 rules with one of the chained rule in the + * table. + */ + if (!test_bit(loc, priv->cfp.unique) || loc == 0) + return -EINVAL; + ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc); if (ret) return ret; diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 36c8950dbd2d..176861bd2252 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1212,9 +1212,9 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, vp->mii.reg_num_mask = 0x1f; /* Makes sure rings are at least 16 byte aligned. 
*/ - vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE + vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE + sizeof(struct boom_tx_desc) * TX_RING_SIZE, - &vp->rx_ring_dma); + &vp->rx_ring_dma, GFP_KERNEL); retval = -ENOMEM; if (!vp->rx_ring) goto free_device; @@ -1476,11 +1476,10 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, return 0; free_ring: - pci_free_consistent(pdev, - sizeof(struct boom_rx_desc) * RX_RING_SIZE - + sizeof(struct boom_tx_desc) * TX_RING_SIZE, - vp->rx_ring, - vp->rx_ring_dma); + dma_free_coherent(&pdev->dev, + sizeof(struct boom_rx_desc) * RX_RING_SIZE + + sizeof(struct boom_tx_desc) * TX_RING_SIZE, + vp->rx_ring, vp->rx_ring_dma); free_device: free_netdev(dev); pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); @@ -1751,9 +1750,9 @@ vortex_open(struct net_device *dev) break; /* Bad news! */ skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ - dma = pci_map_single(VORTEX_PCI(vp), skb->data, - PKT_BUF_SZ, PCI_DMA_FROMDEVICE); - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma)) + dma = dma_map_single(vp->gendev, skb->data, + PKT_BUF_SZ, DMA_FROM_DEVICE); + if (dma_mapping_error(vp->gendev, dma)) break; vp->rx_ring[i].addr = cpu_to_le32(dma); } @@ -2067,9 +2066,9 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) if (vp->bus_master) { /* Set the bus-master controller to transfer the packet. */ int len = (skb->len + 3) & ~3; - vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, - PCI_DMA_TODEVICE); - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) { + vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) { dev_kfree_skb_any(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; @@ -2168,9 +2167,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum); if (!skb_shinfo(skb)->nr_frags) { - dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, - PCI_DMA_TODEVICE); - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(vp->gendev, dma_addr)) goto out_dma_err; vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); @@ -2178,9 +2177,9 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) } else { int i; - dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, - skb_headlen(skb), PCI_DMA_TODEVICE); - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + dma_addr = dma_map_single(vp->gendev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(vp->gendev, dma_addr)) goto out_dma_err; vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr); @@ -2189,21 +2188,21 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag, + dma_addr = skb_frag_dma_map(vp->gendev, frag, 0, frag->size, DMA_TO_DEVICE); - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) { + if (dma_mapping_error(vp->gendev, dma_addr)) { for(i = i-1; i >= 0; i--) - dma_unmap_page(&VORTEX_PCI(vp)->dev, + dma_unmap_page(vp->gendev, le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr), le32_to_cpu(vp->tx_ring[entry].frag[i+1].length), DMA_TO_DEVICE); - 
pci_unmap_single(VORTEX_PCI(vp), + dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[entry].frag[0].addr), le32_to_cpu(vp->tx_ring[entry].frag[0].length), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); goto out_dma_err; } @@ -2218,8 +2217,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) } } #else - dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE); - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) + dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(vp->gendev, dma_addr)) goto out_dma_err; vp->tx_ring[entry].addr = cpu_to_le32(dma_addr); vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG); @@ -2254,7 +2253,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) out: return NETDEV_TX_OK; out_dma_err: - dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n"); + dev_err(vp->gendev, "Error mapping dma buffer\n"); goto out; } @@ -2322,7 +2321,7 @@ vortex_interrupt(int irq, void *dev_id) if (status & DMADone) { if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) { iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */ - pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE); + dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE); pkts_compl++; bytes_compl += vp->tx_skb->len; dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */ @@ -2459,19 +2458,19 @@ boomerang_interrupt(int irq, void *dev_id) struct sk_buff *skb = vp->tx_skbuff[entry]; #if DO_ZEROCOPY int i; - pci_unmap_single(VORTEX_PCI(vp), + dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[entry].frag[0].addr), le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); for (i=1; i<=skb_shinfo(skb)->nr_frags; i++) - pci_unmap_page(VORTEX_PCI(vp), + dma_unmap_page(vp->gendev, le32_to_cpu(vp->tx_ring[entry].frag[i].addr), le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); #else - pci_unmap_single(VORTEX_PCI(vp), - le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(vp->gendev, + le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE); #endif pkts_compl++; bytes_compl += skb->len; @@ -2561,14 +2560,14 @@ static int vortex_rx(struct net_device *dev) /* 'skb_put()' points to the start of sk_buff data area. */ if (vp->bus_master && ! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) { - dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len), - pkt_len, PCI_DMA_FROMDEVICE); + dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len), + pkt_len, DMA_FROM_DEVICE); iowrite32(dma, ioaddr + Wn7_MasterAddr); iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen); iowrite16(StartDMAUp, ioaddr + EL3_CMD); while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000) ; - pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE); + dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE); } else { ioread32_rep(ioaddr + RX_FIFO, skb_put(skb, pkt_len), @@ -2635,11 +2634,11 @@ boomerang_rx(struct net_device *dev) if (pkt_len < rx_copybreak && (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ - pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE); /* 'skb_put()' points to the start of sk_buff data area. 
*/ skb_put_data(skb, vp->rx_skbuff[entry]->data, pkt_len); - pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE); vp->rx_copy++; } else { /* Pre-allocate the replacement skb. If it or its @@ -2651,9 +2650,9 @@ boomerang_rx(struct net_device *dev) dev->stats.rx_dropped++; goto clear_complete; } - newdma = pci_map_single(VORTEX_PCI(vp), newskb->data, - PKT_BUF_SZ, PCI_DMA_FROMDEVICE); - if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) { + newdma = dma_map_single(vp->gendev, newskb->data, + PKT_BUF_SZ, DMA_FROM_DEVICE); + if (dma_mapping_error(vp->gendev, newdma)) { dev->stats.rx_dropped++; consume_skb(newskb); goto clear_complete; @@ -2664,7 +2663,7 @@ boomerang_rx(struct net_device *dev) vp->rx_skbuff[entry] = newskb; vp->rx_ring[entry].addr = cpu_to_le32(newdma); skb_put(skb, pkt_len); - pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE); vp->rx_nocopy++; } skb->protocol = eth_type_trans(skb, dev); @@ -2761,8 +2760,8 @@ vortex_close(struct net_device *dev) if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ for (i = 0; i < RX_RING_SIZE; i++) if (vp->rx_skbuff[i]) { - pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr), - PKT_BUF_SZ, PCI_DMA_FROMDEVICE); + dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr), + PKT_BUF_SZ, DMA_FROM_DEVICE); dev_kfree_skb(vp->rx_skbuff[i]); vp->rx_skbuff[i] = NULL; } @@ -2775,12 +2774,12 @@ vortex_close(struct net_device *dev) int k; for (k=0; k<=skb_shinfo(skb)->nr_frags; k++) - pci_unmap_single(VORTEX_PCI(vp), + dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].frag[k].addr), le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); #else - pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE); #endif dev_kfree_skb(skb); vp->tx_skbuff[i] = NULL; @@ -3288,11 +3287,10 @@ static void vortex_remove_one(struct pci_dev *pdev) pci_iounmap(pdev, vp->ioaddr); - pci_free_consistent(pdev, - sizeof(struct boom_rx_desc) * RX_RING_SIZE - + sizeof(struct boom_tx_desc) * TX_RING_SIZE, - vp->rx_ring, - vp->rx_ring_dma); + dma_free_coherent(&pdev->dev, + sizeof(struct boom_rx_desc) * RX_RING_SIZE + + sizeof(struct boom_tx_desc) * TX_RING_SIZE, + vp->rx_ring, vp->rx_ring_dma); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index ac99d089ac72..1c97e39b478e 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -164,7 +164,9 @@ bad_clone_list[] __initdata = { #define NESM_START_PG 0x40 /* First page of TX buffer */ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ -#if defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */ +#if defined(CONFIG_MACH_TX49XX) +# define DCR_VAL 0x48 /* 8-bit mode */ +#elif defined(CONFIG_ATARI) /* 8-bit mode on Atari, normal on Q40 */ # define DCR_VAL (MACH_IS_ATARI ? 
0x48 : 0x49) #else # define DCR_VAL 0x49 diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index a561705f232c..be198cc0b10c 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1552,22 +1552,26 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) if (!ioaddr) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("card has no PCI IO resources, aborting\n"); - return -ENODEV; + err = -ENODEV; + goto err_disable_dev; } err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK); if (err) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("architecture does not support 32bit PCI busmaster DMA\n"); - return err; + goto err_disable_dev; } if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) { if (pcnet32_debug & NETIF_MSG_PROBE) pr_err("io address range already allocated\n"); - return -EBUSY; + err = -EBUSY; + goto err_disable_dev; } err = pcnet32_probe1(ioaddr, 1, pdev); + +err_disable_dev: if (err < 0) pci_disable_device(pdev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index b57acb8dc35b..dc25066c59a1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -419,15 +419,15 @@ static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ - {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */ - {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */ - {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */ - {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */ - {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */ - {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */ - {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */ - {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */ - {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */ + {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */ + {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */ + {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */ + {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */ + {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */ + {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */ + {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */ + {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */ + {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */ }; static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { @@ -444,16 +444,6 @@ static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = { {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */ - {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */ - {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */ - {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */ - {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */ - {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */ - {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */ - {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */ - {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 
*/ - {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */ - {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */ }; static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index db92f1858060..b76447baccaf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -836,7 +836,7 @@ bool is_filter_exact_match(struct adapter *adap, { struct tp_params *tp = &adap->params.tp; u64 hash_filter_mask = tp->hash_filter_mask; - u32 mask; + u64 ntuple_mask = 0; if (!is_hashfilter(adap)) return false; @@ -865,73 +865,45 @@ bool is_filter_exact_match(struct adapter *adap, if (!fs->val.fport || fs->mask.fport != 0xffff) return false; - if (tp->fcoe_shift >= 0) { - mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W; - if (mask && !fs->mask.fcoe) - return false; - } + /* calculate tuple mask and compare with mask configured in hw */ + if (tp->fcoe_shift >= 0) + ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift; - if (tp->port_shift >= 0) { - mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W; - if (mask && !fs->mask.iport) - return false; - } + if (tp->port_shift >= 0) + ntuple_mask |= (u64)fs->mask.iport << tp->port_shift; if (tp->vnic_shift >= 0) { - mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W; - - if ((adap->params.tp.ingress_config & VNIC_F)) { - if (mask && !fs->mask.pfvf_vld) - return false; - } else { - if (mask && !fs->mask.ovlan_vld) - return false; - } + if ((adap->params.tp.ingress_config & VNIC_F)) + ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift; + else + ntuple_mask |= (u64)fs->mask.ovlan_vld << + tp->vnic_shift; } - if (tp->vlan_shift >= 0) { - mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W; - if (mask && !fs->mask.ivlan) - return false; - } + if (tp->vlan_shift >= 0) + ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift; - if (tp->tos_shift >= 0) { - mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W; - if (mask && !fs->mask.tos) - return false; - } + if (tp->tos_shift >= 0) + ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift; - if (tp->protocol_shift >= 0) { - mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W; - if (mask && !fs->mask.proto) - return false; - } + if (tp->protocol_shift >= 0) + ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift; - if (tp->ethertype_shift >= 0) { - mask = (hash_filter_mask >> tp->ethertype_shift) & - FT_ETHERTYPE_W; - if (mask && !fs->mask.ethtype) - return false; - } + if (tp->ethertype_shift >= 0) + ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift; - if (tp->macmatch_shift >= 0) { - mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W; - if (mask && !fs->mask.macidx) - return false; - } + if (tp->macmatch_shift >= 0) + ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift; + + if (tp->matchtype_shift >= 0) + ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift; + + if (tp->frag_shift >= 0) + ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift; + + if (ntuple_mask != hash_filter_mask) + return false; - if (tp->matchtype_shift >= 0) { - mask = (hash_filter_mask >> tp->matchtype_shift) & - FT_MPSHITTYPE_W; - if (mask && !fs->mask.matchtype) - return false; - } - if (tp->frag_shift >= 0) { - mask = (hash_filter_mask >> tp->frag_shift) & - FT_FRAGMENTATION_W; - if (mask && !fs->mask.frag) - return false; - } return true; } diff --git 
a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 81684acf52af..8a8b12b720ef 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2747,11 +2747,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); /* Query PCI controller on system for DMA addressing - * limitation for the device. Try 64-bit first, and + * limitation for the device. Try 47-bit first, and * fail to 32-bit. */ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47)); if (err) { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { @@ -2765,10 +2765,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_release_regions; } } else { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47)); if (err) { dev_err(dev, "Unable to obtain %u-bit DMA " - "for consistent allocations, aborting\n", 64); + "for consistent allocations, aborting\n", 47); goto err_out_release_regions; } using_dac = 1; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index d4604bc8eb5b..9d3eed46830d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index f81439796ac7..43d973215040 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -1,20 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Fast Ethernet Controller (ENET) PTP driver for MX6x. * * Copyright (C) 2012 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 6e8d6a6f6aaf..5ec1185808e5 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -192,6 +192,7 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter, if (adapter->fw_done_rc) { dev_err(dev, "Couldn't map long term buffer,rc = %d\n", adapter->fw_done_rc); + dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); return -1; } return 0; @@ -795,9 +796,11 @@ static int ibmvnic_login(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); unsigned long timeout = msecs_to_jiffies(30000); int retry_count = 0; + bool retry; int rc; do { + retry = false; if (retry_count > IBMVNIC_MAX_QUEUES) { netdev_warn(netdev, "Login attempts exceeded\n"); return -1; @@ -821,6 +824,9 @@ static int ibmvnic_login(struct net_device *netdev) retry_count++; release_sub_crqs(adapter, 1); + retry = true; + netdev_dbg(netdev, + "Received partial success, retrying...\n"); adapter->init_done_rc = 0; reinit_completion(&adapter->init_done); send_cap_queries(adapter); @@ -848,7 +854,7 @@ static int ibmvnic_login(struct net_device *netdev) netdev_warn(netdev, "Adapter login failed\n"); return -1; } - } while (adapter->init_done_rc == PARTIALSUCCESS); + } while (retry); /* handle pending MAC address changes after successful login */ if (adapter->mac_change_pending) { @@ -1821,9 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, if (rc) return rc; } + ibmvnic_disable_irqs(adapter); } - - ibmvnic_disable_irqs(adapter); adapter->state = VNIC_CLOSED; if (reset_state == VNIC_CLOSED) @@ -2617,18 +2622,21 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter, { struct device *dev = &adapter->vdev->dev; unsigned long rc; - u64 val; if (scrq->hw_irq > 0x100000000ULL) { dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq); return 1; } - val = (0xff000000) | scrq->hw_irq; - rc = plpar_hcall_norets(H_EOI, val); - if (rc) - dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n", - val, rc); + if (adapter->resetting && + adapter->reset_reason == VNIC_RESET_MOBILITY) { + u64 val = (0xff000000) | scrq->hw_irq; + + rc = plpar_hcall_norets(H_EOI, val); + if (rc) + dev_err(dev, "H_EOI FAILED irq 0x%llx. 
rc=%ld\n", + val, rc); + } rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address, H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0); @@ -4586,14 +4594,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) release_crq_queue(adapter); } - rc = init_stats_buffers(adapter); - if (rc) - return rc; - - rc = init_stats_token(adapter); - if (rc) - return rc; - return rc; } @@ -4662,13 +4662,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) goto ibmvnic_init_fail; } while (rc == EAGAIN); + rc = init_stats_buffers(adapter); + if (rc) + goto ibmvnic_init_fail; + + rc = init_stats_token(adapter); + if (rc) + goto ibmvnic_stats_fail; + netdev->mtu = adapter->req_mtu - ETH_HLEN; netdev->min_mtu = adapter->min_mtu - ETH_HLEN; netdev->max_mtu = adapter->max_mtu - ETH_HLEN; rc = device_create_file(&dev->dev, &dev_attr_failover); if (rc) - goto ibmvnic_init_fail; + goto ibmvnic_dev_file_err; netif_carrier_off(netdev); rc = register_netdev(netdev); @@ -4687,6 +4695,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) ibmvnic_register_fail: device_remove_file(&dev->dev, &dev_attr_failover); +ibmvnic_dev_file_err: + release_stats_token(adapter); + +ibmvnic_stats_fail: + release_stats_buffers(adapter); + ibmvnic_init_fail: release_sub_crqs(adapter, 1); release_crq_queue(adapter); diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index a822f7a56bc5..685337d58276 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -43,12 +43,12 @@ #include "fw.h" /* - * We allocate in as big chunks as we can, up to a maximum of 256 KB - * per chunk. + * We allocate in page size (default 4KB on many archs) chunks to avoid high + * order memory allocations in fragmented/high usage memory situation. */ enum { - MLX4_ICM_ALLOC_SIZE = 1 << 18, - MLX4_TABLE_CHUNK_SIZE = 1 << 18 + MLX4_ICM_ALLOC_SIZE = PAGE_SIZE, + MLX4_TABLE_CHUNK_SIZE = PAGE_SIZE, }; static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk) @@ -398,9 +398,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, u64 size; obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; + if (WARN_ON(!obj_per_chunk)) + return -EINVAL; num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; - table->icm = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL); + table->icm = kvzalloc(num_icm * sizeof(*table->icm), GFP_KERNEL); if (!table->icm) return -ENOMEM; table->virt = virt; @@ -446,7 +448,7 @@ err: mlx4_free_icm(dev, table->icm[i], use_coherent); } - kfree(table->icm); + kvfree(table->icm); return -ENOMEM; } @@ -462,5 +464,5 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table) mlx4_free_icm(dev, table->icm[i], table->coherent); } - kfree(table->icm); + kvfree(table->icm); } diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 2edcce98ab2d..65482f004e50 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -172,7 +172,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable) list_add_tail(&dev_ctx->list, &priv->ctx_list); spin_unlock_irqrestore(&priv->ctx_lock, flags); - mlx4_dbg(dev, "Inrerface for protocol %d restarted with when bonded mode is %s\n", + mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n", dev_ctx->intf->protocol, enable ? 
"enabled" : "disabled"); } diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 211578ffc70d..60172a38c4a4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -2929,6 +2929,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) mlx4_err(dev, "Failed to create file for port %d\n", port); devlink_port_unregister(&info->devlink_port); info->port = -1; + return err; } sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); @@ -2950,9 +2951,10 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) &info->port_attr); devlink_port_unregister(&info->devlink_port); info->port = -1; + return err; } - return err; + return 0; } static void mlx4_cleanup_port_info(struct mlx4_port_info *info) diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 3aaf4bad6c5a..427e7a31862c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; struct mlx4_qp *qp; - spin_lock(&qp_table->lock); + spin_lock_irq(&qp_table->lock); qp = __mlx4_qp_lookup(dev, qpn); - spin_unlock(&qp_table->lock); + spin_unlock_irq(&qp_table->lock); return qp; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 176645762e49..1ff0b0e93804 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -615,6 +615,45 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth) return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); } +static __be32 mlx5e_get_fcs(struct sk_buff *skb) +{ + int last_frag_sz, bytes_in_prev, nr_frags; + u8 *fcs_p1, *fcs_p2; + skb_frag_t *last_frag; + __be32 fcs_bytes; + + if (!skb_is_nonlinear(skb)) + return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN); + + nr_frags = skb_shinfo(skb)->nr_frags; + last_frag = &skb_shinfo(skb)->frags[nr_frags - 1]; + last_frag_sz = skb_frag_size(last_frag); + + /* If all FCS data is in last frag */ + if (last_frag_sz >= ETH_FCS_LEN) + return *(__be32 *)(skb_frag_address(last_frag) + + last_frag_sz - ETH_FCS_LEN); + + fcs_p2 = (u8 *)skb_frag_address(last_frag); + bytes_in_prev = ETH_FCS_LEN - last_frag_sz; + + /* Find where the other part of the FCS is - Linear or another frag */ + if (nr_frags == 1) { + fcs_p1 = skb_tail_pointer(skb); + } else { + skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2]; + + fcs_p1 = skb_frag_address(prev_frag) + + skb_frag_size(prev_frag); + } + fcs_p1 -= bytes_in_prev; + + memcpy(&fcs_bytes, fcs_p1, bytes_in_prev); + memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz); + + return fcs_bytes; +} + static inline void mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, @@ -643,6 +682,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, skb->csum = csum_partial(skb->data + ETH_HLEN, network_depth - ETH_HLEN, skb->csum); + if (unlikely(netdev->features & NETIF_F_RXFCS)) + skb->csum = csum_add(skb->csum, + (__force __wsum)mlx5e_get_fcs(skb)); rq->stats.csum_complete++; return; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 0f5da499a223..fad8c2e3804e 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -237,19 +237,17 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, context->buf.sg[0].data = &context->command; spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); - list_add_tail(&context->list, &fdev->ipsec->pending_cmds); + res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf); + if (!res) + list_add_tail(&context->list, &fdev->ipsec->pending_cmds); spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); - res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf); if (res) { - mlx5_fpga_warn(fdev, "Failure sending IPSec command: %d\n", - res); - spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags); - list_del(&context->list); - spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags); + mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res); kfree(context); return ERR_PTR(res); } + /* Context will be freed by wait func after completion */ return context; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 1dc424685f4e..35fb31f682af 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -335,7 +335,7 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app) return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem); start = mem; - while (mem - start + 8 < nfp_cpp_area_size(area)) { + while (mem - start + 8 <= nfp_cpp_area_size(area)) { u8 __iomem *value; u32 type, length; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 00f41c145d4d..820b226d6ff8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -77,7 +77,7 @@ #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET /* ILT entry structure */ -#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL +#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12) #define ILT_ENTRY_PHY_ADDR_SHIFT 0 #define ILT_ENTRY_VALID_MASK 0x1ULL #define ILT_ENTRY_VALID_SHIFT 52 diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 38502815d681..468c59d2e491 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -292,6 +292,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) struct qed_ll2_tx_packet *p_pkt = NULL; struct qed_ll2_info *p_ll2_conn; struct qed_ll2_tx_queue *p_tx; + unsigned long flags = 0; dma_addr_t tx_frag; p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); @@ -300,6 +301,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) p_tx = &p_ll2_conn->tx_queue; + spin_lock_irqsave(&p_tx->lock, flags); while (!list_empty(&p_tx->active_descq)) { p_pkt = list_first_entry(&p_tx->active_descq, struct qed_ll2_tx_packet, list_entry); @@ -309,6 +311,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) list_del(&p_pkt->list_entry); b_last_packet = list_empty(&p_tx->active_descq); list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); + spin_unlock_irqrestore(&p_tx->lock, flags); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { struct qed_ooo_buffer *p_buffer; @@ -328,7 +331,9 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) b_last_frag, b_last_packet); } + spin_lock_irqsave(&p_tx->lock, flags); } + 
spin_unlock_irqrestore(&p_tx->lock, flags); } static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) @@ -556,6 +561,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) struct qed_ll2_info *p_ll2_conn = NULL; struct qed_ll2_rx_packet *p_pkt = NULL; struct qed_ll2_rx_queue *p_rx; + unsigned long flags = 0; p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); if (!p_ll2_conn) @@ -563,13 +569,14 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) p_rx = &p_ll2_conn->rx_queue; + spin_lock_irqsave(&p_rx->lock, flags); while (!list_empty(&p_rx->active_descq)) { p_pkt = list_first_entry(&p_rx->active_descq, struct qed_ll2_rx_packet, list_entry); if (!p_pkt) break; - list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); + spin_unlock_irqrestore(&p_rx->lock, flags); if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { struct qed_ooo_buffer *p_buffer; @@ -588,7 +595,30 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) cookie, rx_buf_addr, b_last); } + spin_lock_irqsave(&p_rx->lock, flags); } + spin_unlock_irqrestore(&p_rx->lock, flags); +} + +static bool +qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn, + struct core_rx_slow_path_cqe *p_cqe) +{ + struct ooo_opaque *iscsi_ooo; + u32 cid; + + if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) + return false; + + iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data; + if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES) + return false; + + /* Need to make a flush */ + cid = le32_to_cpu(iscsi_ooo->cid); + qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid); + + return true; } static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, @@ -617,6 +647,11 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); cqe_type = cqe->rx_cqe_sp.type; + if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH) + if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn, + &cqe->rx_cqe_sp)) + continue; + if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { DP_NOTICE(p_hwfn, "Got a non-regular LB LL2 completion [type 0x%02x]\n", @@ -794,6 +829,9 @@ static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; int rc; + if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) + return 0; + rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); if (rc) return rc; @@ -814,6 +852,9 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) u16 new_idx = 0, num_bds = 0; int rc; + if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) + return 0; + new_idx = le16_to_cpu(*p_tx->p_fw_cons); num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); @@ -1867,17 +1908,25 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) /* Stop Tx & Rx of connection, if needed */ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { + p_ll2_conn->tx_queue.b_cb_registred = false; + smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); if (rc) goto out; + qed_ll2_txq_flush(p_hwfn, connection_handle); + qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); } if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { + p_ll2_conn->rx_queue.b_cb_registred = false; + smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); if (rc) goto out; + qed_ll2_rxq_flush(p_hwfn, connection_handle); + qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); 
} if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) @@ -1925,16 +1974,6 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle) if (!p_ll2_conn) return; - if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { - p_ll2_conn->rx_queue.b_cb_registred = false; - qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); - } - - if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { - p_ll2_conn->tx_queue.b_cb_registred = false; - qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); - } - kfree(p_ll2_conn->tx_queue.descq_mem); qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index a01e7d6e5442..f6655e251bbd 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1066,13 +1066,12 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) DP_INFO(edev, "Starting qede_remove\n"); + qede_rdma_dev_remove(edev); unregister_netdev(ndev); cancel_delayed_work_sync(&edev->sp_task); qede_ptp_disable(edev); - qede_rdma_dev_remove(edev); - edev->ops->common->set_power_state(cdev, PCI_D0); pci_set_drvdata(pdev, NULL); diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index a5b792ce2ae7..1bf930d4a1e5 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -163,7 +163,7 @@ enum { }; /* Driver's parameters */ -#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_RENESAS) #define SH_ETH_RX_ALIGN 32 #else #define SH_ETH_RX_ALIGN 2 diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 450eec264a5e..4377c26f714d 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -792,8 +792,10 @@ static int ipvlan_device_event(struct notifier_block *unused, break; case NETDEV_CHANGEADDR: - list_for_each_entry(ipvlan, &port->ipvlans, pnode) + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr); + call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev); + } break; case NETDEV_PRE_TYPE_CHANGE: diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c index 6838129839ca..e757b09f1889 100644 --- a/drivers/net/phy/bcm-cygnus.c +++ b/drivers/net/phy/bcm-cygnus.c @@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev) return rc; /* make rcal=100, since rdb default is 000 */ - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10); + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10); if (rc < 0) return rc; /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */ - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10); + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10); if (rc < 0) return rc; /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */ - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00); + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00); return 0; } diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 5ad130c3da43..d5e0833d69b9 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum) /* The register must be written to both the Shadow Register Select and * the Shadow Read Register Selector */ - phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | + phy_write(phydev, 
MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK | regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT); return phy_read(phydev, MII_BCM54XX_AUX_CTL); } diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h index 7c73808cbbde..81cceaa412fe 100644 --- a/drivers/net/phy/bcm-phy-lib.h +++ b/drivers/net/phy/bcm-phy-lib.h @@ -14,11 +14,18 @@ #ifndef _LINUX_BCM_PHY_LIB_H #define _LINUX_BCM_PHY_LIB_H +#include <linux/brcmphy.h> #include <linux/phy.h> int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val); int bcm_phy_read_exp(struct phy_device *phydev, u16 reg); +static inline int bcm_phy_write_exp_sel(struct phy_device *phydev, + u16 reg, u16 val) +{ + return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val); +} + int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val); int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum); diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 29b1c88b55cc..01d2ff2f6241 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv { static void r_rc_cal_reset(struct phy_device *phydev) { /* Reset R_CAL/RC_CAL Engine */ - bcm_phy_write_exp(phydev, 0x00b0, 0x0010); + bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010); /* Disable Reset R_AL/RC_CAL Engine */ - bcm_phy_write_exp(phydev, 0x00b0, 0x0000); + bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000); } static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index f41b224a9cdb..ab195f0916d6 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -573,9 +573,40 @@ static int ksz9031_config_init(struct phy_device *phydev) ksz9031_of_load_skew_values(phydev, of_node, MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4, tx_data_skews, 4); + + /* Silicon Errata Sheet (DS80000691D or DS80000692D): + * When the device links in the 1000BASE-T slave mode only, + * the optional 125MHz reference output clock (CLK125_NDO) + * has wide duty cycle variation. + * + * The optional CLK125_NDO clock does not meet the RGMII + * 45/55 percent (min/max) duty cycle requirement and therefore + * cannot be used directly by the MAC side for clocking + * applications that have setup/hold time requirements on + * rising and falling clock edges. + * + * Workaround: + * Force the phy to be the master to receive a stable clock + * which meets the duty cycle requirement. + */ + if (of_property_read_bool(of_node, "micrel,force-master")) { + result = phy_read(phydev, MII_CTRL1000); + if (result < 0) + goto err_force_master; + + /* enable master mode, config & prefer master */ + result |= CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER; + result = phy_write(phydev, MII_CTRL1000, result); + if (result < 0) + goto err_force_master; + } } return ksz9031_center_flp_timing(phydev); + +err_force_master: + phydev_err(phydev, "failed to force the phy to master mode\n"); + return result; } #define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index dc7c7ec43202..02ad03a2fab7 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -605,30 +605,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (cmd == PPPIOCDETACH) { /* - * We have to be careful here... 
if the file descriptor - * has been dup'd, we could have another process in the - * middle of a poll using the same file *, so we had - * better not free the interface data structures - - * instead we fail the ioctl. Even in this case, we - * shut down the interface if we are the owner of it. - * Actually, we should get rid of PPPIOCDETACH, userland - * (i.e. pppd) could achieve the same effect by closing - * this fd and reopening /dev/ppp. + * PPPIOCDETACH is no longer supported as it was heavily broken, + * and is only known to have been used by pppd older than + * ppp-2.4.2 (released November 2003). */ + pr_warn_once("%s (%d) used obsolete PPPIOCDETACH ioctl\n", + current->comm, current->pid); err = -EINVAL; - if (pf->kind == INTERFACE) { - ppp = PF_TO_PPP(pf); - rtnl_lock(); - if (file == ppp->owner) - unregister_netdevice(ppp->dev); - rtnl_unlock(); - } - if (atomic_long_read(&file->f_count) < 2) { - ppp_release(NULL, file); - err = 0; - } else - pr_warn("PPPIOCDETACH file->f_count=%ld\n", - atomic_long_read(&file->f_count)); goto out; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ef33950a45d9..45d807796a18 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -681,15 +681,6 @@ static void tun_queue_purge(struct tun_file *tfile) skb_queue_purge(&tfile->sk.sk_error_queue); } -static void tun_cleanup_tx_ring(struct tun_file *tfile) -{ - if (tfile->tx_ring.queue) { - ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); - xdp_rxq_info_unreg(&tfile->xdp_rxq); - memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); - } -} - static void __tun_detach(struct tun_file *tfile, bool clean) { struct tun_file *ntfile; @@ -736,7 +727,9 @@ static void __tun_detach(struct tun_file *tfile, bool clean) tun->dev->reg_state == NETREG_REGISTERED) unregister_netdevice(tun->dev); } - tun_cleanup_tx_ring(tfile); + if (tun) + xdp_rxq_info_unreg(&tfile->xdp_rxq); + ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free); sock_put(&tfile->sk); } } @@ -783,14 +776,14 @@ static void tun_detach_all(struct net_device *dev) tun_napi_del(tun, tfile); /* Drop read queue */ tun_queue_purge(tfile); + xdp_rxq_info_unreg(&tfile->xdp_rxq); sock_put(&tfile->sk); - tun_cleanup_tx_ring(tfile); } list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) { tun_enable_queue(tfile); tun_queue_purge(tfile); + xdp_rxq_info_unreg(&tfile->xdp_rxq); sock_put(&tfile->sk); - tun_cleanup_tx_ring(tfile); } BUG_ON(tun->numdisabled != 0); @@ -834,7 +827,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file, } if (!tfile->detached && - ptr_ring_init(&tfile->tx_ring, dev->tx_queue_len, GFP_KERNEL)) { + ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len, + GFP_KERNEL, tun_ptr_free)) { err = -ENOMEM; goto out; } @@ -1429,6 +1423,13 @@ static void tun_net_init(struct net_device *dev) dev->max_mtu = MAX_MTU - dev->hard_header_len; } +static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile) +{ + struct sock *sk = tfile->socket.sk; + + return (tun->dev->flags & IFF_UP) && sock_writeable(sk); +} + /* Character device part */ /* Poll */ @@ -1451,10 +1452,14 @@ static __poll_t tun_chr_poll(struct file *file, poll_table *wait) if (!ptr_ring_empty(&tfile->tx_ring)) mask |= EPOLLIN | EPOLLRDNORM; - if (tun->dev->flags & IFF_UP && - (sock_writeable(sk) || - (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && - sock_writeable(sk)))) + /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to + * guarantee EPOLLOUT to be raised by either here or + * tun_sock_write_space(). 
Then process could get notification + * after it writes to a down device and meets -EIO. + */ + if (tun_sock_writeable(tun, tfile) || + (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) && + tun_sock_writeable(tun, tfile))) mask |= EPOLLOUT | EPOLLWRNORM; if (tun->dev->reg_state != NETREG_REGISTERED) @@ -3219,6 +3224,11 @@ static int tun_chr_open(struct inode *inode, struct file * file) &tun_proto, 0); if (!tfile) return -ENOMEM; + if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) { + sk_free(&tfile->sk); + return -ENOMEM; + } + RCU_INIT_POINTER(tfile->tun, NULL); tfile->flags = 0; tfile->ifindex = 0; @@ -3239,8 +3249,6 @@ static int tun_chr_open(struct inode *inode, struct file * file) sock_set_flag(&tfile->sk, SOCK_ZEROCOPY); - memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring)); - return 0; } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 770422e953f7..032e1ac10a30 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -707,6 +707,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, void *data; u32 act; + /* Transient failure which in theory could occur if + * in-flight packets from before XDP was enabled reach + * the receive path after XDP is loaded. + */ + if (unlikely(hdr->hdr.gso_type)) + goto err_xdp; + /* This happens when rx buffer size is underestimated * or headroom is not enough because of the buffer * was refilled before XDP is set. This should only @@ -727,14 +734,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, xdp_page = page; } - /* Transient failure which in theory could occur if - * in-flight packets from before XDP was enabled reach - * the receive path after XDP is loaded. In practice I - * was not able to create this condition. - */ - if (unlikely(hdr->hdr.gso_type)) - goto err_xdp; - /* Allow consuming headroom but reserve enough space to push * the descriptor on if we get an XDP_TX return code. */ @@ -775,7 +774,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } *xdp_xmit = true; if (unlikely(xdp_page != page)) - goto err_xdp; + put_page(page); rcu_read_unlock(); goto xdp_xmit; case XDP_REDIRECT: @@ -787,7 +786,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } *xdp_xmit = true; if (unlikely(xdp_page != page)) - goto err_xdp; + put_page(page); rcu_read_unlock(); goto xdp_xmit; default: @@ -875,7 +874,7 @@ err_xdp: rcu_read_unlock(); err_skb: put_page(page); - while (--num_buf) { + while (num_buf-- > 1) { buf = virtqueue_get_buf(rq->vq, &len); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers missing\n", diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 9ebe2a689966..27a9bb8c9611 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -369,6 +369,11 @@ vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { + /* Prevent any &gdesc->tcd field from being (speculatively) + * read before (&gdesc->tcd)->gen is read. + */ + dma_rmb(); + completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX( &gdesc->tcd), tq, adapter->pdev, adapter); @@ -1103,6 +1108,11 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, gdesc->txd.tci = skb_vlan_tag_get(skb); } + /* Ensure that the write to (&gdesc->txd)->gen will be observed after + * all other writes to &gdesc->txd. + */ + dma_wmb(); + /* finally flips the GEN bit of the SOP desc. 
*/ gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^ VMXNET3_TXD_GEN); @@ -1298,6 +1308,12 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, */ break; } + + /* Prevent any rcd field from being (speculatively) read before + * rcd->gen is read. + */ + dma_rmb(); + BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2 && rcd->rqID != rq->dataRingQid); idx = rcd->rxdIdx; @@ -1528,6 +1544,12 @@ rcd_done: ring->next2comp = idx; num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring); ring = rq->rx_ring + ring_idx; + + /* Ensure that the writes to rxd->gen bits will be observed + * after all other writes to rxd objects. + */ + dma_wmb(); + while (num_to_alloc) { vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd, &rxCmdDesc); @@ -2688,7 +2710,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p) /* ==================== initialization and cleanup routines ============ */ static int -vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) +vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter) { int err; unsigned long mmio_start, mmio_len; @@ -2700,30 +2722,12 @@ vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) return err; } - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { - if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { - dev_err(&pdev->dev, - "pci_set_consistent_dma_mask failed\n"); - err = -EIO; - goto err_set_mask; - } - *dma64 = true; - } else { - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { - dev_err(&pdev->dev, - "pci_set_dma_mask failed\n"); - err = -EIO; - goto err_set_mask; - } - *dma64 = false; - } - err = pci_request_selected_regions(pdev, (1 << 2) - 1, vmxnet3_driver_name); if (err) { dev_err(&pdev->dev, "Failed to request region for adapter: error %d\n", err); - goto err_set_mask; + goto err_enable_device; } pci_set_master(pdev); @@ -2751,7 +2755,7 @@ err_bar1: iounmap(adapter->hw_addr0); err_ioremap: pci_release_selected_regions(pdev, (1 << 2) - 1); -err_set_mask: +err_enable_device: pci_disable_device(pdev); return err; } @@ -3254,7 +3258,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, #endif }; int err; - bool dma64 = false; /* stupid gcc */ + bool dma64; u32 ver; struct net_device *netdev; struct vmxnet3_adapter *adapter; @@ -3300,6 +3304,24 @@ vmxnet3_probe_device(struct pci_dev *pdev, adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE; adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE; + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { + dev_err(&pdev->dev, + "pci_set_consistent_dma_mask failed\n"); + err = -EIO; + goto err_set_mask; + } + dma64 = true; + } else { + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { + dev_err(&pdev->dev, + "pci_set_dma_mask failed\n"); + err = -EIO; + goto err_set_mask; + } + dma64 = false; + } + spin_lock_init(&adapter->cmd_lock); adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, sizeof(struct vmxnet3_adapter), @@ -3307,7 +3329,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) { dev_err(&pdev->dev, "Failed to map dma\n"); err = -EFAULT; - goto err_dma_map; + goto err_set_mask; } adapter->shared = dma_alloc_coherent( &adapter->pdev->dev, @@ -3358,7 +3380,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, } #endif /* VMXNET3_RSS */ - err = vmxnet3_alloc_pci_resources(adapter, &dma64); + err = vmxnet3_alloc_pci_resources(adapter); if (err < 0) goto err_alloc_pci; @@ -3504,7 +3526,7 @@ 
err_alloc_queue_desc: err_alloc_shared: dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa, sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE); -err_dma_map: +err_set_mask: free_netdev(netdev); return err; } diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index a3326463b71f..a2c554f8a61b 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,12 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.4.16.0-k" -/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00 +/* Each byte of this 32-bit integer encodes a version number in + * VMXNET3_DRIVER_VERSION_STRING. + */ +#define VMXNET3_DRIVER_VERSION_NUM 0x01041000 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 4a017a0d71ea..920c23e542a5 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3340,7 +3340,7 @@ out_err: static int hwsim_dump_radio_nl(struct sk_buff *skb, struct netlink_callback *cb) { - int last_idx = cb->args[0]; + int last_idx = cb->args[0] - 1; struct mac80211_hwsim_data *data = NULL; int res = 0; void *hdr; @@ -3368,7 +3368,7 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb, last_idx = data->idx; } - cb->args[0] = last_idx; + cb->args[0] = last_idx + 1; /* list changed, but no new element sent, set interrupted flag */ if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) { |
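
The 3c59x hunks above are an instance of the generic conversion away from the legacy pci_* DMA wrappers, which also lets the same code paths serve the driver's EISA devices through vp->gendev. A minimal sketch of the correspondence, assuming a hypothetical driver's receive buffer (the function and variable names here are illustrative, not from the patch):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Map/unmap one RX buffer with the generic DMA API. The legacy
 * equivalents are shown in comments; they required a pci_dev, while
 * dma_map_single() takes any struct device.
 */
static int example_map_rx(struct device *dev, void *buf, size_t len,
			  dma_addr_t *handle)
{
	/* was: pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE) */
	*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}

static void example_unmap_rx(struct device *dev, dma_addr_t handle,
			     size_t len)
{
	/* was: pci_unmap_single(pdev, handle, len, PCI_DMA_FROMDEVICE) */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}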
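
The cxgb4 is_filter_exact_match() rewrite replaces per-field probing of the hardware hash-filter mask with a single comparison: each field mask is accumulated at its hardware shift and the result must equal the mask programmed in hardware. A condensed sketch of that shape, with hypothetical field arrays standing in for the tp_params shifts:

#include <linux/types.h>

/* Accumulate every configured field mask at its shift; the filter is
 * an exact match only if the combined tuple mask equals the mask the
 * hardware was configured with. Array names are illustrative.
 */
static bool example_is_exact_match(u64 hw_mask, const u64 *field_mask,
				   const int *field_shift, int nfields)
{
	u64 ntuple_mask = 0;
	int i;

	for (i = 0; i < nfields; i++) {
		if (field_shift[i] < 0)	/* field not in the tuple */
			continue;
		ntuple_mask |= field_mask[i] << field_shift[i];
	}
	return ntuple_mask == hw_mask;
}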
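
The tun changes restructure the tx_ring lifecycle so that every tfile owns a valid (initially zero-length) ptr_ring from open onward, grows it on attach, and tears it down exactly once on detach, replacing the old memset-and-conditional-cleanup dance. A minimal sketch of that lifecycle, with illustrative names:

#include <linux/ptr_ring.h>
#include <linux/slab.h>

struct example_file {
	struct ptr_ring tx_ring;
};

static void example_ptr_free(void *ptr)
{
	kfree(ptr);	/* placeholder destructor for queued entries */
}

static int example_open(struct example_file *tfile)
{
	/* zero-length ring: valid from the start, nothing queued yet */
	return ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL);
}

static int example_attach(struct example_file *tfile, int qlen)
{
	/* grow in place; anything already queued is freed via the
	 * destructor rather than leaked
	 */
	return ptr_ring_resize(&tfile->tx_ring, qlen, GFP_KERNEL,
			       example_ptr_free);
}

static void example_detach(struct example_file *tfile)
{
	ptr_ring_cleanup(&tfile->tx_ring, example_ptr_free);
}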
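
The vmxnet3 dma_rmb()/dma_wmb() additions follow the standard pattern for descriptor rings shared with a device: the consumer must not read descriptor payload before observing the ownership (gen) bit, and the producer must publish the payload before flipping that bit. A sketch of the pattern with a hypothetical descriptor layout; process() stands in for the real completion handler:

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

struct example_desc {
	u32 addr;
	u32 len;
	u32 gen;	/* ownership bit, written last by the producer */
};

void process(u32 addr, u32 len);	/* hypothetical handler */

/* Consumer side: check ownership first, then read the payload. */
static bool example_poll(struct example_desc *desc, u32 ring_gen)
{
	if (READ_ONCE(desc->gen) != ring_gen)
		return false;
	dma_rmb();	/* no speculative reads of addr/len before gen */
	process(desc->addr, desc->len);
	return true;
}

/* Producer side: fill the descriptor, then hand it over. */
static void example_post(struct example_desc *desc, u32 addr, u32 len,
			 u32 ring_gen)
{
	desc->addr = addr;
	desc->len = len;
	dma_wmb();	/* payload visible before the gen flip */
	WRITE_ONCE(desc->gen, ring_gen);
}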
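
The mac80211_hwsim fix biases the netlink dump cursor by one, so cb->args[0] == 0 keeps its conventional meaning of "fresh dump" even when the last radio emitted had index 0. The resume convention it adopts, sketched with hypothetical num_items()/emit_item() helpers:

#include <linux/netlink.h>
#include <linux/skbuff.h>

int num_items(void);				/* hypothetical */
int emit_item(struct sk_buff *skb, int idx);	/* hypothetical */

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int last_idx = cb->args[0] - 1;	/* -1 on a fresh dump */
	int idx;

	for (idx = last_idx + 1; idx < num_items(); idx++) {
		if (emit_item(skb, idx) < 0)
			break;		/* skb full; resume next call */
		last_idx = idx;
	}

	cb->args[0] = last_idx + 1;	/* store the biased cursor */
	return skb->len;
}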