Diffstat (limited to 'drivers'): 32 files changed, 443 insertions, 247 deletions
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index f642c4264c27..168fa175d65a 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
 
 /* driver_chipcommon_b.c */
 int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index b4f6520e74f0..62f5bfa5065d 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -15,8 +15,6 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
					 u32 mask, u32 value)
 {
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
	if (cc->capabilities & BCMA_CC_CAP_PMU)
		bcma_pmu_early_init(cc);
 
-	if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
-		bcma_chipco_serial_init(cc);
-
	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
		bcma_core_chipcommon_flash_detect(cc);
 
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
	return res;
 }
 
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
-#if IS_BUILTIN(CONFIG_BCM47XX)
	unsigned int irq;
	u32 baud_base;
	u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
		ports[i].baud_base = baud_base;
		ports[i].reg_shift = 0;
	}
-#endif /* CONFIG_BCM47XX */
 }
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 96f171328200..89af807cf29c 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
 
 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
+	struct bcma_bus *bus = mcore->core->bus;
+
	if (mcore->early_setup_done)
		return;
 
+	bcma_chipco_serial_init(&bus->drv_cc);
	bcma_core_mips_nvram_init(mcore);
 
	mcore->early_setup_done = true;
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index c12d2618eebf..3872ab96b80a 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
		if (skb == NULL)
			break;
		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[i].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[i].skb = NULL;
+			break;
+		}
		/* Grrr, we cannot offset to correctly align the IP header. */
		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
	}
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
+	unsigned int prev_tx;
	u32 status;
-	int i;
+	int i, j;
 
	/*
	 * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+	prev_tx = np->cur_tx;
	entry = np->cur_tx % TX_RING_SIZE;
	for (i = 0; i < skb_num_frags(skb); i++) {
		int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
					       skb_frag_size(this_frag),
					       PCI_DMA_TODEVICE);
		}
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->tx_info[entry].mapping)) {
+			dev->stats.tx_dropped++;
+			goto err_out;
+		}
 
		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
		np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
		netif_stop_queue(dev);
 
	return NETDEV_TX_OK;
-}
 
+err_out:
+	entry = prev_tx % TX_RING_SIZE;
+	np->tx_info[entry].skb = NULL;
+	if (i > 0) {
+		pci_unmap_single(np->pci_dev,
+				 np->tx_info[entry].mapping,
+				 skb_first_frag_len(skb),
+				 PCI_DMA_TODEVICE);
+		np->tx_info[entry].mapping = 0;
+		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+		for (j = 1; j < i; j++) {
+			pci_unmap_single(np->pci_dev,
+					 np->tx_info[entry].mapping,
+					 skb_frag_size(
+						&skb_shinfo(skb)->frags[j-1]),
+					 PCI_DMA_TODEVICE);
+			entry++;
+		}
+	}
+	dev_kfree_skb_any(skb);
+	np->cur_tx = prev_tx;
+	return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
				break;	/* Better luck next round. */
			np->rx_info[entry].mapping =
				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(np->pci_dev,
+						  np->rx_info[entry].mapping)) {
+				dev_kfree_skb(skb);
+				np->rx_info[entry].skb = NULL;
+				break;
+			}
			np->rx_ring[entry].rxaddr =
				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
		}
+ */ +static unsigned int macb_dma_desc_get_size(struct macb *bp) +{ +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + if (bp->hw_dma_cap == HW_DMA_CAP_64B) + return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64); +#endif + return sizeof(struct macb_dma_desc); +} + +static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx) +{ +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + /* Dma buffer descriptor is 4 words length (instead of 2 words) + * for 64b GEM. + */ + if (bp->hw_dma_cap == HW_DMA_CAP_64B) + idx <<= 1; +#endif + return idx; +} + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) +{ + return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc)); +} +#endif + /* Ring buffer accessors */ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) { @@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, unsigned int index) { - return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)]; + index = macb_tx_ring_wrap(queue->bp, index); + index = macb_adj_dma_desc_idx(queue->bp, index); + return &queue->tx_ring[index]; } static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, @@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) dma_addr_t offset; offset = macb_tx_ring_wrap(queue->bp, index) * - sizeof(struct macb_dma_desc); + macb_dma_desc_get_size(queue->bp); return queue->tx_ring_dma + offset; } @@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index) static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index) { - return &bp->rx_ring[macb_rx_ring_wrap(bp, index)]; + index = macb_rx_ring_wrap(bp, index); + index = macb_adj_dma_desc_idx(bp, index); + return &bp->rx_ring[index]; } static void *macb_rx_buffer(struct macb *bp, unsigned int index) @@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb) } } -static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr) +static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) { - desc->addr = (u32)addr; #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - desc->addrh = (u32)(addr >> 32); + struct macb_dma_desc_64 *desc_64; + + if (bp->hw_dma_cap == HW_DMA_CAP_64B) { + desc_64 = macb_64b_desc(bp, desc); + desc_64->addrh = upper_32_bits(addr); + } #endif + desc->addr = lower_32_bits(addr); +} + +static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) +{ + dma_addr_t addr = 0; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + struct macb_dma_desc_64 *desc_64; + + if (bp->hw_dma_cap == HW_DMA_CAP_64B) { + desc_64 = macb_64b_desc(bp, desc); + addr = ((u64)(desc_64->addrh) << 32); + } +#endif + addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); + return addr; } static void macb_tx_error_task(struct work_struct *work) @@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work) /* Set end of TX queue */ desc = macb_tx_desc(queue, 0); - macb_set_addr(desc, 0); + macb_set_addr(bp, desc, 0); desc->ctrl = MACB_BIT(TX_USED); /* Make descriptor updates visible to hardware */ wmb(); /* Reinitialize the TX desc queue */ - queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma)); + queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma)); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - queue_writel(queue, TBQPH, 
(u32)(queue->tx_ring_dma >> 32)); + if (bp->hw_dma_cap == HW_DMA_CAP_64B) + queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma)); #endif /* Make TX ring reflect state of hardware */ queue->tx_head = 0; @@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp) unsigned int entry; struct sk_buff *skb; dma_addr_t paddr; + struct macb_dma_desc *desc; while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, bp->rx_ring_size) > 0) { @@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp) rmb(); bp->rx_prepared_head++; + desc = macb_rx_desc(bp, entry); if (!bp->rx_skbuff[entry]) { /* allocate sk_buff for this free entry in ring */ @@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp) if (entry == bp->rx_ring_size - 1) paddr |= MACB_BIT(RX_WRAP); - macb_set_addr(&(bp->rx_ring[entry]), paddr); - bp->rx_ring[entry].ctrl = 0; + macb_set_addr(bp, desc, paddr); + desc->ctrl = 0; /* properly align Ethernet header */ skb_reserve(skb, NET_IP_ALIGN); } else { - bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED); - bp->rx_ring[entry].ctrl = 0; + desc->addr &= ~MACB_BIT(RX_USED); + desc->ctrl = 0; } } @@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget) bool rxused; entry = macb_rx_ring_wrap(bp, bp->rx_tail); - desc = &bp->rx_ring[entry]; + desc = macb_rx_desc(bp, entry); /* Make hw descriptor updates visible to CPU */ rmb(); rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false; - addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - addr |= ((u64)(desc->addrh) << 32); -#endif + addr = macb_get_addr(bp, desc); ctrl = desc->ctrl; if (!rxused) @@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, static inline void macb_init_rx_ring(struct macb *bp) { dma_addr_t addr; + struct macb_dma_desc *desc = NULL; int i; addr = bp->rx_buffers_dma; for (i = 0; i < bp->rx_ring_size; i++) { - bp->rx_ring[i].addr = addr; - bp->rx_ring[i].ctrl = 0; + desc = macb_rx_desc(bp, i); + macb_set_addr(bp, desc, addr); + desc->ctrl = 0; addr += bp->rx_buffer_size; } - bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP); + desc->addr |= MACB_BIT(RX_WRAP); bp->rx_tail = 0; } @@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget) for (tail = bp->rx_tail; budget > 0; tail++) { struct macb_dma_desc *desc = macb_rx_desc(bp, tail); - u32 addr, ctrl; + u32 ctrl; /* Make hw descriptor updates visible to CPU */ rmb(); - addr = desc->addr; ctrl = desc->ctrl; - if (!(addr & MACB_BIT(RX_USED))) + if (!(desc->addr & MACB_BIT(RX_USED))) break; if (ctrl & MACB_BIT(RX_SOF)) { @@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp, i = tx_head; entry = macb_tx_ring_wrap(bp, i); ctrl = MACB_BIT(TX_USED); - desc = &queue->tx_ring[entry]; + desc = macb_tx_desc(queue, entry); desc->ctrl = ctrl; if (lso_ctrl) { @@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp, i--; entry = macb_tx_ring_wrap(bp, i); tx_skb = &queue->tx_skb[entry]; - desc = &queue->tx_ring[entry]; + desc = macb_tx_desc(queue, entry); ctrl = (u32)tx_skb->size; if (eof) { @@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp, ctrl |= MACB_BF(MSS_MFS, mss_mfs); /* Set TX buffer descriptor */ - macb_set_addr(desc, tx_skb->mapping); + macb_set_addr(bp, desc, tx_skb->mapping); /* desc->addr must be visible to hardware before clearing * 'TX_USED' bit in desc->ctrl. 
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
		if (!skb)
			continue;
 
-		desc = &bp->rx_ring[i];
-		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		addr |= ((u64)(desc->addrh) << 32);
-#endif
+		desc = macb_rx_desc(bp, i);
+		addr = macb_get_addr(bp, desc);
+
		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
 static void gem_init_rings(struct macb *bp)
 {
	struct macb_queue *queue;
+	struct macb_dma_desc *desc = NULL;
	unsigned int q;
	int i;
 
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		for (i = 0; i < bp->tx_ring_size; i++) {
-			queue->tx_ring[i].addr = 0;
-			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+			desc = macb_tx_desc(queue, i);
+			macb_set_addr(bp, desc, 0);
+			desc->ctrl = MACB_BIT(TX_USED);
		}
-		queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+		desc->ctrl |= MACB_BIT(TX_WRAP);
		queue->tx_head = 0;
		queue->tx_tail = 0;
	}
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
 static void macb_init_rings(struct macb *bp)
 {
	int i;
+	struct macb_dma_desc *desc = NULL;
 
	macb_init_rx_ring(bp);
 
	for (i = 0; i < bp->tx_ring_size; i++) {
-		bp->queues[0].tx_ring[i].addr = 0;
-		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+		desc = macb_tx_desc(&bp->queues[0], i);
+		macb_set_addr(bp, desc, 0);
+		desc->ctrl = MACB_BIT(TX_USED);
	}
	bp->queues[0].tx_head = 0;
	bp->queues[0].tx_tail = 0;
-	bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+	desc->ctrl |= MACB_BIT(TX_WRAP);
 }
 
 static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
			dmacfg &= ~GEM_BIT(TXCOEN);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		dmacfg |= GEM_BIT(ADDR64);
+		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+			dmacfg |= GEM_BIT(ADDR64);
 #endif
		netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
			   dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
	macb_configure_dma(bp);
 
	/* Initialize TX and RX buffers */
-	macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+	macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+	if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+		macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
 #endif
	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-		queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+		if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+			queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
 #endif
 
		/* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
			queue->IMR  = GEM_IMR(hw_q - 1);
			queue->TBQP = GEM_TBQP(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = GEM_TBQPH(hw_q -1);
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = GEM_TBQPH(hw_q - 1);
 #endif
		} else {
			/* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
			queue->IMR  = MACB_IMR;
			queue->TBQP = MACB_TBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-			queue->TBQPH = MACB_TBQPH;
+			if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+				queue->TBQPH = MACB_TBQPH;
 #endif
		}
 
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
 static int at91ether_start(struct net_device *dev)
 {
	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
	dma_addr_t addr;
	u32 ctl;
	int i;
 
	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
					 (AT91ETHER_MAX_RX_DESCR *
-					  sizeof(struct macb_dma_desc)),
+					  macb_dma_desc_get_size(lp)),
					 &lp->rx_ring_dma, GFP_KERNEL);
	if (!lp->rx_ring)
		return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
	if (!lp->rx_buffers) {
		dma_free_coherent(&lp->pdev->dev,
				  AT91ETHER_MAX_RX_DESCR *
-				  sizeof(struct macb_dma_desc),
+				  macb_dma_desc_get_size(lp),
				  lp->rx_ring, lp->rx_ring_dma);
		lp->rx_ring = NULL;
		return -ENOMEM;
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
 
	addr = lp->rx_buffers_dma;
	for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-		lp->rx_ring[i].addr = addr;
-		lp->rx_ring[i].ctrl = 0;
+		desc = macb_rx_desc(lp, i);
+		macb_set_addr(lp, desc, addr);
+		desc->ctrl = 0;
		addr += AT91ETHER_MAX_RBUFF_SZ;
	}
 
	/* Set the Wrap bit on the last descriptor */
-	lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+	desc->addr |= MACB_BIT(RX_WRAP);
 
	/* Reset buffer index */
	lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
 
	dma_free_coherent(&lp->pdev->dev,
			  AT91ETHER_MAX_RX_DESCR *
-			  sizeof(struct macb_dma_desc),
+			  macb_dma_desc_get_size(lp),
			  lp->rx_ring, lp->rx_ring_dma);
	lp->rx_ring = NULL;
 
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
	struct macb *lp = netdev_priv(dev);
+	struct macb_dma_desc *desc;
	unsigned char *p_recv;
	struct sk_buff *skb;
	unsigned int pktlen;
 
-	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+	desc = macb_rx_desc(lp, lp->rx_tail);
+	while (desc->addr & MACB_BIT(RX_USED)) {
		p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
-		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+		pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
		skb = netdev_alloc_skb(dev, pktlen + 2);
		if (skb) {
			skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
			lp->stats.rx_dropped++;
		}
 
-		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+		if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
			lp->stats.multicast++;
 
		/* reset ownership bit */
-		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+		desc->addr &= ~MACB_BIT(RX_USED);
 
		/* wrap after last buffer */
		if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
			lp->rx_tail = 0;
		else
			lp->rx_tail++;
+
+		desc = macb_rx_desc(lp, lp->rx_tail);
	}
 }
 
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
	device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+		bp->hw_dma_cap = HW_DMA_CAP_64B;
+	} else
+		bp->hw_dma_cap = HW_DMA_CAP_32B;
 #endif
 
	spin_lock_init(&bp->lock);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d67adad67be1..fc8550a5d47f 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -385,6 +385,8 @@
 /* Bitfields in DCFG6. */
 #define GEM_PBUF_LSO_OFFSET			27
 #define GEM_PBUF_LSO_SIZE			1
+#define GEM_DAW64_OFFSET			23
+#define GEM_DAW64_SIZE				1
 
 /* Constants for CLK */
 #define MACB_CLK_DIV8				0
@@ -487,12 +489,20 @@
 struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
+};
+
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-	u32	addrh;
-	u32	resvd;
-#endif
+enum macb_hw_dma_cap {
+	HW_DMA_CAP_32B,
+	HW_DMA_CAP_64B,
 };
 
+struct macb_dma_desc_64 {
+	u32 addrh;
+	u32 resvd;
+};
+#endif
+
 /* DMA descriptor bitfields */
 #define MACB_RX_USED_OFFSET			0
 #define MACB_RX_USED_SIZE			1
@@ -874,6 +884,10 @@ struct macb {
	unsigned int		jumbo_max_len;
 
	u32	wol;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	enum macb_hw_dma_cap hw_dma_cap;
+#endif
 };
 
 static inline bool macb_is_gem(struct macb *bp)
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 67befedef709..578c7f8f11bf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
	int speed = 2;
 
	if (!xcv) {
-		dev_err(&xcv->pdev->dev,
-			"XCV init not done, probe may have failed\n");
+		pr_err("XCV init not done, probe may have failed\n");
		return;
	}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 1a7f8ad7b9c6..cd49a54c538d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
		status = -EPERM;
		goto err;
	}
-done:
+
+	/* Remember currently programmed MAC */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
 {
	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT))
+	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
		be_dev_mac_del(adapter, adapter->pmac_id[0]);
+		eth_zero_addr(adapter->dev_mac);
+	}
 
	be_clear_uc_list(adapter);
	be_clear_mc_list(adapter);
@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
	if (status)
		return status;
 
-	/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
-	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
-	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+	/* Normally this condition usually true as the ->dev_mac is zeroed.
+	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
+	 * subsequent be_dev_mac_add() can fail (after fresh boot)
+	 */
+	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+		int old_pmac_id = -1;
+
+		/* Remember old programmed MAC if any - can happen on BE3 VF */
+		if (!is_zero_ether_addr(adapter->dev_mac))
+			old_pmac_id = adapter->pmac_id[0];
+
		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
		if (status)
			return status;
+
+		/* Delete the old programmed MAC as we successfully programmed
+		 * a new MAC
+		 */
+		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+			be_dev_mac_del(adapter, old_pmac_id);
+
+		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
	}
 
@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
 
		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+		/* Initial MAC for BE3 VFs is already programmed by PF */
+		if (BEx_chip(adapter) && be_virtfn(adapter))
+			memcpy(adapter->dev_mac, mac, ETH_ALEN);
	}
 
	return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index c1b671667920..957bfc220978 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
		if (!rxb->page)
			continue;
 
-		dma_unmap_single(rx_queue->dev, rxb->dma,
-				 PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_unmap_page(rx_queue->dev, rxb->dma,
+			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(rxb->page);
 
		rxb->page = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c7e939945259..53daa6ca5d83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
	return -ETIMEDOUT;
 }
 
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
 {
	return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
		(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0e8b7c44931f..8258d08acd8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
		return;
 
	mlx4_stop_catas_poll(dev);
+	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+	    mlx4_is_slave(dev)) {
+		/* In mlx4_remove_one on a VF */
+		u32 slave_read =
+			swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+		if (mlx4_comm_internal_err(slave_read)) {
+			mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+				 __func__);
+			mlx4_enter_error_state(dev->persist);
+		}
+	}
	mutex_lock(&intf_mutex);
 
	list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 88ee7d8a5923..086920b615af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
 void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
 
 void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
 
 int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
		    enum mlx4_port_type *type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3797cc7c1288..caa837e5e2b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
-		err = -ENOTSUPP;
+		err = -EOPNOTSUPP;
		goto err_free_page;
	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 951dbd58594d..d5ecb8f53fd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
 
 static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 {
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 #else
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index f0b460f47f29..0523ed47f597 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
	int i;
 
	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
	int err;
 
	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	err = mlx5e_dbcnl_validate_ets(netdev, ets);
	if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
	struct mlx5_core_dev *mdev = priv->mdev;
	struct ieee_ets ets;
	struct ieee_pfc pfc;
-	int err = -ENOTSUPP;
+	int err = -EOPNOTSUPP;
	int i;
 
	if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
 
+	if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+		netdev_err(netdev, "%s, ets is not supported\n", __func__);
+		return;
+	}
+
	if (priority >= CEE_DCBX_MAX_PRIO) {
		netdev_err(netdev,
			   "%s, priority is out of range\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 5197817e4b2f..bb67863aa361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
	struct mlx5e_priv *priv = netdev_priv(netdev);
 
	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
	int i;
 
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	mutex_lock(&priv->state_lock);
@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-	int i;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+	int tt;
 
	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
 
-	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-		mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(tirc, 0, ctxlen);
+		mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+		mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+	}
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
	struct mlx5e_priv *priv = netdev_priv(dev);
	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	bool hash_changed = false;
	void *in;
 
	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
	}
 
-	if (key)
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+	    hfunc != priv->params.rss_hfunc) {
+		priv->params.rss_hfunc = hfunc;
+		hash_changed = true;
+	}
+
+	if (key) {
		memcpy(priv->params.toeplitz_hash_key, key,
		       sizeof(priv->params.toeplitz_hash_key));
+		hash_changed = hash_changed ||
+			       priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+	}
 
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-		priv->params.rss_hfunc = hfunc;
-
-	mlx5e_modify_tirs_hash(priv, in, inlen);
+	if (hash_changed)
+		mlx5e_modify_tirs_hash(priv, in, inlen);
 
	mutex_unlock(&priv->state_lock);
 
@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
	u32 mlx5_wol_mode;
 
	if (!wol_supported)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	if (wol->wolopts & ~wol_supported)
		return -EINVAL;
@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 
	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	if (!rx_mode_changed)
		return 0;
@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
	bool reset;
 
	if (!MLX5_CAP_GEN(mdev, cqe_compression))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
		netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1fe80de5d68f..a0e5a69402b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
					       MLX5_FLOW_NAMESPACE_KERNEL);
 
	if (!priv->fs.ns)
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
	err = mlx5e_arfs_create_tables(priv);
	if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d088effd7160..f33f72d0237c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
	ns = mlx5_get_flow_namespace(priv->mdev,
				     MLX5_FLOW_NAMESPACE_ETHTOOL);
	if (!ns)
-		return ERR_PTR(-ENOTSUPP);
+		return ERR_PTR(-EOPNOTSUPP);
 
	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
						       flow_table_properties_nic_receive.log_max_ft_size)),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2b7dd315020c..f14ca3385fdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+				    enum mlx5e_traffic_types tt)
 {
+	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP		(MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
	MLX5_SET(tirc, tirc, rx_hash_fn,
		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
	}
+
+	switch (tt) {
+	case MLX5E_TT_IPV4_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+
+	case MLX5E_TT_IPV6:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+	default:
+		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+	}
 }
 
 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
				      enum mlx5e_traffic_types tt)
 {
-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-#define MLX5_HASH_IP		(MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
-				 MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI	(MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
	mlx5e_build_tir_ctx_lro(tirc, priv);
 
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
-
-	switch (tt) {
-	case MLX5E_TT_IPV4_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-
-	case MLX5E_TT_IPV6:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-	default:
-		WARN_ONCE(true,
-			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-	}
+	mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
					       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 46bef6a26a8c..c5282b6aba8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   __be32 *saddr,
				   int *out_ttl)
 {
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;
	int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
	return -EOPNOTSUPP;
 #endif
-
-	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-		pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
-		ip_rt_put(rt);
-		return -EOPNOTSUPP;
-	}
+	/* if the egress device isn't on the same HW e-switch, we use the uplink */
+	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	else
+		*out_dev = rt->dst.dev;
 
	ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
	*out_n = n;
	*saddr = fl4->saddr;
	*out_ttl = ttl;
-	*out_dev = rt->dst.dev;
 
	return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f14d9c9ba773..d0c8bf014453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 
	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
	}
 
	flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
	}
 
	flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int
esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-		return -EIO;
+		return -EOPNOTSUPP;
	}
 
	flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
	}
 
	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 03293ed1cc22..595f7c7383b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
	return 0;
 
 out_notsupp:
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		err = -EOPNOTSUPP;
		goto ns_err;
	}
 
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-		return -ENOMEM;
+		return -EOPNOTSUPP;
	}
 
	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
	int vport;
	int err;
 
+	/* disable PF RoCE so missed packets don't go through RoCE steering */
+	mlx5_dev_list_lock();
+	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
-		return err;
+		goto create_fdb_err;
 
	err = esw_create_offloads_table(esw);
	if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
			goto err_reps;
	}
 
-	/* disable PF RoCE so missed packets don't go through RoCE steering */
-	mlx5_dev_list_lock();
-	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
	return 0;
 
 err_reps:
@@ -717,6 +718,13 @@ create_fg_err:
 
 create_ft_err:
	esw_destroy_offloads_fdb_table(esw);
+
+create_fdb_err:
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
	return err;
 }
 
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
-	/* enable back PF RoCE */
-	mlx5_dev_list_lock();
-	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-	mlx5_dev_list_unlock();
-
	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
	}
 
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index c4478ecd8056..b53fc85a2375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
					  flow_table_properties_nic_receive.
					  flow_modify_en);
	if (!atomic_mod_cap)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
	opmod = 1;
 
	return	mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0ac7a2fc916c..6346a8f5883b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
	struct mlx5_flow_table *ft;
 
	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
-	if (!ns)
+	if (WARN_ON(!ns))
		return -EINVAL;
	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
	if (IS_ERR(ft)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d01e9f21d469..3c315eb8d270 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
		return 0;
	}
 
-	return -ENOTSUPP;
+	return -EOPNOTSUPP;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index d2ec9d232a70..fd12e0a377a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
 
	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
				    MLX5_REG_QETCR, 0, 1);
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
	u32 in[MLX5_ST_SZ_DW(qtct_reg)];
 
	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	memset(in, 0, sizeof(in));
	return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 269e4401c342..7129c30a2ab4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;
 
	in = mlx5_vzalloc(inlen);
	if (!in)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index be3c91c7f211..5484fd726d5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
 {
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+	u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
	int ret = 0;
 
+	/* Discard masked bits */
+	intr_status &= ~intr_mask;
+
	/* Not used events (e.g. MMC interrupts) are not handled. */
	if ((intr_status & GMAC_INT_STATUS_MMCTIS))
		x->mmc_tx_irq_n++;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e55809c5beb7..6742070ca676 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1012,7 +1012,7 @@ static struct phy_driver ksphy_driver[] = {
	.phy_id		= PHY_ID_KSZ8795,
	.phy_id_mask	= MICREL_PHY_ID_MASK,
	.name		= "Micrel KSZ8795",
-	.features	= (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+	.features	= PHY_BASIC_FEATURES,
	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
	.config_init	= kszphy_config_init,
	.config_aneg	= ksz8873mll_config_aneg,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index d02ca1491d16..8d3e53fac1da 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -91,7 +91,7 @@
 
 #define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
-	IWL8000_FW_PRE "-" __stringify(api) ".ucode"
+	IWL8000_FW_PRE __stringify(api) ".ucode"
 
 #define IWL8265_FW_PRE "iwlwifi-8265-"
 #define IWL8265_MODULE_FIRMWARE(api) \
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 636c8b03e318..09e9e2e3ed04 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1164,9 +1164,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
		.frame_limit = IWL_FRAME_LIMIT,
	};
 
-	/* Make sure reserved queue is still marked as such (or allocated) */
-	mvm->queue_info[mvm_sta->reserved_queue].status =
-		IWL_MVM_QUEUE_RESERVED;
+	/* Make sure reserved queue is still marked as such (if allocated) */
+	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+		mvm->queue_info[mvm_sta->reserved_queue].status =
+			IWL_MVM_QUEUE_RESERVED;
 
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 63a051be832e..bec7d9c46087 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
		return;
 
	IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
-	thermal_zone_device_unregister(mvm->tz_device.tzone);
-	mvm->tz_device.tzone = NULL;
+	if (mvm->tz_device.tzone) {
+		thermal_zone_device_unregister(mvm->tz_device.tzone);
+		mvm->tz_device.tzone = NULL;
+	}
 }
 
 static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
		return;
 
	IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
-	thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
-	mvm->cooling_dev.cdev = NULL;
+	if (mvm->cooling_dev.cdev) {
+		thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+		mvm->cooling_dev.cdev = NULL;
+	}
 }
 
 #endif /* CONFIG_THERMAL */