Diffstat (limited to 'drivers/net/ethernet')
429 files changed, 36302 insertions, 11328 deletions
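Much of the mechanical churn in this diff comes from the treewide replacement of open-coded "dev->trans_start = jiffies;" stores with the netif_trans_update() helper. For reference, a sketch of the helper as it appeared in include/linux/netdevice.h around this series (the exact body may differ slightly between releases):

        static inline void netif_trans_update(struct net_device *dev)
        {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

                if (txq->trans_start != jiffies)
                        txq->trans_start = jiffies;
        }

Funneling the timestamp through queue 0's trans_start keeps the TX watchdog's timeout detection working (hence the "prevent tx timeout" comments below) while paving the way for removing the legacy trans_start field from struct net_device itself.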
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 7677c745fb30..91ada52f776b 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -699,7 +699,7 @@ el3_tx_timeout (struct net_device *dev)
 		dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
 		inw(ioaddr + TX_FREE));
 	dev->stats.tx_errors++;
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	/* Issue TX_RESET and TX_START commands. */
 	outw(TxReset, ioaddr + EL3_CMD);
 	outw(TxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 942fb0d5aace..b26e038b4a0e 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -992,7 +992,7 @@ static void corkscrew_timeout(struct net_device *dev)
 		if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
 			break;
 	outw(TxEnable, ioaddr + EL3_CMD);
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	dev->stats.tx_errors++;
 	dev->stats.tx_dropped++;
 	netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index b9948f00c5e9..b88afd759307 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -700,7 +700,7 @@ static void el3_tx_timeout(struct net_device *dev)
 	netdev_notice(dev, "Transmit timed out!\n");
 	dump_status(dev);
 	dev->stats.tx_errors++;
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	/* Issue TX_RESET and TX_START commands. */
 	tc574_wait_for_completion(dev, TxReset);
 	outw(TxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index c5a320507556..71396e4b87e3 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -534,7 +534,7 @@ static void el3_tx_timeout(struct net_device *dev)
 	netdev_warn(dev, "Transmit timed out!\n");
 	dump_status(dev);
 	dev->stats.tx_errors++;
-	dev->trans_start = jiffies; /* prevent tx timeout */
+	netif_trans_update(dev); /* prevent tx timeout */
 	/* Issue TX_RESET and TX_START commands.
*/ tc589_wait_for_completion(dev, TxReset); outw(TxEnable, ioaddr + EL3_CMD); diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index d81fceddbe0e..25c55ab05c7d 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1944,7 +1944,7 @@ static void vortex_tx_timeout(struct net_device *dev) } /* Issue Tx Enable */ iowrite16(TxEnable, ioaddr + EL3_CMD); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } /* diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index ec6eac1f8c95..4ea717d68c95 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -1041,7 +1041,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb, { ei_local->txing = 1; NS8390_trigger_send(dev, send_length, output_page); - dev->trans_start = jiffies; + netif_trans_update(dev); if (output_page == ei_local->tx_start_page) { ei_local->tx1 = -1; @@ -1270,7 +1270,7 @@ static void ei_tx_intr(struct net_device *dev) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); - dev->trans_start = jiffies; + netif_trans_update(dev); ei_local->tx2 = -1, ei_local->lasttx = 2; } @@ -1287,7 +1287,7 @@ static void ei_tx_intr(struct net_device *dev) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); - dev->trans_start = jiffies; + netif_trans_update(dev); ei_local->tx1 = -1; ei_local->lasttx = 1; } diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c index b96e8852b2d1..60f8e2c8e726 100644 --- a/drivers/net/ethernet/8390/lib8390.c +++ b/drivers/net/ethernet/8390/lib8390.c @@ -596,7 +596,7 @@ static void ei_tx_intr(struct net_device *dev) if (ei_local->tx2 > 0) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); - dev->trans_start = jiffies; + netif_trans_update(dev); ei_local->tx2 = -1, ei_local->lasttx = 2; } else @@ -609,7 +609,7 @@ static void ei_tx_intr(struct net_device *dev) if (ei_local->tx1 > 0) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); - dev->trans_start = jiffies; + netif_trans_update(dev); ei_local->tx1 = -1; ei_local->lasttx = 1; } else diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index ac7288240d55..1d1069641d81 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -1129,7 +1129,7 @@ static void tx_timeout(struct net_device *dev) /* Trigger an immediate transmit demand. 
*/ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index 74139cb7f849..3d2245fdc283 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1430,7 +1430,7 @@ static void bfin_mac_timeout(struct net_device *dev) bfin_mac_enable(lp->phydev); /* We can accept TX packets again */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } static void bfin_mac_multicast_hash(struct net_device *dev) diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 0907ab6ff309..30defe6c81f2 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3349,7 +3349,7 @@ static void et131x_down(struct net_device *netdev) struct et131x_adapter *adapter = netdev_priv(netdev); /* Save the timestamp for the TX watchdog, prevent a timeout */ - netdev->trans_start = jiffies; + netif_trans_update(netdev); phy_stop(adapter->phydev); et131x_disable_txrx(netdev); @@ -3816,7 +3816,7 @@ static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev) netif_stop_queue(netdev); /* Save the timestamp for the TX timeout watchdog */ - netdev->trans_start = jiffies; + netif_trans_update(netdev); /* TCB is not available */ if (tx_ring->used >= NUM_TCB) diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 8d50314ac3eb..de2c4bf5fac4 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -428,7 +428,7 @@ static void emac_timeout(struct net_device *dev) emac_reset(db); emac_init_device(dev); /* We can accept TX packets again */ - dev->trans_start = jiffies; + netif_trans_update(dev); netif_wake_queue(dev); /* Restore previous register address */ @@ -468,7 +468,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) db->membase + EMAC_TX_CTL0_REG); /* save the time stamp */ - dev->trans_start = jiffies; + netif_trans_update(dev); } else if (channel == 1) { /* set TX len */ writel(skb->len, db->membase + EMAC_TX_PL1_REG); @@ -477,7 +477,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev) db->membase + EMAC_TX_CTL1_REG); /* save the time stamp */ - dev->trans_start = jiffies; + netif_trans_update(dev); } if ((db->tx_fifo_stat & 3) == 3) { diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c index 66d0b73c39c0..dcf2a1f3643d 100644 --- a/drivers/net/ethernet/amd/7990.c +++ b/drivers/net/ethernet/amd/7990.c @@ -260,7 +260,7 @@ static int lance_reset(struct net_device *dev) load_csrs(lp); lance_init_ring(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ status = init_restart_lance(lp); #ifdef DEBUG_DRIVER printk("Lance restart=%d\n", status); @@ -530,7 +530,7 @@ void lance_tx_timeout(struct net_device *dev) { printk("lance_tx_timeout\n"); lance_reset(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } EXPORT_SYMBOL_GPL(lance_tx_timeout); @@ -543,11 +543,13 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) static int outs; unsigned long flags; - if (!TX_BUFFS_AVAIL) - return NETDEV_TX_LOCKED; - netif_stop_queue(dev); + if 
(!TX_BUFFS_AVAIL) { + dev_consume_skb_any(skb); + return NETDEV_TX_OK; + } + skblen = skb->len; #ifdef DEBUG_DRIVER diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index 56139184b801..a83cd1c4ce1d 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -512,7 +512,7 @@ static inline int lance_reset(struct net_device *dev) load_csrs(lp); lance_init_ring(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_start_queue(dev); status = init_restart_lance(lp); @@ -547,10 +547,8 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb, local_irq_save(flags); - if (!lance_tx_buffs_avail(lp)) { - local_irq_restore(flags); - return NETDEV_TX_LOCKED; - } + if (!lance_tx_buffs_avail(lp)) + goto out_free; #ifdef DEBUG /* dump the packet */ @@ -573,6 +571,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb, /* Kick the lance: transmit now */ ll->rdp = LE_C0_INEA | LE_C0_TDMD; + out_free: dev_kfree_skb(skb); local_irq_restore(flags); diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index b10964e8cb54..d2bc8e5dcd23 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -764,7 +764,7 @@ static void lance_tx_timeout (struct net_device *dev) /* lance_restart, essentially */ lance_init_ring(dev); REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index d3977d032b48..9af309e017fd 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1074,7 +1074,7 @@ static void au1000_tx_timeout(struct net_device *dev) netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev); au1000_reset_mac(dev); au1000_init(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index b584b78237df..b799c7ac899b 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -877,7 +877,7 @@ static inline int lance_reset(struct net_device *dev) lance_init_ring(dev); load_csrs(lp); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ status = init_restart_lance(lp); return status; } diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 3a7ebfdda57d..abb1ba228b26 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -943,7 +943,7 @@ static void lance_tx_timeout (struct net_device *dev) #endif lance_restart (dev, 0x0043, 1); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue (dev); } diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index 1cf33addd15e..cda53db75f17 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -782,7 +782,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p) if(!p->lock) if (p->tmdnum || !p->xmit_queued) netif_wake_queue(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } else writedatareg(CSR0_STRT | csr0); @@ 
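The lance_start_xmit() hunks above (7990.c, a2065.c) are part of retiring the NETDEV_TX_LOCKED return code: an .ndo_start_xmit() implementation may no longer ask the core to re-queue and replay the skb. A driver that still finds no free descriptors at this point (the queue should already have been stopped before it could happen) now consumes the packet and reports success. A minimal sketch of the resulting pattern, with foo_* names standing in for a hypothetical driver:

        static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
        {
                struct foo_priv *priv = netdev_priv(dev);

                if (!foo_tx_buffs_avail(priv)) {
                        /* Formerly "return NETDEV_TX_LOCKED", which asked
                         * the core to retry the skb later.  The queue stays
                         * stopped until TX completion restarts it, so treat
                         * this as a drop, not a retry.
                         */
                        dev_consume_skb_any(skb);
                        return NETDEV_TX_OK;
                }

                /* ... hand the packet to hardware, stop the queue when the
                 * ring fills ...
                 */
                return NETDEV_TX_OK;
        }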
-1148,7 +1148,7 @@ static void ni65_timeout(struct net_device *dev) printk("%02x ",p->tmdhead[i].u.s.status); printk("\n"); ni65_lance_reinit(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c index 27245efe9f50..2807e181647b 100644 --- a/drivers/net/ethernet/amd/nmclan_cs.c +++ b/drivers/net/ethernet/amd/nmclan_cs.c @@ -851,7 +851,7 @@ static void mace_tx_timeout(struct net_device *dev) #else /* #if RESET_ON_TIMEOUT */ pr_cont("NOT resetting card\n"); #endif /* #if RESET_ON_TIMEOUT */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 7ccebae9cb48..c22bf52d3320 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -448,7 +448,7 @@ static void pcnet32_netif_stop(struct net_device *dev) { struct pcnet32_private *lp = netdev_priv(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ napi_disable(&lp->napi); netif_tx_disable(dev); } @@ -2426,7 +2426,7 @@ static void pcnet32_tx_timeout(struct net_device *dev) } pcnet32_restart(dev, CSR0_NORMAL); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); spin_unlock_irqrestore(&lp->lock, flags); diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 7847638bdd22..9b56b40259dc 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -997,7 +997,7 @@ static int lance_reset(struct net_device *dev) } lp->init_ring(dev); load_csrs(lp); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ status = init_restart_lance(lp); return status; } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c index b212488606da..472c0fb3f4c4 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c @@ -43,6 +43,7 @@ static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel, static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata, struct xgene_cle_dbptr *dbptr, u32 *buf) { + buf[0] = SET_VAL(CLE_DROP, dbptr->drop); buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) | SET_VAL(CLE_DSTQIDL, dbptr->dstqid); @@ -412,7 +413,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) .branch = { { /* IPV4 */ - .valid = 0, + .valid = 1, .next_packet_pointer = 22, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, @@ -420,7 +421,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) .next_node = PKT_PROT_NODE, .next_branch = 0, .data = 0x8, - .mask = 0xffff + .mask = 0x0 }, { .valid = 0, @@ -456,7 +457,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) .next_node = RSS_IPV4_TCP_NODE, .next_branch = 0, .data = 0x0600, - .mask = 0xffff + .mask = 0x00ff }, { /* UDP */ @@ -468,7 +469,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) .next_node = RSS_IPV4_UDP_NODE, .next_branch = 0, .data = 0x1100, - .mask = 0xffff + .mask = 0x00ff }, { .valid = 0, @@ -642,7 +643,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) { /* TCP DST Port */ .valid = 0, - .next_packet_pointer = 256, + 
.next_packet_pointer = 258, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, @@ -729,6 +730,6 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) return xgene_cle_setup_ptree(pdata, enet_cle); } -struct xgene_cle_ops xgene_cle3in_ops = { +const struct xgene_cle_ops xgene_cle3in_ops = { .cle_init = xgene_enet_cle_init, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h index 29a17abdd828..33c5f6b25824 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h @@ -83,6 +83,8 @@ #define CLE_TYPE_POS 0 #define CLE_TYPE_LEN 2 +#define CLE_DROP_POS 28 +#define CLE_DROP_LEN 1 #define CLE_DSTQIDL_POS 25 #define CLE_DSTQIDL_LEN 7 #define CLE_DSTQIDH_POS 0 @@ -290,6 +292,6 @@ struct xgene_enet_cle { u32 jump_bytes; }; -extern struct xgene_cle_ops xgene_cle3in_ops; +extern const struct xgene_cle_ops xgene_cle3in_ops; #endif /* __XGENE_ENET_CLE_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 39e081a70f5b..2f5638f7f864 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -219,27 +219,30 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, struct xgene_enet_pdata *pdata, enum xgene_enet_err_code status) { - struct rtnl_link_stats64 *stats = &pdata->stats; - switch (status) { case INGRESS_CRC: - stats->rx_crc_errors++; + ring->rx_crc_errors++; + ring->rx_dropped++; break; case INGRESS_CHECKSUM: case INGRESS_CHECKSUM_COMPUTE: - stats->rx_errors++; + ring->rx_errors++; + ring->rx_dropped++; break; case INGRESS_TRUNC_FRAME: - stats->rx_frame_errors++; + ring->rx_frame_errors++; + ring->rx_dropped++; break; case INGRESS_PKT_LEN: - stats->rx_length_errors++; + ring->rx_length_errors++; + ring->rx_dropped++; break; case INGRESS_PKT_UNDER: - stats->rx_frame_errors++; + ring->rx_frame_errors++; + ring->rx_dropped++; break; case INGRESS_FIFO_OVERRUN: - stats->rx_fifo_errors++; + ring->rx_fifo_errors++; break; default: break; @@ -824,7 +827,7 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata, return -EINVAL; phy = get_phy_device(mdio, phy_id, false); - if (!phy || IS_ERR(phy)) + if (IS_ERR(phy)) return -EIO; ret = phy_device_register(phy); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index ba7da98af2ef..45220be3122f 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -86,7 +86,7 @@ enum xgene_enet_rm { #define RINGADDRL_POS 5 #define RINGADDRL_LEN 27 #define RINGADDRH_POS 0 -#define RINGADDRH_LEN 6 +#define RINGADDRH_LEN 7 #define RINGSIZE_POS 23 #define RINGSIZE_LEN 3 #define RINGTYPE_POS 19 @@ -94,9 +94,9 @@ enum xgene_enet_rm { #define RINGMODE_POS 20 #define RINGMODE_LEN 3 #define RECOMTIMEOUTL_POS 28 -#define RECOMTIMEOUTL_LEN 3 +#define RECOMTIMEOUTL_LEN 4 #define RECOMTIMEOUTH_POS 0 -#define RECOMTIMEOUTH_LEN 2 +#define RECOMTIMEOUTH_LEN 3 #define NUMMSGSINQ_POS 1 #define NUMMSGSINQ_LEN 16 #define ACCEPTLERR BIT(19) @@ -201,6 +201,8 @@ enum xgene_enet_rm { #define USERINFO_LEN 32 #define FPQNUM_POS 32 #define FPQNUM_LEN 12 +#define ELERR_POS 46 +#define ELERR_LEN 2 #define NV_POS 50 #define NV_LEN 1 #define LL_POS 51 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 8d4c1ad2fc60..d208b172f4d7 100644 --- 
a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -443,8 +443,8 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, skb_tx_timestamp(skb); - pdata->stats.tx_packets++; - pdata->stats.tx_bytes += skb->len; + tx_ring->tx_packets++; + tx_ring->tx_bytes += skb->len; pdata->ring_ops->wr_cmd(tx_ring, count); return NETDEV_TX_OK; @@ -483,12 +483,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, skb = buf_pool->rx_skb[skb_index]; /* checking for error */ - status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); + status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) || + GET_VAL(LERR, le64_to_cpu(raw_desc->m0)); if (unlikely(status > 2)) { dev_kfree_skb_any(skb); xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev), status); - pdata->stats.rx_dropped++; ret = -EIO; goto out; } @@ -506,8 +506,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring, xgene_enet_skip_csum(skb); } - pdata->stats.rx_packets++; - pdata->stats.rx_bytes += datalen; + rx_ring->rx_packets++; + rx_ring->rx_bytes += datalen; napi_gro_receive(&rx_ring->napi, skb); out: if (--rx_ring->nbufpool == 0) { @@ -630,7 +630,7 @@ static int xgene_enet_register_irq(struct net_device *ndev) ring = pdata->rx_ring[i]; irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, - IRQF_SHARED, ring->irq_name, ring); + 0, ring->irq_name, ring); if (ret) { netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name); @@ -641,7 +641,7 @@ static int xgene_enet_register_irq(struct net_device *ndev) ring = pdata->tx_ring[i]->cp_ring; irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, - IRQF_SHARED, ring->irq_name, ring); + 0, ring->irq_name, ring); if (ret) { netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name); @@ -973,6 +973,17 @@ static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p) return owner; } +static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata) +{ + struct device *dev = &pdata->pdev->dev; + u32 cpu_bufnum; + int ret; + + ret = device_property_read_u32(dev, "channel", &cpu_bufnum); + + return (!ret) ? 
cpu_bufnum : pdata->cpu_bufnum; +} + static int xgene_enet_create_desc_rings(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); @@ -981,13 +992,15 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) struct xgene_enet_desc_ring *buf_pool = NULL; enum xgene_ring_owner owner; dma_addr_t dma_exp_bufs; - u8 cpu_bufnum = pdata->cpu_bufnum; + u8 cpu_bufnum; u8 eth_bufnum = pdata->eth_bufnum; u8 bp_bufnum = pdata->bp_bufnum; u16 ring_num = pdata->ring_num; u16 ring_id; int i, ret, size; + cpu_bufnum = xgene_start_cpu_bufnum(pdata); + for (i = 0; i < pdata->rxq_cnt; i++) { /* allocate rx descriptor ring */ owner = xgene_derive_ring_owner(pdata); @@ -1114,12 +1127,31 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64( { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct rtnl_link_stats64 *stats = &pdata->stats; + struct xgene_enet_desc_ring *ring; + int i; - stats->rx_errors += stats->rx_length_errors + - stats->rx_crc_errors + - stats->rx_frame_errors + - stats->rx_fifo_errors; - memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64)); + memset(stats, 0, sizeof(struct rtnl_link_stats64)); + for (i = 0; i < pdata->txq_cnt; i++) { + ring = pdata->tx_ring[i]; + if (ring) { + stats->tx_packets += ring->tx_packets; + stats->tx_bytes += ring->tx_bytes; + } + } + + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + if (ring) { + stats->rx_packets += ring->rx_packets; + stats->rx_bytes += ring->rx_bytes; + stats->rx_errors += ring->rx_length_errors + + ring->rx_crc_errors + + ring->rx_frame_errors + + ring->rx_fifo_errors; + stats->rx_dropped += ring->rx_dropped; + } + } + memcpy(storage, stats, sizeof(struct rtnl_link_stats64)); return storage; } @@ -1234,6 +1266,13 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) for (i = 0; i < max_irqs; i++) { ret = platform_get_irq(pdev, i); if (ret <= 0) { + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { + max_irqs = i; + pdata->rxq_cnt = max_irqs / 2; + pdata->txq_cnt = max_irqs / 2; + pdata->cq_cnt = max_irqs / 2; + break; + } dev_err(dev, "Unable to get ENET IRQ\n"); ret = ret ? 
: -ENXIO; return ret; @@ -1437,19 +1476,28 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) pdata->port_ops = &xgene_xgport_ops; pdata->cle_ops = &xgene_cle3in_ops; pdata->rm = RM0; - pdata->rxq_cnt = XGENE_NUM_RX_RING; - pdata->txq_cnt = XGENE_NUM_TX_RING; - pdata->cq_cnt = XGENE_NUM_TXC_RING; + if (!pdata->rxq_cnt) { + pdata->rxq_cnt = XGENE_NUM_RX_RING; + pdata->txq_cnt = XGENE_NUM_TX_RING; + pdata->cq_cnt = XGENE_NUM_TXC_RING; + } break; } if (pdata->enet_id == XGENE_ENET1) { switch (pdata->port_id) { case 0: - pdata->cpu_bufnum = START_CPU_BUFNUM_0; - pdata->eth_bufnum = START_ETH_BUFNUM_0; - pdata->bp_bufnum = START_BP_BUFNUM_0; - pdata->ring_num = START_RING_NUM_0; + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { + pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0; + pdata->eth_bufnum = X2_START_ETH_BUFNUM_0; + pdata->bp_bufnum = X2_START_BP_BUFNUM_0; + pdata->ring_num = START_RING_NUM_0; + } else { + pdata->cpu_bufnum = START_CPU_BUFNUM_0; + pdata->eth_bufnum = START_ETH_BUFNUM_0; + pdata->bp_bufnum = START_BP_BUFNUM_0; + pdata->ring_num = START_RING_NUM_0; + } break; case 1: if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { @@ -1595,21 +1643,22 @@ static int xgene_enet_probe(struct platform_device *pdev) ret = xgene_enet_init_hw(pdata); if (ret) - goto err; + goto err_netdev; mac_ops = pdata->mac_ops; if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) { ret = xgene_enet_mdio_config(pdata); if (ret) - goto err; + goto err_netdev; } else { INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state); } xgene_enet_napi_add(pdata); return 0; -err: +err_netdev: unregister_netdev(ndev); +err: free_netdev(ndev); return ret; } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 175d18890c7a..092fbeccaa20 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -49,10 +49,10 @@ #define XGENE_ENET_MSS 1448 #define XGENE_MIN_ENET_FRAME_SIZE 60 -#define XGENE_MAX_ENET_IRQ 8 -#define XGENE_NUM_RX_RING 4 -#define XGENE_NUM_TX_RING 4 -#define XGENE_NUM_TXC_RING 4 +#define XGENE_MAX_ENET_IRQ 16 +#define XGENE_NUM_RX_RING 8 +#define XGENE_NUM_TX_RING 8 +#define XGENE_NUM_TXC_RING 8 #define START_CPU_BUFNUM_0 0 #define START_ETH_BUFNUM_0 2 @@ -121,6 +121,16 @@ struct xgene_enet_desc_ring { struct xgene_enet_raw_desc16 *raw_desc16; }; __le64 *exp_bufs; + u64 tx_packets; + u64 tx_bytes; + u64 rx_packets; + u64 rx_bytes; + u64 rx_dropped; + u64 rx_errors; + u64 rx_length_errors; + u64 rx_crc_errors; + u64 rx_frame_errors; + u64 rx_fifo_errors; }; struct xgene_mac_ops { @@ -191,7 +201,7 @@ struct xgene_enet_pdata { const struct xgene_mac_ops *mac_ops; const struct xgene_port_ops *port_ops; struct xgene_ring_ops *ring_ops; - struct xgene_cle_ops *cle_ops; + const struct xgene_cle_ops *cle_ops; struct delayed_work link_work; u32 port_id; u8 cpu_bufnum; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h index 29a71b4dcc44..002df5a6756e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h @@ -33,7 +33,7 @@ #define LINK_STATUS BIT(2) #define LINK_UP BIT(15) #define MPA_IDLE_WITH_QMI_EMPTY BIT(12) -#define SG_RX_DV_GATE_REG_0_ADDR 0x0dfc +#define SG_RX_DV_GATE_REG_0_ADDR 0x05fc extern const struct xgene_mac_ops xgene_sgmac_ops; extern const struct xgene_port_ops xgene_sgport_ops; diff --git a/drivers/net/ethernet/atheros/alx/main.c 
b/drivers/net/ethernet/atheros/alx/main.c index 55b118e876fd..9fe8b5e310d1 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -745,7 +745,7 @@ static netdev_features_t alx_fix_features(struct net_device *netdev, static void alx_netif_stop(struct alx_priv *alx) { - alx->dev->trans_start = jiffies; + netif_trans_update(alx->dev); if (netif_carrier_ok(alx->dev)) { netif_carrier_off(alx->dev); netif_tx_disable(alx->dev); diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h index b9203d928938..c46b489ce9b4 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c.h +++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h @@ -488,7 +488,7 @@ struct atl1c_tpd_ring { dma_addr_t dma; /* descriptor ring physical address */ u16 size; /* descriptor ring length in bytes */ u16 count; /* number of descriptors in the ring */ - u16 next_to_use; /* this is protectd by adapter->tx_lock */ + u16 next_to_use; atomic_t next_to_clean; struct atl1c_buffer *buffer_info; }; @@ -542,7 +542,6 @@ struct atl1c_adapter { u16 link_duplex; spinlock_t mdio_lock; - spinlock_t tx_lock; atomic_t irq_sem; struct work_struct common_task; diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index d0084d4d1a9b..a3200ea6d765 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -821,7 +821,6 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter) atl1c_set_rxbufsize(adapter, adapter->netdev); atomic_set(&adapter->irq_sem, 1); spin_lock_init(&adapter->mdio_lock); - spin_lock_init(&adapter->tx_lock); set_bit(__AT_DOWN, &adapter->flags); return 0; @@ -2206,7 +2205,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl1c_adapter *adapter = netdev_priv(netdev); - unsigned long flags; u16 tpd_req = 1; struct atl1c_tpd_desc *tpd; enum atl1c_trans_queue type = atl1c_trans_normal; @@ -2217,16 +2215,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, } tpd_req = atl1c_cal_tpd_req(skb); - if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { - if (netif_msg_pktdata(adapter)) - dev_info(&adapter->pdev->dev, "tx locked\n"); - return NETDEV_TX_LOCKED; - } if (atl1c_tpd_avail(adapter, type) < tpd_req) { /* no enough descriptor, just stop queue */ netif_stop_queue(netdev); - spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_BUSY; } @@ -2234,7 +2226,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, /* do TSO and check sum */ if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) { - spin_unlock_irqrestore(&adapter->tx_lock, flags); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -2257,12 +2248,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, "tx-skb droppted due to dma error\n"); /* roll back tpd/buffer */ atl1c_tx_rollback(adapter, tpd, type); - spin_unlock_irqrestore(&adapter->tx_lock, flags); dev_kfree_skb_any(skb); } else { netdev_sent_queue(adapter->netdev, skb->len); atl1c_tx_queue(adapter, skb, tpd, type); - spin_unlock_irqrestore(&adapter->tx_lock, flags); } return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h index 0212dac7e23a..632bb843aed6 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e.h +++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h @@ -442,7 +442,6 @@ struct atl1e_adapter { u16 link_duplex; spinlock_t mdio_lock; - spinlock_t tx_lock; atomic_t irq_sem; 
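The atl1c/atl1e hunks here delete the driver-private adapter->tx_lock (atl1e also drops NETIF_F_LLTX a little further down). Without LLTX, the core already serializes .ndo_start_xmit() under the per-queue xmit lock, so the private spinlock only duplicated that protection. Roughly, from the core TX path in net/core/dev.c (heavily abridged sketch, not the drivers' code):

        /* __dev_queue_xmit(), simplified: HARD_TX_LOCK() takes
         * txq->_xmit_lock for any device without NETIF_F_LLTX, so the
         * driver's xmit hook runs with the queue already locked.
         */
        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_stopped(txq))
                skb = dev_hard_start_xmit(skb, dev, txq, &rc);
        HARD_TX_UNLOCK(dev, txq);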
struct work_struct reset_task; diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 59a03a193e83..974713b19ab6 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -648,7 +648,6 @@ static int atl1e_sw_init(struct atl1e_adapter *adapter) atomic_set(&adapter->irq_sem, 1); spin_lock_init(&adapter->mdio_lock); - spin_lock_init(&adapter->tx_lock); set_bit(__AT_DOWN, &adapter->flags); @@ -1866,7 +1865,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct atl1e_adapter *adapter = netdev_priv(netdev); - unsigned long flags; u16 tpd_req = 1; struct atl1e_tpd_desc *tpd; @@ -1880,13 +1878,10 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } tpd_req = atl1e_cal_tdp_req(skb); - if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) - return NETDEV_TX_LOCKED; if (atl1e_tpd_avail(adapter) < tpd_req) { /* no enough descriptor, just stop queue */ netif_stop_queue(netdev); - spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_BUSY; } @@ -1910,7 +1905,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, /* do TSO and check sum */ if (atl1e_tso_csum(adapter, skb, tpd) != 0) { - spin_unlock_irqrestore(&adapter->tx_lock, flags); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -1921,10 +1915,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, } atl1e_tx_queue(adapter, tpd_req, tpd); - - netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ out: - spin_unlock_irqrestore(&adapter->tx_lock, flags); return NETDEV_TX_OK; } @@ -2285,8 +2276,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_RX; - netdev->features = netdev->hw_features | NETIF_F_LLTX | - NETIF_F_HW_VLAN_CTAG_TX; + netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX; /* not enabled by default */ netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS; return 0; diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 8f76f4558a88..2ff465848b65 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = -EIO; - netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX; + netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX; netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); /* Init PHY as early as possible due to power saving issue */ diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 993c780bdfab..543bf38105c9 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -831,7 +831,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); /* re-enable RX interrupts */ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE); } @@ -873,7 +873,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) if (likely(napi_schedule_prep(&priv->napi))) { /* disable RX interrupts */ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE); - __napi_schedule(&priv->napi); + __napi_schedule_irqoff(&priv->napi); } } @@ -916,7 +916,7 @@ static 
irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id) if (likely(napi_schedule_prep(&txr->napi))) { intrl2_1_mask_set(priv, BIT(ring)); - __napi_schedule(&txr->napi); + __napi_schedule_irqoff(&txr->napi); } } @@ -1117,7 +1117,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev) { netdev_warn(dev, "transmit timeout!\n"); - dev->trans_start = jiffies; + netif_trans_update(dev); dev->stats.tx_errors++; netif_tx_wake_all_queues(dev); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 99b30a952b38..ee5f431ab32a 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1515,7 +1515,7 @@ static int bgmac_mii_register(struct bgmac *bgmac) phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link, PHY_INTERFACE_MODE_MII); if (IS_ERR(phy_dev)) { - bgmac_err(bgmac, "PHY connecton failed\n"); + bgmac_err(bgmac, "PHY connection failed\n"); err = PTR_ERR(phy_dev); goto err_unregister_bus; } @@ -1572,6 +1572,11 @@ static int bgmac_probe(struct bcma_device *core) dev_warn(&core->dev, "Using random MAC: %pM\n", mac); } + /* This (reset &) enable is not preset in specs or reference driver but + * Broadcom does it in arch PCI code when enabling fake PCI device. + */ + bcma_core_enable(core, 0); + /* Allocation and references */ net_dev = alloc_etherdev(sizeof(*bgmac)); if (!net_dev) diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 4fbb093e0d84..9a03c142b742 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -199,9 +199,9 @@ #define BGMAC_CMDCFG_TAI 0x00000200 #define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */ #define BGMAC_CMDCFG_HD_SHIFT 10 -#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for other revs */ -#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, only for core rev 4 */ -#define BGMAC_CMDCFG_SR(rev) ((rev == 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) +#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */ +#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */ +#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0) #define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */ #define BGMAC_CMDCFG_AE 0x00400000 #define BGMAC_CMDCFG_CFE 0x00800000 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 12a009d720cd..5a0dca3e6ef6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
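The bcmsysport interrupt handlers above move from __napi_schedule() to __napi_schedule_irqoff(), which skips a redundant local_irq_save()/restore() pair; that is only safe because these are hardirq handlers where interrupts are already disabled. The RX poll loop likewise switches to napi_complete_done(napi, work_done), letting the core factor the amount of work done into its polling decisions. The general shape of the ISR side (sketch only; foo_mask_rx_irq() is a made-up stand-in for masking the interrupt source):

        static irqreturn_t foo_rx_isr(int irq, void *dev_id)
        {
                struct foo_priv *priv = dev_id;

                if (likely(napi_schedule_prep(&priv->napi))) {
                        foo_mask_rx_irq(priv);  /* quiesce the source */
                        __napi_schedule_irqoff(&priv->napi);
                }
                return IRQ_HANDLED;
        }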
* - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -78,6 +78,7 @@ enum board_idx { BCM57402, BCM57404, BCM57406, + BCM57314, BCM57304_VF, BCM57404_VF, }; @@ -92,6 +93,7 @@ static const struct { { "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" }, { "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" }, { "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" }, + { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" }, { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" }, { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" }, }; @@ -103,6 +105,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 }, { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 }, { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 }, + { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 }, #ifdef CONFIG_BNXT_SRIOV { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF }, { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF }, @@ -118,6 +121,13 @@ static const u16 bnxt_vf_req_snif[] = { HWRM_CFA_L2_FILTER_ALLOC, }; +static const u16 bnxt_async_events_arr[] = { + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD, + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED, + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, +}; + static bool bnxt_vf_pciid(enum board_idx idx) { return (idx == BCM57304_VF || idx == BCM57404_VF); @@ -581,12 +591,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, struct page *page; dma_addr_t mapping; u16 sw_prod = rxr->rx_sw_agg_prod; + unsigned int offset = 0; - page = alloc_page(gfp); - if (!page) - return -ENOMEM; + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { + page = rxr->rx_page; + if (!page) { + page = alloc_page(gfp); + if (!page) + return -ENOMEM; + rxr->rx_page = page; + rxr->rx_page_offset = 0; + } + offset = rxr->rx_page_offset; + rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; + if (rxr->rx_page_offset == PAGE_SIZE) + rxr->rx_page = NULL; + else + get_page(page); + } else { + page = alloc_page(gfp); + if (!page) + return -ENOMEM; + } - mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE, + mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE); if (dma_mapping_error(&pdev->dev, mapping)) { __free_page(page); @@ -601,6 +629,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod); rx_agg_buf->page = page; + rx_agg_buf->offset = offset; rx_agg_buf->mapping = mapping; rxbd->rx_bd_haddr = cpu_to_le64(mapping); rxbd->rx_bd_opaque = sw_prod; @@ -642,6 +671,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons, page = cons_rx_buf->page; cons_rx_buf->page = NULL; prod_rx_buf->page = page; + prod_rx_buf->offset = cons_rx_buf->offset; prod_rx_buf->mapping = cons_rx_buf->mapping; @@ -709,7 +739,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT; cons_rx_buf = &rxr->rx_agg_ring[cons]; - skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len); + skb_fill_page_desc(skb, i, cons_rx_buf->page, + cons_rx_buf->offset, frag_len); __clear_bit(cons, rxr->rx_agg_bmap); /* It is possible for bnxt_alloc_rx_page() to allocate @@ -740,7 +771,7 @@ 
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi, return NULL; } - dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE, + dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE); skb->data_len += frag_len; @@ -792,6 +823,46 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data, return skb; } +static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi, + u32 *raw_cons, void *cmp) +{ + struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; + struct rx_cmp *rxcmp = cmp; + u32 tmp_raw_cons = *raw_cons; + u8 cmp_type, agg_bufs = 0; + + cmp_type = RX_CMP_TYPE(rxcmp); + + if (cmp_type == CMP_TYPE_RX_L2_CMP) { + agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & + RX_CMP_AGG_BUFS) >> + RX_CMP_AGG_BUFS_SHIFT; + } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) { + struct rx_tpa_end_cmp *tpa_end = cmp; + + agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) & + RX_TPA_END_CMP_AGG_BUFS) >> + RX_TPA_END_CMP_AGG_BUFS_SHIFT; + } + + if (agg_bufs) { + if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons)) + return -EBUSY; + } + *raw_cons = tmp_raw_cons; + return 0; +} + +static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + if (!rxr->bnapi->in_reset) { + rxr->bnapi->in_reset = true; + set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + } + rxr->rx_next_cons = 0xffff; +} + static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, struct rx_tpa_start_cmp *tpa_start, struct rx_tpa_start_cmp_ext *tpa_start1) @@ -809,6 +880,11 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, prod_rx_buf = &rxr->rx_buf_ring[prod]; tpa_info = &rxr->rx_tpa[agg_id]; + if (unlikely(cons != rxr->rx_next_cons)) { + bnxt_sched_reset(bp, rxr); + return; + } + prod_rx_buf->data = tpa_info->data; mapping = tpa_info->mapping; @@ -846,6 +922,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, rxr->rx_prod = NEXT_RX(prod); cons = NEXT_RX(cons); + rxr->rx_next_cons = NEXT_RX(cons); cons_rx_buf = &rxr->rx_buf_ring[cons]; bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data); @@ -959,6 +1036,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, dma_addr_t mapping; struct sk_buff *skb; + if (unlikely(bnapi->in_reset)) { + int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end); + + if (rc < 0) + return ERR_PTR(-EBUSY); + return NULL; + } + tpa_info = &rxr->rx_tpa[agg_id]; data = tpa_info->data; prefetch(data); @@ -1125,6 +1210,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, cons = rxcmp->rx_cmp_opaque; rx_buf = &rxr->rx_buf_ring[cons]; data = rx_buf->data; + if (unlikely(cons != rxr->rx_next_cons)) { + int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp); + + bnxt_sched_reset(bp, rxr); + return rc1; + } prefetch(data); agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >> @@ -1224,6 +1315,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, next_rx: rxr->rx_prod = NEXT_RX(prod); + rxr->rx_next_cons = NEXT_RX(cons); next_rx_no_prod: *raw_cons = tmp_raw_cons; @@ -1231,6 +1323,10 @@ next_rx_no_prod: return rc; } +#define BNXT_GET_EVENT_PORT(data) \ + ((data) & \ + HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) + static int bnxt_async_event_process(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl) { @@ -1238,12 +1334,40 @@ static int bnxt_async_event_process(struct bnxt *bp, /* TODO CHIMP_FW: Define event id's for link 
change, error etc */ switch (event_id) { + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: { + u32 data1 = le32_to_cpu(cmpl->event_data1); + struct bnxt_link_info *link_info = &bp->link_info; + + if (BNXT_VF(bp)) + goto async_event_process_exit; + if (data1 & 0x20000) { + u16 fw_speed = link_info->force_link_speed; + u32 speed = bnxt_fw_to_ethtool_speed(fw_speed); + + netdev_warn(bp->dev, "Link speed %d no longer supported\n", + speed); + } + /* fall thru */ + } case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); break; case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); break; + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: { + u32 data1 = le32_to_cpu(cmpl->event_data1); + u16 port_id = BNXT_GET_EVENT_PORT(data1); + + if (BNXT_VF(bp)) + break; + + if (bp->pf.port_id != port_id) + break; + + set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event); + break; + } default: netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", event_id); @@ -1367,6 +1491,10 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) if (!TX_CMP_VALID(txcmp, raw_cons)) break; + /* The valid test of the entry must be done first before + * reading any further. + */ + dma_rmb(); if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) { tx_pkts++; /* return full budget so NAPI will complete. */ @@ -1584,13 +1712,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp) dma_unmap_page(&pdev->dev, dma_unmap_addr(rx_agg_buf, mapping), - PAGE_SIZE, PCI_DMA_FROMDEVICE); + BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE); rx_agg_buf->page = NULL; __clear_bit(j, rxr->rx_agg_bmap); __free_page(page); } + if (rxr->rx_page) { + __free_page(rxr->rx_page); + rxr->rx_page = NULL; + } } } @@ -1973,7 +2105,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) return 0; - type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) | + type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; bnxt_init_rxbd_pages(ring, type); @@ -2164,7 +2296,7 @@ void bnxt_set_ring_params(struct bnxt *bp) bp->rx_agg_nr_pages = 0; if (bp->flags & BNXT_FLAG_TPA) - agg_factor = 4; + agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE); bp->flags &= ~BNXT_FLAG_JUMBO; if (rx_space > PAGE_SIZE) { @@ -2457,6 +2589,7 @@ static void bnxt_clear_ring_indices(struct bnxt *bp) rxr->rx_prod = 0; rxr->rx_agg_prod = 0; rxr->rx_sw_agg_prod = 0; + rxr->rx_next_cons = 0; } } } @@ -2638,7 +2771,7 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, int timeout, bool silent) { - int i, intr_process, rc; + int i, intr_process, rc, tmo_count; struct input *req = msg; u32 *data = msg; __le32 *resp_len, *valid; @@ -2667,11 +2800,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, timeout = DFLT_HWRM_CMD_TIMEOUT; i = 0; + tmo_count = timeout * 40; if (intr_process) { /* Wait until hwrm response cmpl interrupt is processed */ while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && - i++ < timeout) { - usleep_range(600, 800); + i++ < tmo_count) { + usleep_range(25, 40); } if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { @@ -2682,30 +2816,30 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } else { /* Check if response len is updated */ resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; - for (i = 0; i < timeout; i++) 
{ + for (i = 0; i < tmo_count; i++) { len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> HWRM_RESP_LEN_SFT; if (len) break; - usleep_range(600, 800); + usleep_range(25, 40); } - if (i >= timeout) { + if (i >= tmo_count) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", timeout, le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), *resp_len); + le16_to_cpu(req->seq_id), len); return -1; } /* Last word of resp contains valid bit */ valid = bp->hwrm_cmd_resp_addr + len - 4; - for (i = 0; i < timeout; i++) { + for (i = 0; i < 5; i++) { if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK) break; - usleep_range(600, 800); + udelay(1); } - if (i >= timeout) { + if (i >= 5) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", timeout, le16_to_cpu(req->req_type), le16_to_cpu(req->seq_id), len, *valid); @@ -2751,6 +2885,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) { struct hwrm_func_drv_rgtr_input req = {0}; int i; + DECLARE_BITMAP(async_events_bmap, 256); + u32 *events = (u32 *)async_events_bmap; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); @@ -2759,11 +2895,14 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) FUNC_DRV_RGTR_REQ_ENABLES_VER | FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); - /* TODO: current async event fwd bits are not defined and the firmware - * only checks if it is non-zero to enable async event forwarding - */ - req.async_event_fwd[0] |= cpu_to_le32(1); - req.os_type = cpu_to_le16(1); + memset(async_events_bmap, 0, sizeof(async_events_bmap)); + for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) + __set_bit(bnxt_async_events_arr[i], async_events_bmap); + + for (i = 0; i < 8; i++) + req.async_event_fwd[i] |= cpu_to_le32(events[i]); + + req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); req.ver_maj = DRV_VER_MAJ; req.ver_min = DRV_VER_MIN; req.ver_upd = DRV_VER_UPD; @@ -3020,12 +3159,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) /* Number of segs are log2 units, and first packet is not * included as part of this units. 
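The bnxt hunks in this region (the rx_page allocation earlier, ring init, and the TPA segment math that continues just below) switch from PAGE_SIZE to BNXT_RX_PAGE_SIZE so that kernels built with 64K pages do not post oversized aggregation buffers to the hardware. The allocation side carves one page into several receive buffers, taking a page reference per buffer; condensed from the bnxt_alloc_rx_page() hunk above, with error paths trimmed:

        struct page *page = rxr->rx_page;

        if (!page) {                            /* start a fresh page */
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
                rxr->rx_page = page;
                rxr->rx_page_offset = 0;
        }
        offset = rxr->rx_page_offset;
        rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
        if (rxr->rx_page_offset == PAGE_SIZE)
                rxr->rx_page = NULL;            /* page fully carved up */
        else
                get_page(page);                 /* one ref per buffer */
        /* ... dma_map_page(page, offset, BNXT_RX_PAGE_SIZE, ...) ... */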
*/ - if (mss <= PAGE_SIZE) { - n = PAGE_SIZE / mss; + if (mss <= BNXT_RX_PAGE_SIZE) { + n = BNXT_RX_PAGE_SIZE / mss; nsegs = (MAX_SKB_FRAGS - 1) * n; } else { - n = mss / PAGE_SIZE; - if (mss & (PAGE_SIZE - 1)) + n = mss / BNXT_RX_PAGE_SIZE; + if (mss & (BNXT_RX_PAGE_SIZE - 1)) n++; nsegs = (MAX_SKB_FRAGS - n) / n; } @@ -3726,7 +3865,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) pf->fw_fid = le16_to_cpu(resp->fid); pf->port_id = le16_to_cpu(resp->port_id); - memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN); + memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN); memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN); pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings); @@ -3751,7 +3890,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) struct bnxt_vf_info *vf = &bp->vf; vf->fw_fid = le16_to_cpu(resp->fid); - memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN); + memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN); if (is_valid_ether_addr(vf->mac_addr)) /* overwrite netdev dev_adr with admin VF MAC */ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); @@ -3842,6 +3981,8 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output)); + bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 | + resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd; if (resp->hwrm_intf_maj < 1) { netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n", resp->hwrm_intf_maj, resp->hwrm_intf_min, @@ -4013,9 +4154,11 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp) } static int bnxt_cfg_rx_mode(struct bnxt *); +static bool bnxt_mc_list_updated(struct bnxt *, u32 *); static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; int rc = 0; if (irq_re_init) { @@ -4071,13 +4214,22 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc); goto err_out; } - bp->vnic_info[0].uc_filter_count = 1; + vnic->uc_filter_count = 1; - bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; + vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp)) - bp->vnic_info[0].rx_mask |= - CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; + + if (bp->dev->flags & IFF_ALLMULTI) { + vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; + vnic->mc_list_count = 0; + } else { + u32 mask = 0; + + bnxt_mc_list_updated(bp, &mask); + vnic->rx_mask |= mask; + } rc = bnxt_cfg_rx_mode(bp); if (rc) @@ -4309,7 +4461,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp) if (bp->flags & BNXT_FLAG_MSIX_CAP) rc = bnxt_setup_msix(bp); - if (!(bp->flags & BNXT_FLAG_USING_MSIX)) { + if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) { /* fallback to INTA */ rc = bnxt_setup_inta(bp); } @@ -4422,6 +4574,7 @@ static void bnxt_enable_napi(struct bnxt *bp) int i; for (i = 0; i < bp->cp_nr_rings; i++) { + bp->bnapi[i]->in_reset = false; bnxt_enable_poll(bp->bnapi[i]); napi_enable(&bp->bnapi[i]->napi); } @@ -4486,12 +4639,49 @@ static void bnxt_report_link(struct bnxt *bp) speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed); netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n", speed, duplex, flow_ctrl); + if (bp->flags & BNXT_FLAG_EEE_CAP) + netdev_info(bp->dev, "EEE is %s\n", + bp->eee.eee_active ? 
"active" : + "not active"); } else { netif_carrier_off(bp->dev); netdev_err(bp->dev, "NIC Link is Down\n"); } } +static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_port_phy_qcaps_input req = {0}; + struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + + if (bp->hwrm_spec_code < 0x10201) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + goto hwrm_phy_qcaps_exit; + + if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) { + struct ethtool_eee *eee = &bp->eee; + u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); + + bp->flags |= BNXT_FLAG_EEE_CAP; + eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) & + PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK; + bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) & + PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK; + } + +hwrm_phy_qcaps_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) { int rc = 0; @@ -4523,7 +4713,6 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) else link_info->link_speed = 0; link_info->force_link_speed = le16_to_cpu(resp->force_link_speed); - link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed); link_info->support_speeds = le16_to_cpu(resp->support_speeds); link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); link_info->lp_auto_link_speeds = @@ -4533,9 +4722,47 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->phy_ver[1] = resp->phy_min; link_info->phy_ver[2] = resp->phy_bld; link_info->media_type = resp->media_type; - link_info->transceiver = resp->transceiver_type; - link_info->phy_addr = resp->phy_addr; + link_info->phy_type = resp->phy_type; + link_info->transceiver = resp->xcvr_pkg_type; + link_info->phy_addr = resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; + link_info->module_status = resp->module_status; + + if (bp->flags & BNXT_FLAG_EEE_CAP) { + struct ethtool_eee *eee = &bp->eee; + u16 fw_speeds; + + eee->eee_active = 0; + if (resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) { + eee->eee_active = 1; + fw_speeds = le16_to_cpu( + resp->link_partner_adv_eee_link_speed_mask); + eee->lp_advertised = + _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + } + + /* Pull initial EEE config */ + if (!chng_link_state) { + if (resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED) + eee->eee_enabled = 1; + fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask); + eee->advertised = + _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + + if (resp->eee_config_phy_addr & + PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) { + __le32 tmr; + + eee->tx_lpi_enabled = 1; + tmr = resp->xcvr_identifier_type_tx_lpi_timer; + eee->tx_lpi_timer = le32_to_cpu(tmr) & + PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK; + } + } + } /* TODO: need to add more logic to report VF link */ if (chng_link_state) { if (link_info->phy_link_status == BNXT_LINK_LINK) @@ -4552,10 +4779,40 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) return 0; } +static void bnxt_get_port_module_status(struct bnxt *bp) +{ + struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp; + u8 module_status; + + if (bnxt_update_link(bp, true)) + return; + + 
module_status = link_info->module_status; + switch (module_status) { + case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX: + case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: + case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG: + netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n", + bp->pf.port_id); + if (bp->hwrm_spec_code >= 0x10201) { + netdev_warn(bp->dev, "Module part number %s\n", + resp->phy_vendor_partnumber); + } + if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX) + netdev_warn(bp->dev, "TX is disabled\n"); + if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN) + netdev_warn(bp->dev, "SFP+ module is shutdown\n"); + } +} + static void bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) { if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) { + if (bp->hwrm_spec_code >= 0x10201) + req->auto_pause = + PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX) req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX; if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX) @@ -4569,6 +4826,11 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX; req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE); + if (bp->hwrm_spec_code >= 0x10201) { + req->auto_pause = req->force_pause; + req->enables |= cpu_to_le32( + PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE); + } } } @@ -4581,7 +4843,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, if (autoneg & BNXT_AUTONEG_SPEED) { req->auto_mode |= - PORT_PHY_CFG_REQ_AUTO_MODE_MASK; + PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK; req->enables |= cpu_to_le32( PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK); @@ -4595,9 +4857,6 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE); } - /* currently don't support half duplex */ - req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL; - req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX); /* tell chimp that the setting takes effect immediately */ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); } @@ -4632,7 +4891,30 @@ int bnxt_hwrm_set_pause(struct bnxt *bp) return rc; } -int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause) +static void bnxt_hwrm_set_eee(struct bnxt *bp, + struct hwrm_port_phy_cfg_input *req) +{ + struct ethtool_eee *eee = &bp->eee; + + if (eee->eee_enabled) { + u16 eee_speeds; + u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE; + + if (eee->tx_lpi_enabled) + flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE; + else + flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE; + + req->flags |= cpu_to_le32(flags); + eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised); + req->eee_link_speed_mask = cpu_to_le16(eee_speeds); + req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer); + } else { + req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE); + } +} + +int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) { struct hwrm_port_phy_cfg_input req = {0}; @@ -4641,14 +4923,57 @@ int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause) bnxt_hwrm_set_pause_common(bp, &req); bnxt_hwrm_set_link_common(bp, &req); + + if (set_eee) + bnxt_hwrm_set_eee(bp, &req); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_hwrm_shutdown_link(struct bnxt *bp) +{ + struct hwrm_port_phy_cfg_input req = {0}; + + if (BNXT_VF(bp)) + return 0; + + if (pci_num_vf(bp->pdev)) + return 0; 
+ + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); + req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + +static bool bnxt_eee_config_ok(struct bnxt *bp) +{ + struct ethtool_eee *eee = &bp->eee; + struct bnxt_link_info *link_info = &bp->link_info; + + if (!(bp->flags & BNXT_FLAG_EEE_CAP)) + return true; + + if (eee->eee_enabled) { + u32 advertising = + _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); + + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + eee->eee_enabled = 0; + return false; + } + if (eee->advertised & ~advertising) { + eee->advertised = advertising & eee->supported; + return false; + } + } + return true; +} + static int bnxt_update_phy_setting(struct bnxt *bp) { int rc; bool update_link = false; bool update_pause = false; + bool update_eee = false; struct bnxt_link_info *link_info = &bp->link_info; rc = bnxt_update_link(bp, true); @@ -4658,7 +4983,8 @@ static int bnxt_update_phy_setting(struct bnxt *bp) return rc; } if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && - link_info->auto_pause_setting != link_info->req_flow_ctrl) + (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) != + link_info->req_flow_ctrl) update_pause = true; if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && link_info->force_pause_setting != link_info->req_flow_ctrl) @@ -4677,8 +5003,11 @@ static int bnxt_update_phy_setting(struct bnxt *bp) update_link = true; } + if (!bnxt_eee_config_ok(bp)) + update_eee = true; + if (update_link) - rc = bnxt_hwrm_set_link_setting(bp, update_pause); + rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee); else if (update_pause) rc = bnxt_hwrm_set_pause(bp); if (rc) { @@ -4769,7 +5098,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* Enable TX queues */ bnxt_tx_enable(bp); mod_timer(&bp->timer, jiffies + bp->current_interval); - bnxt_update_link(bp, true); + /* Poll link status and check for SFP+ module status */ + bnxt_get_port_module_status(bp); return 0; @@ -4869,6 +5199,7 @@ static int bnxt_close(struct net_device *dev) struct bnxt *bp = netdev_priv(dev); bnxt_close_nic(bp, true, true); + bnxt_hwrm_shutdown_link(bp); return 0; } @@ -5350,6 +5681,9 @@ static void bnxt_sp_task(struct work_struct *work) rtnl_unlock(); } + if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) + bnxt_get_port_module_status(bp); + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) bnxt_hwrm_port_qstats(bp); @@ -5480,10 +5814,9 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; -#ifdef CONFIG_BNXT_SRIOV - if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr)) - return -EADDRNOTAVAIL; -#endif + rc = bnxt_approve_mac(bp, addr->sa_data); + if (rc) + return rc; if (ether_addr_equal(addr->sa_data, dev->dev_addr)) return 0; @@ -5818,6 +6151,13 @@ static int bnxt_probe_phy(struct bnxt *bp) int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; + rc = bnxt_hwrm_phy_qcaps(bp); + if (rc) { + netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n", + rc); + return rc; + } + rc = bnxt_update_link(bp, false); if (rc) { netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", @@ -5827,15 +6167,24 @@ /* initialize the ethtool setting copy with NVM settings */ if (BNXT_AUTO_MODE(link_info->auto_mode)) { - link_info->autoneg = BNXT_AUTONEG_SPEED | -
BNXT_AUTONEG_FLOW_CTRL; + link_info->autoneg = BNXT_AUTONEG_SPEED; + if (bp->hwrm_spec_code >= 0x10201) { + if (link_info->auto_pause_setting & + PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE) + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + } else { + link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; + } link_info->advertising = link_info->auto_link_speeds; - link_info->req_flow_ctrl = link_info->auto_pause_setting; } else { link_info->req_link_speed = link_info->force_link_speed; link_info->req_duplex = link_info->duplex_setting; - link_info->req_flow_ctrl = link_info->force_pause_setting; } + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + link_info->req_flow_ctrl = + link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH; + else + link_info->req_flow_ctrl = link_info->force_pause_setting; return rc; } @@ -5910,6 +6259,22 @@ static int bnxt_set_dflt_rings(struct bnxt *bp) return rc; } +static void bnxt_parse_log_pcie_link(struct bnxt *bp) +{ + enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; + enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; + + if (pcie_get_minimum_link(bp->pdev, &speed, &width) || + speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) + netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); + else + netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n", + speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : + speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : + "Unknown", width); +} + static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int version_printed; @@ -5947,14 +6312,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | - NETIF_F_RXHASH | + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO; dev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE | - NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT; + NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | + NETIF_F_GSO_PARTIAL; + dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_GRE_CSUM; dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA; dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX; @@ -6025,6 +6395,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) board_info[ent->driver_data].name, (long)pci_resource_start(pdev, 0), dev->dev_addr); + bnxt_parse_log_pcie_link(bp); + return 0; init_err: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 709b95b8fcba..2824d65b2e35 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
* - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -11,7 +11,7 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.0.0" +#define DRV_MODULE_VERSION "1.2.0" #define DRV_VER_MAJ 1 #define DRV_VER_MIN 0 @@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext { #define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT) +/* The RXBD length is 16-bit so we can only support page sizes < 64K */ +#if (PAGE_SHIFT > 15) +#define BNXT_RX_PAGE_SHIFT 15 +#else +#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT +#endif + +#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT) + #define BNXT_MIN_PKT_SIZE 45 #define BNXT_NUM_TESTS(bp) 0 @@ -416,10 +425,17 @@ struct rx_tpa_end_cmp_ext { #define MAX_TPA 64 +#if (BNXT_PAGE_SHIFT == 16) +#define MAX_RX_PAGES 1 +#define MAX_RX_AGG_PAGES 4 +#define MAX_TX_PAGES 1 +#define MAX_CP_PAGES 8 +#else #define MAX_RX_PAGES 8 #define MAX_RX_AGG_PAGES 32 #define MAX_TX_PAGES 8 #define MAX_CP_PAGES 64 +#endif #define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd)) #define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd)) @@ -506,6 +522,7 @@ struct bnxt_sw_rx_bd { struct bnxt_sw_rx_agg_bd { struct page *page; + unsigned int offset; dma_addr_t mapping; }; @@ -574,6 +591,7 @@ struct bnxt_rx_ring_info { u16 rx_prod; u16 rx_agg_prod; u16 rx_sw_agg_prod; + u16 rx_next_cons; void __iomem *rx_doorbell; void __iomem *rx_agg_doorbell; @@ -586,6 +604,9 @@ struct bnxt_rx_ring_info { unsigned long *rx_agg_bmap; u16 rx_agg_bmap_size; + struct page *rx_page; + unsigned int rx_page_offset; + dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; @@ -623,6 +644,7 @@ struct bnxt_napi { #ifdef CONFIG_NET_RX_BUSY_POLL atomic_t poll_state; #endif + bool in_reset; }; #ifdef CONFIG_NET_RX_BUSY_POLL @@ -759,6 +781,7 @@ struct bnxt_ntuple_filter { }; struct bnxt_link_info { + u8 phy_type; u8 media_type; u8 transceiver; u8 phy_addr; @@ -788,7 +811,7 @@ struct bnxt_link_info { #define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS #define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED #define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW -#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK +#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK #define PHY_VER_LEN 3 u8 phy_ver[PHY_VER_LEN]; u16 link_speed; @@ -813,9 +836,9 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB u16 lp_auto_link_speeds; - u16 auto_link_speed; u16 force_link_speed; u32 preemphasis; + u8 module_status; /* copy of requested setting from ethtool cmd */ u8 autoneg; @@ -826,6 +849,7 @@ struct bnxt_link_info { u16 req_link_speed; u32 advertising; bool force_link_chng; + /* a copy of phy_qcfg output used to report link * info to VF */ @@ -875,6 +899,7 @@ struct bnxt { #define BNXT_FLAG_RFS 0x100 #define BNXT_FLAG_SHARED_RINGS 0x200 #define BNXT_FLAG_PORT_STATS 0x400 + #define BNXT_FLAG_EEE_CAP 0x1000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -940,6 +965,7 @@ struct bnxt { u32 msg_enable; + u32 hwrm_spec_code; u16 hwrm_cmd_seq; u32 hwrm_intr_seq_id; void *hwrm_cmd_resp_addr; @@ -991,6 +1017,7 @@ struct bnxt { #define BNXT_RST_RING_SP_EVENT 7 #define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 
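The BNXT_RX_PAGE_SHIFT clamp above exists because, as the comment in the hunk notes, the RXBD length field is 16-bit, so a full 64K page cannot be described in one descriptor. A minimal standalone sketch of the arithmetic, with PAGE_SHIFT defined locally to emulate a 64K-page kernel configuration (the define and printf scaffolding are illustrative, only the clamp itself comes from the patch):

/* Demonstrates the 64K-page clamp from the bnxt.h hunk above. */
#include <stdio.h>

#define PAGE_SHIFT 16                 /* emulate a 64K-page architecture */

#if (PAGE_SHIFT > 15)
#define BNXT_RX_PAGE_SHIFT 15
#else
#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
#endif

#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)

int main(void)
{
    /* 1 << 15 = 32768, which still fits the 16-bit RXBD length field */
    printf("rx buffer page size = %d bytes (16-bit length max 65535)\n",
           BNXT_RX_PAGE_SIZE);
    return 0;
}

The companion BNXT_PAGE_SHIFT == 16 branch in the same hunk halves the MAX_*_PAGES counts for the same reason: larger pages hold more descriptors each, so fewer pages are needed per ring.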
#define BNXT_PERIODIC_STATS_SP_EVENT 9 +#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10 struct bnxt_pf_info pf; #ifdef CONFIG_BNXT_SRIOV @@ -1011,6 +1038,9 @@ struct bnxt { int ntp_fltr_count; struct bnxt_link_info link_info; + struct ethtool_eee eee; + u32 lpi_tmr_lo; + u32 lpi_tmr_hi; }; #ifdef CONFIG_NET_RX_BUSY_POLL @@ -1100,6 +1130,16 @@ static inline void bnxt_disable_poll(struct bnxt_napi *bnapi) #endif +#define I2C_DEV_ADDR_A0 0xa0 +#define I2C_DEV_ADDR_A2 0xa2 +#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e +#define SFP_EEPROM_SFF_8472_COMP_SIZE 1 +#define SFF_MODULE_ID_SFP 0x3 +#define SFF_MODULE_ID_QSFP 0xc +#define SFF_MODULE_ID_QSFP_PLUS 0xd +#define SFF_MODULE_ID_QSFP28 0x11 +#define BNXT_MAX_PHY_I2C_RESP_SIZE 64 + void bnxt_set_ring_params(struct bnxt *); void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); int _hwrm_send_message(struct bnxt *, void *, u32, int); @@ -1108,7 +1148,7 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_set_coal(struct bnxt *); int bnxt_hwrm_func_qcaps(struct bnxt *); int bnxt_hwrm_set_pause(struct bnxt *); -int bnxt_hwrm_set_link_setting(struct bnxt *, bool); +int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_close_nic(struct bnxt *, bool, bool); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 2e472f6dbf2d..a38cb047b540 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -327,7 +327,11 @@ static void bnxt_get_channels(struct net_device *dev, bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); channel->max_combined = max_rx_rings; - bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false); + if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) { + max_rx_rings = 0; + max_tx_rings = 0; + } + tcs = netdev_get_num_tc(dev); if (tcs > 1) max_tx_rings /= tcs; @@ -597,7 +601,7 @@ static void bnxt_get_drvinfo(struct net_device *dev, kfree(pkglog); } -static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) +u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) { u32 speed_mask = 0; @@ -698,10 +702,23 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (link_info->phy_link_status == BNXT_LINK_LINK) cmd->lp_advertising = bnxt_fw_to_ethtool_lp_adv(link_info); + ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); + if (!netif_carrier_ok(dev)) + cmd->duplex = DUPLEX_UNKNOWN; + else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; + else + cmd->duplex = DUPLEX_HALF; } else { cmd->autoneg = AUTONEG_DISABLE; cmd->advertising = 0; + ethtool_speed = + bnxt_fw_to_ethtool_speed(link_info->req_link_speed); + cmd->duplex = DUPLEX_HALF; + if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL) + cmd->duplex = DUPLEX_FULL; } + ethtool_cmd_speed_set(cmd, ethtool_speed); cmd->port = PORT_NONE; if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { @@ -719,16 +736,8 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->port = 
PORT_FIBRE; } - if (link_info->phy_link_status == BNXT_LINK_LINK) { - if (link_info->duplex & BNXT_LINK_DUPLEX_FULL) - cmd->duplex = DUPLEX_FULL; - } else { - cmd->duplex = DUPLEX_UNKNOWN; - } - ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed); - ethtool_cmd_speed_set(cmd, ethtool_speed); if (link_info->transceiver == - PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL) + PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL) cmd->transceiver = XCVR_INTERNAL; else cmd->transceiver = XCVR_EXTERNAL; @@ -739,31 +748,52 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed) { + struct bnxt *bp = netdev_priv(dev); + struct bnxt_link_info *link_info = &bp->link_info; + u16 support_spds = link_info->support_speeds; + u32 fw_speed = 0; + switch (ethtool_speed) { case SPEED_100: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB; + if (support_spds & BNXT_LINK_SPEED_MSK_100MB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB; + break; case SPEED_1000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB; + if (support_spds & BNXT_LINK_SPEED_MSK_1GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB; + break; case SPEED_2500: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB; + if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB; + break; case SPEED_10000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB; + if (support_spds & BNXT_LINK_SPEED_MSK_10GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB; + break; case SPEED_20000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB; + if (support_spds & BNXT_LINK_SPEED_MSK_20GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB; + break; case SPEED_25000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB; + if (support_spds & BNXT_LINK_SPEED_MSK_25GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB; + break; case SPEED_40000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB; + if (support_spds & BNXT_LINK_SPEED_MSK_40GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB; + break; case SPEED_50000: - return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB; + if (support_spds & BNXT_LINK_SPEED_MSK_50GB) + fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB; + break; default: netdev_err(dev, "unsupported speed!\n"); break; } - return 0; + return fw_speed; } -static u16 bnxt_get_fw_auto_link_speeds(u32 advertising) +u16 bnxt_get_fw_auto_link_speeds(u32 advertising) { u16 fw_speed_mask = 0; @@ -823,6 +853,16 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) */ set_pause = true; } else { + u16 fw_speed; + u8 phy_type = link_info->phy_type; + + if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET || + phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE || + link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { + netdev_err(dev, "10GBase-T devices must autoneg\n"); + rc = -EINVAL; + goto set_setting_exit; + } /* TODO: currently don't support half duplex */ if (cmd->duplex == DUPLEX_HALF) { netdev_err(dev, "HALF DUPLEX is not supported!\n"); @@ -833,14 +873,19 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) if (cmd->duplex == DUPLEX_UNKNOWN) cmd->duplex = DUPLEX_FULL; speed = ethtool_cmd_speed(cmd); - link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); + fw_speed = bnxt_get_fw_speed(dev, speed); + if (!fw_speed) { + rc = -EINVAL; + goto set_setting_exit; + } + link_info->req_link_speed = fw_speed; link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; link_info->autoneg = 0; 
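The reworked bnxt_get_fw_speed() above returns a firmware speed code only when the matching bit is set in the port's supported-speeds mask, and set_settings now rejects the request otherwise. A standalone sketch of the same check in table form; the speed codes and mask bits are copied from the bnxt_hsi.h hunks in this diff, while the table layout, get_fw_speed() helper, and main() are illustrative only:

/* Table-driven version of the supported-speed validation above. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct speed_map {
    uint32_t ethtool_speed;  /* SPEED_* value in Mbps */
    uint16_t fw_speed;       /* PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_* */
    uint16_t support_bit;    /* PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_* */
};

static const struct speed_map maps[] = {
    {   100, 0x001, 0x002 },   /* 100MB */
    {  1000, 0x00a, 0x008 },   /* 1GB   */
    { 10000, 0x064, 0x040 },   /* 10GB  */
    { 25000, 0x0fa, 0x100 },   /* 25GB  */
    { 40000, 0x190, 0x200 },   /* 40GB  */
    { 50000, 0x1f4, 0x400 },   /* 50GB  */
};

/* Returns 0 (invalid) when the port cannot do the requested speed;
 * the driver above turns that into -EINVAL instead of sending a
 * force-speed request the firmware would reject or misapply.
 */
static uint16_t get_fw_speed(uint16_t support_spds, uint32_t speed)
{
    for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
        if (maps[i].ethtool_speed == speed &&
            (support_spds & maps[i].support_bit))
            return maps[i].fw_speed;
    return 0;
}

int main(void)
{
    uint16_t supported = 0x040 | 0x100; /* a 10GB + 25GB capable port */

    printf("25000 -> 0x%x\n", get_fw_speed(supported, 25000)); /* 0xfa */
    printf("40000 -> 0x%x\n", get_fw_speed(supported, 40000)); /* 0, rejected */
    return 0;
}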
link_info->advertising = 0; } if (netif_running(dev)) - rc = bnxt_hwrm_set_link_setting(bp, set_pause); + rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); set_setting_exit: return rc; @@ -874,7 +919,9 @@ static int bnxt_set_pauseparam(struct net_device *dev, return -EINVAL; link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; - link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; + if (bp->hwrm_spec_code >= 0x10201) + link_info->req_flow_ctrl = + PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE; } else { /* when transitioning from auto pause to force pause, * force a link change */ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) link_info->force_link_chng = true; link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL; - link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH; + link_info->req_flow_ctrl = 0; } if (epause->rx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX; - else - link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX; if (epause->tx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; - else - link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX; if (netif_running(dev)) rc = bnxt_hwrm_set_pause(bp); @@ -1381,6 +1424,199 @@ static int bnxt_set_eeprom(struct net_device *dev, eeprom->len); } +static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct bnxt *bp = netdev_priv(dev); + struct ethtool_eee *eee = &bp->eee; + struct bnxt_link_info *link_info = &bp->link_info; + u32 advertising = + _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); + int rc = 0; + + if (BNXT_VF(bp)) + return 0; + + if (!(bp->flags & BNXT_FLAG_EEE_CAP)) + return -EOPNOTSUPP; + + if (!edata->eee_enabled) + goto eee_ok; + + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + netdev_warn(dev, "EEE requires autoneg\n"); + return -EINVAL; + } + if (edata->tx_lpi_enabled) { + if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || + edata->tx_lpi_timer < bp->lpi_tmr_lo)) { + netdev_warn(dev, "Valid LPI timer range is %d to %d microsecs\n", + bp->lpi_tmr_lo, bp->lpi_tmr_hi); + return -EINVAL; + } else if (!bp->lpi_tmr_hi) { + edata->tx_lpi_timer = eee->tx_lpi_timer; + } + } + if (!edata->advertised) { + edata->advertised = advertising & eee->supported; + } else if (edata->advertised & ~advertising) { + netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", + edata->advertised, advertising); + return -EINVAL; + } + + eee->advertised = edata->advertised; + eee->tx_lpi_enabled = edata->tx_lpi_enabled; + eee->tx_lpi_timer = edata->tx_lpi_timer; +eee_ok: + eee->eee_enabled = edata->eee_enabled; + + if (netif_running(dev)) + rc = bnxt_hwrm_set_link_setting(bp, false, true); + + return rc; +} + +static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct bnxt *bp = netdev_priv(dev); + + if (!(bp->flags & BNXT_FLAG_EEE_CAP)) + return -EOPNOTSUPP; + + *edata = bp->eee; + if (!bp->eee.eee_enabled) { + /* Preserve tx_lpi_timer so that the last value will be used + * by default when it is re-enabled. 
+ */ + edata->advertised = 0; + edata->tx_lpi_enabled = 0; + } + + if (!bp->eee.eee_active) + edata->lp_advertised = 0; + + return 0; +} + +static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, + u16 page_number, u16 start_addr, + u16 data_length, u8 *buf) +{ + struct hwrm_port_phy_i2c_read_input req = {0}; + struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; + int rc, byte_offset = 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); + req.i2c_slave_addr = i2c_addr; + req.page_number = cpu_to_le16(page_number); + req.port_id = cpu_to_le16(bp->pf.port_id); + do { + u16 xfer_size; + + xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); + data_length -= xfer_size; + req.page_offset = cpu_to_le16(start_addr + byte_offset); + req.data_length = xfer_size; + req.enables = cpu_to_le32(start_addr + byte_offset ? + PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); + if (!rc) + memcpy(buf + byte_offset, output->data, xfer_size); + mutex_unlock(&bp->hwrm_cmd_lock); + byte_offset += xfer_size; + } while (!rc && data_length > 0); + + return rc; +} + +static int bnxt_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) +{ + struct bnxt *bp = netdev_priv(dev); + struct hwrm_port_phy_i2c_read_input req = {0}; + struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; + int rc; + + /* No point in going further if phy status indicates + * module is not inserted or if it is powered down or + * if it is of type 10GBase-T + */ + if (bp->link_info.module_status > + PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) + return -EOPNOTSUPP; + + /* This feature is not supported in older firmware versions */ + if (bp->hwrm_spec_code < 0x10202) + return -EOPNOTSUPP; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); + req.i2c_slave_addr = I2C_DEV_ADDR_A0; + req.page_number = 0; + req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR); + req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE; + req.port_id = cpu_to_le16(bp->pf.port_id); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + u32 module_id = le32_to_cpu(output->data[0]); + + switch (module_id) { + case SFF_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + case SFF_MODULE_ID_QSFP: + case SFF_MODULE_ID_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case SFF_MODULE_ID_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + break; + default: + rc = -EOPNOTSUPP; + break; + } + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int bnxt_get_module_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct bnxt *bp = netdev_priv(dev); + u16 start = eeprom->offset, length = eeprom->len; + int rc; + + memset(data, 0, eeprom->len); + + /* Read A0 portion of the EEPROM */ + if (start < ETH_MODULE_SFF_8436_LEN) { + if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) + length = ETH_MODULE_SFF_8436_LEN - start; + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, + start, length, data); + if (rc) + return rc; + start += length; + data += length; + length = eeprom->len - length; + } + + /* Read A2 portion of the EEPROM */ + if (length) { + start -= 
ETH_MODULE_SFF_8436_LEN; + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start, + length, data); + } + return rc; +} + const struct ethtool_ops bnxt_ethtool_ops = { .get_settings = bnxt_get_settings, .set_settings = bnxt_set_settings, @@ -1409,4 +1645,8 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_eeprom = bnxt_get_eeprom, .set_eeprom = bnxt_set_eeprom, .get_link = bnxt_get_link, + .get_eee = bnxt_get_eee, + .set_eee = bnxt_set_eee, + .get_module_info = bnxt_get_module_info, + .get_module_eeprom = bnxt_get_module_eeprom, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index 98fa81e08b58..3abc03b60dbc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -12,6 +12,8 @@ extern const struct ethtool_ops bnxt_ethtool_ops; +u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); u32 bnxt_fw_to_ethtool_speed(u16); +u16 bnxt_get_fw_auto_link_speeds(u32); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h index e0aac65c6d82..461675caaacd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 4badbedcb421..05e3c49a7677 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
* - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -104,6 +104,7 @@ struct hwrm_async_event_cmpl { #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0) @@ -111,6 +112,7 @@ struct hwrm_async_event_cmpl { #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0) #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0) __le32 event_data2; u8 opaque_v; @@ -141,6 +143,7 @@ struct hwrm_async_event_cmpl_link_status_change { #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0) #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1 #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL @@ -195,6 +198,9 @@ struct hwrm_async_event_cmpl_link_speed_change { #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1) + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16 }; @@ -237,6 +243,55 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed { __le32 event_data1; #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16 + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16) + #define 
HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16) + #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN +}; + +/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */ +struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0 +}; + +/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */ +struct hwrm_async_event_cmpl_link_speed_cfg_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL + #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL }; /* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */ @@ -363,6 +418,47 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change { #define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0 }; +/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */ +struct hwrm_async_event_cmpl_pf_vf_comm_status_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL + 
#define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL +}; + +/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */ +struct hwrm_async_event_cmpl_vf_cfg_change { + __le16 type; + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0 + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0) + __le16 event_id; + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0) + __le32 event_data2; + u8 opaque_v; + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL + #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL +}; + /* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */ struct hwrm_async_event_cmpl_hwrm_error { __le16 type; @@ -377,6 +473,7 @@ struct hwrm_async_event_cmpl_hwrm_error { #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0) #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0) #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0) + #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL u8 opaque_v; #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL @@ -387,12 +484,12 @@ struct hwrm_async_event_cmpl_hwrm_error { #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL }; -/* HW Resource Manager Specification 1.0.0 */ +/* HW Resource Manager Specification 1.2.2 */ #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 0 -#define HWRM_VERSION_UPDATE 0 +#define HWRM_VERSION_MINOR 2 +#define HWRM_VERSION_UPDATE 2 -#define HWRM_VERSION_STR "1.0.0" +#define HWRM_VERSION_STR "1.2.2" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. 
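The hwrm_spec_code comparisons sprinkled through this patch (>= 0x10201 for AUTONEG_PAUSE, >= 0x10202 for PHY I2C reads) are against the HWRM interface version bumped to 1.2.2 here, packed into one integer. A minimal standalone illustration; the maj << 16 | min << 8 | upd packing is assumed from the driver's version-query handling, which is not shown in this diff:

/* Shows why "spec >= 1.2.1" is written as "hwrm_spec_code >= 0x10201". */
#include <stdio.h>
#include <stdint.h>

static uint32_t spec_code(uint8_t maj, uint8_t min, uint8_t upd)
{
    return ((uint32_t)maj << 16) | ((uint32_t)min << 8) | upd;
}

int main(void)
{
    /* HWRM_VERSION_STR "1.2.2" from the header above */
    uint32_t fw = spec_code(1, 2, 2);

    printf("1.2.2 -> 0x%x\n", fw);                           /* 0x10202 */
    printf("AUTONEG_PAUSE supported: %d\n", fw >= 0x10201);  /* 1 */
    printf("PHY_I2C_READ supported:  %d\n", fw >= 0x10202);  /* 1 */
    return 0;
}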
@@ -444,7 +541,7 @@ struct cmd_nums { #define HWRM_FUNC_BUF_RGTR (0x1fUL) #define HWRM_PORT_PHY_CFG (0x20UL) #define HWRM_PORT_MAC_CFG (0x21UL) - #define RESERVED2 (0x22UL) + #define HWRM_PORT_TS_QUERY (0x22UL) #define HWRM_PORT_QSTATS (0x23UL) #define HWRM_PORT_LPBK_QSTATS (0x24UL) #define HWRM_PORT_CLR_STATS (0x25UL) @@ -452,6 +549,9 @@ struct cmd_nums { #define HWRM_PORT_PHY_QCFG (0x27UL) #define HWRM_PORT_MAC_QCFG (0x28UL) #define HWRM_PORT_BLINK_LED (0x29UL) + #define HWRM_PORT_PHY_QCAPS (0x2aUL) + #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL) + #define HWRM_PORT_PHY_I2C_READ (0x2cUL) #define HWRM_QUEUE_QPORTCFG (0x30UL) #define HWRM_QUEUE_QCFG (0x31UL) #define HWRM_QUEUE_CFG (0x32UL) @@ -531,6 +631,7 @@ struct cmd_nums { __le16 unused_0[3]; }; +/* Return Codes (8 bytes) */ struct ret_codes { __le16 error_code; #define HWRM_ERR_CODE_SUCCESS (0x0UL) @@ -875,10 +976,11 @@ struct hwrm_func_vf_cfg_input { #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL + #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL __le16 mtu; __le16 guest_vlan; __le16 async_event_cr; - __le16 unused_0[3]; + u8 dflt_mac_addr[6]; }; /* Output (16 bytes) */ @@ -917,7 +1019,8 @@ struct hwrm_func_qcaps_output { __le32 flags; #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL - u8 perm_mac_address[6]; + #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL + u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; __le16 max_tx_rings; @@ -942,6 +1045,67 @@ struct hwrm_func_qcaps_output { u8 valid; }; +/* hwrm_func_qcfg */ +/* Input (24 bytes) */ +struct hwrm_func_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + __le16 unused_0[3]; +}; + +/* Output (72 bytes) */ +struct hwrm_func_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 fid; + __le16 port_id; + __le16 vlan; + u8 unused_0; + u8 unused_1; + u8 mac_address[6]; + __le16 pci_id; + __le16 alloc_rsscos_ctx; + __le16 alloc_cmpl_rings; + __le16 alloc_tx_rings; + __le16 alloc_rx_rings; + __le16 alloc_l2_ctx; + __le16 alloc_vnics; + __le16 mtu; + __le16 mru; + __le16 stat_ctx_id; + u8 port_partition_type; + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF (0x0UL << 0) + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS (0x1UL << 0) + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 (0x2UL << 0) + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0) + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0) + #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0) + u8 unused_2; + __le16 dflt_vnic_id; + u8 unused_3; + u8 unused_4; + __le32 min_bw; + __le32 max_bw; + u8 evb_mode; + #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0) + #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0) + #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0) + u8 unused_5; + __le16 unused_6; + __le32 alloc_mcast_filters; + __le32 alloc_hw_ring_grps; + u8 unused_7; + u8 unused_8; + u8 unused_9; + u8 valid; +}; + /* hwrm_func_cfg */ /* Input (88 bytes) */ struct hwrm_func_cfg_input { @@ -1171,6 +1335,7 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0) #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0) #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0) + #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS (0x12UL << 0) #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 
(0x1dUL << 0) #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0) #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0) @@ -1302,6 +1467,7 @@ struct hwrm_func_drv_qver_output { #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0) #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0) #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0) + #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS (0x12UL << 0) #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0) #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0) #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0) @@ -1317,7 +1483,7 @@ struct hwrm_func_drv_qver_output { }; /* hwrm_port_phy_cfg */ -/* Input (48 bytes) */ +/* Input (56 bytes) */ struct hwrm_port_phy_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -1329,6 +1495,10 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL + #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL __le32 enables; #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL @@ -1339,6 +1509,8 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL + #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL + #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL __le16 port_id; __le16 force_link_speed; #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0) @@ -1350,12 +1522,14 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0) #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0) #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB (0x3e8UL << 0) + #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB (0xffffUL << 0) u8 auto_mode; #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0) #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0) #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) - #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK (0x4UL << 0) u8 auto_duplex; #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0) #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0) @@ -1363,6 +1537,7 @@ struct hwrm_port_phy_cfg_input { u8 auto_pause; #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL u8 unused_0; __le16 auto_link_speed; #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0) @@ -1374,6 +1549,8 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0) #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0) #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB (0x3e8UL << 0) + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB (0xffffUL << 0) __le16 auto_link_speed_mask; #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL @@ -1386,6 +1563,9 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL #define 
PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL u8 wirespeed; #define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0) #define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0) @@ -1398,7 +1578,20 @@ struct hwrm_port_phy_cfg_input { #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL u8 unused_1; __le32 preemphasis; - __le32 unused_2; + __le16 eee_link_speed_mask; + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL + u8 unused_2; + u8 unused_3; + __le32 tx_lpi_timer; + __le32 unused_4; + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0 }; /* Output (16 bytes) */ @@ -1426,7 +1619,7 @@ struct hwrm_port_phy_qcfg_input { __le16 unused_0[3]; }; -/* Output (48 bytes) */ +/* Output (96 bytes) */ struct hwrm_port_phy_qcfg_output { __le16 error_code; __le16 req_type; @@ -1447,6 +1640,8 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0) #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0) #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB (0x3e8UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB (0xffffUL << 0) u8 duplex; #define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0) #define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0) @@ -1465,6 +1660,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL __le16 force_link_speed; #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0) #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0) @@ -1475,15 +1673,18 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0) #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0) #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB (0x3e8UL << 0) + #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB (0xffffUL << 0) u8 auto_mode; #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0) #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0) #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) - #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK (0x4UL << 0) u8 auto_pause; #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL + #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL __le16 auto_link_speed; #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0) #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0) @@ -1494,6 +1695,8 @@ struct hwrm_port_phy_qcfg_output 
{ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0) #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0) #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB (0x3e8UL << 0) + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB (0xffffUL << 0) __le16 auto_link_speed_mask; #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL @@ -1506,6 +1709,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL u8 wirespeed; #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0) #define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0) @@ -1516,31 +1722,49 @@ struct hwrm_port_phy_qcfg_output { u8 force_pause; #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL - u8 reserved1; + u8 module_status; + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG (0x2UL << 0) + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN (0x3UL << 0) + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED (0x4UL << 0) + #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE (0xffUL << 0) __le32 preemphasis; u8 phy_maj; u8 phy_min; u8 phy_bld; u8 phy_type; - #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN (0x0UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR (0x1UL << 0) #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0) - #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0) - #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR (0x3UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR (0x4UL << 0) #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0) - #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX (0x6UL << 0) #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0) #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE (0x9UL << 0) + #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY (0xaUL << 0) u8 media_type; + #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN (0x0UL << 0) #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0) #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0) #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0) - u8 transceiver_type; - #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0) - #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0) - u8 phy_addr; + u8 xcvr_pkg_type; + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL (0x1UL << 0) + #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL (0x2UL << 0) + u8 eee_config_phy_addr; #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0 - u8 unused_2; + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL + #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL + #define 
PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5 + u8 parallel_detect; + #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL + #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL + #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1 __le16 link_partner_adv_speeds; #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL @@ -1553,15 +1777,48 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL u8 link_partner_adv_auto_mode; #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0) #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0) #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0) #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0) - #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0) + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0) u8 link_partner_adv_pause; #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL + __le16 adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le16 link_partner_adv_eee_link_speed_mask; + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL + #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL + __le32 xcvr_identifier_type_tx_lpi_timer; + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL + #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24 + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24) + #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24) + __le32 unused_1; + char phy_vendor_name[16]; + char phy_vendor_partnumber[16]; + __le32 unused_2; u8 unused_3; u8 unused_4; u8 unused_5; @@ -1569,7 +1826,7 @@ struct hwrm_port_phy_qcfg_output { }; /* hwrm_port_mac_cfg */ -/* Input (32 bytes) */ +/* Input (40 bytes) */ struct hwrm_port_mac_cfg_input { __le16 req_type; __le16 cmpl_ring; @@ -1581,6 +1838,10 @@ struct 
hwrm_port_mac_cfg_input { #define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL + #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL __le32 enables; #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL @@ -1588,6 +1849,8 @@ struct hwrm_port_mac_cfg_input { #define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL + #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL + #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL __le16 port_id; u8 ipg; u8 lpbk; @@ -1598,6 +1861,9 @@ struct hwrm_port_mac_cfg_input { u8 lcos_map_pri; u8 tunnel_pri2cos_map_pri; u8 dscp2pri_map_pri; + __le16 rx_ts_capture_ptp_msg_type; + __le16 tx_ts_capture_ptp_msg_type; + __le32 unused_0; }; /* Output (16 bytes) */ @@ -1754,7 +2020,113 @@ struct hwrm_port_blink_led_output { u8 valid; }; -/* hwrm_queue_qportcfg */ +/* hwrm_port_phy_qcaps */ +/* Input (24 bytes) */ +struct hwrm_port_phy_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (24 bytes) */ +struct hwrm_port_phy_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 eee_supported; + #define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL + #define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1 + u8 unused_0; + __le16 supported_speeds_force_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL + __le16 supported_speeds_auto_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL + #define 
PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL + __le16 supported_speeds_eee_mode; + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL + #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL + __le32 tx_lpi_timer_low; + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0 + #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24 + __le32 valid_tx_lpi_timer_high; + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL + #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0 + #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL + #define PORT_PHY_QCAPS_RESP_VALID_SFT 24 +}; + +/* hwrm_port_phy_i2c_read */ +/* Input (40 bytes) */ +struct hwrm_port_phy_i2c_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + __le32 enables; + #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL + __le16 port_id; + u8 i2c_slave_addr; + u8 unused_0; + __le16 page_number; + __le16 page_offset; + u8 data_length; + u8 unused_1[7]; +}; + +/* Output (80 bytes) */ +struct hwrm_port_phy_i2c_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 data[16]; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* Input (24 bytes) */ struct hwrm_queue_qportcfg_input { __le16 req_type; @@ -1766,6 +2138,7 @@ struct hwrm_queue_qportcfg_input { #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0) #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX __le16 port_id; __le16 unused_0; }; @@ -1838,6 +2211,7 @@ struct hwrm_queue_cfg_input { #define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL #define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) #define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_RX __le32 enables; #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL @@ -1875,6 +2249,7 @@ struct hwrm_queue_buffers_cfg_input { #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX __le32 enables; #define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL #define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL @@ -1952,6 +2327,7 @@ struct hwrm_queue_pri2cos_cfg_input { #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 
0x2UL __le32 enables; u8 port_id; @@ -2158,6 +2534,8 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL + #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL + #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL __le32 enables; #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL @@ -2622,6 +3000,7 @@ struct hwrm_cfa_l2_filter_alloc_input { #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0) #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL @@ -2747,6 +3126,7 @@ struct hwrm_cfa_l2_filter_cfg_input { #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0) #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0) + #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL __le32 enables; #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL @@ -3337,6 +3717,41 @@ struct hwrm_fw_reset_output { u8 valid; }; +/* hwrm_fw_qstatus */ +/* Input (24 bytes) */ +struct hwrm_fw_qstatus_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 embedded_proc_type; + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0) + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0) + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_fw_qstatus_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 selfrst_status; + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0) + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0) + #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0) + u8 unused_0; + __le16 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; +}; + /* hwrm_exec_fwd_resp */ /* Input (128 bytes) */ struct hwrm_exec_fwd_resp_input { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h index 43ef392c8588..40a7b0e09612 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. * - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 0c5f510492f1..363884dd9e8a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
* - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -771,12 +771,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) PORT_PHY_QCFG_RESP_LINK_NO_LINK) { phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_LINK; - if (phy_qcfg_resp.auto_link_speed) - phy_qcfg_resp.link_speed = - phy_qcfg_resp.auto_link_speed; - else - phy_qcfg_resp.link_speed = - phy_qcfg_resp.force_link_speed; + phy_qcfg_resp.link_speed = cpu_to_le16( + PORT_PHY_QCFG_RESP_LINK_SPEED_10GB); phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_FULL; phy_qcfg_resp.pause = @@ -859,8 +855,8 @@ void bnxt_update_vf_mac(struct bnxt *bp) * default but the stored zero MAC will allow the VF user to change * the random MAC address using ndo_set_mac_address() if he wants. */ - if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) - memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); + if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) + memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN); /* overwrite netdev dev_addr with admin VF MAC */ if (is_valid_ether_addr(bp->vf.mac_addr)) @@ -869,6 +865,31 @@ update_vf_mac_exit: mutex_unlock(&bp->hwrm_cmd_lock); } +int bnxt_approve_mac(struct bnxt *bp, u8 *mac) +{ + struct hwrm_func_vf_cfg_input req = {0}; + int rc = 0; + + if (!BNXT_VF(bp)) + return 0; + + if (bp->hwrm_spec_code < 0x10202) { + if (is_valid_ether_addr(bp->vf.mac_addr)) + rc = -EADDRNOTAVAIL; + goto mac_done; + } + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); + req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req.dflt_mac_addr, mac, ETH_ALEN); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +mac_done: + if (rc) { + rc = -EADDRNOTAVAIL; + netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n", + mac); + } + return rc; +} #else void bnxt_sriov_disable(struct bnxt *bp) @@ -883,4 +904,9 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) void bnxt_update_vf_mac(struct bnxt *bp) { } + +int bnxt_approve_mac(struct bnxt *bp, u8 *mac) +{ + return 0; +} #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index c151280e3980..0392670ab49c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
* - * Copyright (c) 2014-2015 Broadcom Corporation + * Copyright (c) 2014-2016 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -20,4 +20,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); void bnxt_sriov_disable(struct bnxt *); void bnxt_hwrm_exec_fwd_req(struct bnxt *); void bnxt_update_vf_mac(struct bnxt *); +int bnxt_approve_mac(struct bnxt *, u8 *); #endif diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index b69dc58faeab..b1d2ac818710 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5350,7 +5350,10 @@ static int cnic_start_hw(struct cnic_dev *dev) return 0; err1: - cp->free_resc(dev); + if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ) + cp->stop_hw(dev); + else + cp->free_resc(dev); pci_dev_put(dev->pcidev); return err; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index cf6445d148ca..541456398dfb 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -104,8 +104,8 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv, static inline void dmadesc_set(struct bcmgenet_priv *priv, void __iomem *d, dma_addr_t addr, u32 val) { - dmadesc_set_length_status(priv, d, val); dmadesc_set_addr(priv, d, addr); + dmadesc_set_length_status(priv, d, val); } static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv, @@ -878,7 +878,11 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev, else p = (char *)priv; p += s->stat_offset; - data[i] = *(u32 *)p; + if (sizeof(unsigned long) != sizeof(u32) && + s->stat_sizeof == sizeof(unsigned long)) + data[i] = *(unsigned long *)p; + else + data[i] = *(u32 *)p; } } @@ -1221,8 +1225,10 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, dev->stats.tx_packets += pkts_compl; dev->stats.tx_bytes += bytes_compl; + txq = netdev_get_tx_queue(dev, ring->queue); + netdev_tx_completed_queue(txq, pkts_compl, bytes_compl); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { - txq = netdev_get_tx_queue(dev, ring->queue); if (netif_tx_queue_stopped(txq)) netif_tx_wake_queue(txq); } @@ -1331,6 +1337,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev, struct bcmgenet_priv *priv = netdev_priv(dev); struct device *kdev = &priv->pdev->dev; struct enet_cb *tx_cb_ptr; + unsigned int frag_size; dma_addr_t mapping; int ret; @@ -1338,10 +1345,12 @@ static int bcmgenet_xmit_frag(struct net_device *dev, if (unlikely(!tx_cb_ptr)) BUG(); + tx_cb_ptr->skb = NULL; - mapping = skb_frag_dma_map(kdev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); + frag_size = skb_frag_size(frag); + + mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE); ret = dma_mapping_error(kdev, mapping); if (ret) { priv->mib.tx_dma_failed++; @@ -1351,10 +1360,10 @@ static int bcmgenet_xmit_frag(struct net_device *dev, } dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); - dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size); + dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size); dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, - (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | + (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); return 0; @@ -1447,15 +1456,19 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) else index -= 1; - 
nr_frags = skb_shinfo(skb)->nr_frags; ring = &priv->tx_rings[index]; txq = netdev_get_tx_queue(dev, ring->queue); + nr_frags = skb_shinfo(skb)->nr_frags; + spin_lock_irqsave(&ring->lock, flags); - if (ring->free_bds <= nr_frags + 1) { - netif_tx_stop_queue(txq); - netdev_err(dev, "%s: tx ring %d full when queue %d awake\n", - __func__, index, ring->queue); + if (ring->free_bds <= (nr_frags + 1)) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + netdev_err(dev, + "%s: tx ring %d full when queue %d awake\n", + __func__, index, ring->queue); + } ret = NETDEV_TX_BUSY; goto out; } @@ -1509,6 +1522,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) ring->prod_index += nr_frags + 1; ring->prod_index &= DMA_P_INDEX_MASK; + netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent); + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) netif_tx_stop_queue(txq); @@ -1728,7 +1743,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) work_done = bcmgenet_desc_rx(ring, budget); if (work_done < budget) { - napi_complete(napi); + napi_complete_done(napi, work_done); ring->int_enable(ring); } @@ -2357,6 +2372,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) { int i; + struct netdev_queue *txq; bcmgenet_fini_rx_napi(priv); bcmgenet_fini_tx_napi(priv); @@ -2371,6 +2387,14 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) } } + for (i = 0; i < priv->hw_params->tx_queues; i++) { + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue); + netdev_tx_reset_queue(txq); + } + + txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue); + netdev_tx_reset_queue(txq); + bcmgenet_free_rx_buffers(priv); kfree(priv->rx_cbs); kfree(priv->tx_cbs); @@ -2486,7 +2510,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) if (likely(napi_schedule_prep(&rx_ring->napi))) { rx_ring->int_disable(rx_ring); - __napi_schedule(&rx_ring->napi); + __napi_schedule_irqoff(&rx_ring->napi); } } @@ -2499,7 +2523,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) if (likely(napi_schedule_prep(&tx_ring->napi))) { tx_ring->int_disable(tx_ring); - __napi_schedule(&tx_ring->napi); + __napi_schedule_irqoff(&tx_ring->napi); } } @@ -2529,7 +2553,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) if (likely(napi_schedule_prep(&rx_ring->napi))) { rx_ring->int_disable(rx_ring); - __napi_schedule(&rx_ring->napi); + __napi_schedule_irqoff(&rx_ring->napi); } } @@ -2538,7 +2562,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) if (likely(napi_schedule_prep(&tx_ring->napi))) { tx_ring->int_disable(tx_ring); - __napi_schedule(&tx_ring->napi); + __napi_schedule_irqoff(&tx_ring->napi); } } @@ -3035,7 +3059,7 @@ static void bcmgenet_timeout(struct net_device *dev) bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); - dev->trans_start = jiffies; + netif_trans_update(dev); dev->stats.tx_errors++; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index eacc559679bf..f1b81187a201 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2462,7 +2462,7 @@ static void sbmac_tx_timeout (struct net_device *dev) spin_lock_irqsave(&sc->sbm_lock, flags); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; 
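/* The "- dev->trans_start = jiffies;" -> "+ netif_trans_update(dev);"
 * hunks that recur throughout this series all perform the same
 * conversion: instead of writing the last-transmit timestamp directly,
 * drivers now go through a helper.  The helper's definition is not part
 * of this diff; as a rough sketch (assuming the include/linux/netdevice.h
 * definition this series is built on), it refreshes the per-queue
 * timestamp that the TX watchdog compares against jiffies:
 *
 *	static inline void netif_trans_update(struct net_device *dev)
 *	{
 *		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 *
 *		// skip the store, and the cache-line dirtying, if unchanged
 *		if (txq->trans_start != jiffies)
 *			txq->trans_start = jiffies;
 *	}
 *
 * Calling it from a tx_timeout handler, as done here, keeps
 * dev_watchdog() from immediately re-arming another spurious timeout
 * while the driver resets its TX path.
 */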
spin_unlock_irqrestore(&sc->sbm_lock, flags); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3010080cfeee..ff300f7cf529 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7383,7 +7383,7 @@ static void tg3_napi_fini(struct tg3 *tp) static inline void tg3_netif_stop(struct tg3 *tp) { - tp->dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(tp->dev); /* prevent tx timeout */ tg3_napi_disable(tp); netif_carrier_off(tp->dev); netif_tx_disable(tp->dev); diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 48a7d7dee846..cb07d95e3dd9 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -61,8 +61,7 @@ #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) #define MACB_WOL_ENABLED (0x1 << 1) -/* - * Graceful stop timeouts in us. We should allow up to +/* Graceful stop timeouts in us. We should allow up to * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) */ #define MACB_HALT_TIMEOUT 1230 @@ -130,9 +129,8 @@ static void hw_writel(struct macb *bp, int offset, u32 value) writel_relaxed(value, bp->regs + offset); } -/* - * Find the CPU endianness by using the loopback bit of NCR register. When the - * CPU is in big endian we need to program swaped mode for management +/* Find the CPU endianness by using the loopback bit of NCR register. When the + * CPU is in big endian we need to program swapped mode for management * descriptor access. */ static bool hw_is_native_io(void __iomem *addr) @@ -189,7 +187,7 @@ static void macb_get_hwaddr(struct macb *bp) pdata = dev_get_platdata(&bp->pdev->dev); - /* Check all 4 address register for vaild address */ + /* Check all 4 address register for valid address */ for (i = 0; i < 4; i++) { bottom = macb_or_gem_readl(bp, SA1B + i * 8); top = macb_or_gem_readl(bp, SA1T + i * 8); @@ -297,7 +295,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) ferr = DIV_ROUND_UP(ferr, rate / 100000); if (ferr > 5) netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", - rate); + rate); if (clk_set_rate(clk, rate_rounded)) netdev_err(dev, "adjusting tx_clk failed.\n"); @@ -386,7 +384,8 @@ static int macb_mii_probe(struct net_device *dev) pdata = dev_get_platdata(&bp->pdev->dev); if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { - ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int"); + ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, + "phy int"); if (!ret) { phy_irq = gpio_to_irq(pdata->phy_irq_pin); phydev->irq = (phy_irq < 0) ? 
PHY_POLL : phy_irq; @@ -430,7 +429,7 @@ static int macb_mii_init(struct macb *bp) macb_writel(bp, NCR, MACB_BIT(MPE)); bp->mii_bus = mdiobus_alloc(); - if (bp->mii_bus == NULL) { + if (!bp->mii_bus) { err = -ENOMEM; goto err_out; } @@ -439,9 +438,9 @@ static int macb_mii_init(struct macb *bp) bp->mii_bus->read = &macb_mdio_read; bp->mii_bus->write = &macb_mdio_write; snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - bp->pdev->name, bp->pdev->id); + bp->pdev->name, bp->pdev->id); bp->mii_bus->priv = bp; - bp->mii_bus->parent = &bp->dev->dev; + bp->mii_bus->parent = &bp->pdev->dev; pdata = dev_get_platdata(&bp->pdev->dev); dev_set_drvdata(&bp->dev->dev, bp->mii_bus); @@ -452,13 +451,15 @@ static int macb_mii_init(struct macb *bp) err = of_mdiobus_register(bp->mii_bus, np); /* fallback to standard phy registration if no phy were - found during dt phy registration */ + * found during dt phy registration + */ if (!err && !phy_find_first(bp->mii_bus)) { for (i = 0; i < PHY_MAX_ADDR; i++) { struct phy_device *phydev; phydev = mdiobus_scan(bp->mii_bus, i); - if (IS_ERR(phydev)) { + if (IS_ERR(phydev) && + PTR_ERR(phydev) != -ENODEV) { err = PTR_ERR(phydev); break; } @@ -499,7 +500,7 @@ static void macb_update_stats(struct macb *bp) WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); - for(; p < end; p++, offset += 4) + for (; p < end; p++, offset += 4) *p += bp->macb_reg_readl(bp, offset); } @@ -567,8 +568,7 @@ static void macb_tx_error_task(struct work_struct *work) /* Make sure nobody is trying to queue up new packets */ netif_tx_stop_all_queues(bp->dev); - /* - * Stop transmission now + /* Stop transmission now * (in case we have just queued new packets) * macb/gem must be halted to write TBQP register */ @@ -576,8 +576,7 @@ static void macb_tx_error_task(struct work_struct *work) /* Just complain for now, reinitializing TX path can be good */ netdev_err(bp->dev, "BUG: halt tx timed out\n"); - /* - * Treat frames in TX queue including the ones that caused the error. + /* Treat frames in TX queue including the ones that caused the error. * Free transmit buffers in upper layer. */ for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { @@ -607,10 +606,9 @@ static void macb_tx_error_task(struct work_struct *work) bp->stats.tx_bytes += skb->len; } } else { - /* - * "Buffers exhausted mid-frame" errors may only happen - * if the driver is buggy, so complain loudly about those. - * Statistics are updated by hardware. + /* "Buffers exhausted mid-frame" errors may only happen + * if the driver is buggy, so complain loudly about + * those. Statistics are updated by hardware. 
*/ if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) netdev_err(bp->dev, @@ -662,7 +660,7 @@ static void macb_tx_interrupt(struct macb_queue *queue) queue_writel(queue, ISR, MACB_BIT(TCOMP)); netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", - (unsigned long)status); + (unsigned long)status); head = queue->tx_head; for (tail = queue->tx_tail; tail != head; tail++) { @@ -722,7 +720,8 @@ static void gem_rx_refill(struct macb *bp) struct sk_buff *skb; dma_addr_t paddr; - while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { + while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, + RX_RING_SIZE) > 0) { entry = macb_rx_ring_wrap(bp->rx_prepared_head); /* Make hw descriptor updates visible to CPU */ @@ -730,10 +729,10 @@ static void gem_rx_refill(struct macb *bp) bp->rx_prepared_head++; - if (bp->rx_skbuff[entry] == NULL) { + if (!bp->rx_skbuff[entry]) { /* allocate sk_buff for this free entry in ring */ skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); - if (unlikely(skb == NULL)) { + if (unlikely(!skb)) { netdev_err(bp->dev, "Unable to allocate sk_buff\n"); break; @@ -741,7 +740,8 @@ static void gem_rx_refill(struct macb *bp) /* now fill corresponding descriptor entry */ paddr = dma_map_single(&bp->pdev->dev, skb->data, - bp->rx_buffer_size, DMA_FROM_DEVICE); + bp->rx_buffer_size, + DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, paddr)) { dev_kfree_skb(skb); break; @@ -766,7 +766,7 @@ static void gem_rx_refill(struct macb *bp) wmb(); netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", - bp->rx_prepared_head, bp->rx_tail); + bp->rx_prepared_head, bp->rx_tail); } /* Mark DMA descriptors from begin up to and not including end as unused */ @@ -777,14 +777,14 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin, for (frag = begin; frag != end; frag++) { struct macb_dma_desc *desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); } /* Make descriptor updates visible to hardware */ wmb(); - /* - * When this happens, the hardware stats registers for + /* When this happens, the hardware stats registers for * whatever caused this is updated, so we don't have to record * anything. */ @@ -880,11 +880,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, len = desc->ctrl & bp->rx_frm_len_mask; netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", - macb_rx_ring_wrap(first_frag), - macb_rx_ring_wrap(last_frag), len); + macb_rx_ring_wrap(first_frag), + macb_rx_ring_wrap(last_frag), len); - /* - * The ethernet header starts NET_IP_ALIGN bytes into the + /* The ethernet header starts NET_IP_ALIGN bytes into the * first buffer. Since the header is 14 bytes, this makes the * payload word-aligned. 
* @@ -924,7 +923,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, frag_len = len - offset; } skb_copy_to_linear_data_offset(skb, offset, - macb_rx_buffer(bp, frag), frag_len); + macb_rx_buffer(bp, frag), + frag_len); offset += bp->rx_buffer_size; desc = macb_rx_desc(bp, frag); desc->addr &= ~MACB_BIT(RX_USED); @@ -942,7 +942,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, bp->stats.rx_packets++; bp->stats.rx_bytes += skb->len; netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", - skb->len, skb->csum); + skb->len, skb->csum); netif_receive_skb(skb); return 0; @@ -1049,7 +1049,7 @@ static int macb_poll(struct napi_struct *napi, int budget) work_done = 0; netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", - (unsigned long)status, budget); + (unsigned long)status, budget); work_done = bp->macbgem_ops.mog_rx(bp, budget); if (work_done < budget) { @@ -1099,8 +1099,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) (unsigned long)status); if (status & MACB_RX_INT_FLAGS) { - /* - * There's no point taking any more interrupts + /* There's no point taking any more interrupts * until we have processed the buffers. The * scheduling call may fail if the poll routine * is already scheduled, so disable interrupts @@ -1129,8 +1128,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (status & MACB_BIT(TCOMP)) macb_tx_interrupt(queue); - /* - * Link change detection isn't possible with RMII, so we'll + /* Link change detection isn't possible with RMII, so we'll * add that if/when we get our hands on a full-blown MII PHY. */ @@ -1161,8 +1159,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } if (status & MACB_BIT(HRESP)) { - /* - * TODO: Reset the hardware, and maybe move the + /* TODO: Reset the hardware, and maybe move the * netdev_err to a lower-priority context as well * (work queue?) */ @@ -1181,8 +1178,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling receive - used by netconsole and other diagnostic tools +/* Polling receive - used by netconsole and other diagnostic tools * to allow network i/o with interrupts disabled. */ static void macb_poll_controller(struct net_device *dev) @@ -1268,7 +1264,7 @@ static unsigned int macb_tx_map(struct macb *bp, } /* Should never happen */ - if (unlikely(tx_skb == NULL)) { + if (unlikely(!tx_skb)) { netdev_err(bp->dev, "BUG! empty skb!\n"); return 0; } @@ -1338,16 +1334,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) #if defined(DEBUG) && defined(VERBOSE_DEBUG) netdev_vdbg(bp->dev, - "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", - queue_index, skb->len, skb->head, skb->data, - skb_tail_pointer(skb), skb_end_pointer(skb)); + "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", + queue_index, skb->len, skb->head, skb->data, + skb_tail_pointer(skb), skb_end_pointer(skb)); print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, 16, true); #endif /* Count how many TX buffer descriptors are needed to send this * socket buffer: skb fragments of jumbo frames may need to be - * splitted into many buffer descriptors. + * split into many buffer descriptors. 
*/ count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); nr_frags = skb_shinfo(skb)->nr_frags; @@ -1398,8 +1394,8 @@ static void macb_init_rx_buffer_size(struct macb *bp, size_t size) if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { netdev_dbg(bp->dev, - "RX buffer must be multiple of %d bytes, expanding\n", - RX_BUFFER_MULTIPLE); + "RX buffer must be multiple of %d bytes, expanding\n", + RX_BUFFER_MULTIPLE); bp->rx_buffer_size = roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); } @@ -1422,7 +1418,7 @@ static void gem_free_rx_buffers(struct macb *bp) for (i = 0; i < RX_RING_SIZE; i++) { skb = bp->rx_skbuff[i]; - if (skb == NULL) + if (!skb) continue; desc = &bp->rx_ring[i]; @@ -1478,10 +1474,10 @@ static int gem_alloc_rx_buffers(struct macb *bp) bp->rx_skbuff = kzalloc(size, GFP_KERNEL); if (!bp->rx_skbuff) return -ENOMEM; - else - netdev_dbg(bp->dev, - "Allocated %d RX struct sk_buff entries at %p\n", - RX_RING_SIZE, bp->rx_skbuff); + + netdev_dbg(bp->dev, + "Allocated %d RX struct sk_buff entries at %p\n", + RX_RING_SIZE, bp->rx_skbuff); return 0; } @@ -1494,10 +1490,10 @@ static int macb_alloc_rx_buffers(struct macb *bp) &bp->rx_buffers_dma, GFP_KERNEL); if (!bp->rx_buffers) return -ENOMEM; - else - netdev_dbg(bp->dev, - "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", - size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); + + netdev_dbg(bp->dev, + "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", + size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); return 0; } @@ -1588,8 +1584,7 @@ static void macb_reset_hw(struct macb *bp) struct macb_queue *queue; unsigned int q; - /* - * Disable RX and TX (XXX: Should we halt the transmission + /* Disable RX and TX (XXX: Should we halt the transmission * more gracefully?) */ macb_writel(bp, NCR, 0); @@ -1652,8 +1647,7 @@ static u32 macb_mdc_clk_div(struct macb *bp) return config; } -/* - * Get the DMA bus width field of the network configuration register that we +/* Get the DMA bus width field of the network configuration register that we * should program. We find the width from decoding the design configuration * register to find the maximum supported data bus width. */ @@ -1673,8 +1667,7 @@ static u32 macb_dbw(struct macb *bp) } } -/* - * Configure the receive DMA engine +/* Configure the receive DMA engine * - use the correct receive buffer size * - set best burst length for DMA operations * (if not supported by FIFO, it will fallback to default) @@ -1762,8 +1755,7 @@ static void macb_init_hw(struct macb *bp) macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); } -/* - * The hash address register is 64 bits long and takes up two +/* The hash address register is 64 bits long and takes up two * locations in the memory map. The least significant bits are stored * in EMAC_HSL and the most significant bits in EMAC_HSH. * @@ -1803,9 +1795,7 @@ static inline int hash_bit_value(int bitnr, __u8 *addr) return 0; } -/* - * Return the hash index value for the specified address. - */ +/* Return the hash index value for the specified address. */ static int hash_get_index(__u8 *addr) { int i, j, bitval; @@ -1821,9 +1811,7 @@ static int hash_get_index(__u8 *addr) return hash_index; } -/* - * Add multicast addresses to the internal multicast-hash table. - */ +/* Add multicast addresses to the internal multicast-hash table. 
*/ static void macb_sethashtable(struct net_device *dev) { struct netdev_hw_addr *ha; @@ -1831,7 +1819,8 @@ static void macb_sethashtable(struct net_device *dev) unsigned int bitnr; struct macb *bp = netdev_priv(dev); - mc_filter[0] = mc_filter[1] = 0; + mc_filter[0] = 0; + mc_filter[1] = 0; netdev_for_each_mc_addr(ha, dev) { bitnr = hash_get_index(ha->addr); @@ -1842,9 +1831,7 @@ static void macb_sethashtable(struct net_device *dev) macb_or_gem_writel(bp, HRT, mc_filter[1]); } -/* - * Enable/Disable promiscuous and multicast modes. - */ +/* Enable/Disable promiscuous and multicast modes. */ static void macb_set_rx_mode(struct net_device *dev) { unsigned long cfg; @@ -2161,9 +2148,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) regs_buff[12] = macb_or_gem_readl(bp, USRIO); - if (macb_is_gem(bp)) { + if (macb_is_gem(bp)) regs_buff[13] = gem_readl(bp, DMACFG); - } } static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -2286,11 +2272,11 @@ static const struct net_device_ops macb_netdev_ops = { .ndo_set_features = macb_set_features, }; -/* - * Configure peripheral capabilities according to device tree +/* Configure peripheral capabilities according to device tree * and integration options used */ -static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf) +static void macb_configure_caps(struct macb *bp, + const struct macb_config *dt_conf) { u32 dcfg; @@ -2988,7 +2974,7 @@ static int macb_probe(struct platform_device *pdev) mac = of_get_mac_address(np); if (mac) - memcpy(bp->dev->dev_addr, mac, ETH_ALEN); + ether_addr_copy(bp->dev->dev_addr, mac); else macb_get_hwaddr(bp); @@ -2996,6 +2982,7 @@ static int macb_probe(struct platform_device *pdev) phy_node = of_get_next_available_child(np, NULL); if (phy_node) { int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); + if (gpio_is_valid(gpio)) { bp->reset_gpio = gpio_to_desc(gpio); gpiod_direction_output(bp->reset_gpio, 1); @@ -3019,29 +3006,36 @@ static int macb_probe(struct platform_device *pdev) if (err) goto err_out_free_netdev; + err = macb_mii_init(bp); + if (err) + goto err_out_free_netdev; + + phydev = bp->phy_dev; + + netif_carrier_off(dev); + err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); - goto err_out_unregister_netdev; + goto err_out_unregister_mdio; } - err = macb_mii_init(bp); - if (err) - goto err_out_unregister_netdev; - - netif_carrier_off(dev); + phy_attached_info(phydev); netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n", macb_is_gem(bp) ? 
"GEM" : "MACB", macb_readl(bp, MID), dev->base_addr, dev->irq, dev->dev_addr); - phydev = bp->phy_dev; - phy_attached_info(phydev); - return 0; -err_out_unregister_netdev: - unregister_netdev(dev); +err_out_unregister_mdio: + phy_disconnect(bp->phy_dev); + mdiobus_unregister(bp->mii_bus); + mdiobus_free(bp->mii_bus); + + /* Shutdown the PHY if there is a GPIO reset */ + if (bp->reset_gpio) + gpiod_set_value(bp->reset_gpio, 0); err_out_free_netdev: free_netdev(dev); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 34d269cd5579..8de79ae63231 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2899,7 +2899,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (status == IQ_SEND_STOP) stop_q(lio->netdev, q_idx); - netdev->trans_start = jiffies; + netif_trans_update(netdev); stats->tx_done++; stats->tx_tot_bytes += skb->len; @@ -2928,7 +2928,7 @@ static void liquidio_tx_timeout(struct net_device *netdev) netif_info(lio, tx_err, lio->netdev, "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", netdev->stats.tx_dropped); - netdev->trans_start = jiffies; + netif_trans_update(netdev); txqs_wake(netdev); } diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index c177c7cec13b..388cd799d9ed 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1320,7 +1320,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) /* Ring the bell. */ cvmx_write_csr(p->mix + MIX_ORING2, 1); - netdev->trans_start = jiffies; + netif_trans_update(netdev); rv = NETDEV_TX_OK; out: octeon_mgmt_update_tx_stats(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index bfee298fc02a..a19e73f11d73 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1442,7 +1442,7 @@ static void nicvf_reset_task(struct work_struct *work) nicvf_stop(nic->netdev); nicvf_open(nic->netdev); - nic->netdev->trans_start = jiffies; + netif_trans_update(nic->netdev); } static int nicvf_config_loopback(struct nicvf *nic, diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index fa05e347262f..0ff8e60deccb 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -23,7 +23,7 @@ static void nicvf_get_page(struct nicvf *nic) if (!nic->rb_pageref || !nic->rb_page) return; - atomic_add(nic->rb_pageref, &nic->rb_page->_count); + page_ref_add(nic->rb_page, nic->rb_pageref); nic->rb_pageref = 0; } @@ -533,6 +533,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, nicvf_config_vlan_stripping(nic, nic->netdev->features); /* Enable Receive queue */ + memset(&rq_cfg, 0, sizeof(struct rq_cfg)); rq_cfg.ena = 1; rq_cfg.tcp_ena = 0; nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg); @@ -565,6 +566,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, qidx, (u64)(cq->dmem.phys_base)); /* Enable Completion queue */ + memset(&cq_cfg, 0, sizeof(struct cq_cfg)); cq_cfg.ena = 1; cq_cfg.reset = 0; cq_cfg.caching = 0; @@ -613,6 +615,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, qidx, 
(u64)(sq->dmem.phys_base)); /* Enable send queue & set queue size */ + memset(&sq_cfg, 0, sizeof(struct sq_cfg)); sq_cfg.ena = 1; sq_cfg.reset = 0; sq_cfg.ldwb = 0; @@ -649,6 +652,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, /* Enable RBDR & set queue size */ /* Buffer size should be in multiples of 128 bytes */ + memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg)); rbdr_cfg.ena = 1; rbdr_cfg.reset = 0; rbdr_cfg.ldwb = 0; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 967951582e03..d20539a6d162 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1011,10 +1011,11 @@ static int bgx_init_of_phy(struct bgx *bgx) } lmac++; - if (lmac == MAX_LMAC_PER_BGX) + if (lmac == MAX_LMAC_PER_BGX) { + of_node_put(node); break; + } } - of_node_put(node); return 0; defer: diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 526ea74e82d9..86f467a2c485 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter, struct cmdQ *q = &sge->cmdQ[qid]; unsigned int credits, pidx, genbit, count, use_sched_skb = 0; - if (!spin_trylock(&q->lock)) - return NETDEV_TX_LOCKED; + spin_lock(&q->lock); reclaim_completed_tx(sge, q); diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 60908eab3b3a..43da891fab97 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -576,7 +576,7 @@ static void setup_rss(struct adapter *adap) unsigned int nq0 = adap2pinfo(adap, 0)->nqsets; unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1; u8 cpus[SGE_QSETS + 1]; - u16 rspq_map[RSS_TABLE_SIZE]; + u16 rspq_map[RSS_TABLE_SIZE + 1]; for (i = 0; i < SGE_QSETS; ++i) cpus[i] = i; @@ -586,6 +586,7 @@ static void setup_rss(struct adapter *adap) rspq_map[i] = i % nq0; rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0; } + rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */ t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 984a3cc26f86..b4fceb92479f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -324,7 +324,9 @@ struct adapter_params { unsigned int sf_fw_start; /* start of FW image in flash */ unsigned int fw_vers; + unsigned int bs_vers; /* bootstrap version */ unsigned int tp_vers; + unsigned int er_vers; /* expansion ROM version */ u8 api_vers[7]; unsigned short mtus[NMTUS]; @@ -357,6 +359,34 @@ struct sge_idma_monitor_state { unsigned int idma_warn[2]; /* time to warning in HZ */ }; +/* Firmware Mailbox Command/Reply log. All values are in Host-Endian format. + * The access and execute times are signed in order to accommodate negative + * error returns. 
+ */ +struct mbox_cmd { + u64 cmd[MBOX_LEN / 8]; /* a Firmware Mailbox Command/Reply */ + u64 timestamp; /* OS-dependent timestamp */ + u32 seqno; /* sequence number */ + s16 access; /* time (ms) to access mailbox */ + s16 execute; /* time (ms) to execute */ +}; + +struct mbox_cmd_log { + unsigned int size; /* number of entries in the log */ + unsigned int cursor; /* next position in the log to write */ + u32 seqno; /* next sequence number */ + /* variable length mailbox command log starts here */ +}; + +/* Given a pointer to a Firmware Mailbox Command Log and a log entry index, + * return a pointer to the specified entry. + */ +static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log, + unsigned int entry_idx) +{ + return &((struct mbox_cmd *)&(log)[1])[entry_idx]; +} + #include "t4fw_api.h" #define FW_VERSION(chip) ( \ @@ -394,6 +424,7 @@ struct link_config { unsigned char fc; /* actual link flow control */ unsigned char autoneg; /* autonegotiating? */ unsigned char link_ok; /* link up? */ + unsigned char link_down_rc; /* link down reason */ }; #define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16) @@ -731,6 +762,7 @@ struct adapter { u32 t4_bar0; struct pci_dev *pdev; struct device *pdev_dev; + const char *name; unsigned int mbox; unsigned int pf; unsigned int flags; @@ -776,6 +808,10 @@ struct adapter { struct work_struct db_drop_task; bool tid_release_task_busy; + /* support for mailbox command/reply logging */ +#define T4_OS_LOG_MBOX_CMDS 256 + struct mbox_cmd_log *mbox_log; + struct dentry *debugfs_root; bool use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */ bool trace_rss; /* 1 implies that different RSS flit per filter is @@ -1306,6 +1342,7 @@ int t4_fl_pkt_align(struct adapter *adap); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_check_fw_version(struct adapter *adap); int t4_get_fw_version(struct adapter *adapter, u32 *vers); +int t4_get_bs_version(struct adapter *adapter, u32 *vers); int t4_get_tp_version(struct adapter *adapter, u32 *vers); int t4_get_exprom_version(struct adapter *adapter, u32 *vers); int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, @@ -1329,6 +1366,8 @@ int t4_init_sge_params(struct adapter *adapter); int t4_init_tp_params(struct adapter *adap); int t4_filter_field_shift(const struct adapter *adap, int filter_sel); int t4_init_rss_mode(struct adapter *adap, int mbox); +int t4_init_portinfo(struct port_info *pi, int mbox, + int port, int pf, int vf, u8 mac[]); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); void t4_fatal_err(struct adapter *adapter); int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, @@ -1451,6 +1490,9 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, u16 *valp); int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, unsigned int mmd, unsigned int reg, u16 val); +int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int iqtype, unsigned int iqid, unsigned int fl0id, unsigned int fl1id); @@ -1461,6 +1503,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int eqid); int 
t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); +void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); void t4_db_full(struct adapter *adapter); void t4_db_dropped(struct adapter *adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 052c660aca80..6ee2ed30626b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -253,7 +253,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap, { const union fw_port_dcb *fwdcb = &pcmd->u.dcb; int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid)); - struct net_device *dev = adap->port[port]; + struct net_device *dev = adap->port[adap->chan_map[port]]; struct port_info *pi = netdev_priv(dev); struct port_dcb_info *dcb = &pi->dcb; int dcb_type = pcmd->u.dcb.pgid.type; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 0bb41e9b9b1c..91fb50850fff 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -1152,6 +1152,104 @@ static const struct file_operations devlog_fops = { .release = seq_release_private }; +/* Show Firmware Mailbox Command/Reply Log + * + * Note that we don't do any locking when dumping the Firmware Mailbox Log so + * it's possible that we can catch things during a log update and therefore + * see partially corrupted log entries. But it's probably Good Enough(tm). + * If we ever decide that we want to make sure that we're dumping a coherent + * log, we'd need to perform locking in the mailbox logging and in + * mboxlog_open() where we'd need to grab the entire mailbox log in one go + * like we do for the Firmware Device Log. + */ +static int mboxlog_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + struct mbox_cmd_log *log = adapter->mbox_log; + struct mbox_cmd *entry; + int entry_idx, i; + + if (v == SEQ_START_TOKEN) { + seq_printf(seq, + "%10s %15s %5s %5s %s\n", + "Seq#", "Tstamp", "Atime", "Etime", + "Command/Reply"); + return 0; + } + + entry_idx = log->cursor + ((uintptr_t)v - 2); + if (entry_idx >= log->size) + entry_idx -= log->size; + entry = mbox_cmd_log_entry(log, entry_idx); + + /* skip over unused entries */ + if (entry->timestamp == 0) + return 0; + + seq_printf(seq, "%10u %15llu %5d %5d", + entry->seqno, entry->timestamp, + entry->access, entry->execute); + for (i = 0; i < MBOX_LEN / 8; i++) { + u64 flit = entry->cmd[i]; + u32 hi = (u32)(flit >> 32); + u32 lo = (u32)flit; + + seq_printf(seq, " %08x %08x", hi, lo); + } + seq_puts(seq, "\n"); + return 0; +} + +static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos) +{ + struct adapter *adapter = seq->private; + struct mbox_cmd_log *log = adapter->mbox_log; + + return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL); +} + +static void *mboxlog_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? 
mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN; +} + +static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ++*pos; + return mboxlog_get_idx(seq, *pos); +} + +static void mboxlog_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations mboxlog_seq_ops = { + .start = mboxlog_start, + .next = mboxlog_next, + .stop = mboxlog_stop, + .show = mboxlog_show +}; + +static int mboxlog_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &mboxlog_seq_ops); + + if (!res) { + struct seq_file *seq = file->private_data; + + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations mboxlog_fops = { + .owner = THIS_MODULE, + .open = mboxlog_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + static int mbox_show(struct seq_file *seq, void *v) { static const char * const owner[] = { "none", "FW", "driver", @@ -1572,6 +1670,7 @@ static const struct file_operations flash_debugfs_fops = { .owner = THIS_MODULE, .open = mem_open, .read = flash_read, + .llseek = default_llseek, }; static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) @@ -3128,6 +3227,7 @@ int t4_setup_debugfs(struct adapter *adap) { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 }, { "clk", &clk_debugfs_fops, S_IRUSR, 0 }, { "devlog", &devlog_fops, S_IRUSR, 0 }, + { "mboxlog", &mboxlog_fops, S_IRUSR, 0 }, { "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 }, { "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 }, { "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 }, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index d1e3f0997d6b..477db477b133 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -168,7 +168,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter," static int dflt_msg_enable = DFLT_MSG_ENABLE; module_param(dflt_msg_enable, int, 0644); -MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap"); +MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, " + "deprecated parameter"); /* * The driver uses the best interrupt scheme available on a platform in the @@ -303,6 +304,22 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) } #endif /* CONFIG_CHELSIO_T4_DCB */ +int cxgb4_dcb_enabled(const struct net_device *dev) +{ +#ifdef CONFIG_CHELSIO_T4_DCB + struct port_info *pi = netdev_priv(dev); + + if (!pi->dcb.enabled) + return 0; + + return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || + (pi->dcb.state == CXGB4_DCB_STATE_HOST)); +#else + return 0; +#endif +} +EXPORT_SYMBOL(cxgb4_dcb_enabled); + void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) { struct net_device *dev = adapter->port[port_id]; @@ -313,8 +330,10 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) netif_carrier_on(dev); else { #ifdef CONFIG_CHELSIO_T4_DCB - cxgb4_dcb_state_init(dev); - dcb_tx_queue_prio_enable(dev, false); + if (cxgb4_dcb_enabled(dev)) { + cxgb4_dcb_state_init(dev); + dcb_tx_queue_prio_enable(dev, false); + } #endif /* CONFIG_CHELSIO_T4_DCB */ netif_carrier_off(dev); } @@ -336,6 +355,17 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) netdev_info(dev, "port module unplugged\n"); else if (pi->mod_type < ARRAY_SIZE(mod_str)) netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); + else if (pi->mod_type == 
FW_PORT_MOD_TYPE_NOTSUPPORTED) + netdev_info(dev, "%s: unsupported port module inserted\n", + dev->name); + else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) + netdev_info(dev, "%s: unknown port module inserted\n", + dev->name); + else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR) + netdev_info(dev, "%s: transceiver module error\n", dev->name); + else + netdev_info(dev, "%s: unknown module type %d inserted\n", + dev->name, pi->mod_type); } int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ @@ -482,28 +512,12 @@ static int link_start(struct net_device *dev) return ret; } -int cxgb4_dcb_enabled(const struct net_device *dev) -{ -#ifdef CONFIG_CHELSIO_T4_DCB - struct port_info *pi = netdev_priv(dev); - - if (!pi->dcb.enabled) - return 0; - - return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) || - (pi->dcb.state == CXGB4_DCB_STATE_HOST)); -#else - return 0; -#endif -} -EXPORT_SYMBOL(cxgb4_dcb_enabled); - #ifdef CONFIG_CHELSIO_T4_DCB /* Handle a Data Center Bridging update message from the firmware. */ static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) { int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid)); - struct net_device *dev = adap->port[port]; + struct net_device *dev = adap->port[adap->chan_map[port]]; int old_dcb_enabled = cxgb4_dcb_enabled(dev); int new_dcb_enabled; @@ -633,7 +647,8 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, action == FW_PORT_ACTION_GET_PORT_INFO) { int port = FW_PORT_CMD_PORTID_G( be32_to_cpu(pcmd->op_to_portid)); - struct net_device *dev = q->adap->port[port]; + struct net_device *dev = + q->adap->port[q->adap->chan_map[port]]; int state_input = ((pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F) ? CXGB4_DCB_INPUT_FW_DISABLED @@ -3737,7 +3752,10 @@ static int adap_init0(struct adapter *adap) * is excessively mismatched relative to the driver.) */ t4_get_fw_version(adap, &adap->params.fw_vers); + t4_get_bs_version(adap, &adap->params.bs_vers); t4_get_tp_version(adap, &adap->params.tp_vers); + t4_get_exprom_version(adap, &adap->params.er_vers); + ret = t4_check_fw_version(adap); /* If firmware is too old (not supported by driver) force an update. */ if (ret) @@ -4651,6 +4669,68 @@ static void cxgb4_check_pcie_caps(struct adapter *adap) "suggested for optimal performance.\n"); } +/* Dump basic information about the adapter */ +static void print_adapter_info(struct adapter *adapter) +{ + /* Device information */ + dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n", + adapter->params.vpd.id, + CHELSIO_CHIP_RELEASE(adapter->params.chip)); + dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n", + adapter->params.vpd.sn, adapter->params.vpd.pn); + + /* Firmware Version */ + if (!adapter->params.fw_vers) + dev_warn(adapter->pdev_dev, "No firmware loaded\n"); + else + dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers)); + + /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap + * Firmware, so dev_info() is more appropriate here.) 
+ */ + if (!adapter->params.bs_vers) + dev_info(adapter->pdev_dev, "No bootstrap loaded\n"); + else + dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers)); + + /* TP Microcode Version */ + if (!adapter->params.tp_vers) + dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n"); + else + dev_info(adapter->pdev_dev, + "TP Microcode version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); + + /* Expansion ROM version */ + if (!adapter->params.er_vers) + dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n"); + else + dev_info(adapter->pdev_dev, + "Expansion ROM version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers)); + + /* Software/Hardware configuration */ + dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", + is_offload(adapter) ? "R" : "", + ((adapter->flags & USING_MSIX) ? "MSI-X" : + (adapter->flags & USING_MSI) ? "MSI" : ""), + is_offload(adapter) ? "Offload" : "non-Offload"); +} + static void print_port_info(const struct net_device *dev) { char buf[80]; @@ -4678,14 +4758,8 @@ static void print_port_info(const struct net_device *dev) --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); - netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n", - adap->params.vpd.id, - CHELSIO_CHIP_RELEASE(adap->params.chip), buf, - is_offload(adap) ? "R" : "", - (adap->flags & USING_MSIX) ? " MSI-X" : - (adap->flags & USING_MSI) ? 
" MSI" : ""); - netdev_info(dev, "S/N: %s, P/N: %s\n", - adap->params.vpd.sn, adap->params.vpd.pn); + netdev_info(dev, "%s: Chelsio %s (%s) %s\n", + dev->name, adap->params.vpd.id, adap->name, buf); } static void enable_pcie_relaxed_ordering(struct pci_dev *dev) @@ -4837,12 +4911,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_free_adapter; } + adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + + (sizeof(struct mbox_cmd) * + T4_OS_LOG_MBOX_CMDS), + GFP_KERNEL); + if (!adapter->mbox_log) { + err = -ENOMEM; + goto out_free_adapter; + } + adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS; + /* PCI device has been enabled */ adapter->flags |= DEV_ENABLED; adapter->regs = regs; adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; + adapter->name = pci_name(pdev); adapter->mbox = func; adapter->pf = func; adapter->msg_enable = dflt_msg_enable; @@ -5073,6 +5158,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (is_offload(adapter)) attach_ulds(adapter); + print_adapter_info(adapter); + sriov: #ifdef CONFIG_PCI_IOV if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) @@ -5092,6 +5179,7 @@ sriov: if (adapter->workq) destroy_workqueue(adapter->workq); + kfree(adapter->mbox_log); kfree(adapter); out_unmap_bar0: iounmap(regs); @@ -5158,6 +5246,7 @@ static void remove_one(struct pci_dev *pdev) adapter->flags &= ~DEV_ENABLED; } pci_release_regions(pdev); + kfree(adapter->mbox_log); synchronize_rcu(); kfree(adapter); } else diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 13b144bcf725..bad253beb8c8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2981,18 +2981,34 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) void t4_free_sge_resources(struct adapter *adap) { int i; - struct sge_eth_rxq *eq = adap->sge.ethrxq; - struct sge_eth_txq *etq = adap->sge.ethtxq; + struct sge_eth_rxq *eq; + struct sge_eth_txq *etq; + + /* stop all Rx queues in order to start them draining */ + for (i = 0; i < adap->sge.ethqsets; i++) { + eq = &adap->sge.ethrxq[i]; + if (eq->rspq.desc) + t4_iq_stop(adap, adap->mbox, adap->pf, 0, + FW_IQ_TYPE_FL_INT_CAP, + eq->rspq.cntxt_id, + eq->fl.size ? eq->fl.cntxt_id : 0xffff, + 0xffff); + } /* clean up Ethernet Tx/Rx queues */ - for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { + for (i = 0; i < adap->sge.ethqsets; i++) { + eq = &adap->sge.ethrxq[i]; if (eq->rspq.desc) free_rspq_fl(adap, &eq->rspq, eq->fl.size ? 
&eq->fl : NULL); + + etq = &adap->sge.ethtxq[i]; if (etq->q.desc) { t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, etq->q.cntxt_id); + __netif_tx_lock_bh(etq->txq); free_tx_desc(adap, &etq->q, etq->q.in_use, true); + __netif_tx_unlock_bh(etq->txq); kfree(etq->q.sdesc); free_txq(adap, &etq->q); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index cc1736bece0f..a63addb4e72c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -224,18 +224,34 @@ static void fw_asrt(struct adapter *adap, u32 mbox_addr) be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y)); } -static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg) +/** + * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log + * @adapter: the adapter + * @cmd: the Firmware Mailbox Command or Reply + * @size: command length in bytes + * @access: the time (ms) needed to access the Firmware Mailbox + * @execute: the time (ms) the command spent being executed + */ +static void t4_record_mbox(struct adapter *adapter, + const __be64 *cmd, unsigned int size, + int access, int execute) { - dev_err(adap->pdev_dev, - "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, - (unsigned long long)t4_read_reg64(adap, data_reg), - (unsigned long long)t4_read_reg64(adap, data_reg + 8), - (unsigned long long)t4_read_reg64(adap, data_reg + 16), - (unsigned long long)t4_read_reg64(adap, data_reg + 24), - (unsigned long long)t4_read_reg64(adap, data_reg + 32), - (unsigned long long)t4_read_reg64(adap, data_reg + 40), - (unsigned long long)t4_read_reg64(adap, data_reg + 48), - (unsigned long long)t4_read_reg64(adap, data_reg + 56)); + struct mbox_cmd_log *log = adapter->mbox_log; + struct mbox_cmd *entry; + int i; + + entry = mbox_cmd_log_entry(log, log->cursor++); + if (log->cursor == log->size) + log->cursor = 0; + + for (i = 0; i < size / 8; i++) + entry->cmd[i] = be64_to_cpu(cmd[i]); + while (i < MBOX_LEN / 8) + entry->cmd[i++] = 0; + entry->timestamp = jiffies; + entry->seqno = log->seqno++; + entry->access = access; + entry->execute = execute; } /** @@ -268,12 +284,16 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 }; + u16 access = 0; + u16 execute = 0; u32 v; u64 res; - int i, ms, delay_idx; + int i, ms, delay_idx, ret; const __be64 *p = cmd; u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A); u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A); + __be64 cmd_rpl[MBOX_LEN / 8]; + u32 pcie_fw; if ((size & 15) || size > MBOX_LEN) return -EINVAL; @@ -285,13 +305,24 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, if (adap->pdev->error_state != pci_channel_io_normal) return -EIO; + /* If we have a negative timeout, that implies that we can't sleep. */ + if (timeout < 0) { + sleep_ok = false; + timeout = -timeout; + } + v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adap, ctl_reg)); - if (v != MBOX_OWNER_DRV) - return v ? -EBUSY : -ETIMEDOUT; + if (v != MBOX_OWNER_DRV) { + ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; + t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); + return ret; + } + /* Copy in the new mailbox command and send it on its way ... 
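+ * (A note on the logging scheme, as implied by the code below: the
+ * outgoing command is recorded in the mailbox log first with an
+ * execute time of 0; once it completes, the reply is recorded with the
+ * elapsed execute time, or, on failure, the command is recorded again
+ * with a negative error code in the execute field instead.)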
*/ + t4_record_mbox(adap, cmd, MBOX_LEN, access, 0); for (i = 0; i < size; i += 8) t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); @@ -301,7 +332,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, delay_idx = 0; ms = delay[0]; - for (i = 0; i < timeout; i += ms) { + for (i = 0; + !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) && + i < timeout; + i += ms) { if (sleep_ok) { ms = delay[delay_idx]; /* last element may repeat */ if (delay_idx < ARRAY_SIZE(delay) - 1) @@ -317,26 +351,31 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, continue; } - res = t4_read_reg64(adap, data_reg); + get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg); + res = be64_to_cpu(cmd_rpl[0]); + if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) { fw_asrt(adap, data_reg); res = FW_CMD_RETVAL_V(EIO); } else if (rpl) { - get_mbox_rpl(adap, rpl, size / 8, data_reg); + memcpy(rpl, cmd_rpl, size); } - if (FW_CMD_RETVAL_G((int)res)) - dump_mbox(adap, mbox, data_reg); t4_write_reg(adap, ctl_reg, 0); + + execute = i + ms; + t4_record_mbox(adap, cmd_rpl, + MBOX_LEN, access, execute); return -FW_CMD_RETVAL_G((int)res); } } - dump_mbox(adap, mbox, data_reg); + ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT; + t4_record_mbox(adap, cmd, MBOX_LEN, access, ret); dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", *(const u8 *)cmd, mbox); t4_report_fw_error(adap); - return -ETIMEDOUT; + return ret; } int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, @@ -2557,6 +2596,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) } #define EEPROM_STAT_ADDR 0x7bfc +#define VPD_SIZE 0x800 #define VPD_BASE 0x400 #define VPD_BASE_OLD 0 #define VPD_LEN 1024 @@ -2594,6 +2634,15 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p) if (!vpd) return -ENOMEM; + /* We have two VPD data structures stored in the adapter VPD area. + * By default, Linux calculates the size of the VPD area by traversing + * the first VPD area at offset 0x0, so we need to tell the OS what + * our real VPD size is. + */ + ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE); + if (ret < 0) + goto out; + /* Card information normally starts at VPD_BASE but early cards had * it at 0. */ @@ -2927,6 +2976,20 @@ int t4_get_fw_version(struct adapter *adapter, u32 *vers) } /** + * t4_get_bs_version - read the firmware bootstrap version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the FW Bootstrap version from flash. + */ +int t4_get_bs_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START + + offsetof(struct fw_hdr, fw_ver), 1, + vers, 0); +} + +/** * t4_get_tp_version - read the TP microcode version * @adapter: the adapter * @vers: where to place the version @@ -6940,6 +7003,39 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, } /** + * t4_iq_stop - stop an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Stops an ingress queue and its associated FLs, if any. This causes + * any current or future data/messages destined for these queues to be + * tossed. 
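+ *
+ * Callers are expected to stop an ingress queue before freeing it with
+ * t4_iq_free(); t4_free_sge_resources() in sge.c above does exactly
+ * that, stopping every Ethernet Rx queue first so it can drain.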
+ */ +int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F | + FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) | + FW_IQ_CMD_VFN_V(vf)); + c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c)); + c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype)); + c.iqid = cpu_to_be16(iqid); + c.fl0id = cpu_to_be16(fl0id); + c.fl1id = cpu_to_be16(fl1id); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** * t4_iq_free - free an ingress queue and its FLs * @adap: the adapter * @mbox: mailbox to use for the FW command @@ -7046,52 +7142,122 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, } /** - * t4_handle_fw_rpl - process a FW reply message + * t4_link_down_rc_str - return a string for a Link Down Reason Code * @adap: the adapter + * @link_down_rc: Link Down Reason Code + * + * Returns a string representation of the Link Down Reason Code. + */ +static const char *t4_link_down_rc_str(unsigned char link_down_rc) +{ + static const char * const reason[] = { + "Link Down", + "Remote Fault", + "Auto-negotiation Failure", + "Reserved", + "Insufficient Airflow", + "Unable To Determine Reason", + "No RX Signal Detected", + "Reserved", + }; + + if (link_down_rc >= ARRAY_SIZE(reason)) + return "Bad Reason Code"; + + return reason[link_down_rc]; +} + +/** + * t4_handle_get_port_info - process a FW reply message + * @pi: the port info * @rpl: start of the FW message * - * Processes a FW message, such as link state change messages. + * Processes a GET_PORT_INFO FW reply message. + */ +void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) +{ + const struct fw_port_cmd *p = (const void *)rpl; + struct adapter *adap = pi->adapter; + + /* link/module state change message */ + int speed = 0, fc = 0; + struct link_config *lc; + u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype); + int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; + u32 mod = FW_PORT_CMD_MODTYPE_G(stat); + + if (stat & FW_PORT_CMD_RXPAUSE_F) + fc |= PAUSE_RX; + if (stat & FW_PORT_CMD_TXPAUSE_F) + fc |= PAUSE_TX; + if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + speed = 100; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + speed = 1000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + speed = 10000; + else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + speed = 40000; + + lc = &pi->link_cfg; + + if (mod != pi->mod_type) { + pi->mod_type = mod; + t4_os_portmod_changed(adap, pi->port_id); + } + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc) { /* something changed */ + if (!link_ok && lc->link_ok) { + unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat); + + lc->link_down_rc = rc; + dev_warn(adap->pdev_dev, + "Port %d link down, reason: %s\n", + pi->port_id, t4_link_down_rc_str(rc)); + } + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + lc->supported = be16_to_cpu(p->u.info.pcap); + t4_os_link_changed(adap, pi->port_id, link_ok); + } +} + +/** + * t4_handle_fw_rpl - process a FW reply message + * @adap: the adapter + * @rpl: start of the FW message + * + * Processes a FW message, such as link state change messages. 
*/
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
u8 opcode = *(const u8 *)rpl;
- if (opcode == FW_PORT_CMD) { /* link/module state change message */
- int speed = 0, fc = 0;
- const struct fw_port_cmd *p = (void *)rpl;
+ /* This might be a port command ... this simplifies the following
+ * conditionals ... We can get away with pre-dereferencing
+ * action_to_len16 because it's in the first 16 bytes and all messages
+ * will be at least that long.
+ */
+ const struct fw_port_cmd *p = (const void *)rpl;
+ unsigned int action =
+ FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
+
+ if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+ int i;
int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
- int port = adap->chan_map[chan];
- struct port_info *pi = adap2pinfo(adap, port);
- struct link_config *lc = &pi->link_cfg;
- u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
- int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
- u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
-
- if (stat & FW_PORT_CMD_RXPAUSE_F)
- fc |= PAUSE_RX;
- if (stat & FW_PORT_CMD_TXPAUSE_F)
- fc |= PAUSE_TX;
- if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
- speed = 100;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
- speed = 1000;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
- speed = 10000;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
- speed = 40000;
-
- if (link_ok != lc->link_ok || speed != lc->speed ||
- fc != lc->fc) { /* something changed */
- lc->link_ok = link_ok;
- lc->speed = speed;
- lc->fc = fc;
- lc->supported = be16_to_cpu(p->u.info.pcap);
- t4_os_link_changed(adap, port, link_ok);
- }
- if (mod != pi->mod_type) {
- pi->mod_type = mod;
- t4_os_portmod_changed(adap, port);
+ struct port_info *pi = NULL;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->tx_chan == chan)
+ break;
}
+
+ t4_handle_get_port_info(pi, rpl);
+ } else {
+ dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode);
+ return -EINVAL;
}
return 0;
}
@@ -7611,61 +7777,74 @@ int t4_init_rss_mode(struct adapter *adap, int mbox)
return 0;
}
-int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+/**
+ * t4_init_portinfo - allocate a virtual interface and initialize port_info
+ * @pi: the port_info
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @mac: the MAC address of the VI
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL it contains the MAC address of the VI as assigned by FW.
+ * @mac should be large enough to hold an Ethernet address.
+ * Returns < 0 on error.
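+ *
+ * The typical caller is t4_port_init() below, which invokes this once
+ * per physical port in the adapter's port vector and then copies the
+ * returned MAC address into the corresponding net_device.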
+ */ +int t4_init_portinfo(struct port_info *pi, int mbox, + int port, int pf, int vf, u8 mac[]) { - u8 addr[6]; - int ret, i, j = 0; + int ret; struct fw_port_cmd c; - struct fw_rss_vi_config_cmd rvc; + unsigned int rss_size; memset(&c, 0, sizeof(c)); - memset(&rvc, 0, sizeof(rvc)); + c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(port)); + c.action_to_len16 = cpu_to_be32( + FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(c)); + ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c); + if (ret) + return ret; + + ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size); + if (ret < 0) + return ret; + + pi->viid = ret; + pi->tx_chan = port; + pi->lport = port; + pi->rss_size = rss_size; + + ret = be32_to_cpu(c.u.info.lstatus_to_modtype); + pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ? + FW_PORT_CMD_MDIOADDR_G(ret) : -1; + pi->port_type = FW_PORT_CMD_PTYPE_G(ret); + pi->mod_type = FW_PORT_MOD_TYPE_NA; + + init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap)); + return 0; +} + +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) +{ + u8 addr[6]; + int ret, i, j = 0; for_each_port(adap, i) { - unsigned int rss_size; - struct port_info *p = adap2pinfo(adap, i); + struct port_info *pi = adap2pinfo(adap, i); while ((adap->params.portvec & (1 << j)) == 0) j++; - c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | - FW_CMD_REQUEST_F | FW_CMD_READ_F | - FW_PORT_CMD_PORTID_V(j)); - c.action_to_len16 = cpu_to_be32( - FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | - FW_LEN16(c)); - ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr); if (ret) return ret; - ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); - if (ret < 0) - return ret; - - p->viid = ret; - p->tx_chan = j; - p->lport = j; - p->rss_size = rss_size; memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); adap->port[i]->dev_port = j; - - ret = be32_to_cpu(c.u.info.lstatus_to_modtype); - p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ? - FW_PORT_CMD_MDIOADDR_G(ret) : -1; - p->port_type = FW_PORT_CMD_PTYPE_G(ret); - p->mod_type = FW_PORT_MOD_TYPE_NA; - - rvc.op_to_viid = - cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) | - FW_CMD_REQUEST_F | FW_CMD_READ_F | - FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); - rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc)); - ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); - if (ret) - return ret; - p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen); - - init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap)); j++; } return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 2fc60e83a7a1..7f59ca458431 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -220,6 +220,13 @@ enum { FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), + /* Location of bootstrap firmware image in FLASH. + */ + FLASH_FWBOOTSTRAP_START_SEC = 27, + FLASH_FWBOOTSTRAP_NSECS = 1, + FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC), + FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS), + /* * iSCSI persistent/crash information. 
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 06bc2d2e7a73..a2cdfc1261dc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -166,6 +166,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
+ CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR */
/* T6 adapters: */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 7ad6d4e75b2a..392d6644fdd8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2510,6 +2510,11 @@ struct fw_port_cmd {
#define FW_PORT_CMD_PTYPE_G(x) \
(((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M)
+#define FW_PORT_CMD_LINKDNRC_S 5
+#define FW_PORT_CMD_LINKDNRC_M 0x7
+#define FW_PORT_CMD_LINKDNRC_G(x) \
+ (((x) >> FW_PORT_CMD_LINKDNRC_S) & FW_PORT_CMD_LINKDNRC_M)
+
#define FW_PORT_CMD_MODTYPE_S 0
#define FW_PORT_CMD_MODTYPE_M 0x1f
#define FW_PORT_CMD_MODTYPE_V(x) ((x) << FW_PORT_CMD_MODTYPE_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 4a707c32d76f..734dd776c22f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -387,6 +387,10 @@ struct adapter {
/* various locks */
spinlock_t stats_lock;
+ /* support for mailbox command/reply logging */
+#define T4VF_OS_LOG_MBOX_CMDS 256
+ struct mbox_cmd_log *mbox_log;
+
/* list of MAC addresses in MPS Hash */
struct list_head mac_hlist;
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 1cc8a7a69457..04fc6f6d1e25 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -74,7 +74,8 @@ static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable,
- "default adapter ethtool message level bitmap");
+ "default adapter ethtool message level bitmap, "
+ "deprecated parameter");
/*
* The driver uses the best interrupt scheme available on a platform in the
@@ -1703,6 +1704,105 @@ static const struct ethtool_ops cxgb4vf_ethtool_ops = {
*/
/*
+ * Show Firmware Mailbox Command/Reply Log
+ *
+ * Note that we don't do any locking when dumping the Firmware Mailbox Log so
+ * it's possible that we can catch things during a log update and therefore
+ * see partially corrupted log entries. But it's probably Good Enough(tm).
+ * If we ever decide that we want to make sure that we're dumping a coherent
+ * log, we'd need to perform locking in the mailbox logging and in
+ * mboxlog_open() where we'd need to grab the entire mailbox log in one go
+ * like we do for the Firmware Device Log. But as stated above, meh ...
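+ *
+ * The log is a circular buffer: log->cursor points at the next entry
+ * to be overwritten, i.e. the oldest one, so the dump below starts
+ * there and walks forward modulo log->size.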
+ */ +static int mboxlog_show(struct seq_file *seq, void *v) +{ + struct adapter *adapter = seq->private; + struct mbox_cmd_log *log = adapter->mbox_log; + struct mbox_cmd *entry; + int entry_idx, i; + + if (v == SEQ_START_TOKEN) { + seq_printf(seq, + "%10s %15s %5s %5s %s\n", + "Seq#", "Tstamp", "Atime", "Etime", + "Command/Reply"); + return 0; + } + + entry_idx = log->cursor + ((uintptr_t)v - 2); + if (entry_idx >= log->size) + entry_idx -= log->size; + entry = mbox_cmd_log_entry(log, entry_idx); + + /* skip over unused entries */ + if (entry->timestamp == 0) + return 0; + + seq_printf(seq, "%10u %15llu %5d %5d", + entry->seqno, entry->timestamp, + entry->access, entry->execute); + for (i = 0; i < MBOX_LEN / 8; i++) { + u64 flit = entry->cmd[i]; + u32 hi = (u32)(flit >> 32); + u32 lo = (u32)flit; + + seq_printf(seq, " %08x %08x", hi, lo); + } + seq_puts(seq, "\n"); + return 0; +} + +static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos) +{ + struct adapter *adapter = seq->private; + struct mbox_cmd_log *log = adapter->mbox_log; + + return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL); +} + +static void *mboxlog_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN; +} + +static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ++*pos; + return mboxlog_get_idx(seq, *pos); +} + +static void mboxlog_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations mboxlog_seq_ops = { + .start = mboxlog_start, + .next = mboxlog_next, + .stop = mboxlog_stop, + .show = mboxlog_show +}; + +static int mboxlog_open(struct inode *inode, struct file *file) +{ + int res = seq_open(file, &mboxlog_seq_ops); + + if (!res) { + struct seq_file *seq = file->private_data; + + seq->private = inode->i_private; + } + return res; +} + +static const struct file_operations mboxlog_fops = { + .owner = THIS_MODULE, + .open = mboxlog_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* * Show SGE Queue Set information. We display QPL Queues Sets per line. */ #define QPL 4 @@ -2121,6 +2221,7 @@ struct cxgb4vf_debugfs_entry { }; static struct cxgb4vf_debugfs_entry debugfs_files[] = { + { "mboxlog", S_IRUGO, &mboxlog_fops }, { "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops }, { "sge_qstats", S_IRUGO, &sge_qstats_proc_fops }, { "resources", S_IRUGO, &resources_proc_fops }, @@ -2663,6 +2764,16 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, adapter->pdev = pdev; adapter->pdev_dev = &pdev->dev; + adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + + (sizeof(struct mbox_cmd) * + T4VF_OS_LOG_MBOX_CMDS), + GFP_KERNEL); + if (!adapter->mbox_log) { + err = -ENOMEM; + goto err_free_adapter; + } + adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS; + /* * Initialize SMP data synchronization resources. 
*/ @@ -2912,6 +3023,7 @@ err_unmap_bar0: iounmap(adapter->regs); err_free_adapter: + kfree(adapter->mbox_log); kfree(adapter); err_release_regions: @@ -2981,6 +3093,7 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev) iounmap(adapter->regs); if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); + kfree(adapter->mbox_log); kfree(adapter); } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 1ccd282949a5..1bb57d3fbbe8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1448,7 +1448,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev) * the new TX descriptors and return success. */ txq_advance(&txq->q, ndesc); - dev->trans_start = jiffies; + netif_trans_update(dev); ring_tx_db(adapter, &txq->q, ndesc); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 9b40a85cc1e4..438374a05791 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -36,6 +36,7 @@ #ifndef __T4VF_COMMON_H__ #define __T4VF_COMMON_H__ +#include "../cxgb4/t4_hw.h" #include "../cxgb4/t4fw_api.h" #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) @@ -227,6 +228,34 @@ struct adapter_params { u8 nports; /* # of Ethernet "ports" */ }; +/* Firmware Mailbox Command/Reply log. All values are in Host-Endian format. + * The access and execute times are signed in order to accommodate negative + * error returns. + */ +struct mbox_cmd { + u64 cmd[MBOX_LEN / 8]; /* a Firmware Mailbox Command/Reply */ + u64 timestamp; /* OS-dependent timestamp */ + u32 seqno; /* sequence number */ + s16 access; /* time (ms) to access mailbox */ + s16 execute; /* time (ms) to execute */ +}; + +struct mbox_cmd_log { + unsigned int size; /* number of entries in the log */ + unsigned int cursor; /* next position in the log to write */ + u32 seqno; /* next sequence number */ + /* variable length mailbox command log starts here */ +}; + +/* Given a pointer to a Firmware Mailbox Command Log and a log entry index, + * return a pointer to the specified entry. + */ +static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log, + unsigned int entry_idx) +{ + return &((struct mbox_cmd *)&(log)[1])[entry_idx]; +} + #include "adapter.h" #ifndef PCI_VENDOR_ID_CHELSIO diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index fed83d88fc4e..955ff7c61f1b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -76,21 +76,33 @@ static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size, *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data)); } -/* - * Dump contents of mailbox with a leading tag. 
+/** + * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log + * @adapter: the adapter + * @cmd: the Firmware Mailbox Command or Reply + * @size: command length in bytes + * @access: the time (ms) needed to access the Firmware Mailbox + * @execute: the time (ms) the command spent being executed */ -static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data) +static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd, + int size, int access, int execute) { - dev_err(adapter->pdev_dev, - "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag, - (unsigned long long)t4_read_reg64(adapter, mbox_data + 0), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 8), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 16), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 24), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 32), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 40), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 48), - (unsigned long long)t4_read_reg64(adapter, mbox_data + 56)); + struct mbox_cmd_log *log = adapter->mbox_log; + struct mbox_cmd *entry; + int i; + + entry = mbox_cmd_log_entry(log, log->cursor++); + if (log->cursor == log->size) + log->cursor = 0; + + for (i = 0; i < size / 8; i++) + entry->cmd[i] = be64_to_cpu(cmd[i]); + while (i < MBOX_LEN / 8) + entry->cmd[i++] = 0; + entry->timestamp = jiffies; + entry->seqno = log->seqno++; + entry->access = access; + entry->execute = execute; } /** @@ -120,10 +132,13 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, 1, 1, 3, 5, 10, 10, 20, 50, 100 }; + u16 access = 0, execute = 0; u32 v, mbox_data; - int i, ms, delay_idx; + int i, ms, delay_idx, ret; const __be64 *p; u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL; + u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi)); + __be64 cmd_rpl[MBOX_LEN / 8]; /* In T6, mailbox size is changed to 128 bytes to avoid * invalidating the entire prefetch buffer. @@ -148,8 +163,11 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl)); - if (v != MBOX_OWNER_DRV) - return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT; + if (v != MBOX_OWNER_DRV) { + ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT; + t4vf_record_mbox(adapter, cmd, size, access, ret); + return ret; + } /* * Write the command array into the Mailbox Data register array and @@ -164,6 +182,8 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, * Data registers before doing the write to the VF Mailbox Control * register. */ + if (cmd_op != FW_VI_STATS_CMD) + t4vf_record_mbox(adapter, cmd, size, access, 0); for (i = 0, p = cmd; i < size; i += 8) t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); t4_read_reg(adapter, mbox_data); /* flush write */ @@ -209,31 +229,33 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, * We return the (negated) firmware command return * code (this depends on FW_SUCCESS == 0). 
*/ + get_mbox_rpl(adapter, cmd_rpl, size, mbox_data); /* return value in low-order little-endian word */ - v = t4_read_reg(adapter, mbox_data); - if (FW_CMD_RETVAL_G(v)) - dump_mbox(adapter, "FW Error", mbox_data); + v = be64_to_cpu(cmd_rpl[0]); if (rpl) { /* request bit in high-order BE word */ WARN_ON((be32_to_cpu(*(const __be32 *)cmd) & FW_CMD_REQUEST_F) == 0); - get_mbox_rpl(adapter, rpl, size, mbox_data); + memcpy(rpl, cmd_rpl, size); WARN_ON((be32_to_cpu(*(__be32 *)rpl) & FW_CMD_REQUEST_F) != 0); } t4_write_reg(adapter, mbox_ctl, MBOWNER_V(MBOX_OWNER_NONE)); + execute = i + ms; + if (cmd_op != FW_VI_STATS_CMD) + t4vf_record_mbox(adapter, cmd_rpl, size, access, + execute); return -FW_CMD_RETVAL_G(v); } } - /* - * We timed out. Return the error ... - */ - dump_mbox(adapter, "FW Timeout", mbox_data); - return -ETIMEDOUT; + /* We timed out. Return the error ... */ + ret = -ETIMEDOUT; + t4vf_record_mbox(adapter, cmd, size, access, ret); + return ret; } #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index b2182d3ba3cc..f15560a06718 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2740,6 +2740,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->hw_features |= NETIF_F_RXCSUM; netdev->features |= netdev->hw_features; + netdev->vlan_features |= netdev->features; #ifdef CONFIG_RFS_ACCEL netdev->hw_features |= NETIF_F_NTUPLE; diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 48d91941408d..1471e16ba719 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -966,7 +966,7 @@ dm9000_init_dm9000(struct net_device *dev) /* Init Driver variable */ db->tx_pkt_cnt = 0; db->queue_pkt_len = 0; - dev->trans_start = jiffies; + netif_trans_update(dev); } /* Our watchdog timed out. Called by the networking layer */ @@ -985,7 +985,7 @@ static void dm9000_timeout(struct net_device *dev) dm9000_init_dm9000(dev); dm9000_unmask_interrupts(db); /* We can accept TX packets again */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); /* Restore previous register address */ @@ -1432,6 +1432,7 @@ dm9000_probe(struct platform_device *pdev) int reset_gpios; enum of_gpio_flags flags; struct regulator *power; + bool inv_mac_addr = false; power = devm_regulator_get(dev, "vcc"); if (IS_ERR(power)) { @@ -1686,9 +1687,7 @@ dm9000_probe(struct platform_device *pdev) } if (!is_valid_ether_addr(ndev->dev_addr)) { - dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please " - "set using ifconfig\n", ndev->name); - + inv_mac_addr = true; eth_hw_addr_random(ndev); mac_src = "random"; } @@ -1697,11 +1696,15 @@ dm9000_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); ret = register_netdev(ndev); - if (ret == 0) + if (ret == 0) { + if (inv_mac_addr) + dev_warn(db->dev, "%s: Invalid ethernet MAC address. 
Please set using ip\n", + ndev->name); printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n", ndev->name, dm9000_type_to_char(db->type), db->io_addr, db->io_data, ndev->irq, ndev->dev_addr, mac_src); + } return 0; out: diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 3acde3b9b767..cbe84972ff7a 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1336,7 +1336,7 @@ de4x5_open(struct net_device *dev) } lp->interrupt = UNMASK_INTERRUPTS; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ START_DE4X5; @@ -1465,7 +1465,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); if (!lp->tx_enable) /* Cannot send for now */ - return NETDEV_TX_LOCKED; + goto tx_err; /* ** Clean out the TX ring asynchronously to interrupts - sometimes the @@ -1478,7 +1478,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) /* Test if cache is already locked - requeue skb if so */ if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt) - return NETDEV_TX_LOCKED; + goto tx_err; /* Transmit descriptor ring full or stale skb */ if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) { @@ -1519,6 +1519,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) lp->cache.lock = 0; return NETDEV_TX_OK; +tx_err: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; } /* @@ -1932,7 +1935,7 @@ set_multicast_list(struct net_device *dev) lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } } } diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index afd8e78e024e..8ed0fd8b1dda 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -192,9 +192,6 @@ (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \ (pci_dev)->revision)) -/* Sten Check */ -#define DEVICE net_device - /* Structure/enum declaration ------------------------------- */ struct tx_desc { __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ @@ -313,10 +310,10 @@ static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control /* function declaration ------------------------------------- */ -static int dmfe_open(struct DEVICE *); -static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *); -static int dmfe_stop(struct DEVICE *); -static void dmfe_set_filter_mode(struct DEVICE *); +static int dmfe_open(struct net_device *); +static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *); +static int dmfe_stop(struct net_device *); +static void dmfe_set_filter_mode(struct net_device *); static const struct ethtool_ops netdev_ethtool_ops; static u16 read_srom_word(void __iomem *, int); static irqreturn_t dmfe_interrupt(int , void *); @@ -326,8 +323,8 @@ static void poll_dmfe (struct net_device *dev); static void dmfe_descriptor_init(struct net_device *); static void allocate_rx_buffer(struct net_device *); static void update_cr6(u32, void __iomem *); -static void send_filter_frame(struct DEVICE *); -static void dm9132_id_table(struct DEVICE *); +static void send_filter_frame(struct net_device *); +static void dm9132_id_table(struct net_device *); static u16 dmfe_phy_read(void __iomem *, u8, u8, u32); static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32); static void 
dmfe_phy_write_1bit(void __iomem *, u32); @@ -336,12 +333,12 @@ static u8 dmfe_sense_speed(struct dmfe_board_info *); static void dmfe_process_mode(struct dmfe_board_info *); static void dmfe_timer(unsigned long); static inline u32 cal_CRC(unsigned char *, unsigned int, u8); -static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *); -static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *); +static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *); +static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *); static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *); -static void dmfe_dynamic_reset(struct DEVICE *); +static void dmfe_dynamic_reset(struct net_device *); static void dmfe_free_rxbuffer(struct dmfe_board_info *); -static void dmfe_init_dm910x(struct DEVICE *); +static void dmfe_init_dm910x(struct net_device *); static void dmfe_parse_srom(struct dmfe_board_info *); static void dmfe_program_DM9801(struct dmfe_board_info *, int); static void dmfe_program_DM9802(struct dmfe_board_info *); @@ -558,7 +555,7 @@ static void dmfe_remove_one(struct pci_dev *pdev) * The interface is opened whenever "ifconfig" actives it. */ -static int dmfe_open(struct DEVICE *dev) +static int dmfe_open(struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); const int irq = db->pdev->irq; @@ -617,7 +614,7 @@ static int dmfe_open(struct DEVICE *dev) * Enable Tx/Rx machine */ -static void dmfe_init_dm910x(struct DEVICE *dev) +static void dmfe_init_dm910x(struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); void __iomem *ioaddr = db->ioaddr; @@ -684,7 +681,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev) */ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, - struct DEVICE *dev) + struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); void __iomem *ioaddr = db->ioaddr; @@ -728,7 +725,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ db->tx_packet_cnt++; /* Ready to send */ dw32(DCR1, 0x1); /* Issue Tx polling */ - dev->trans_start = jiffies; /* saved time stamp */ + netif_trans_update(dev); /* saved time stamp */ } else { db->tx_queue_cnt++; /* queue TX packet */ dw32(DCR1, 0x1); /* Issue Tx polling */ @@ -754,7 +751,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, * The interface is stopped when it is brought. 
*/ -static int dmfe_stop(struct DEVICE *dev) +static int dmfe_stop(struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); void __iomem *ioaddr = db->ioaddr; @@ -798,7 +795,7 @@ static int dmfe_stop(struct DEVICE *dev) static irqreturn_t dmfe_interrupt(int irq, void *dev_id) { - struct DEVICE *dev = dev_id; + struct net_device *dev = dev_id; struct dmfe_board_info *db = netdev_priv(dev); void __iomem *ioaddr = db->ioaddr; unsigned long flags; @@ -879,7 +876,7 @@ static void poll_dmfe (struct net_device *dev) * Free TX resource after TX complete */ -static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) +static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db) { struct tx_desc *txptr; void __iomem *ioaddr = db->ioaddr; @@ -934,7 +931,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) db->tx_packet_cnt++; /* Ready to send */ db->tx_queue_cnt--; dw32(DCR1, 0x1); /* Issue Tx polling */ - dev->trans_start = jiffies; /* saved time stamp */ + netif_trans_update(dev); /* saved time stamp */ } /* Resource available check */ @@ -961,7 +958,7 @@ static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag) * Receive the come packet and pass to upper layer */ -static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) +static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db) { struct rx_desc *rxptr; struct sk_buff *skb, *newskb; @@ -1052,7 +1049,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) * Set DM910X multicast address */ -static void dmfe_set_filter_mode(struct DEVICE * dev) +static void dmfe_set_filter_mode(struct net_device *dev) { struct dmfe_board_info *db = netdev_priv(dev); unsigned long flags; @@ -1545,7 +1542,7 @@ static void send_filter_frame(struct net_device *dev) update_cr6(db->cr6_data | 0x2000, ioaddr); dw32(DCR1, 0x1); /* Issue Tx polling */ update_cr6(db->cr6_data, ioaddr); - dev->trans_start = jiffies; + netif_trans_update(dev); } else db->tx_queue_cnt++; /* Put in TX queue */ } diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c index 5364563c4378..7bcccf5cac7a 100644 --- a/drivers/net/ethernet/dec/tulip/pnic.c +++ b/drivers/net/ethernet/dec/tulip/pnic.c @@ -44,7 +44,7 @@ void pnic_do_nway(struct net_device *dev) tp->csr6 = new_csr6; /* Restart Tx */ tulip_restart_rxtx(tp); - dev->trans_start = jiffies; + netif_trans_update(dev); } } } @@ -70,7 +70,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5) iowrite32(tp->csr6, ioaddr + CSR6); iowrite32(0x30, ioaddr + CSR12); iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. 
*/ - dev->trans_start = jiffies; + netif_trans_update(dev); } } else if (ioread32(ioaddr + CSR5) & TPLnkPass) { if (tulip_media_cap[dev->if_port] & MediaIsMII) { @@ -147,7 +147,7 @@ void pnic_timer(unsigned long data) tp->csr6 = new_csr6; /* Restart Tx */ tulip_restart_rxtx(tp); - dev->trans_start = jiffies; + netif_trans_update(dev); if (tulip_debug > 1) dev_info(&dev->dev, "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n", diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 94d0eebef129..bbde90bc74fe 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -605,7 +605,7 @@ static void tulip_tx_timeout(struct net_device *dev) out_unlock: spin_unlock_irqrestore (&tp->lock, flags); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue (dev); } diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 447d09272ab7..e750b5ddc0fb 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -636,7 +636,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb, txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ db->tx_packet_cnt++; /* Ready to send */ uw32(DCR1, 0x1); /* Issue Tx polling */ - dev->trans_start = jiffies; /* saved time stamp */ + netif_trans_update(dev); /* saved time stamp */ } /* Tx resource check */ @@ -1431,7 +1431,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt) update_cr6(db->cr6_data | 0x2000, ioaddr); uw32(DCR1, 0x1); /* Issue Tx polling */ update_cr6(db->cr6_data, ioaddr); - dev->trans_start = jiffies; + netif_trans_update(dev); } else netdev_err(dev, "No Tx resource - Send_filter_frame!\n"); } diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 3c0e4d5c5fef..1f62b9423851 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -966,7 +966,7 @@ static void tx_timeout(struct net_device *dev) enable_irq(irq); netif_wake_queue(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ np->stats.tx_errors++; } diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index f92b6d948398..78f144696d6b 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -706,7 +706,7 @@ rio_tx_timeout (struct net_device *dev) dev->name, dr32(TxStatus)); rio_free_tx(dev, 0); dev->if_port = 0; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } static netdev_tx_t diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index a28a2e583f0f..58c6338a839e 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -1011,7 +1011,7 @@ static void tx_timeout(struct net_device *dev) dev->if_port = 0; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { netif_wake_queue(dev); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 536686476369..ed98ef1ecac3 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ 
-4890,11 +4890,13 @@ static int be_resume(struct be_adapter *adapter)
if (status)
return status;
- if (netif_running(netdev)) {
+ rtnl_lock();
+ if (netif_running(netdev))
status = be_open(netdev);
- if (status)
- return status;
- }
+ rtnl_unlock();
+
+ if (status)
+ return status;
netif_device_attach(netdev);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 1f23845a0694..085f9125cf42 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -145,7 +145,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;
/* Check if we got TX */
- if (!priv->tx_packet_sent || tx_ctrl_ct)
+ if (!priv->tx_skb || tx_ctrl_ct)
return;
/* Ack Tx ctrl register */
@@ -160,7 +160,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
}
dev_kfree_skb(priv->tx_skb);
- priv->tx_packet_sent = false;
+ priv->tx_skb = NULL;
if (netif_queue_stopped(ndev))
netif_wake_queue(ndev);
@@ -183,6 +183,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
work_done = nps_enet_rx_handler(ndev);
if (work_done < budget) {
u32 buf_int_enable_value = 0;
+ u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+ u32 tx_ctrl_ct =
+ (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
napi_complete(napi);
@@ -192,6 +195,18 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
buf_int_enable_value);
+
+ /* If a tx interrupt arrives while interrupts are masked, it is
+ * lost, because the tx interrupt is edge-triggered. Specifically,
+ * while executing the code section above, between
+ * nps_enet_tx_handler and the interrupt enable, any tx request
+ * would be stuck until the next rx interrupt. The two code lines
+ * below solve this by re-adding ourselves to the poll list.
+ */
+
+ if (priv->tx_skb && !tx_ctrl_ct)
+ napi_reschedule(napi);
}
return work_done;
@@ -217,7 +232,7 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
- if ((!tx_ctrl_ct && priv->tx_packet_sent) || rx_ctrl_cr)
+ if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr)
if (likely(napi_schedule_prep(&priv->napi))) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
__napi_schedule(&priv->napi);
@@ -387,8 +402,6 @@ static void nps_enet_send_frame(struct net_device *ndev,
/* Write the length of the Frame */
tx_ctrl_value |= length << TX_CTL_NT_SHIFT;
- /* Indicate SW is done */
- priv->tx_packet_sent = true;
tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT;
/* Send Frame */
nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value);
@@ -465,7 +478,7 @@ static s32 nps_enet_open(struct net_device *ndev)
s32 err;
/* Reset private variables */
- priv->tx_packet_sent = false;
+ priv->tx_skb = NULL;
priv->ge_mac_cfg_2_value = 0;
priv->ge_mac_cfg_3_value = 0;
@@ -534,6 +547,11 @@ static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
priv->tx_skb = skb;
+ /* make sure tx_skb is actually written to memory
+ * before the HW is informed and the IRQ is fired.
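+ * (Without the barrier, nps_enet_tx_handler(), running on the
+ * tx-done interrupt, could still observe a stale NULL tx_skb.)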
+ */ + wmb(); + nps_enet_send_frame(ndev, skb); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h index d0cab600bce8..3939ca20cc9f 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.h +++ b/drivers/net/ethernet/ezchip/nps_enet.h @@ -165,14 +165,12 @@ * struct nps_enet_priv - Storage of ENET's private information. * @regs_base: Base address of ENET memory-mapped control registers. * @irq: For RX/TX IRQ number. - * @tx_packet_sent: SW indication if frame is being sent. * @tx_skb: socket buffer of sent frame. * @napi: Structure for NAPI. */ struct nps_enet_priv { void __iomem *regs_base; s32 irq; - bool tx_packet_sent; struct sk_buff *tx_skb; struct napi_struct napi; u32 ge_mac_cfg_2_value; diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 84384e1585a5..e7cf313e359b 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -71,7 +71,6 @@ struct ftgmac100 { struct napi_struct napi; struct mii_bus *mii_bus; - struct phy_device *phydev; int old_speed; }; @@ -807,7 +806,7 @@ err: static void ftgmac100_adjust_link(struct net_device *netdev) { struct ftgmac100 *priv = netdev_priv(netdev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = netdev->phydev; int ier; if (phydev->speed == priv->old_speed) @@ -850,7 +849,6 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv) return PTR_ERR(phydev); } - priv->phydev = phydev; return 0; } @@ -939,27 +937,11 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev, strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); } -static int ftgmac100_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct ftgmac100 *priv = netdev_priv(netdev); - - return phy_ethtool_gset(priv->phydev, cmd); -} - -static int ftgmac100_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct ftgmac100 *priv = netdev_priv(netdev); - - return phy_ethtool_sset(priv->phydev, cmd); -} - static const struct ethtool_ops ftgmac100_ethtool_ops = { - .set_settings = ftgmac100_set_settings, - .get_settings = ftgmac100_get_settings, .get_drvinfo = ftgmac100_get_drvinfo, .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; /****************************************************************************** @@ -1085,7 +1067,7 @@ static int ftgmac100_open(struct net_device *netdev) ftgmac100_init_hw(priv); ftgmac100_start_hw(priv, 10); - phy_start(priv->phydev); + phy_start(netdev->phydev); napi_enable(&priv->napi); netif_start_queue(netdev); @@ -1111,7 +1093,7 @@ static int ftgmac100_stop(struct net_device *netdev) netif_stop_queue(netdev); napi_disable(&priv->napi); - phy_stop(priv->phydev); + phy_stop(netdev->phydev); ftgmac100_stop_hw(priv); free_irq(priv->irq, netdev); @@ -1152,9 +1134,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb, /* optional */ static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { - struct ftgmac100 *priv = netdev_priv(netdev); - - return phy_mii_ioctl(priv->phydev, ifr, cmd); + return phy_mii_ioctl(netdev->phydev, ifr, cmd); } static const struct net_device_ops ftgmac100_netdev_ops = { @@ -1275,7 +1255,7 @@ static int ftgmac100_probe(struct platform_device *pdev) return 0; err_register_netdev: - phy_disconnect(priv->phydev); + phy_disconnect(netdev->phydev); err_mii_probe: 
mdiobus_unregister(priv->mii_bus); err_register_mdiobus: @@ -1301,7 +1281,7 @@ static int __exit ftgmac100_remove(struct platform_device *pdev) unregister_netdev(netdev); - phy_disconnect(priv->phydev); + phy_disconnect(netdev->phydev); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index b1b9ebafb354..c08bd763172a 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -1227,7 +1227,7 @@ static void fealnx_tx_timeout(struct net_device *dev) spin_unlock_irqrestore(&np->lock, flags); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; netif_wake_queue(dev); /* or .._start_.. ?? */ } diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 195122e11f10..f58f9ea51639 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -517,7 +517,6 @@ struct fec_enet_private { /* Phylib and MDIO interface */ struct mii_bus *mii_bus; - struct phy_device *phy_dev; int mii_timeout; uint phy_speed; phy_interface_t phy_interface; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 08243c2ff4b4..ca2cccc594fd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -967,10 +967,10 @@ fec_restart(struct net_device *ndev) rcntl &= ~(1 << 8); /* 1G, 100M or 10M */ - if (fep->phy_dev) { - if (fep->phy_dev->speed == SPEED_1000) + if (ndev->phydev) { + if (ndev->phydev->speed == SPEED_1000) ecntl |= (1 << 5); - else if (fep->phy_dev->speed == SPEED_100) + else if (ndev->phydev->speed == SPEED_100) rcntl &= ~(1 << 9); else rcntl |= (1 << 9); @@ -991,7 +991,7 @@ fec_restart(struct net_device *ndev) */ cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) ? 
BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; - if (fep->phy_dev && fep->phy_dev->speed == SPEED_10) + if (ndev->phydev && ndev->phydev->speed == SPEED_10) cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); @@ -1005,7 +1005,7 @@ fec_restart(struct net_device *ndev) /* enable pause frame*/ if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && - fep->phy_dev && fep->phy_dev->pause)) { + ndev->phydev && ndev->phydev->pause)) { rcntl |= FEC_ENET_FCE; /* set FIFO threshold parameter to reduce overrun */ @@ -1521,9 +1521,15 @@ fec_enet_rx(struct net_device *ndev, int budget) struct fec_enet_private *fep = netdev_priv(ndev); for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { - clear_bit(queue_id, &fep->work_rx); - pkt_received += fec_enet_rx_queue(ndev, + int ret; + + ret = fec_enet_rx_queue(ndev, budget - pkt_received, queue_id); + + if (ret < budget - pkt_received) + clear_bit(queue_id, &fep->work_rx); + + pkt_received += ret; } return pkt_received; } @@ -1679,7 +1685,7 @@ static void fec_get_mac(struct net_device *ndev) static void fec_enet_adjust_link(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phy_dev = fep->phy_dev; + struct phy_device *phy_dev = ndev->phydev; int status_change = 0; /* Prevent a state halted on mii error */ @@ -1879,8 +1885,6 @@ static int fec_enet_mii_probe(struct net_device *ndev) int phy_id; int dev_id = fep->dev_id; - fep->phy_dev = NULL; - if (fep->phy_node) { phy_dev = of_phy_connect(ndev, fep->phy_node, &fec_enet_adjust_link, 0, @@ -1928,7 +1932,6 @@ static int fec_enet_mii_probe(struct net_device *ndev) phy_dev->advertising = phy_dev->supported; - fep->phy_dev = phy_dev; fep->link = 0; fep->full_duplex = 0; @@ -2058,30 +2061,6 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep) } } -static int fec_enet_get_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phydev = fep->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_gset(phydev, cmd); -} - -static int fec_enet_set_settings(struct net_device *ndev, - struct ethtool_cmd *cmd) -{ - struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phydev = fep->phy_dev; - - if (!phydev) - return -ENODEV; - - return phy_ethtool_sset(phydev, cmd); -} - static void fec_enet_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info) { @@ -2214,7 +2193,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, { struct fec_enet_private *fep = netdev_priv(ndev); - if (!fep->phy_dev) + if (!ndev->phydev) return -ENODEV; if (pause->tx_pause != pause->rx_pause) { @@ -2230,17 +2209,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, fep->pause_flag |= pause->autoneg ? 
FEC_PAUSE_FLAG_AUTONEG : 0; if (pause->rx_pause || pause->autoneg) { - fep->phy_dev->supported |= ADVERTISED_Pause; - fep->phy_dev->advertising |= ADVERTISED_Pause; + ndev->phydev->supported |= ADVERTISED_Pause; + ndev->phydev->advertising |= ADVERTISED_Pause; } else { - fep->phy_dev->supported &= ~ADVERTISED_Pause; - fep->phy_dev->advertising &= ~ADVERTISED_Pause; + ndev->phydev->supported &= ~ADVERTISED_Pause; + ndev->phydev->advertising &= ~ADVERTISED_Pause; } if (pause->autoneg) { if (netif_running(ndev)) fec_stop(ndev); - phy_start_aneg(fep->phy_dev); + phy_start_aneg(ndev->phydev); } if (netif_running(ndev)) { napi_disable(&fep->napi); @@ -2356,8 +2335,7 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset) static int fec_enet_nway_reset(struct net_device *dev) { - struct fec_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev = fep->phy_dev; + struct phy_device *phydev = dev->phydev; if (!phydev) return -ENODEV; @@ -2562,8 +2540,6 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) } static const struct ethtool_ops fec_enet_ethtool_ops = { - .get_settings = fec_enet_get_settings, - .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, .get_regs_len = fec_enet_get_regs_len, .get_regs = fec_enet_get_regs, @@ -2583,12 +2559,14 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .set_tunable = fec_enet_set_tunable, .get_wol = fec_enet_get_wol, .set_wol = fec_enet_set_wol, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) { struct fec_enet_private *fep = netdev_priv(ndev); - struct phy_device *phydev = fep->phy_dev; + struct phy_device *phydev = ndev->phydev; if (!netif_running(ndev)) return -EINVAL; @@ -2843,7 +2821,7 @@ fec_enet_open(struct net_device *ndev) goto err_enet_mii_probe; napi_enable(&fep->napi); - phy_start(fep->phy_dev); + phy_start(ndev->phydev); netif_tx_start_all_queues(ndev); device_set_wakeup_enable(&ndev->dev, fep->wol_flag & @@ -2867,7 +2845,7 @@ fec_enet_close(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - phy_stop(fep->phy_dev); + phy_stop(ndev->phydev); if (netif_device_present(ndev)) { napi_disable(&fep->napi); @@ -2875,8 +2853,7 @@ fec_enet_close(struct net_device *ndev) fec_stop(ndev); } - phy_disconnect(fep->phy_dev); - fep->phy_dev = NULL; + phy_disconnect(ndev->phydev); fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); @@ -3504,7 +3481,7 @@ static int __maybe_unused fec_suspend(struct device *dev) if (netif_running(ndev)) { if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; - phy_stop(fep->phy_dev); + phy_stop(ndev->phydev); napi_disable(&fep->napi); netif_tx_lock_bh(ndev); netif_device_detach(ndev); @@ -3564,7 +3541,7 @@ static int __maybe_unused fec_resume(struct device *dev) netif_device_attach(ndev); netif_tx_unlock_bh(ndev); napi_enable(&fep->napi); - phy_start(fep->phy_dev); + phy_start(ndev->phydev); } rtnl_unlock(); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 25553ee857b4..446ae9d60c71 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -66,7 +66,6 @@ struct mpc52xx_fec_priv { /* MDIO link details */ unsigned int mdio_speed; struct device_node *phy_node; - struct phy_device *phydev; enum phy_state link; int 
seven_wire_mode; }; @@ -165,7 +164,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task static void mpc52xx_fec_adjust_link(struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; int new_state = 0; if (phydev->link != PHY_DOWN) { @@ -215,16 +214,17 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev) static int mpc52xx_fec_open(struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct phy_device *phydev = NULL; int err = -EBUSY; if (priv->phy_node) { - priv->phydev = of_phy_connect(priv->ndev, priv->phy_node, - mpc52xx_fec_adjust_link, 0, 0); - if (!priv->phydev) { + phydev = of_phy_connect(priv->ndev, priv->phy_node, + mpc52xx_fec_adjust_link, 0, 0); + if (!phydev) { dev_err(&dev->dev, "of_phy_connect failed\n"); return -ENODEV; } - phy_start(priv->phydev); + phy_start(phydev); } if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED, @@ -268,10 +268,9 @@ static int mpc52xx_fec_open(struct net_device *dev) free_ctrl_irq: free_irq(dev->irq, dev); free_phy: - if (priv->phydev) { - phy_stop(priv->phydev); - phy_disconnect(priv->phydev); - priv->phydev = NULL; + if (phydev) { + phy_stop(phydev); + phy_disconnect(phydev); } return err; @@ -280,6 +279,7 @@ static int mpc52xx_fec_open(struct net_device *dev) static int mpc52xx_fec_close(struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; netif_stop_queue(dev); @@ -291,11 +291,10 @@ static int mpc52xx_fec_close(struct net_device *dev) free_irq(priv->r_irq, dev); free_irq(priv->t_irq, dev); - if (priv->phydev) { + if (phydev) { /* power down phy */ - phy_stop(priv->phydev); - phy_disconnect(priv->phydev); - priv->phydev = NULL; + phy_stop(phydev); + phy_disconnect(phydev); } return 0; @@ -763,26 +762,6 @@ static void mpc52xx_fec_reset(struct net_device *dev) /* ethtool interface */ -static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct mpc52xx_fec_priv *priv = netdev_priv(dev); - - if (!priv->phydev) - return -ENODEV; - - return phy_ethtool_gset(priv->phydev, cmd); -} - -static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct mpc52xx_fec_priv *priv = netdev_priv(dev); - - if (!priv->phydev) - return -ENODEV; - - return phy_ethtool_sset(priv->phydev, cmd); -} - static u32 mpc52xx_fec_get_msglevel(struct net_device *dev) { struct mpc52xx_fec_priv *priv = netdev_priv(dev); @@ -796,23 +775,23 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level) } static const struct ethtool_ops mpc52xx_fec_ethtool_ops = { - .get_settings = mpc52xx_fec_get_settings, - .set_settings = mpc52xx_fec_set_settings, .get_link = ethtool_op_get_link, .get_msglevel = mpc52xx_fec_get_msglevel, .set_msglevel = mpc52xx_fec_set_msglevel, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct mpc52xx_fec_priv *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; - if (!priv->phydev) + if (!phydev) return -ENOTSUPP; - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(phydev, rq, cmd); } static const struct net_device_ops mpc52xx_fec_netdev_ops = { diff --git a/drivers/net/ethernet/freescale/fman/fman.c 
b/drivers/net/ethernet/freescale/fman/fman.c index ea83712a6d62..bcb9dccada4d 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -2772,7 +2772,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev) /* Get the FM address */ res = platform_get_resource(of_dev, IORESOURCE_MEM, 0); if (!res) { - dev_err(&of_dev->dev, "%s: Can't get FMan memory resouce\n", + dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n", __func__); goto fman_node_put; } diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 48a9c176e0d1..61fd486c50bb 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -652,13 +652,13 @@ static void fs_timeout(struct net_device *dev) spin_lock_irqsave(&fep->lock, flags); if (dev->flags & IFF_UP) { - phy_stop(fep->phydev); + phy_stop(dev->phydev); (*fep->ops->stop)(dev); (*fep->ops->restart)(dev); - phy_start(fep->phydev); + phy_start(dev->phydev); } - phy_start(fep->phydev); + phy_start(dev->phydev); wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); spin_unlock_irqrestore(&fep->lock, flags); @@ -672,7 +672,7 @@ static void fs_timeout(struct net_device *dev) static void generic_adjust_link(struct net_device *dev) { struct fs_enet_private *fep = netdev_priv(dev); - struct phy_device *phydev = fep->phydev; + struct phy_device *phydev = dev->phydev; int new_state = 0; if (phydev->link) { @@ -741,8 +741,6 @@ static int fs_init_phy(struct net_device *dev) return -ENODEV; } - fep->phydev = phydev; - return 0; } @@ -776,7 +774,7 @@ static int fs_enet_open(struct net_device *dev) napi_disable(&fep->napi_tx); return err; } - phy_start(fep->phydev); + phy_start(dev->phydev); netif_start_queue(dev); @@ -792,7 +790,7 @@ static int fs_enet_close(struct net_device *dev) netif_carrier_off(dev); napi_disable(&fep->napi); napi_disable(&fep->napi_tx); - phy_stop(fep->phydev); + phy_stop(dev->phydev); spin_lock_irqsave(&fep->lock, flags); spin_lock(&fep->tx_lock); @@ -801,8 +799,7 @@ static int fs_enet_close(struct net_device *dev) spin_unlock_irqrestore(&fep->lock, flags); /* release any irqs */ - phy_disconnect(fep->phydev); - fep->phydev = NULL; + phy_disconnect(dev->phydev); free_irq(fep->interrupt, dev); return 0; @@ -847,26 +844,6 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs, regs->version = 0; } -static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - if (!fep->phydev) - return -ENODEV; - - return phy_ethtool_gset(fep->phydev, cmd); -} - -static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct fs_enet_private *fep = netdev_priv(dev); - - if (!fep->phydev) - return -ENODEV; - - return phy_ethtool_sset(fep->phydev, cmd); -} - static int fs_nway_reset(struct net_device *dev) { return 0; @@ -887,24 +864,22 @@ static void fs_set_msglevel(struct net_device *dev, u32 value) static const struct ethtool_ops fs_ethtool_ops = { .get_drvinfo = fs_get_drvinfo, .get_regs_len = fs_get_regs_len, - .get_settings = fs_get_settings, - .set_settings = fs_set_settings, .nway_reset = fs_nway_reset, .get_link = ethtool_op_get_link, .get_msglevel = fs_get_msglevel, .set_msglevel = fs_set_msglevel, .get_regs = fs_get_regs, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = 
phy_ethtool_set_link_ksettings, }; static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct fs_enet_private *fep = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - return phy_mii_ioctl(fep->phydev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } extern int fs_mii_connect(struct net_device *dev); diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h index f184d8f952e2..e29f54a35210 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h @@ -149,7 +149,6 @@ struct fs_enet_private { unsigned int last_mii_status; int interrupt; - struct phy_device *phydev; int oldduplex, oldspeed, oldlink; /* current settings */ /* event masks */ diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 1ba359f17ec6..d71761a34022 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -370,7 +370,7 @@ static void restart(struct net_device *dev) /* adjust to speed (for RMII mode) */ if (fpi->use_rmii) { - if (fep->phydev->speed == 100) + if (dev->phydev->speed == 100) C8(fcccp, fcc_gfemr, 0x20); else S8(fcccp, fcc_gfemr, 0x20); @@ -396,7 +396,7 @@ static void restart(struct net_device *dev) S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); /* adjust to duplex mode */ - if (fep->phydev->duplex) + if (dev->phydev->duplex) S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); else C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c index bade2f8f9b5c..35a318ed3a62 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c @@ -254,7 +254,7 @@ static void restart(struct net_device *dev) int r; u32 addrhi, addrlo; - struct mii_bus *mii = fep->phydev->mdio.bus; + struct mii_bus *mii = dev->phydev->mdio.bus; struct fec_info* fec_inf = mii->priv; r = whack_reset(fep->fec.fecp); @@ -333,7 +333,7 @@ static void restart(struct net_device *dev) /* * adjust to duplex mode */ - if (fep->phydev->duplex) { + if (dev->phydev->duplex) { FC(fecp, r_cntrl, FEC_RCNTRL_DRT); FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ } else { @@ -363,7 +363,7 @@ static void stop(struct net_device *dev) const struct fs_platform_info *fpi = fep->fpi; struct fec __iomem *fecp = fep->fec.fecp; - struct fec_info *feci = fep->phydev->mdio.bus->priv; + struct fec_info *feci = dev->phydev->mdio.bus->priv; int i; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c index 7a184e8816a4..e8b9c33d35b4 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c @@ -352,7 +352,7 @@ static void restart(struct net_device *dev) W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); /* Set full duplex mode if needed */ - if (fep->phydev->duplex) + if (dev->phydev->duplex) S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); /* Restore multicast and promiscuous settings */ diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index d2f917af539f..7615e0668acb 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -999,7 +999,7 @@ static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) static int 
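[The fs_enet and gianfar hunks rely on the same phylib property: of_phy_connect() stores the attached PHY in ndev->phydev and phy_disconnect() clears it again, so the open/close paths can use that field directly instead of mirroring it in the private structure. A compact sketch of the pairing; example_adjust_link(), example_open(), example_stop() and the MII interface mode are placeholders, not taken from the patch:

	#include <linux/of_mdio.h>
	#include <linux/phy.h>

	static void example_adjust_link(struct net_device *ndev) { }

	static int example_open(struct net_device *ndev, struct device_node *phy_np)
	{
		struct phy_device *phydev;

		phydev = of_phy_connect(ndev, phy_np, example_adjust_link, 0,
					PHY_INTERFACE_MODE_MII);
		if (!phydev)
			return -ENODEV;

		phy_start(phydev);		/* ndev->phydev now points here too */
		return 0;
	}

	static void example_stop(struct net_device *ndev)
	{
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);	/* also clears ndev->phydev */
	}
]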
gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct gfar_private *priv = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; if (!netif_running(dev)) return -EINVAL; @@ -1009,10 +1009,10 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (cmd == SIOCGHWTSTAMP) return gfar_hwtstamp_get(dev, rq); - if (!priv->phydev) + if (!phydev) return -ENODEV; - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(phydev, rq, cmd); } static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, @@ -1635,7 +1635,7 @@ static int gfar_suspend(struct device *dev) gfar_start_wol_filer(priv); } else { - phy_stop(priv->phydev); + phy_stop(ndev->phydev); } return 0; @@ -1664,7 +1664,7 @@ static int gfar_resume(struct device *dev) gfar_filer_restore_table(priv); } else { - phy_start(priv->phydev); + phy_start(ndev->phydev); } gfar_start(priv); @@ -1698,8 +1698,8 @@ static int gfar_restore(struct device *dev) priv->oldspeed = 0; priv->oldduplex = -1; - if (priv->phydev) - phy_start(priv->phydev); + if (ndev->phydev) + phy_start(ndev->phydev); netif_device_attach(ndev); enable_napi(priv); @@ -1778,6 +1778,7 @@ static int init_phy(struct net_device *dev) priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? GFAR_SUPPORTED_GBIT : 0; phy_interface_t interface; + struct phy_device *phydev; priv->oldlink = 0; priv->oldspeed = 0; @@ -1785,9 +1786,9 @@ static int init_phy(struct net_device *dev) interface = gfar_get_interface(dev); - priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, - interface); - if (!priv->phydev) { + phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0, + interface); + if (!phydev) { dev_err(&dev->dev, "could not attach to PHY\n"); return -ENODEV; } @@ -1796,11 +1797,11 @@ static int init_phy(struct net_device *dev) gfar_configure_serdes(dev); /* Remove any features not supported by the controller */ - priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support); - priv->phydev->advertising = priv->phydev->supported; + phydev->supported &= (GFAR_SUPPORTED | gigabit_support); + phydev->advertising = phydev->supported; /* Add support for flow control, but don't advertise it by default */ - priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); return 0; } @@ -1944,7 +1945,7 @@ void stop_gfar(struct net_device *dev) /* disable ints and gracefully shut down Rx/Tx DMA */ gfar_halt(priv); - phy_stop(priv->phydev); + phy_stop(dev->phydev); free_skb_resources(priv); } @@ -2076,7 +2077,7 @@ void gfar_start(struct gfar_private *priv) gfar_ints_enable(priv); - priv->ndev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(priv->ndev); /* prevent tx timeout */ } static void free_grp_irqs(struct gfar_priv_grp *grp) @@ -2204,7 +2205,7 @@ int startup_gfar(struct net_device *ndev) priv->oldspeed = 0; priv->oldduplex = -1; - phy_start(priv->phydev); + phy_start(ndev->phydev); enable_napi(priv); @@ -2572,8 +2573,7 @@ static int gfar_close(struct net_device *dev) stop_gfar(dev); /* Disconnect from the PHY */ - phy_disconnect(priv->phydev); - priv->phydev = NULL; + phy_disconnect(dev->phydev); gfar_free_irq(priv); @@ -3379,7 +3379,7 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id) static void adjust_link(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; if (unlikely(phydev->link != priv->oldlink || 
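[gfar_start() above shows the other recurring substitution in this series: open-coded dev->trans_start = jiffies assignments become netif_trans_update(dev), which refreshes the transmit-queue watchdog timestamp so the stack does not declare a spurious TX timeout after the driver restarts its queues. A sketch of a single-queue timeout handler using the helper; the restart step stands in for whatever the hardware needs:

	#include <linux/netdevice.h>

	static void example_tx_timeout(struct net_device *dev)
	{
		dev->stats.tx_errors++;
		/* device-specific TX engine restart would go here */
		netif_trans_update(dev);	/* prevent an immediate re-timeout */
		netif_wake_queue(dev);
	}
]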
(phydev->link && (phydev->duplex != priv->oldduplex || @@ -3620,7 +3620,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id) static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) { - struct phy_device *phydev = priv->phydev; + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; u32 val = 0; if (!phydev->duplex) @@ -3660,7 +3661,8 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) static noinline void gfar_update_link_state(struct gfar_private *priv) { struct gfar __iomem *regs = priv->gfargrp[0].regs; - struct phy_device *phydev = priv->phydev; + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; struct gfar_priv_rx_q *rx_queue = NULL; int i; diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index cb77667971a7..373fd094f2f3 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -1153,7 +1153,6 @@ struct gfar_private { phy_interface_t interface; struct device_node *phy_node; struct device_node *tbi_node; - struct phy_device *phydev; struct mii_bus *mii_bus; int oldspeed; int oldduplex; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 4b0ee855edd7..56588f2e1d91 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -184,40 +184,6 @@ static void gfar_gdrvinfo(struct net_device *dev, strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info)); } - -static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct gfar_private *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; - - if (NULL == phydev) - return -ENODEV; - - return phy_ethtool_sset(phydev, cmd); -} - - -/* Return the current settings in the ethtool_cmd structure */ -static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct gfar_private *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; - struct gfar_priv_rx_q *rx_queue = NULL; - struct gfar_priv_tx_q *tx_queue = NULL; - - if (NULL == phydev) - return -ENODEV; - tx_queue = priv->tx_queue[0]; - rx_queue = priv->rx_queue[0]; - - /* etsec-1.7 and older versions have only one txic - * and rxic regs although they support multiple queues */ - cmd->maxtxpkt = get_icft_value(tx_queue->txic); - cmd->maxrxpkt = get_icft_value(rx_queue->rxic); - - return phy_ethtool_gset(phydev, cmd); -} - /* Return the length of the register structure */ static int gfar_reglen(struct net_device *dev) { @@ -242,10 +208,12 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs) { + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; unsigned int count; /* The timer is different, depending on the interface speed */ - switch (priv->phydev->speed) { + switch (phydev->speed) { case SPEED_1000: count = GFAR_GBIT_TIME; break; @@ -267,10 +235,12 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks) { + struct net_device *ndev = priv->ndev; + struct phy_device *phydev = ndev->phydev; unsigned int count; /* The timer is different, depending on the interface speed */ - switch (priv->phydev->speed) { + switch (phydev->speed) { case SPEED_1000: count = GFAR_GBIT_TIME; break; @@ -304,7 +274,7 @@ 
static int gfar_gcoalesce(struct net_device *dev, if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) return -EOPNOTSUPP; - if (NULL == priv->phydev) + if (!dev->phydev) return -ENODEV; rx_queue = priv->rx_queue[0]; @@ -365,7 +335,7 @@ static int gfar_scoalesce(struct net_device *dev, if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) return -EOPNOTSUPP; - if (NULL == priv->phydev) + if (!dev->phydev) return -ENODEV; /* Check the bounds of the values */ @@ -529,7 +499,7 @@ static int gfar_spauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { struct gfar_private *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 oldadv, newadv; @@ -1565,8 +1535,6 @@ static int gfar_get_ts_info(struct net_device *dev, } const struct ethtool_ops gfar_ethtool_ops = { - .get_settings = gfar_gsettings, - .set_settings = gfar_ssettings, .get_drvinfo = gfar_gdrvinfo, .get_regs_len = gfar_reglen, .get_regs = gfar_get_regs, @@ -1589,4 +1557,6 @@ const struct ethtool_ops gfar_ethtool_ops = { .set_rxnfc = gfar_set_nfc, .get_rxnfc = gfar_get_nfc, .get_ts_info = gfar_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, }; diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c index 89714f5e0dfc..812a968a78e9 100644 --- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c +++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c @@ -105,23 +105,20 @@ static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = { #define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings) static int -uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct phy_device *phydev = ugeth->phydev; - struct ucc_geth_info *ug_info = ugeth->ug_info; if (!phydev) return -ENODEV; - ecmd->maxtxpkt = 1; - ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0]; - - return phy_ethtool_gset(phydev, ecmd); + return phy_ethtool_ksettings_get(phydev, cmd); } static int -uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +uec_set_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) { struct ucc_geth_private *ugeth = netdev_priv(netdev); struct phy_device *phydev = ugeth->phydev; @@ -129,7 +126,7 @@ uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) if (!phydev) return -ENODEV; - return phy_ethtool_sset(phydev, ecmd); + return phy_ethtool_ksettings_set(phydev, cmd); } static void @@ -392,8 +389,6 @@ static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) #endif /* CONFIG_PM */ static const struct ethtool_ops uec_ethtool_ops = { - .get_settings = uec_get_settings, - .set_settings = uec_set_settings, .get_drvinfo = uec_get_drvinfo, .get_regs_len = uec_get_regs_len, .get_regs = uec_get_regs, @@ -411,6 +406,8 @@ static const struct ethtool_ops uec_ethtool_ops = { .get_wol = uec_get_wol, .set_wol = uec_set_wol, .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = uec_get_ksettings, + .set_link_ksettings = uec_set_ksettings, }; void uec_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c index 678f5018d0be..399cfd217288 100644 --- 
a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c +++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c @@ -746,7 +746,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id) lp->sent = lp->tx_queue ; lp->tx_queue = 0; lp->tx_queue_len = 0; - dev->trans_start = jiffies; + netif_trans_update(dev); } else { lp->tx_started = 0; } diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c index e51892d518ff..b9f2ea59308a 100644 --- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c +++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c @@ -636,7 +636,7 @@ static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev) pos = dma_ring_incr(pos, TX_DESC_NUM); writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR); - dev->trans_start = jiffies; + netif_trans_update(dev); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; netdev_sent_queue(dev, skb->len); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index a1cb461ac45f..7a757e88c89a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -29,25 +29,6 @@ static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle) return vf_cb->mac_cb; } -/** - * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id - * @port_id: enet port id - *: debug port 0-1, service port 2 -7 (dsaf mode only 2) - * return: dsaf port id - *: service ports 0 - 5, debug port 6-7 - **/ -static int hns_ae_map_eport_to_dport(u32 port_id) -{ - int port_index; - - if (port_id < DSAF_DEBUG_NW_NUM) - port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF; - else - port_index = port_id - DSAF_DEBUG_NW_NUM; - - return port_index; -} - static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev) { return container_of(dev, struct dsaf_device, ae_dev); @@ -56,50 +37,35 @@ static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev) static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle) { int ppe_index; - int ppe_common_index; struct ppe_common_cb *ppe_comm; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); - if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) { - ppe_index = vf_cb->port_index; - ppe_common_index = 0; - } else { - ppe_index = 0; - ppe_common_index = - vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1; - } - ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index]; + ppe_comm = vf_cb->dsaf_dev->ppe_common[0]; + ppe_index = vf_cb->port_index; + return &ppe_comm->ppe_cb[ppe_index]; } static int hns_ae_get_q_num_per_vf( struct dsaf_device *dsaf_dev, int port) { - int common_idx = hns_dsaf_get_comm_idx_by_port(port); - - return dsaf_dev->rcb_common[common_idx]->max_q_per_vf; + return dsaf_dev->rcb_common[0]->max_q_per_vf; } static int hns_ae_get_vf_num_per_port( struct dsaf_device *dsaf_dev, int port) { - int common_idx = hns_dsaf_get_comm_idx_by_port(port); - - return dsaf_dev->rcb_common[common_idx]->max_vfn; + return dsaf_dev->rcb_common[0]->max_vfn; } static struct ring_pair_cb *hns_ae_get_base_ring_pair( struct dsaf_device *dsaf_dev, int port) { - int common_idx = hns_dsaf_get_comm_idx_by_port(port); - struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx]; + struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[0]; int q_num = rcb_comm->max_q_per_vf; int vf_num = rcb_comm->max_vfn; - if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX) - return &rcb_comm->ring_pair_cb[port * q_num * vf_num]; - else - return 
&rcb_comm->ring_pair_cb[0]; + return &rcb_comm->ring_pair_cb[port * q_num * vf_num]; } static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q) @@ -110,7 +76,6 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q) struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, u32 port_id) { - int port_idx; int vfnum_per_port; int qnum_per_vf; int i; @@ -120,11 +85,10 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, struct hnae_vf_cb *vf_cb; dsaf_dev = hns_ae_get_dsaf_dev(dev); - port_idx = hns_ae_map_eport_to_dport(port_id); - ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx); - vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx); - qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx); + ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_id); + vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id); + qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id); vf_cb = kzalloc(sizeof(*vf_cb) + qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL); @@ -163,14 +127,14 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, } vf_cb->dsaf_dev = dsaf_dev; - vf_cb->port_index = port_idx; - vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx]; + vf_cb->port_index = port_id; + vf_cb->mac_cb = dsaf_dev->mac_cb[port_id]; ae_handle->phy_if = vf_cb->mac_cb->phy_if; ae_handle->phy_node = vf_cb->mac_cb->phy_node; ae_handle->if_support = vf_cb->mac_cb->if_support; ae_handle->port_type = vf_cb->mac_cb->mac_type; - ae_handle->dport_id = port_idx; + ae_handle->dport_id = port_id; return ae_handle; vf_id_err: @@ -320,11 +284,8 @@ static void hns_ae_reset(struct hnae_handle *handle) struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) { - u8 ppe_common_index = - vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1; - hns_mac_reset(vf_cb->mac_cb); - hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index); + hns_ppe_reset_common(vf_cb->dsaf_dev, 0); } } @@ -399,11 +360,16 @@ static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, static void hns_ae_get_pauseparam(struct hnae_handle *handle, u32 *auto_neg, u32 *rx_en, u32 *tx_en) { - assert(handle); + struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); + struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; + + hns_mac_get_autoneg(mac_cb, auto_neg); - hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg); + hns_mac_get_pauseparam(mac_cb, rx_en, tx_en); - hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en); + /* Service port's pause feature is provided by DSAF, not mac */ + if (handle->port_type == HNAE_PORT_SERVICE) + hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en); } static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable) @@ -436,12 +402,21 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle, u32 autoneg, u32 rx_en, u32 tx_en) { struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle); + struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; int ret; ret = hns_mac_set_autoneg(mac_cb, autoneg); if (ret) return ret; + /* Service port's pause feature is provided by DSAF, not mac */ + if (handle->port_type == HNAE_PORT_SERVICE) { + ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev, + mac_cb->mac_id, rx_en); + if (ret) + return ret; + rx_en = 0; + } return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en); } @@ -689,7 +664,7 @@ void hns_ae_update_led_status(struct hnae_handle *handle) assert(handle); mac_cb = hns_get_mac_cb(handle); - if (!mac_cb->cpld_vaddr) + if (!mac_cb->cpld_ctrl) return; 
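[The hns_ae_adapt changes above all fall out of one simplification: each acceleration engine now fronts a single DSAF instance, so rcb_common[0] and ppe_common[0] are the only common blocks and the enet port id can serve as the dsaf port id directly, removing hns_ae_map_eport_to_dport() and the per-port common-index lookups. Finding a port's base ring pair then reduces to stride indexing, sketched here with the names from the hunks:

	static struct ring_pair_cb *example_base_ring_pair(struct dsaf_device *d,
							   int port)
	{
		struct rcb_common_cb *rcb_comm = d->rcb_common[0];
		int stride = rcb_comm->max_q_per_vf * rcb_comm->max_vfn;

		/* each port owns "stride" consecutive ring pairs */
		return &rcb_comm->ring_pair_cb[port * stride];
	}
]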
hns_set_led_opt(mac_cb); } @@ -709,7 +684,6 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle, void hns_ae_get_regs(struct hnae_handle *handle, void *data) { u32 *p = data; - u32 rcb_com_idx; int i; struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle); @@ -717,8 +691,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data) hns_ppe_get_regs(ppe_cb, p); p += hns_ppe_get_regs_count(); - rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index); - hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p); + hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p); p += hns_rcb_get_common_regs_count(); for (i = 0; i < handle->q_num; i++) { diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index a38084a22bf2..611581fccf2a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -7,18 +7,19 @@ * (at your option) any later version. */ -#include <linux/module.h> -#include <linux/kernel.h> #include <linux/init.h> -#include <linux/netdevice.h> -#include <linux/phy_fixed.h> #include <linux/interrupt.h> -#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/phy_fixed.h> +#include <linux/platform_device.h> -#include "hns_dsaf_misc.h" #include "hns_dsaf_main.h" +#include "hns_dsaf_misc.h" #include "hns_dsaf_rcb.h" #define MAC_EN_FLAG_V 0xada0328 @@ -81,17 +82,6 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb) } } -int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) -{ - if (!mac_cb->cpld_vaddr) - return -ENODEV; - - *sfp_prsnt = !dsaf_read_b((u8 *)mac_cb->cpld_vaddr - + MAC_SFP_PORT_OFFSET); - - return 0; -} - void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status) { struct mac_driver *mac_ctrl_drv; @@ -168,10 +158,9 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num) { u8 tmp_port; - u32 comm_idx; if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) { - if (mac_cb->mac_id != DSAF_MAX_PORT_NUM_PER_CHIP) { + if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) { dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n", mac_cb->dsaf_dev->ae_dev.name, @@ -179,7 +168,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, return -EINVAL; } } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) { - if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) { + if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) { dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d!\n", mac_cb->dsaf_dev->ae_dev.name, @@ -192,9 +181,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, return -EINVAL; } - comm_idx = hns_dsaf_get_comm_idx_by_port(mac_cb->mac_id); - - if (vmid >= mac_cb->dsaf_dev->rcb_common[comm_idx]->max_vfn) { + if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) { dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n", mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid); return -EINVAL; @@ -234,7 +221,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, } /** - *hns_mac_get_inner_port_num - change vf mac address + *hns_mac_change_vf_addr - change vf mac address *@mac_cb: mac device *@vmid: vmid *@addr:mac address @@ -249,7 +236,7 @@ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, struct mac_entry_idx 
*old_entry; old_entry = &mac_cb->addr_entry_idx[vmid]; - if (dsaf_dev) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr)); mac_entry.in_vlan_id = old_entry->vlan_id; mac_entry.in_port_num = mac_cb->mac_id; @@ -289,7 +276,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb, struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev; struct dsaf_drv_mac_single_dest_entry mac_entry; - if (dsaf_dev && addr) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev) && addr) { memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr)); mac_entry.in_vlan_id = 0;/*vlan_id;*/ mac_entry.in_port_num = mac_cb->mac_id; @@ -380,7 +367,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb, if (mac_cb->mac_type == HNAE_PORT_DEBUG) return 0; - if (dsaf_dev) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr)); mac_entry.in_vlan_id = vlan_id; mac_entry.in_port_num = mac_cb->mac_id; @@ -418,7 +405,7 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) uc_mac_entry = &mac_cb->addr_entry_idx[vmid]; - if (dsaf_dev) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr)); mac_entry.in_vlan_id = uc_mac_entry->vlan_id; mac_entry.in_port_num = mac_cb->mac_id; @@ -439,9 +426,8 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable) void hns_mac_reset(struct hns_mac_cb *mac_cb) { - struct mac_driver *drv; - - drv = hns_mac_get_drv(mac_cb); + struct mac_driver *drv = hns_mac_get_drv(mac_cb); + bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver); drv->mac_init(drv); @@ -456,7 +442,7 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb) if (drv->mac_pausefrm_cfg) { if (mac_cb->mac_type == HNAE_PORT_DEBUG) - drv->mac_pausefrm_cfg(drv, 0, 0); + drv->mac_pausefrm_cfg(drv, !is_ver1, !is_ver1); else /* mac rx must disable, dsaf pfc close instead of it*/ drv->mac_pausefrm_cfg(drv, 0, 1); } @@ -561,14 +547,6 @@ void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en) *rx_en = 0; *tx_en = 0; } - - /* Due to the chip defect, the service mac's rx pause CAN'T be enabled. - * We set the rx pause frm always be true (1), because DSAF deals with - * the rx pause frm instead of service mac. After all, we still support - * rx pause frm. 
- */ - if (mac_cb->mac_type == HNAE_PORT_SERVICE) - *rx_en = 1; } /** @@ -602,20 +580,13 @@ int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable) int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en) { struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb); + bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver); - if (mac_cb->mac_type == HNAE_PORT_SERVICE) { - if (!rx_en) { - dev_err(mac_cb->dev, "disable rx_pause is not allowed!"); + if (mac_cb->mac_type == HNAE_PORT_DEBUG) { + if (is_ver1 && (tx_en || rx_en)) { + dev_err(mac_cb->dev, "macv1 cann't enable tx/rx_pause!"); return -EINVAL; } - } else if (mac_cb->mac_type == HNAE_PORT_DEBUG) { - if (tx_en || rx_en) { - dev_err(mac_cb->dev, "enable tx_pause or enable rx_pause are not allowed!"); - return -EINVAL; - } - } else { - dev_err(mac_cb->dev, "Unsupport this operation!"); - return -EINVAL; } if (mac_ctrl_drv->mac_pausefrm_cfg) @@ -667,14 +638,18 @@ free_mac_drv: } /** - *mac_free_dev - get mac information from device node + *hns_mac_get_info - get mac information from device node *@mac_cb: mac device *@np:device node - *@mac_mode_idx:mac mode index + * return: 0 --success, negative --fail */ -static void hns_mac_get_info(struct hns_mac_cb *mac_cb, - struct device_node *np, u32 mac_mode_idx) +static int hns_mac_get_info(struct hns_mac_cb *mac_cb) { + struct device_node *np = mac_cb->dev->of_node; + struct regmap *syscon; + struct of_phandle_args cpld_args; + u32 ret; + mac_cb->link = false; mac_cb->half_duplex = false; mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if]; @@ -690,12 +665,73 @@ static void hns_mac_get_info(struct hns_mac_cb *mac_cb, mac_cb->max_frm = MAC_DEFAULT_MTU; mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME; + mac_cb->port_rst_off = mac_cb->mac_id; + mac_cb->port_mode_off = 0; - /* Get the rest of the PHY information */ - mac_cb->phy_node = of_parse_phandle(np, "phy-handle", mac_cb->mac_id); + /* if the dsaf node doesn't contain a port subnode, get phy-handle + * from dsaf node + */ + if (!mac_cb->fw_port) { + mac_cb->phy_node = of_parse_phandle(np, "phy-handle", + mac_cb->mac_id); + if (mac_cb->phy_node) + dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", + mac_cb->mac_id, mac_cb->phy_node->name); + return 0; + } + if (!is_of_node(mac_cb->fw_port)) + return -EINVAL; + /* parse property from port subnode in dsaf */ + mac_cb->phy_node = of_parse_phandle(to_of_node(mac_cb->fw_port), + "phy-handle", 0); if (mac_cb->phy_node) dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", mac_cb->mac_id, mac_cb->phy_node->name); + syscon = syscon_node_to_regmap( + of_parse_phandle(to_of_node(mac_cb->fw_port), + "serdes-syscon", 0)); + if (IS_ERR_OR_NULL(syscon)) { + dev_err(mac_cb->dev, "serdes-syscon is needed!\n"); + return -EINVAL; + } + mac_cb->serdes_ctrl = syscon; + + ret = fwnode_property_read_u32(mac_cb->fw_port, + "port-rst-offset", + &mac_cb->port_rst_off); + if (ret) { + dev_dbg(mac_cb->dev, + "mac%d port-rst-offset not found, use default value.\n", + mac_cb->mac_id); + } + + ret = fwnode_property_read_u32(mac_cb->fw_port, + "port-mode-offset", + &mac_cb->port_mode_off); + if (ret) { + dev_dbg(mac_cb->dev, + "mac%d port-mode-offset not found, use default value.\n", + mac_cb->mac_id); + } + + ret = of_parse_phandle_with_fixed_args(to_of_node(mac_cb->fw_port), + "cpld-syscon", 1, 0, &cpld_args); + if (ret) { + dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n", + mac_cb->mac_id); + mac_cb->cpld_ctrl = NULL; + } else { + syscon = syscon_node_to_regmap(cpld_args.np); + if (IS_ERR_OR_NULL(syscon)) { + 
dev_dbg(mac_cb->dev, "no cpld-syscon found!\n"); + mac_cb->cpld_ctrl = NULL; + } else { + mac_cb->cpld_ctrl = syscon; + mac_cb->cpld_ctrl_reg = cpld_args.args[0]; + } + } + + return 0; } /** @@ -725,40 +761,31 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev, return base + 0x40000 + mac_id * 0x4000 - mac_mode_idx * 0x20000; else - return mac_cb->serdes_vaddr + 0x1000 - + (mac_id - DSAF_SERVICE_PORT_NUM_PER_DSAF) * 0x100000; + return dsaf_dev->ppe_base + 0x1000; } /** * hns_mac_get_cfg - get mac cfg from dtb or acpi table * @dsaf_dev: dsa fabric device struct pointer - * @mac_idx: mac index - * retuen 0 - success , negative --fail + * @mac_cb: mac control block + * return 0 - success , negative --fail */ -int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx) +int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb) { int ret; u32 mac_mode_idx; - struct hns_mac_cb *mac_cb = &dsaf_dev->mac_cb[mac_idx]; mac_cb->dsaf_dev = dsaf_dev; mac_cb->dev = dsaf_dev->dev; - mac_cb->mac_id = mac_idx; mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base; mac_cb->serdes_vaddr = dsaf_dev->sds_base; - if (dsaf_dev->cpld_base && - mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) { - mac_cb->cpld_vaddr = dsaf_dev->cpld_base + - mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET; - cpld_led_reset(mac_cb); - } mac_cb->sfp_prsnt = 0; mac_cb->txpkt_for_led = 0; mac_cb->rxpkt_for_led = 0; - if (mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) mac_cb->mac_type = HNAE_PORT_SERVICE; else mac_cb->mac_type = HNAE_PORT_DEBUG; @@ -774,53 +801,100 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx) } mac_mode_idx = (u32)ret; - hns_mac_get_info(mac_cb, mac_cb->dev->of_node, mac_mode_idx); + ret = hns_mac_get_info(mac_cb); + if (ret) + return ret; + cpld_led_reset(mac_cb); mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx); return 0; } +static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev) +{ + if (HNS_DSAF_IS_DEBUG(dsaf_dev)) + return 1; + else + return DSAF_MAX_PORT_NUM; +} + /** * hns_mac_init - init mac * @dsaf_dev: dsa fabric device struct pointer - * retuen 0 - success , negative --fail + * return 0 - success , negative --fail */ int hns_mac_init(struct dsaf_device *dsaf_dev) { - int i; + bool found = false; int ret; - size_t size; + u32 port_id; + int max_port_num = hns_mac_get_max_port_num(dsaf_dev); struct hns_mac_cb *mac_cb; + struct fwnode_handle *child; - size = sizeof(struct hns_mac_cb) * DSAF_MAX_PORT_NUM_PER_CHIP; - dsaf_dev->mac_cb = devm_kzalloc(dsaf_dev->dev, size, GFP_KERNEL); - if (!dsaf_dev->mac_cb) - return -ENOMEM; + device_for_each_child_node(dsaf_dev->dev, child) { + ret = fwnode_property_read_u32(child, "reg", &port_id); + if (ret) { + dev_err(dsaf_dev->dev, + "get reg fail, ret=%d!\n", ret); + return ret; + } + if (port_id >= max_port_num) { + dev_err(dsaf_dev->dev, + "reg(%u) out of range!\n", port_id); + return -EINVAL; + } + mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb), + GFP_KERNEL); + if (!mac_cb) + return -ENOMEM; + mac_cb->fw_port = child; + mac_cb->mac_id = (u8)port_id; + dsaf_dev->mac_cb[port_id] = mac_cb; + found = true; + } - for (i = 0; i < DSAF_MAX_PORT_NUM_PER_CHIP; i++) { - ret = hns_mac_get_cfg(dsaf_dev, i); - if (ret) - goto free_mac_cb; + /* if don't get any port subnode from dsaf node + * will init all port then, this is compatible with the old dts + */ + if (!found) { + for (port_id = 0; port_id < max_port_num; port_id++) { + mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb), + 
GFP_KERNEL); + if (!mac_cb) + return -ENOMEM; + + mac_cb->mac_id = port_id; + dsaf_dev->mac_cb[port_id] = mac_cb; + } + } + /* init mac_cb for all port */ + for (port_id = 0; port_id < max_port_num; port_id++) { + mac_cb = dsaf_dev->mac_cb[port_id]; + if (!mac_cb) + continue; - mac_cb = &dsaf_dev->mac_cb[i]; + ret = hns_mac_get_cfg(dsaf_dev, mac_cb); + if (ret) + return ret; ret = hns_mac_init_ex(mac_cb); if (ret) - goto free_mac_cb; + return ret; } return 0; - -free_mac_cb: - dsaf_dev->mac_cb = NULL; - - return ret; } void hns_mac_uninit(struct dsaf_device *dsaf_dev) { - cpld_led_reset(dsaf_dev->mac_cb); - dsaf_dev->mac_cb = NULL; + int i; + int max_port_num = hns_mac_get_max_port_num(dsaf_dev); + + for (i = 0; i < max_port_num; i++) { + cpld_led_reset(dsaf_dev->mac_cb[i]); + dsaf_dev->mac_cb[i] = NULL; + } } int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb, @@ -908,7 +982,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb) int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb, enum hnae_led_state status) { - if (!mac_cb || !mac_cb->cpld_vaddr) + if (!mac_cb || !mac_cb->cpld_ctrl) return 0; return cpld_set_led_id(mac_cb, status); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h index 823b6e78c8aa..97ce9a750aaf 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h @@ -10,9 +10,10 @@ #ifndef _HNS_DSAF_MAC_H #define _HNS_DSAF_MAC_H -#include <linux/phy.h> -#include <linux/kernel.h> #include <linux/if_vlan.h> +#include <linux/kernel.h> +#include <linux/phy.h> +#include <linux/regmap.h> #include "hns_dsaf_main.h" struct dsaf_device; @@ -310,10 +311,15 @@ struct hns_mac_cb { struct device *dev; struct dsaf_device *dsaf_dev; struct mac_priv priv; + struct fwnode_handle *fw_port; u8 __iomem *vaddr; - u8 __iomem *cpld_vaddr; u8 __iomem *sys_ctl_vaddr; u8 __iomem *serdes_vaddr; + struct regmap *serdes_ctrl; + struct regmap *cpld_ctrl; + u32 cpld_ctrl_reg; + u32 port_rst_off; + u32 port_mode_off; struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM]; u8 sfp_prsnt; u8 cpld_led_value; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 5978a5c8ef35..1c2ddb25e776 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -7,27 +7,29 @@ * (at your option) any later version. 
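[hns_mac_init() above now discovers ports by walking the DSAF device's child firmware nodes: each port subnode must carry a reg property inside the valid range, one hns_mac_cb is allocated per node found, and when no subnodes exist at all the function falls back to instantiating every port, keeping old device trees working. A reduced sketch of the walk; allocation and error unwinding are elided, and example_enumerate_ports() is not a function from the patch:

	#include <linux/device.h>
	#include <linux/property.h>

	static int example_enumerate_ports(struct device *dev, int max_port_num)
	{
		struct fwnode_handle *child;
		int found = 0;
		u32 port_id;

		device_for_each_child_node(dev, child) {
			if (fwnode_property_read_u32(child, "reg", &port_id) ||
			    port_id >= max_port_num)
				return -EINVAL;	/* subnode without a usable "reg" */
			found++;	/* the real code allocates a mac_cb here */
		}

		return found;	/* 0: old DT, caller initializes all ports */
	}
]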
*/ -#include <linux/module.h> -#include <linux/kernel.h> +#include <linux/device.h> #include <linux/init.h> #include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/netdevice.h> -#include <linux/platform_device.h> +#include <linux/mfd/syscon.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> -#include <linux/device.h> +#include <linux/platform_device.h> #include <linux/vmalloc.h> +#include "hns_dsaf_mac.h" #include "hns_dsaf_main.h" -#include "hns_dsaf_rcb.h" #include "hns_dsaf_ppe.h" -#include "hns_dsaf_mac.h" +#include "hns_dsaf_rcb.h" const char *g_dsaf_mode_match[DSAF_MODE_MAX] = { [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf", [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss", [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf", + [DSAF_MODE_DISABLE_SP] = "single-port", }; int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) @@ -35,8 +37,13 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) int ret, i; u32 desc_num; u32 buf_size; + u32 reset_offset = 0; + u32 res_idx = 0; const char *mode_str; + struct regmap *syscon; + struct resource *res; struct device_node *np = dsaf_dev->dev->of_node; + struct platform_device *pdev = to_platform_device(dsaf_dev->dev); if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) dsaf_dev->dsaf_ver = AE_VERSION_1; @@ -73,42 +80,68 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) else dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE; - dsaf_dev->sc_base = of_iomap(np, 0); - if (!dsaf_dev->sc_base) { - dev_err(dsaf_dev->dev, - "%s of_iomap 0 fail!\n", dsaf_dev->ae_dev.name); - ret = -ENOMEM; - goto unmap_base_addr; - } + syscon = syscon_node_to_regmap( + of_parse_phandle(np, "subctrl-syscon", 0)); + if (IS_ERR_OR_NULL(syscon)) { + res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); + if (!res) { + dev_err(dsaf_dev->dev, "subctrl info is needed!\n"); + return -ENOMEM; + } + dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res); + if (!dsaf_dev->sc_base) { + dev_err(dsaf_dev->dev, "subctrl can not map!\n"); + return -ENOMEM; + } - dsaf_dev->sds_base = of_iomap(np, 1); - if (!dsaf_dev->sds_base) { - dev_err(dsaf_dev->dev, - "%s of_iomap 1 fail!\n", dsaf_dev->ae_dev.name); - ret = -ENOMEM; - goto unmap_base_addr; + res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); + if (!res) { + dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n"); + return -ENOMEM; + } + dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res); + if (!dsaf_dev->sds_base) { + dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n"); + return -ENOMEM; + } + } else { + dsaf_dev->sub_ctrl = syscon; } - dsaf_dev->ppe_base = of_iomap(np, 2); - if (!dsaf_dev->ppe_base) { - dev_err(dsaf_dev->dev, - "%s of_iomap 2 fail!\n", dsaf_dev->ae_dev.name); - ret = -ENOMEM; - goto unmap_base_addr; + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base"); + if (!res) { + res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++); + if (!res) { + dev_err(dsaf_dev->dev, "ppe-base info is needed!\n"); + return -ENOMEM; + } } - - dsaf_dev->io_base = of_iomap(np, 3); - if (!dsaf_dev->io_base) { - dev_err(dsaf_dev->dev, - "%s of_iomap 3 fail!\n", dsaf_dev->ae_dev.name); - ret = -ENOMEM; - goto unmap_base_addr; + dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res); + if (!dsaf_dev->ppe_base) { + dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n"); + return -ENOMEM; + } + dsaf_dev->ppe_paddr = res->start; + + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { + res = platform_get_resource_byname(pdev, 
IORESOURCE_MEM, + "dsaf-base"); + if (!res) { + res = platform_get_resource(pdev, IORESOURCE_MEM, + res_idx); + if (!res) { + dev_err(dsaf_dev->dev, + "dsaf-base info is needed!\n"); + return -ENOMEM; + } + } + dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res); + if (!dsaf_dev->io_base) { + dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n"); + return -ENOMEM; + } } - dsaf_dev->cpld_base = of_iomap(np, 4); - if (!dsaf_dev->cpld_base) - dev_dbg(dsaf_dev->dev, "NO CPLD ADDR"); - ret = of_property_read_u32(np, "desc-num", &desc_num); if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT || desc_num > HNS_DSAF_MAX_DESC_CNT) { @@ -118,6 +151,13 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) } dsaf_dev->desc_num = desc_num; + ret = of_property_read_u32(np, "reset-field-offset", &reset_offset); + if (ret < 0) { + dev_dbg(dsaf_dev->dev, + "get reset-field-offset fail, ret=%d!\r\n", ret); + } + dsaf_dev->reset_offset = reset_offset; + ret = of_property_read_u32(np, "buf-size", &buf_size); if (ret < 0) { dev_err(dsaf_dev->dev, @@ -149,8 +189,6 @@ unmap_base_addr: iounmap(dsaf_dev->sds_base); if (dsaf_dev->sc_base) iounmap(dsaf_dev->sc_base); - if (dsaf_dev->cpld_base) - iounmap(dsaf_dev->cpld_base); return ret; } @@ -167,9 +205,6 @@ static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev) if (dsaf_dev->sc_base) iounmap(dsaf_dev->sc_base); - - if (dsaf_dev->cpld_base) - iounmap(dsaf_dev->cpld_base); } /** @@ -217,9 +252,7 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev) u32 q_id, q_num_per_port; u32 i; - hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, - HNS_DSAF_COMM_SERVICE_NW_IDX, - &max_vfn, &max_q_per_vf); + hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf); q_num_per_port = max_vfn * max_q_per_vf; for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) { @@ -239,9 +272,7 @@ static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev) if (AE_IS_VER1(dsaf_dev->dsaf_ver)) return; - hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, - HNS_DSAF_COMM_SERVICE_NW_IDX, - &max_vfn, &max_q_per_vf); + hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf); q_num_per_port = max_vfn * max_q_per_vf; for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) { @@ -712,13 +743,15 @@ static void hns_dsaf_tbl_tcam_data_ucast_pul( void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en) { - dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en); + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) + dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, + DSAF_CFG_MIX_MODE_S, !!en); } void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en) { if (AE_IS_VER1(dsaf_dev->dsaf_ver) || - dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG) + dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG) return; dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id, @@ -1022,12 +1055,52 @@ static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev) * @mac_cb: mac contrl block */ static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev, - int mac_id, int en) + int mac_id, int tc_en) +{ + dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, tc_en); +} + +static void hns_dsaf_set_pfc_pause(struct dsaf_device *dsaf_dev, + int mac_id, int tx_en, int rx_en) +{ + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + if (!tx_en || !rx_en) + dev_err(dsaf_dev->dev, "dsaf v1 can not close pfc!\n"); + + return; + } + + dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4, + DSAF_PFC_PAUSE_RX_EN_B, !!rx_en); + dsaf_set_dev_bit(dsaf_dev, 
DSAF_PAUSE_CFG_REG + mac_id * 4, + DSAF_PFC_PAUSE_TX_EN_B, !!tx_en); +} + +int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, + u32 en) { - if (!en) - dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0); + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { + if (!en) + dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n"); + + return -EINVAL; + } + + dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4, + DSAF_MAC_PAUSE_RX_EN_B, !!en); + + return 0; +} + +void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, + u32 *en) +{ + if (AE_IS_VER1(dsaf_dev->dsaf_ver)) + *en = 1; else - dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0xff); + *en = dsaf_get_dev_bit(dsaf_dev, + DSAF_PAUSE_CFG_REG + mac_id * 4, + DSAF_MAC_PAUSE_RX_EN_B); } /** @@ -1039,6 +1112,7 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev) { u32 i; u32 o_dsaf_cfg; + bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG); dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en); @@ -1064,8 +1138,10 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev) hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN); /*set dsaf pfc to 0 for parseing rx pause*/ - for (i = 0; i < DSAF_COMM_CHN; i++) + for (i = 0; i < DSAF_COMM_CHN; i++) { hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0); + hns_dsaf_set_pfc_pause(dsaf_dev, i, is_ver1, is_ver1); + } /*msk and clr exception irqs */ for (i = 0; i < DSAF_COMM_CHN; i++) { @@ -1264,6 +1340,9 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev) u32 i; int ret; + if (HNS_DSAF_IS_DEBUG(dsaf_dev)) + return 0; + ret = hns_dsaf_init_hw(dsaf_dev); if (ret) return ret; @@ -2013,6 +2092,8 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) { struct dsaf_hw_stats *hw_stats = &dsaf_dev->hw_stats[node_num]; + bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver); + u32 reg_tmp; hw_stats->pad_drop += dsaf_read_dev(dsaf_dev, DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num); @@ -2022,8 +2103,12 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num) DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num); hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev, DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num); - hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev, - DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + 0x80 * (u64)node_num); + + reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG : + DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG; + hw_stats->rx_pause_frame += + dsaf_read_dev(dsaf_dev, reg_tmp + 0x80 * (u64)node_num); + hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev, DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num); hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev, @@ -2056,6 +2141,8 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) u32 i = 0; u32 j; u32 *p = data; + u32 reg_tmp; + bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver); /* dsaf common registers */ p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG); @@ -2120,8 +2207,9 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80); p[190 + i] = dsaf_read_dev(ddev, DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80); - p[193 + i] = dsaf_read_dev(ddev, - DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + j * 0x80); + reg_tmp = is_ver1 ? 
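[hns_dsaf_set_rx_mac_pause_en()/hns_dsaf_get_rx_mac_pause_en() above make the DSAF fabric, rather than the MAC, the owner of a service port's rx pause setting on v2 hardware (v1 keeps it force-enabled and rejects changes). The caller side of that split appears earlier in hns_ae_set_pauseparam(): program the fabric first, then mask the rx side out of the per-MAC configuration. Sketched with the names used in those hunks:

	if (handle->port_type == HNAE_PORT_SERVICE) {
		ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev,
						   mac_cb->mac_id, rx_en);
		if (ret)
			return ret;
		rx_en = 0;	/* the MAC must not also gate rx pause */
	}

	return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
]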
DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG : + DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG; + p[193 + i] = dsaf_read_dev(ddev, reg_tmp + j * 0x80); p[196 + i] = dsaf_read_dev(ddev, DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80); p[199 + i] = dsaf_read_dev(ddev, @@ -2368,8 +2456,11 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data) p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4); p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4); + if (!is_ver1) + p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4); + /* mark end of dsaf regs */ - for (i = 498; i < 504; i++) + for (i = 499; i < 504; i++) p[i] = 0xdddddddd; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 5fea226efaf3..f0502ba0a677 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -41,6 +41,7 @@ struct hns_mac_cb; #define DSAF_STATIC_NUM 28 #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) +#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP) enum hal_dsaf_mode { HRD_DSAF_NO_DSAF_MODE = 0x0, @@ -117,6 +118,7 @@ enum dsaf_mode { DSAF_MODE_ENABLE_32VM, /**< en DSAF-mode, support 32 VM */ DSAF_MODE_ENABLE_128VM, /**< en DSAF-mode, support 128 VM */ DSAF_MODE_ENABLE, /**< before is enable DSAF mode*/ + DSAF_MODE_DISABLE_SP, /* <non-dsaf, single port mode */ DSAF_MODE_DISABLE_FIX, /**< non-dasf, fixed to queue*/ DSAF_MODE_DISABLE_2PORT_8VM, /**< non-dasf, 2port 8VM */ DSAF_MODE_DISABLE_2PORT_16VM, /**< non-dasf, 2port 16VM */ @@ -275,10 +277,12 @@ struct dsaf_device { u8 __iomem *sds_base; u8 __iomem *ppe_base; u8 __iomem *io_base; - u8 __iomem *cpld_base; + struct regmap *sub_ctrl; + phys_addr_t ppe_paddr; u32 desc_num; /* desc num per queue*/ u32 buf_size; /* ring buffer size */ + u32 reset_offset; /* reset field offset in sub sysctrl */ int buf_size_type; /* ring buffer size-type */ enum dsaf_mode dsaf_mode; /* dsaf mode */ enum hal_dsaf_mode dsaf_en; @@ -287,7 +291,7 @@ struct dsaf_device { struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM]; struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM]; - struct hns_mac_cb *mac_cb; + struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM]; struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM]; struct dsaf_int_stat int_stat; @@ -359,14 +363,6 @@ static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev, tab_line_addr); } -static inline int hns_dsaf_get_comm_idx_by_port(int port) -{ - if ((port < DSAF_COMM_CHN) || (port == DSAF_MAX_PORT_NUM_PER_CHIP)) - return 0; - else - return (port - DSAF_COMM_CHN + 1); -} - static inline struct hnae_vf_cb *hns_ae_get_vf_cb( struct hnae_handle *handle) { @@ -417,6 +413,11 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port); void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); int hns_dsaf_get_regs_count(void); void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en); + +void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, + u32 *en); +int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id, + u32 en); void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en); #endif /* __HNS_DSAF_MAIN_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index e69b02287c44..a837bb9e3839 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ 
b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -7,10 +7,30 @@ * (at your option) any later version. */ -#include "hns_dsaf_misc.h" #include "hns_dsaf_mac.h" -#include "hns_dsaf_reg.h" +#include "hns_dsaf_misc.h" #include "hns_dsaf_ppe.h" +#include "hns_dsaf_reg.h" + +static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val) +{ + if (dsaf_dev->sub_ctrl) + dsaf_write_syscon(dsaf_dev->sub_ctrl, reg, val); + else + dsaf_write_reg(dsaf_dev->sc_base, reg, val); +} + +static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg) +{ + u32 ret; + + if (dsaf_dev->sub_ctrl) + ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg); + else + ret = dsaf_read_reg(dsaf_dev->sc_base, reg); + + return ret; +} void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, u16 speed, int data) @@ -22,8 +42,8 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, pr_err("sfp_led_opt mac_dev is null!\n"); return; } - if (!mac_cb->cpld_vaddr) { - dev_err(mac_cb->dev, "mac_id=%d, cpld_vaddr is null !\n", + if (!mac_cb->cpld_ctrl) { + dev_err(mac_cb->dev, "mac_id=%d, cpld syscon is null !\n", mac_cb->mac_id); return; } @@ -40,21 +60,24 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, dsaf_set_bit(value, DSAF_LED_DATA_B, data); if (value != mac_cb->cpld_led_value) { - dsaf_write_b(mac_cb->cpld_vaddr, value); + dsaf_write_syscon(mac_cb->cpld_ctrl, + mac_cb->cpld_ctrl_reg, value); mac_cb->cpld_led_value = value; } } else { - dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE); + dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, + CPLD_LED_DEFAULT_VALUE); mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; } } void cpld_led_reset(struct hns_mac_cb *mac_cb) { - if (!mac_cb || !mac_cb->cpld_vaddr) + if (!mac_cb || !mac_cb->cpld_ctrl) return; - dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE); + dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, + CPLD_LED_DEFAULT_VALUE); mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; } @@ -63,15 +86,19 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb, { switch (status) { case HNAE_LED_ACTIVE: - mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr); + mac_cb->cpld_led_value = + dsaf_read_syscon(mac_cb->cpld_ctrl, + mac_cb->cpld_ctrl_reg); dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, CPLD_LED_ON_VALUE); - dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value); + dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, + mac_cb->cpld_led_value); return 2; case HNAE_LED_INACTIVE: dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B, CPLD_LED_DEFAULT_VALUE); - dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value); + dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg, + mac_cb->cpld_led_value); break; default: break; @@ -95,10 +122,8 @@ void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val) nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG; } - dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr, - RESET_REQ_OR_DREQ); - dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr, - RESET_REQ_OR_DREQ); + dsaf_write_sub(dsaf_dev, xbar_reg_addr, RESET_REQ_OR_DREQ); + dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ); } void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) @@ -110,14 +135,14 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) return; reg_val |= RESET_REQ_OR_DREQ; - reg_val |= 0x2082082 << port; + reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off; if (val == 0) reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; else 
reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG; - dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val); + dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, @@ -129,68 +154,63 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev, if (port >= DSAF_XGE_NUM) return; - reg_val |= XGMAC_TRX_CORE_SRST_M << port; + reg_val |= XGMAC_TRX_CORE_SRST_M + << dsaf_dev->mac_cb[port]->port_rst_off; if (val == 0) reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG; - dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val); + dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) { u32 reg_val_1; u32 reg_val_2; + u32 port_rst_off; if (port >= DSAF_GE_NUM) return; - if (port < DSAF_SERVICE_NW_NUM) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { reg_val_1 = 0x1 << port; + port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off; /* there is difference between V1 and V2 in register.*/ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) - reg_val_2 = 0x1041041 << port; + reg_val_2 = 0x1041041 << port_rst_off; else - reg_val_2 = 0x2082082 << port; + reg_val_2 = 0x2082082 << port_rst_off; if (val == 0) { - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_REQ1_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, reg_val_1); - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_REQ0_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ0_REG, reg_val_2); } else { - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_DREQ0_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ0_REG, reg_val_2); - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_DREQ1_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG, reg_val_1); } } else { - reg_val_1 = 0x15540 << (port - 6); - reg_val_2 = 0x100 << (port - 6); + reg_val_1 = 0x15540 << dsaf_dev->reset_offset; + reg_val_2 = 0x100 << dsaf_dev->reset_offset; if (val == 0) { - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_REQ1_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG, reg_val_1); - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_PPE_RESET_REQ_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_REQ_REG, reg_val_2); } else { - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_GE_RESET_DREQ1_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG, reg_val_1); - dsaf_write_reg(dsaf_dev->sc_base, - DSAF_SUB_SC_PPE_RESET_DREQ_REG, + dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_DREQ_REG, reg_val_2); } } @@ -201,24 +221,23 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val) u32 reg_val = 0; u32 reg_addr; - reg_val |= RESET_REQ_OR_DREQ << port; + reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off; if (val == 0) reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG; else reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG; - dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val); + dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) { - int comm_index = ppe_common->comm_index; struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev; u32 reg_val; u32 reg_addr; - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) { reg_val = RESET_REQ_OR_DREQ; if (val == 0) reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG; @@ -226,7 +245,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG; } else { - 
reg_val = 0x100 << (comm_index - 1); + reg_val = 0x100 << dsaf_dev->reset_offset; if (val == 0) reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG; @@ -234,7 +253,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val) reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG; } - dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val); + dsaf_write_sub(dsaf_dev, reg_addr, reg_val); } /** @@ -246,36 +265,45 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb) { u32 mode; u32 reg; - u32 shift; bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver); - void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr; int mac_id = mac_cb->mac_id; - phy_interface_t phy_if = PHY_INTERFACE_MODE_NA; + phy_interface_t phy_if; - if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) { - phy_if = PHY_INTERFACE_MODE_SGMII; - } else if (mac_id >= 0 && mac_id <= 3) { - reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG; - mode = dsaf_read_reg(sys_ctl_vaddr, reg); - /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */ - shift = is_ver1 ? 0 : mac_id; - if (dsaf_get_bit(mode, shift)) - phy_if = PHY_INTERFACE_MODE_XGMII; + if (is_ver1) { + if (HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) + return PHY_INTERFACE_MODE_SGMII; + + if (mac_id >= 0 && mac_id <= 3) + reg = HNS_MAC_HILINK4_REG; else - phy_if = PHY_INTERFACE_MODE_SGMII; - } else if (mac_id >= 4 && mac_id <= 7) { - reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG; - mode = dsaf_read_reg(sys_ctl_vaddr, reg); - /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */ - shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6; - if (dsaf_get_bit(mode, shift)) - phy_if = PHY_INTERFACE_MODE_XGMII; + reg = HNS_MAC_HILINK3_REG; + } else { + if (!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev) && mac_id <= 3) + reg = HNS_MAC_HILINK4V2_REG; else - phy_if = PHY_INTERFACE_MODE_SGMII; + reg = HNS_MAC_HILINK3V2_REG; } + + mode = dsaf_read_sub(mac_cb->dsaf_dev, reg); + if (dsaf_get_bit(mode, mac_cb->port_mode_off)) + phy_if = PHY_INTERFACE_MODE_XGMII; + else + phy_if = PHY_INTERFACE_MODE_SGMII; + return phy_if; } +int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt) +{ + if (!mac_cb->cpld_ctrl) + return -ENODEV; + + *sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg + + MAC_SFP_PORT_OFFSET); + + return 0; +} + /** * hns_mac_config_sds_loopback - set loop back for serdes * @mac_cb: mac control block @@ -312,7 +340,14 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en) pr_info("no sfp in this eth\n"); } - dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en); + if (mac_cb->serdes_ctrl) { + u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset); + + dsaf_set_field(origin, 1ull << 10, 10, !!en); + dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); + } else { + dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en); + } return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c index 5b7ae5ff43e8..8cd151a5245e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c @@ -61,22 +61,10 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb, } } -static void __iomem *hns_ppe_common_get_ioaddr( - struct ppe_common_cb *ppe_common) +static void __iomem * +hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) { - void __iomem *base_addr; - - int idx = ppe_common->comm_index; - - if (idx == HNS_DSAF_COMM_SERVICE_NW_IDX) - base_addr = ppe_common->dsaf_dev->ppe_base - +
PPE_COMMON_REG_OFFSET; - else - base_addr = ppe_common->dsaf_dev->sds_base - + (idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET - + PPE_COMMON_REG_OFFSET; - - return base_addr; + return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; } /** @@ -90,7 +78,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) struct ppe_common_cb *ppe_common; int ppe_num; - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM; else ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM; @@ -103,7 +91,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index) ppe_common->ppe_num = ppe_num; ppe_common->dsaf_dev = dsaf_dev; ppe_common->comm_index = comm_index; - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) + if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE; else ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG; @@ -124,32 +112,8 @@ void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index) static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, int ppe_idx) { - void __iomem *base_addr; - int common_idx = ppe_common->comm_index; - - if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) { - base_addr = ppe_common->dsaf_dev->ppe_base + - ppe_idx * PPE_REG_OFFSET; - - } else { - base_addr = ppe_common->dsaf_dev->sds_base + - (common_idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET; - } - - return base_addr; -} - -static int hns_ppe_get_port(struct ppe_common_cb *ppe_common, int idx) -{ - int port; - - if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) - port = idx; - else - port = HNS_PPE_SERVICE_NW_ENGINE_NUM - + ppe_common->comm_index - 1; - return port; + return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; } static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common) @@ -164,7 +128,6 @@ static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common) ppe_cb->next = NULL; ppe_cb->ppe_common_cb = ppe_common; ppe_cb->index = i; - ppe_cb->port = hns_ppe_get_port(ppe_common, i); ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i); ppe_cb->virq = 0; } @@ -318,7 +281,7 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en) static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) { struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb; - u32 port = ppe_cb->port; + u32 port = ppe_cb->index; struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev; int i; @@ -332,10 +295,12 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb) /* clear and mask exception irqs */ hns_ppe_exc_irq_en(ppe_cb, 0); - if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) + if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) { hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE); - else + dsaf_write_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG, 0); + } else { hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE); + } hns_ppe_checksum_hw(ppe_cb, 0xffffffff); hns_ppe_cnt_clr_ce(ppe_cb); @@ -375,7 +340,8 @@ void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common) u32 i; for (i = 0; i < ppe_common->ppe_num; i++) { - hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]); + if (ppe_common->dsaf_dev->mac_cb[i]) + hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]); memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb)); } } @@ -408,8 +374,11 @@ void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index) if (ret) return; - for (i = 0; i < ppe_common->ppe_num; i++) - hns_ppe_init_hw(&ppe_common->ppe_cb[i]); + for (i = 0; i < ppe_common->ppe_num; i++) { + /* We only need to initialize the ppe when the port
exists */ + if (dsaf_dev->mac_cb[i]) + hns_ppe_init_hw(&ppe_common->ppe_cb[i]); + } ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h index e9c0ec2fa0dd..9d8e643e8aa6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h @@ -80,7 +80,6 @@ struct hns_ppe_cb { struct hns_ppe_hw_stats hw_stats; u8 index; /* index in a ppe common device */ - u8 port; /* port id in dsaf */ void __iomem *io_base; int virq; u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c index 28ee26e5c478..4ef6d23d998e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c @@ -270,7 +270,7 @@ static void hns_rcb_set_port_timeout( static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common) { - if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) + if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) return HNS_RCB_SERVICE_NW_ENGINE_NUM; else return HNS_RCB_DEBUG_NW_ENGINE_NUM; @@ -430,36 +430,20 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb) static int hns_rcb_get_port_in_comm( struct rcb_common_cb *rcb_common, int ring_idx) { - int comm_index = rcb_common->comm_index; - int port; - int q_num; - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { - q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn; - port = ring_idx / q_num; - } else { - port = 0; /* config debug-ports port_id_in_comm to 0*/ - } - - return port; + return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn); } #define SERVICE_RING_IRQ_IDX(v1) \ ((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX) -#define DEBUG_RING_IRQ_IDX(v1) \ - ((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX) -#define DEBUG_RING_IRQ_OFFSET(v1) \ - ((v1) ? 
HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET) static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common) { - int comm_index = rcb_common->comm_index; bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver); - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) + if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) return SERVICE_RING_IRQ_IDX(is_ver1); else - return DEBUG_RING_IRQ_IDX(is_ver1) + - (comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1); + return HNS_DEBUG_RING_IRQ_IDX; } #define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\ @@ -549,7 +533,7 @@ int hns_rcb_set_coalesce_usecs( return 0; if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) { - if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { + if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) { dev_err(rcb_common->dsaf_dev->dev, "error: not support coalesce_usecs setting!\n"); return -EINVAL; @@ -601,113 +585,82 @@ int hns_rcb_set_coalesced_frames( *@max_vfn : max vfn number *@max_q_per_vf:max ring number per vm */ -void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, - u16 *max_vfn, u16 *max_q_per_vf) +void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn, + u16 *max_q_per_vf) { - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { - switch (dsaf_mode) { - case DSAF_MODE_DISABLE_6PORT_0VM: - *max_vfn = 1; - *max_q_per_vf = 16; - break; - case DSAF_MODE_DISABLE_FIX: - *max_vfn = 1; - *max_q_per_vf = 1; - break; - case DSAF_MODE_DISABLE_2PORT_64VM: - *max_vfn = 64; - *max_q_per_vf = 1; - break; - case DSAF_MODE_DISABLE_6PORT_16VM: - *max_vfn = 16; - *max_q_per_vf = 1; - break; - default: - *max_vfn = 1; - *max_q_per_vf = 16; - break; - } - } else { + switch (dsaf_mode) { + case DSAF_MODE_DISABLE_6PORT_0VM: + *max_vfn = 1; + *max_q_per_vf = 16; + break; + case DSAF_MODE_DISABLE_FIX: + case DSAF_MODE_DISABLE_SP: *max_vfn = 1; *max_q_per_vf = 1; + break; + case DSAF_MODE_DISABLE_2PORT_64VM: + *max_vfn = 64; + *max_q_per_vf = 1; + break; + case DSAF_MODE_DISABLE_6PORT_16VM: + *max_vfn = 16; + *max_q_per_vf = 1; + break; + default: + *max_vfn = 1; + *max_q_per_vf = 16; + break; } } -int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index) +int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev) { - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { - switch (dsaf_dev->dsaf_mode) { - case DSAF_MODE_ENABLE_FIX: - return 1; - - case DSAF_MODE_DISABLE_FIX: - return 6; - - case DSAF_MODE_ENABLE_0VM: - return 32; - - case DSAF_MODE_DISABLE_6PORT_0VM: - case DSAF_MODE_ENABLE_16VM: - case DSAF_MODE_DISABLE_6PORT_2VM: - case DSAF_MODE_DISABLE_6PORT_16VM: - case DSAF_MODE_DISABLE_6PORT_4VM: - case DSAF_MODE_ENABLE_8VM: - return 96; - - case DSAF_MODE_DISABLE_2PORT_16VM: - case DSAF_MODE_DISABLE_2PORT_8VM: - case DSAF_MODE_ENABLE_32VM: - case DSAF_MODE_DISABLE_2PORT_64VM: - case DSAF_MODE_ENABLE_128VM: - return 128; - - default: - dev_warn(dsaf_dev->dev, - "get ring num fail,use default!dsaf_mode=%d\n", - dsaf_dev->dsaf_mode); - return 128; - } - } else { + switch (dsaf_dev->dsaf_mode) { + case DSAF_MODE_ENABLE_FIX: + case DSAF_MODE_DISABLE_SP: return 1; + + case DSAF_MODE_DISABLE_FIX: + return 6; + + case DSAF_MODE_ENABLE_0VM: + return 32; + + case DSAF_MODE_DISABLE_6PORT_0VM: + case DSAF_MODE_ENABLE_16VM: + case DSAF_MODE_DISABLE_6PORT_2VM: + case DSAF_MODE_DISABLE_6PORT_16VM: + case DSAF_MODE_DISABLE_6PORT_4VM: + case DSAF_MODE_ENABLE_8VM: + return 96; + + case DSAF_MODE_DISABLE_2PORT_16VM: + case DSAF_MODE_DISABLE_2PORT_8VM: + case DSAF_MODE_ENABLE_32VM: + case DSAF_MODE_DISABLE_2PORT_64VM: + 
case DSAF_MODE_ENABLE_128VM: + return 128; + + default: + dev_warn(dsaf_dev->dev, + "get ring num fail, use default! dsaf_mode=%d\n", + dsaf_dev->dsaf_mode); + return 128; } } -void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev, - int comm_index) +void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) { - void __iomem *base_addr; - - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) - base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET; - else - base_addr = dsaf_dev->sds_base - + (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET - + RCB_COMMON_REG_OFFSET; + struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; - return base_addr; + return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET; } -static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev, - int comm_index) +static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common) { - struct device_node *np = dsaf_dev->dev->of_node; - phys_addr_t phy_addr; - const __be32 *tmp_addr; - u64 addr_offset = 0; - u64 size = 0; - int index = 0; - - if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) { - index = 2; - addr_offset = RCB_COMMON_REG_OFFSET; - } else { - index = 1; - addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET + - RCB_COMMON_REG_OFFSET; - } - tmp_addr = of_get_address(np, index, &size, NULL); - phy_addr = of_translate_address(np, tmp_addr); - return phy_addr + addr_offset; + struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; + + return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET; } int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, @@ -717,7 +670,7 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode; u16 max_vfn; u16 max_q_per_vf; - int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index); + int ring_num = hns_rcb_get_ring_num(dsaf_dev); rcb_common = devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) + @@ -732,12 +685,12 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, rcb_common->desc_num = dsaf_dev->desc_num; - hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf); + hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf); rcb_common->max_vfn = max_vfn; rcb_common->max_q_per_vf = max_q_per_vf; - rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index); - rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index); + rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common); + rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common); dsaf_dev->rcb_common[comm_index] = rcb_common; return 0; @@ -932,7 +885,7 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data) { u32 *regs = data; bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver); - bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX); + bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev); u32 reg_tmp; u32 reg_num_tmp; u32 i = 0;
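To make the simplified ring-to-port mapping concrete, here is a worked example (illustrative only; the helper name is ours, with values taken from hns_rcb_get_queue_mode() above):

	/* Illustrative only: in DSAF_MODE_DISABLE_6PORT_0VM, max_vfn = 1 and
	 * max_q_per_vf = 16, so hns_rcb_get_port_in_comm() reduces to
	 * ring_idx / 16: rings 0..15 belong to port 0, ring 37 -> port 2,
	 * ring 95 -> port 5. In the single-queue modes (DISABLE_FIX,
	 * DISABLE_SP) the divisor is 1 and the ring index equals the port.
	 */
	static int example_ring_to_port(int ring_idx)
	{
		return ring_idx / (16 * 1);	/* max_q_per_vf * max_vfn */
	}

diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h index eb61014ad615..bd54dac82ee0 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h @@ -111,7 +111,7 @@ void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index); int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common); void hns_rcb_start(struct hnae_queue *q, u32 val); void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common); -void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index, +void hns_rcb_get_queue_mode(enum dsaf_mode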
dsaf_mode, u16 *max_vfn, u16 *max_q_per_vf); void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index 7d7204f45e78..7c3b5103d151 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -10,25 +10,20 @@ #ifndef _DSAF_REG_H_ #define _DSAF_REG_H_ -#define HNS_DEBUG_RING_IRQ_IDX 55 -#define HNS_SERVICE_RING_IRQ_IDX 59 -#define HNS_DEBUG_RING_IRQ_OFFSET 2 -#define HNSV2_DEBUG_RING_IRQ_IDX 409 -#define HNSV2_SERVICE_RING_IRQ_IDX 25 -#define HNSV2_DEBUG_RING_IRQ_OFFSET 9 - -#define DSAF_MAX_PORT_NUM_PER_CHIP 8 -#define DSAF_SERVICE_PORT_NUM_PER_DSAF 6 -#define DSAF_MAX_VM_NUM 128 - -#define DSAF_COMM_DEV_NUM 3 -#define DSAF_PPE_INODE_BASE 6 -#define HNS_DSAF_COMM_SERVICE_NW_IDX 0 +#include <linux/regmap.h> +#define HNS_DEBUG_RING_IRQ_IDX 0 +#define HNS_SERVICE_RING_IRQ_IDX 59 +#define HNSV2_SERVICE_RING_IRQ_IDX 25 + +#define DSAF_MAX_PORT_NUM 6 +#define DSAF_MAX_VM_NUM 128 + +#define DSAF_COMM_DEV_NUM 1 +#define DSAF_PPE_INODE_BASE 6 #define DSAF_DEBUG_NW_NUM 2 #define DSAF_SERVICE_NW_NUM 6 #define DSAF_COMM_CHN DSAF_SERVICE_NW_NUM #define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM)) -#define DSAF_PORT_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM)) #define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM #define DSAF_PORT_TYPE_NUM 3 #define DSAF_NODE_NUM 18 @@ -137,6 +132,7 @@ #define DSAF_PPE_INT_STS_0_REG 0x1E0 #define DSAF_ROCEE_INT_STS_0_REG 0x200 #define DSAFV2_SERDES_LBK_0_REG 0x220 +#define DSAF_PAUSE_CFG_REG 0x240 #define DSAF_PPE_QID_CFG_0_REG 0x300 #define DSAF_SW_PORT_TYPE_0_REG 0x320 #define DSAF_STP_PORT_TYPE_0_REG 0x340 @@ -155,6 +151,7 @@ #define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG 0x1030 #define DSAF_INODE_SBM_PID_NUM_0_REG 0x1038 #define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x103C +#define DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x1024 #define DSAF_INODE_SBM_RELS_NUM_0_REG 0x104C #define DSAF_INODE_SBM_DROP_NUM_0_REG 0x1050 #define DSAF_INODE_CRC_FALSE_NUM_0_REG 0x1054 @@ -711,6 +708,10 @@ #define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1) #define DSAF_PFC_UNINT_CNT_S 0 +#define DSAF_MAC_PAUSE_RX_EN_B 2 +#define DSAF_PFC_PAUSE_RX_EN_B 1 +#define DSAF_PFC_PAUSE_TX_EN_B 0 + #define DSAF_PPE_QID_CFG_M 0xFF #define DSAF_PPE_QID_CFG_S 0 @@ -988,6 +989,19 @@ static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) return readl(reg_addr + reg); } +static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value) +{ + regmap_write(base, reg, value); +} + +static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg) +{ + unsigned int val; + + regmap_read(base, reg, &val); + return val; +} + #define dsaf_read_dev(a, reg) \ dsaf_read_reg((a)->io_base, (reg)) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 687204b780b0..e621636e69b9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1275,7 +1275,7 @@ void hns_nic_net_reinit(struct net_device *netdev) { struct hns_nic_priv *priv = netdev_priv(netdev); - priv->netdev->trans_start = jiffies; + netif_trans_update(priv->netdev); while (test_and_set_bit(NIC_STATE_REINITING, &priv->state)) usleep_range(1000, 2000); @@ -1376,7 +1376,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb, ret = hns_nic_net_xmit_hw(ndev, skb, &tx_ring_data(priv, skb->queue_mapping)); if (ret == NETDEV_TX_OK) { - ndev->trans_start 
= jiffies; + netif_trans_update(ndev); ndev->stats.tx_bytes += skb->len; ndev->stats.tx_packets++; } @@ -1648,7 +1648,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv) rtnl_lock(); /* put off any impending NetWatchDogTimeout */ - priv->netdev->trans_start = jiffies; + netif_trans_update(priv->netdev); if (type == HNAE_PORT_DEBUG) { hns_nic_net_reinit(priv->netdev); @@ -1873,6 +1873,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev) struct net_device *ndev; struct hns_nic_priv *priv; struct device_node *node = dev->of_node; + u32 port_id; int ret; ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF); @@ -1896,10 +1897,18 @@ static int hns_nic_dev_probe(struct platform_device *pdev) dev_err(dev, "not find ae-handle\n"); goto out_read_prop_fail; } - - ret = of_property_read_u32(node, "port-id", &priv->port_id); - if (ret) - goto out_read_prop_fail; + /* try to find port-idx-in-ae first */ + ret = of_property_read_u32(node, "port-idx-in-ae", &port_id); + if (ret) { + /* only for compatibility with old code */ + ret = of_property_read_u32(node, "port-id", &port_id); + if (ret) + goto out_read_prop_fail; + /* for old dts, we need to calculate the port offset */ + port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET + : port_id - HNS_SRV_OFFSET; + } + priv->port_id = port_id; hns_init_mac_addr(ndev); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index c68ab3d34fc2..337efa582bac 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -18,6 +18,9 @@ #include "hnae.h" +#define HNS_DEBUG_OFFSET 6 +#define HNS_SRV_OFFSET 2 + enum hns_nic_state { NIC_STATE_TESTING = 0, NIC_STATE_RESETTING,
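The remapping above deserves a worked example (illustrative helper, not part of the patch): with HNS_SRV_OFFSET = 2 and HNS_DEBUG_OFFSET = 6, the old "port-id" scheme (debug ports first) is translated into the new "port-idx-in-ae" scheme (service ports first).

	/* Illustrative only: old DTs number the two debug ports 0-1 and the
	 * service ports 2-7; the new scheme numbers service ports 0-5 and
	 * debug ports 6-7. So:
	 *   old 0 -> 0 + 6 = 6 (debug),  old 1 -> 1 + 6 = 7 (debug),
	 *   old 2 -> 2 - 2 = 0 (first service), ..., old 7 -> 7 - 2 = 5.
	 */
	static u32 example_old_port_id_to_idx(u32 port_id)
	{
		return port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
						: port_id - HNS_SRV_OFFSET;
	}

diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index 3daf2d4a7ca0..631dbc7b4dbb 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -1102,7 +1102,7 @@ static int hp100_open(struct net_device *dev) return -EAGAIN; } - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_start_queue(dev); lp->lan_type = hp100_sense_lan(dev); diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index 7ce6379fd1a3..befb4ac3e2b0 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -1042,7 +1042,7 @@ static void i596_tx_timeout (struct net_device *dev) lp->last_restart = dev->stats.tx_packets; } - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue (dev); } diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c index c984998b34a0..3dbc53c21baa 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -960,7 +960,7 @@ static void i596_tx_timeout (struct net_device *dev) lp->last_restart = dev->stats.tx_packets; } - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue (dev); } diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 353f57f675d0..21c84cc9c871 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -983,7 +983,7 @@ static void sun3_82586_timeout(struct net_device *dev) p->scb->cmd_cuc = CUC_START; sun3_attn586(); WAIT_4_SCB_CMD(); - dev->trans_start =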
jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ return 0; } #endif @@ -996,7 +996,7 @@ static void sun3_82586_timeout(struct net_device *dev) sun3_82586_close(dev); sun3_82586_open(dev); } - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ } /****************************************************** diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 5d7db6c01c46..4c9771d57d6e 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -301,7 +301,7 @@ static inline void emac_netif_stop(struct emac_instance *dev) dev->no_mcast = 1; netif_addr_unlock(dev->ndev); netif_tx_unlock_bh(dev->ndev); - dev->ndev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev->ndev); /* prevent tx timeout */ mal_poll_disable(dev->mal, &dev->commac); netif_tx_disable(dev->ndev); } @@ -1377,7 +1377,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len) DBG2(dev, "stopped TX queue" NL); } - ndev->trans_start = jiffies; + netif_trans_update(ndev); ++dev->stats.tx_packets; dev->stats.tx_bytes += len; diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c index d3b9d103353e..5b88cc690c22 100644 --- a/drivers/net/ethernet/ibm/emac/phy.c +++ b/drivers/net/ethernet/ibm/emac/phy.c @@ -470,12 +470,38 @@ static struct mii_phy_def m88e1112_phy_def = { .ops = &m88e1112_phy_ops, }; +static int ar8035_init(struct mii_phy *phy) +{ + phy_write(phy, 0x1d, 0x5); /* Address debug register 5 */ + phy_write(phy, 0x1e, 0x2d47); /* Value copied from u-boot */ + phy_write(phy, 0x1d, 0xb); /* Address hib ctrl */ + phy_write(phy, 0x1e, 0xbc20); /* Value copied from u-boot */ + + return 0; +} + +static struct mii_phy_ops ar8035_phy_ops = { + .init = ar8035_init, + .setup_aneg = genmii_setup_aneg, + .setup_forced = genmii_setup_forced, + .poll_link = genmii_poll_link, + .read_link = genmii_read_link, +}; + +static struct mii_phy_def ar8035_phy_def = { + .phy_id = 0x004dd070, + .phy_id_mask = 0xfffffff0, + .name = "Atheros 8035 Gigabit Ethernet", + .ops = &ar8035_phy_ops, +}; + static struct mii_phy_def *mii_phy_table[] = { &et1011c_phy_def, &cis8201_phy_def, &bcm5248_phy_def, &m88e1111_phy_def, &m88e1112_phy_def, + &ar8035_phy_def, &genmii_phy_def, NULL }; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 6e9e16eee5d0..864cb21351a4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -61,6 +61,7 @@ #include <linux/proc_fs.h> #include <linux/in.h> #include <linux/ip.h> +#include <linux/ipv6.h> #include <linux/irq.h> #include <linux/kthread.h> #include <linux/seq_file.h> @@ -94,6 +95,7 @@ static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *); static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *); static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, union sub_crq *sub_crq); +static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64); static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance); static int enable_scrq_irq(struct ibmvnic_adapter *, struct ibmvnic_sub_crq_queue *); @@ -561,10 +563,141 @@ static int ibmvnic_close(struct net_device *netdev) return 0; } +/** + * build_hdr_data - creates L2/L3/L4 header data buffer + * @hdr_field - bitfield determining needed headers + * @skb - socket buffer + * @hdr_len - array of header lengths + * @tot_len - 
total length of data + * + * Reads hdr_field to determine which headers are needed by firmware. + * Builds a buffer containing these headers. Saves individual header + * lengths and total buffer length to be used to build descriptors. + */ +static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, + int *hdr_len, u8 *hdr_data) +{ + int len = 0; + u8 *hdr; + + hdr_len[0] = sizeof(struct ethhdr); + + if (skb->protocol == htons(ETH_P_IP)) { + hdr_len[1] = ip_hdr(skb)->ihl * 4; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + hdr_len[2] = tcp_hdrlen(skb); + else if (ip_hdr(skb)->protocol == IPPROTO_UDP) + hdr_len[2] = sizeof(struct udphdr); + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hdr_len[1] = sizeof(struct ipv6hdr); + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + hdr_len[2] = tcp_hdrlen(skb); + else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) + hdr_len[2] = sizeof(struct udphdr); + } + + memset(hdr_data, 0, 120); + if ((hdr_field >> 6) & 1) { + hdr = skb_mac_header(skb); + memcpy(hdr_data, hdr, hdr_len[0]); + len += hdr_len[0]; + } + + if ((hdr_field >> 5) & 1) { + hdr = skb_network_header(skb); + memcpy(hdr_data + len, hdr, hdr_len[1]); + len += hdr_len[1]; + } + + if ((hdr_field >> 4) & 1) { + hdr = skb_transport_header(skb); + memcpy(hdr_data + len, hdr, hdr_len[2]); + len += hdr_len[2]; + } + return len; +} + +/** + * create_hdr_descs - create header and header extension descriptors + * @hdr_field - bitfield determining needed headers + * @data - buffer containing header data + * @len - length of data buffer + * @hdr_len - array of individual header lengths + * @scrq_arr - descriptor array + * + * Creates header and, if needed, header extension descriptors and + * places them in a descriptor array, scrq_arr + */ + +static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, + union sub_crq *scrq_arr) +{ + union sub_crq hdr_desc; + int tmp_len = len; + u8 *data, *cur; + int tmp; + + while (tmp_len > 0) { + cur = hdr_data + len - tmp_len; + + memset(&hdr_desc, 0, sizeof(hdr_desc)); + if (cur != hdr_data) { + data = hdr_desc.hdr_ext.data; + tmp = tmp_len > 29 ? 29 : tmp_len; + hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD; + hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC; + hdr_desc.hdr_ext.len = tmp; + } else { + data = hdr_desc.hdr.data; + tmp = tmp_len > 24 ? 24 : tmp_len; + hdr_desc.hdr.first = IBMVNIC_CRQ_CMD; + hdr_desc.hdr.type = IBMVNIC_HDR_DESC; + hdr_desc.hdr.len = tmp; + hdr_desc.hdr.l2_len = (u8)hdr_len[0]; + hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]); + hdr_desc.hdr.l4_len = (u8)hdr_len[2]; + hdr_desc.hdr.flag = hdr_field << 1; + } + memcpy(data, cur, tmp); + tmp_len -= tmp; + *scrq_arr = hdr_desc; + scrq_arr++; + } +} + +/** + * build_hdr_descs_arr - build a header descriptor array + * @skb - socket buffer + * @num_entries - number of descriptors to be sent + * @subcrq - first TX descriptor + * @hdr_field - bit field determining which headers will be sent + * + * This function will build a TX descriptor array with applicable + * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect. + */ + +static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff, + int *num_entries, u8 hdr_field) +{ + int hdr_len[3] = {0, 0, 0}; + int tot_len, len; + u8 *hdr_data = txbuff->hdr_data; + + tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, + txbuff->hdr_data); + len = tot_len; + len -= 24; + if (len > 0) + num_entries += len % 29 ? 
len / 29 + 1 : len / 29; + create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, + txbuff->indir_arr + 1); +} + static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); int queue_num = skb_get_queue_mapping(skb); + u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req; struct device *dev = &adapter->vdev->dev; struct ibmvnic_tx_buff *tx_buff = NULL; struct ibmvnic_tx_pool *tx_pool; @@ -579,6 +712,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned long lpar_rc; union sub_crq tx_crq; unsigned int offset; + int num_entries = 1; unsigned char *dst; u64 *handle_array; int index = 0; @@ -644,11 +778,35 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP; } - if (skb->ip_summed == CHECKSUM_PARTIAL) + if (skb->ip_summed == CHECKSUM_PARTIAL) { tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; - - lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq); - + hdrs += 2; + } + /* determine if l2/3/4 headers are sent to firmware */ + if ((*hdrs >> 7) & 1 && + (skb->protocol == htons(ETH_P_IP) || + skb->protocol == htons(ETH_P_IPV6))) { + build_hdr_descs_arr(tx_buff, &num_entries, *hdrs); + tx_crq.v1.n_crq_elem = num_entries; + tx_buff->indir_arr[0] = tx_crq; + tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr, + sizeof(tx_buff->indir_arr), + DMA_TO_DEVICE); + if (dma_mapping_error(dev, tx_buff->indir_dma)) { + if (!firmware_has_feature(FW_FEATURE_CMO)) + dev_err(dev, "tx: unable to map descriptor array\n"); + tx_map_failed++; + tx_dropped++; + ret = NETDEV_TX_BUSY; + goto out; + } + lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num], + (u64)tx_buff->indir_dma, + (u64)num_entries); + } else { + lpar_rc = send_subcrq(adapter, handle_array[queue_num], + &tx_crq); + } if (lpar_rc != H_SUCCESS) { dev_err(dev, "tx failed with code %ld\n", lpar_rc); @@ -832,7 +990,7 @@ restart_poll: netdev->stats.rx_bytes += length; frames_processed++; } - replenish_pools(adapter); + replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]); if (frames_processed < budget) { enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]); @@ -1159,6 +1317,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter, union sub_crq *next; int index; int i, j; + u8 first; restart_loop: while (pending_scrq(adapter, scrq)) { @@ -1181,6 +1340,13 @@ restart_loop: txbuff->data_dma[j] = 0; txbuff->used_bounce = false; } + /* if sub_crq was sent indirectly */ + first = txbuff->indir_arr[0].generic.first; + if (first == IBMVNIC_CRQ_CMD) { + dma_unmap_single(dev, txbuff->indir_dma, + sizeof(txbuff->indir_arr), + DMA_TO_DEVICE); + } if (txbuff->last_frag) dev_kfree_skb_any(txbuff->skb); @@ -1261,9 +1427,9 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) entries_page : adapter->max_rx_add_entries_per_subcrq; /* Choosing the maximum number of queues supported by firmware*/ - adapter->req_tx_queues = adapter->min_tx_queues; - adapter->req_rx_queues = adapter->min_rx_queues; - adapter->req_rx_add_queues = adapter->min_rx_add_queues; + adapter->req_tx_queues = adapter->max_tx_queues; + adapter->req_rx_queues = adapter->max_rx_queues; + adapter->req_rx_add_queues = adapter->max_rx_add_queues; adapter->req_mtu = adapter->max_mtu; } @@ -1494,6 +1660,28 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, return rc; } +static int send_subcrq_indirect(struct ibmvnic_adapter *adapter, + u64 remote_handle, u64 ioba, u64 num_entries) 
+{ + unsigned int ua = adapter->vdev->unit_address; + struct device *dev = &adapter->vdev->dev; + int rc; + + /* Make sure the hypervisor sees the complete request */ + mb(); + rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua, + cpu_to_be64(remote_handle), + ioba, num_entries); + + if (rc) { + if (rc == H_CLOSED) + dev_warn(dev, "CRQ Queue closed\n"); + dev_err(dev, "Send (indirect) error (rc=%d)\n", rc); + } + + return rc; +} + static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter, union ibmvnic_crq *crq) { @@ -1589,13 +1777,11 @@ static void send_login(struct ibmvnic_adapter *adapter) goto buf_map_failed; } - rsp_buffer_size = - sizeof(struct ibmvnic_login_rsp_buffer) + - sizeof(u64) * (adapter->req_tx_queues + - adapter->req_rx_queues * - adapter->req_rx_add_queues + adapter-> - req_rx_add_queues) + - sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS); + rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) + + sizeof(u64) * adapter->req_tx_queues + + sizeof(u64) * adapter->req_rx_queues + + sizeof(u64) * adapter->req_rx_queues + + sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS; login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC); if (!login_rsp_buffer) @@ -1918,6 +2104,10 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) adapter->netdev->features |= NETIF_F_IPV6_CSUM; + if ((adapter->netdev->features & + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) + adapter->netdev->features |= NETIF_F_RXCSUM; + memset(&crq, 0, sizeof(crq)); crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; @@ -2210,6 +2400,16 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, dma_unmap_single(dev, adapter->login_rsp_buf_token, adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL); + /* If the number of queues requested can't be allocated by the + * server, the login response will return with code 1. We will need + * to resend the login buffer with fewer queues requested. 
+ */ + if (login_rsp_crq->generic.rc.code) { + adapter->renegotiate = true; + complete(&adapter->init_done); + return 0; + } + netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { netdev_dbg(adapter->netdev, "%016lx\n", @@ -3437,14 +3637,21 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) init_completion(&adapter->init_done); wait_for_completion(&adapter->init_done); - /* needed to pull init_sub_crqs outside of an interrupt context - * because it creates IRQ mappings for the subCRQ queues, causing - * a kernel warning - */ - init_sub_crqs(adapter, 0); + do { + adapter->renegotiate = false; - reinit_completion(&adapter->init_done); - wait_for_completion(&adapter->init_done); + init_sub_crqs(adapter, 0); + reinit_completion(&adapter->init_done); + wait_for_completion(&adapter->init_done); + + if (adapter->renegotiate) { + release_sub_crqs(adapter); + send_cap_queries(adapter); + + reinit_completion(&adapter->init_done); + wait_for_completion(&adapter->init_done); + } + } while (adapter->renegotiate); /* if init_sub_crqs is partially successful, retry */ while (!adapter->tx_scrq || !adapter->rx_scrq) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 1a9993cc79b5..0b66a506a4e4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -879,6 +879,9 @@ struct ibmvnic_tx_buff { int pool_index; bool last_frag; bool used_bounce; + union sub_crq indir_arr[6]; + u8 hdr_data[140]; + dma_addr_t indir_dma; }; struct ibmvnic_tx_pool { @@ -977,6 +980,7 @@ struct ibmvnic_adapter { struct ibmvnic_sub_crq_queue **tx_scrq; struct ibmvnic_sub_crq_queue **rx_scrq; int requested_caps; + bool renegotiate; /* rx structs */ struct napi_struct *napi; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 3772f3ac956e..714bd1014ddb 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -25,16 +25,13 @@ config E100 on the adapter. Look for a label that has a barcode and a number in the format 123456-001 (six digits hyphen three digits). - Use the above information and the Adapter & Driver ID Guide at: + Use the above information and the Adapter & Driver ID Guide that + can be located at: - <http://support.intel.com/support/network/adapter/pro100/21397.htm> + <http://support.intel.com> to identify the adapter. - For the latest Intel PRO/100 network driver for Linux, see: - - <http://www.intel.com/p/en_US/support/highlights/network/pro100plus> - More specific information on configuring the driver is in <file:Documentation/networking/e100.txt>. @@ -47,12 +44,7 @@ config E1000 ---help--- This driver supports Intel(R) PRO/1000 gigabit ethernet family of adapters. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - <http://support.intel.com/support/network/adapter/pro100/21397.htm> - - For general information and support, go to the Intel support - website at: + to the Adapter & Driver ID Guide that can be located at: <http://support.intel.com> @@ -71,12 +63,8 @@ config E1000E This driver supports the PCI-Express Intel(R) PRO/1000 gigabit ethernet family of adapters. 
For PCI or PCI-X e1000 adapters, use the regular e1000 driver For more information on how to - identify your adapter, go to the Adapter & Driver ID Guide at: - - <http://support.intel.com/support/network/adapter/pro100/21397.htm> - - For general information and support, go to the Intel support - website at: + identify your adapter, go to the Adapter & Driver ID Guide that + can be located at: <http://support.intel.com> @@ -101,12 +89,7 @@ config IGB ---help--- This driver supports Intel(R) 82575/82576 gigabit ethernet family of adapters. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - <http://support.intel.com/support/network/adapter/pro100/21397.htm> - - For general information and support, go to the Intel support - website at: + to the Adapter & Driver ID Guide that can be located at: <http://support.intel.com> @@ -142,12 +125,7 @@ config IGBVF ---help--- This driver supports Intel(R) 82576 virtual functions. For more information on how to identify your adapter, go to the Adapter & - Driver ID Guide at: - - <http://support.intel.com/support/network/adapter/pro100/21397.htm> - - For general information and support, go to the Intel support - website at: + Driver ID Guide that can be located at: <http://support.intel.com> @@ -164,12 +142,7 @@ config IXGB This driver supports Intel(R) PRO/10GbE family of adapters for PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver instead. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - <http://support.intel.com/support/network/adapter/pro100/21397.htm> - - For general information and support, go to the Intel support - website at: + to the Adapter & Driver ID Guide that can be located at: <http://support.intel.com> @@ -187,12 +160,7 @@ config IXGBE ---help--- This driver supports Intel(R) 10GbE PCI Express family of adapters. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - <http://support.intel.com/support/network/adapter/pro100/21397.htm> - - For general information and support, go to the Intel support - website at: + to the Adapter & Driver ID Guide that can be located at: <http://support.intel.com> @@ -243,12 +211,7 @@ config IXGBEVF ---help--- This driver supports Intel(R) PCI Express virtual functions for the Intel(R) ixgbe driver. For more information on how to identify your - adapter, go to the Adapter & Driver ID Guide at: - - <http://support.intel.com/support/network/sb/CS-008441.htm> - - For general information and support, go to the Intel support - website at: + adapter, go to the Adapter & Driver ID Guide that can be located at: <http://support.intel.com> @@ -266,12 +229,7 @@ config I40E ---help--- This driver supports Intel(R) Ethernet Controller XL710 Family of devices. For more information on how to identify your adapter, go - to the Adapter & Driver ID Guide at: - - <http://support.intel.com/support/network/adapter/pro100/21397.htm> - - For general information and support, go to the Intel support - website at: + to the Adapter & Driver ID Guide that can be located at: <http://support.intel.com> @@ -326,12 +284,7 @@ config I40EVF ---help--- This driver supports Intel(R) XL710 and X710 virtual functions. 
For more information on how to identify your adapter, go to the - Adapter & Driver ID Guide at: - - <http://support.intel.com/support/network/sb/CS-008441.htm> - - For general information and support, go to the Intel support - website at: + Adapter & Driver ID Guide that can be located at: <http://support.intel.com> @@ -347,12 +300,7 @@ config FM10K ---help--- This driver supports Intel(R) FM10000 Ethernet Switch Host Interface. For more information on how to identify your adapter, - go to the Adapter & Driver ID Guide at: - - <http://support.intel.com/support/network/sb/CS-008441.htm> - - For general information and support, go to the Intel support - website at: + go to the Adapter & Driver ID Guide that can be located at: <http://support.intel.com> diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h index 98fe5a2cd6e3..d7bdea79e9fa 100644 --- a/drivers/net/ethernet/intel/e1000/e1000.h +++ b/drivers/net/ethernet/intel/e1000/e1000.h @@ -358,6 +358,8 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); extern char e1000_driver_name[]; extern const char e1000_driver_version[]; +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); int e1000_up(struct e1000_adapter *adapter); void e1000_down(struct e1000_adapter *adapter); void e1000_reinit_locked(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 83e557c7f279..975eeb885ca2 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1553,7 +1553,7 @@ static void e1000_diag_test(struct net_device *netdev, if (if_running) /* indicate we're in test mode */ - dev_close(netdev); + e1000_close(netdev); else e1000_reset(adapter); @@ -1582,7 +1582,7 @@ static void e1000_diag_test(struct net_device *netdev, e1000_reset(adapter); clear_bit(__E1000_TESTING, &adapter->flags); if (if_running) - dev_open(netdev); + e1000_open(netdev); } else { e_info(hw, "online testing starting\n"); /* Online tests */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 3fc7bde699ba..f42129d09e2c 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -114,8 +114,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void e1000_remove(struct pci_dev *pdev); static int e1000_alloc_queues(struct e1000_adapter *adapter); static int e1000_sw_init(struct e1000_adapter *adapter); -static int e1000_open(struct net_device *netdev); -static int e1000_close(struct net_device *netdev); +int e1000_open(struct net_device *netdev); +int e1000_close(struct net_device *netdev); static void e1000_configure_tx(struct e1000_adapter *adapter); static void e1000_configure_rx(struct e1000_adapter *adapter); static void e1000_setup_rctl(struct e1000_adapter *adapter); @@ -1360,7 +1360,7 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter) * handler is registered with the OS, the watchdog task is started, * and the stack is notified that the interface is ready. **/ -static int e1000_open(struct net_device *netdev) +int e1000_open(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -1437,7 +1437,7 @@ err_setup_tx: * needs to be disabled. 
A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. **/ -static int e1000_close(struct net_device *netdev) +int e1000_close(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -3106,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev, return __e1000_maybe_stop_tx(netdev, size); } -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) +#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { @@ -3256,12 +3256,29 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, nr_frags, mss); if (count) { + /* The number of descriptors needed is higher than in other + * Intel drivers due to a number of workarounds. The breakdown + * is below: + * Data descriptors: MAX_SKB_FRAGS + 1 + * Context Descriptor: 1 + * Keep head from touching tail: 2 + * Workarounds: 3 + */ + int desc_needed = MAX_SKB_FRAGS + 7; + netdev_sent_queue(netdev, skb->len); skb_tx_timestamp(skb); e1000_tx_queue(adapter, tx_ring, tx_flags, count); + + /* 82544 potentially requires twice as many data descriptors + * in order to guarantee buffers don't end on evenly-aligned + * dwords + */ + if (adapter->pcix_82544) + desc_needed += MAX_SKB_FRAGS + 1; + /* Make sure there is space in the ring for the next send. */ - e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); + e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); if (!skb->xmit_more || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
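The TXD_USE_COUNT change above is a ceiling-division fix that is easy to verify by hand (the _OLD/_NEW names below are ours, added for comparison; X = 12 matches the driver's 4096-byte max-per-descriptor shift):

	/* Sketch for comparison, not part of the patch: */
	#define TXD_USE_COUNT_OLD(S, X) (((S) >> (X)) + 1)
	#define TXD_USE_COUNT_NEW(S, X) (((S) + ((1 << (X)) - 1)) >> (X))

	/* An exact 4096-byte buffer needs one descriptor, not two: */
	_Static_assert(TXD_USE_COUNT_OLD(4096, 12) == 2, "old macro over-counts");
	_Static_assert(TXD_USE_COUNT_NEW(4096, 12) == 1, "new macro is exact");
	_Static_assert(TXD_USE_COUNT_NEW(4097, 12) == 2, "and still rounds up");

diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c index 2af603f3e418..cd391376036c 100644 --- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c +++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c @@ -121,7 +121,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) /* EEPROM access above 16k is unsupported */ if (size > 14) size = 14; - nvm->word_size = 1 << size; + nvm->word_size = BIT(size); return 0; } @@ -845,27 +845,27 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) /* Transmit Descriptor Control 0 */ reg = er32(TXDCTL(0)); - reg |= (1 << 22); + reg |= BIT(22); ew32(TXDCTL(0), reg); /* Transmit Descriptor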
Control 1 */ reg = er32(TXDCTL(1)); - reg |= (1 << 22); + reg |= BIT(22); ew32(TXDCTL(1), reg); /* Transmit Arbitration Control 0 */ @@ -1177,11 +1177,11 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_82571: case e1000_82572: - reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); + reg |= BIT(23) | BIT(24) | BIT(25) | BIT(26); break; case e1000_82574: case e1000_82583: - reg |= (1 << 26); + reg |= BIT(26); break; default: break; @@ -1193,12 +1193,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) switch (hw->mac.type) { case e1000_82571: case e1000_82572: - reg &= ~((1 << 29) | (1 << 30)); - reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); + reg &= ~(BIT(29) | BIT(30)); + reg |= BIT(22) | BIT(24) | BIT(25) | BIT(26); if (er32(TCTL) & E1000_TCTL_MULR) - reg &= ~(1 << 28); + reg &= ~BIT(28); else - reg |= (1 << 28); + reg |= BIT(28); ew32(TARC(1), reg); break; default: @@ -1211,7 +1211,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) case e1000_82574: case e1000_82583: reg = er32(CTRL); - reg &= ~(1 << 29); + reg &= ~BIT(29); ew32(CTRL, reg); break; default: @@ -1224,8 +1224,8 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) case e1000_82574: case e1000_82583: reg = er32(CTRL_EXT); - reg &= ~(1 << 23); - reg |= (1 << 22); + reg &= ~BIT(23); + reg |= BIT(22); ew32(CTRL_EXT, reg); break; default: @@ -1261,7 +1261,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) case e1000_82574: case e1000_82583: reg = er32(GCR); - reg |= (1 << 22); + reg |= BIT(22); ew32(GCR, reg); /* Workaround for hardware errata. @@ -1308,8 +1308,8 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw) E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; vfta_bit_in_reg = - 1 << (hw->mng_cookie.vlan_id & - E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + BIT(hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); } break; default: diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 1dc293bad87b..ef96cd11d6d2 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -109,18 +109,18 @@ struct e1000_info; #define E1000_TXDCTL_DMA_BURST_ENABLE \ (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ E1000_TXDCTL_COUNT_DESC | \ - (1 << 16) | /* wthresh must be +1 more than desired */\ - (1 << 8) | /* hthresh */ \ - 0x1f) /* pthresh */ + (1u << 16) | /* wthresh must be +1 more than desired */\ + (1u << 8) | /* hthresh */ \ + 0x1f) /* pthresh */ #define E1000_RXDCTL_DMA_BURST_ENABLE \ (0x01000000 | /* set descriptor granularity */ \ - (4 << 16) | /* set writeback threshold */ \ - (4 << 8) | /* set prefetch threshold */ \ + (4u << 16) | /* set writeback threshold */ \ + (4u << 8) | /* set prefetch threshold */ \ 0x20) /* set hthresh */ -#define E1000_TIDV_FPD (1 << 31) -#define E1000_RDTR_FPD (1 << 31) +#define E1000_TIDV_FPD BIT(31) +#define E1000_RDTR_FPD BIT(31) enum e1000_boards { board_82571, @@ -347,6 +347,7 @@ struct e1000_adapter { struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; struct pm_qos_request pm_qos_req; + s32 ptp_delta; u16 eee_advert; }; @@ -404,53 +405,53 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); #define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL) /* hardware capability, feature, and workaround flags */ -#define FLAG_HAS_AMT (1 << 0) -#define FLAG_HAS_FLASH (1 << 1) -#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) -#define FLAG_HAS_WOL (1 
<< 3) -/* reserved bit4 */ -#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) -#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) -#define FLAG_HAS_JUMBO_FRAMES (1 << 7) -#define FLAG_READ_ONLY_NVM (1 << 8) -#define FLAG_IS_ICH (1 << 9) -#define FLAG_HAS_MSIX (1 << 10) -#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) -#define FLAG_IS_QUAD_PORT_A (1 << 12) -#define FLAG_IS_QUAD_PORT (1 << 13) -#define FLAG_HAS_HW_TIMESTAMP (1 << 14) -#define FLAG_APME_IN_WUC (1 << 15) -#define FLAG_APME_IN_CTRL3 (1 << 16) -#define FLAG_APME_CHECK_PORT_B (1 << 17) -#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) -#define FLAG_NO_WAKE_UCAST (1 << 19) -#define FLAG_MNG_PT_ENABLED (1 << 20) -#define FLAG_RESET_OVERWRITES_LAA (1 << 21) -#define FLAG_TARC_SPEED_MODE_BIT (1 << 22) -#define FLAG_TARC_SET_BIT_ZERO (1 << 23) -#define FLAG_RX_NEEDS_RESTART (1 << 24) -#define FLAG_LSC_GIG_SPEED_DROP (1 << 25) -#define FLAG_SMART_POWER_DOWN (1 << 26) -#define FLAG_MSI_ENABLED (1 << 27) -/* reserved (1 << 28) */ -#define FLAG_TSO_FORCE (1 << 29) -#define FLAG_RESTART_NOW (1 << 30) -#define FLAG_MSI_TEST_FAILED (1 << 31) - -#define FLAG2_CRC_STRIPPING (1 << 0) -#define FLAG2_HAS_PHY_WAKEUP (1 << 1) -#define FLAG2_IS_DISCARDING (1 << 2) -#define FLAG2_DISABLE_ASPM_L1 (1 << 3) -#define FLAG2_HAS_PHY_STATS (1 << 4) -#define FLAG2_HAS_EEE (1 << 5) -#define FLAG2_DMA_BURST (1 << 6) -#define FLAG2_DISABLE_ASPM_L0S (1 << 7) -#define FLAG2_DISABLE_AIM (1 << 8) -#define FLAG2_CHECK_PHY_HANG (1 << 9) -#define FLAG2_NO_DISABLE_RX (1 << 10) -#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) -#define FLAG2_DFLT_CRC_STRIPPING (1 << 12) -#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13) +#define FLAG_HAS_AMT BIT(0) +#define FLAG_HAS_FLASH BIT(1) +#define FLAG_HAS_HW_VLAN_FILTER BIT(2) +#define FLAG_HAS_WOL BIT(3) +/* reserved BIT(4) */ +#define FLAG_HAS_CTRLEXT_ON_LOAD BIT(5) +#define FLAG_HAS_SWSM_ON_LOAD BIT(6) +#define FLAG_HAS_JUMBO_FRAMES BIT(7) +#define FLAG_READ_ONLY_NVM BIT(8) +#define FLAG_IS_ICH BIT(9) +#define FLAG_HAS_MSIX BIT(10) +#define FLAG_HAS_SMART_POWER_DOWN BIT(11) +#define FLAG_IS_QUAD_PORT_A BIT(12) +#define FLAG_IS_QUAD_PORT BIT(13) +#define FLAG_HAS_HW_TIMESTAMP BIT(14) +#define FLAG_APME_IN_WUC BIT(15) +#define FLAG_APME_IN_CTRL3 BIT(16) +#define FLAG_APME_CHECK_PORT_B BIT(17) +#define FLAG_DISABLE_FC_PAUSE_TIME BIT(18) +#define FLAG_NO_WAKE_UCAST BIT(19) +#define FLAG_MNG_PT_ENABLED BIT(20) +#define FLAG_RESET_OVERWRITES_LAA BIT(21) +#define FLAG_TARC_SPEED_MODE_BIT BIT(22) +#define FLAG_TARC_SET_BIT_ZERO BIT(23) +#define FLAG_RX_NEEDS_RESTART BIT(24) +#define FLAG_LSC_GIG_SPEED_DROP BIT(25) +#define FLAG_SMART_POWER_DOWN BIT(26) +#define FLAG_MSI_ENABLED BIT(27) +/* reserved BIT(28) */ +#define FLAG_TSO_FORCE BIT(29) +#define FLAG_RESTART_NOW BIT(30) +#define FLAG_MSI_TEST_FAILED BIT(31) + +#define FLAG2_CRC_STRIPPING BIT(0) +#define FLAG2_HAS_PHY_WAKEUP BIT(1) +#define FLAG2_IS_DISCARDING BIT(2) +#define FLAG2_DISABLE_ASPM_L1 BIT(3) +#define FLAG2_HAS_PHY_STATS BIT(4) +#define FLAG2_HAS_EEE BIT(5) +#define FLAG2_DMA_BURST BIT(6) +#define FLAG2_DISABLE_ASPM_L0S BIT(7) +#define FLAG2_DISABLE_AIM BIT(8) +#define FLAG2_CHECK_PHY_HANG BIT(9) +#define FLAG2_NO_DISABLE_RX BIT(10) +#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11) +#define FLAG2_DFLT_CRC_STRIPPING BIT(12) +#define FLAG2_CHECK_RX_HWTSTAMP BIT(13) #define E1000_RX_DESC_PS(R, i) \ (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) @@ -480,6 +481,8 @@ extern const char e1000e_driver_version[]; void e1000e_check_options(struct e1000_adapter *adapter); void e1000e_set_ethtool_ops(struct net_device 
*netdev); +int e1000e_open(struct net_device *netdev); +int e1000e_close(struct net_device *netdev); void e1000e_up(struct e1000_adapter *adapter); void e1000e_down(struct e1000_adapter *adapter, bool reset); void e1000e_reinit_locked(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 6cab1f30d41e..7aff68a4a4df 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -201,6 +201,9 @@ static int e1000_get_settings(struct net_device *netdev, else ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + if (hw->phy.media_type != e1000_media_type_copper) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + return 0; } @@ -236,8 +239,13 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) mac->forced_speed_duplex = ADVERTISE_100_FULL; break; case SPEED_1000 + DUPLEX_FULL: - mac->autoneg = 1; - adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = + ADVERTISE_1000_FULL; + } else { + mac->forced_speed_duplex = ADVERTISE_1000_FULL; + } break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: @@ -439,8 +447,9 @@ static void e1000_get_regs(struct net_device *netdev, memset(p, 0, E1000_REGS_LEN * sizeof(u32)); - regs->version = (1 << 24) | (adapter->pdev->revision << 16) | - adapter->pdev->device; + regs->version = (1u << 24) | + (adapter->pdev->revision << 16) | + adapter->pdev->device; regs_buff[0] = er32(CTRL); regs_buff[1] = er32(STATUS); @@ -895,7 +904,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pch2lan: case e1000_pch_lpt: case e1000_pch_spt: - mask |= (1 << 18); + mask |= BIT(18); break; default: break; @@ -914,9 +923,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) /* SHRAH[9] different than the others */ if (i == 10) - mask |= (1 << 30); + mask |= BIT(30); else - mask &= ~(1 << 30); + mask &= ~BIT(30); } if (mac->type == e1000_pch2lan) { /* SHRAH[0,1,2] different than previous */ @@ -924,7 +933,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) mask &= 0xFFF4FFFF; /* SHRAH[3] different than SHRAH[0,1,2] */ if (i == 4) - mask |= (1 << 30); + mask |= BIT(30); /* RAR[1-6] owned by management engine - skipping */ if (i > 0) i += 6; @@ -1019,7 +1028,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) /* Test each interrupt */ for (i = 0; i < 10; i++) { /* Interrupt to test */ - mask = 1 << i; + mask = BIT(i); if (adapter->flags & FLAG_IS_ICH) { switch (mask) { @@ -1387,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) case e1000_phy_82579: /* Disable PHY energy detect power down */ e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); - e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3)); + e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3)); /* Disable full chip energy detect */ e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); @@ -1453,7 +1462,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) /* disable autoneg */ ctrl = er32(TXCW); - ctrl &= ~(1 << 31); + ctrl &= ~BIT(31); ew32(TXCW, ctrl); link = (er32(STATUS) & E1000_STATUS_LU); @@ -1816,7 +1825,7 @@ static void e1000_diag_test(struct net_device *netdev, if (if_running) /* indicate we're in test mode */ - dev_close(netdev); + e1000e_close(netdev); if (e1000_reg_test(adapter, 
&data[0])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1849,7 +1858,7 @@ static void e1000_diag_test(struct net_device *netdev, clear_bit(__E1000_TESTING, &adapter->state); if (if_running) - dev_open(netdev); + e1000e_open(netdev); } else { /* Online tests */ @@ -2283,19 +2292,19 @@ static int e1000e_get_ts_info(struct net_device *netdev, SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE); - info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); - - info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | - (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_ALL)); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + + info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) | + BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_ALL)); if (adapter->ptp_clock) info->phc_index = ptp_clock_index(adapter->ptp_clock); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index c0f4887ea44d..3e11322d8d58 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1048,7 +1048,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) while (value > PCI_LTR_VALUE_MASK) { scale++; - value = DIV_ROUND_UP(value, (1 << 5)); + value = DIV_ROUND_UP(value, BIT(5)); } if (scale > E1000_LTRV_SCALE_MAX) { e_dbg("Invalid LTR latency scale %d\n", scale); @@ -1573,7 +1573,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) - phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); + phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); break; @@ -2044,9 +2044,9 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw) /* Restore SMBus frequency */ if (freq--) { phy_data &= ~HV_SMB_ADDR_FREQ_MASK; - phy_data |= (freq & (1 << 0)) << + phy_data |= (freq & BIT(0)) << HV_SMB_ADDR_FREQ_LOW_SHIFT; - phy_data |= (freq & (1 << 1)) << + phy_data |= (freq & BIT(1)) << (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); } else { e_dbg("Unsupported SMB frequency in PHY\n"); @@ -2530,7 +2530,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) /* disable Rx path while enabling/disabling workaround */ e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); - ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14)); if (ret_val) return ret_val; @@ -2561,7 +2561,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) /* Enable jumbo frame workaround in the MAC */ mac_reg = er32(FFLT_DBG); - mac_reg &= ~(1 << 14); + mac_reg &= ~BIT(14); mac_reg |= (7 << 15); ew32(FFLT_DBG, mac_reg); @@ -2576,7 +2576,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool 
enable) return ret_val; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, - data | (1 << 0)); + data | BIT(0)); if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, @@ -2600,7 +2600,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; e1e_rphy(hw, PHY_REG(769, 16), &data); - data &= ~(1 << 13); + data &= ~BIT(13); ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); if (ret_val) return ret_val; @@ -2614,7 +2614,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; e1e_rphy(hw, HV_PM_CTRL, &data); - ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10)); if (ret_val) return ret_val; } else { @@ -2634,7 +2634,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) return ret_val; ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, - data & ~(1 << 0)); + data & ~BIT(0)); if (ret_val) return ret_val; ret_val = e1000e_read_kmrn_reg(hw, @@ -2657,7 +2657,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; e1e_rphy(hw, PHY_REG(769, 16), &data); - data |= (1 << 13); + data |= BIT(13); ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); if (ret_val) return ret_val; @@ -2671,13 +2671,13 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) if (ret_val) return ret_val; e1e_rphy(hw, HV_PM_CTRL, &data); - ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10)); if (ret_val) return ret_val; } /* re-enable Rx path after enabling/disabling workaround */ - return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); + return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14)); } /** @@ -4841,7 +4841,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) /* Extended Device Control */ reg = er32(CTRL_EXT); - reg |= (1 << 22); + reg |= BIT(22); /* Enable PHY low-power state when MAC is at D3 w/o WoL */ if (hw->mac.type >= e1000_pchlan) reg |= E1000_CTRL_EXT_PHYPDEN; @@ -4849,34 +4849,34 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) /* Transmit Descriptor Control 0 */ reg = er32(TXDCTL(0)); - reg |= (1 << 22); + reg |= BIT(22); ew32(TXDCTL(0), reg); /* Transmit Descriptor Control 1 */ reg = er32(TXDCTL(1)); - reg |= (1 << 22); + reg |= BIT(22); ew32(TXDCTL(1), reg); /* Transmit Arbitration Control 0 */ reg = er32(TARC(0)); if (hw->mac.type == e1000_ich8lan) - reg |= (1 << 28) | (1 << 29); - reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + reg |= BIT(28) | BIT(29); + reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27); ew32(TARC(0), reg); /* Transmit Arbitration Control 1 */ reg = er32(TARC(1)); if (er32(TCTL) & E1000_TCTL_MULR) - reg &= ~(1 << 28); + reg &= ~BIT(28); else - reg |= (1 << 28); - reg |= (1 << 24) | (1 << 26) | (1 << 30); + reg |= BIT(28); + reg |= BIT(24) | BIT(26) | BIT(30); ew32(TARC(1), reg); /* Device Status */ if (hw->mac.type == e1000_ich8lan) { reg = er32(STATUS); - reg &= ~(1 << 31); + reg &= ~BIT(31); ew32(STATUS, reg); } diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 2311f6003f58..67163ca898ba 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -73,10 +73,10 @@ (ID_LED_OFF1_ON2 << 4) | \ (ID_LED_DEF1_DEF2)) -#define E1000_ICH_NVM_SIG_WORD 0x13 -#define E1000_ICH_NVM_SIG_MASK 0xC000 -#define 
E1000_ICH_NVM_VALID_SIG_MASK 0xC0 -#define E1000_ICH_NVM_SIG_VALUE 0x80 +#define E1000_ICH_NVM_SIG_WORD 0x13u +#define E1000_ICH_NVM_SIG_MASK 0xC000u +#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0u +#define E1000_ICH_NVM_SIG_VALUE 0x80u #define E1000_ICH8_LAN_INIT_TIMEOUT 1500 diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index e59d7c283cd4..b322011ec282 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -346,7 +346,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); hash_bit = hash_value & 0x1F; - hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); mc_addr_list += (ETH_ALEN); } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 9b4ec13d9161..75e60897b7e7 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -242,7 +242,7 @@ static void e1000e_dump(struct e1000_adapter *adapter) dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state trans_start last_rx\n"); pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, netdev->trans_start, netdev->last_rx); + netdev->state, dev_trans_start(netdev), netdev->last_rx); } /* Print Registers */ @@ -317,8 +317,8 @@ static void e1000e_dump(struct e1000_adapter *adapter) else next_desc = ""; pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", - (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : - ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), + (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' : + ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')), i, (unsigned long long)le64_to_cpu(u0->a), (unsigned long long)le64_to_cpu(u0->b), @@ -2018,7 +2018,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter) adapter->eiac_mask |= E1000_IMS_OTHER; /* Cause Tx interrupts on every write back */ - ivar |= (1 << 31); + ivar |= BIT(31); ew32(IVAR, ivar); @@ -2709,7 +2709,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev, if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { index = (vid >> 5) & 0x7F; vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); - vfta |= (1 << (vid & 0x1F)); + vfta |= BIT((vid & 0x1F)); hw->mac.ops.write_vfta(hw, index, vfta); } @@ -2737,7 +2737,7 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev, if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { index = (vid >> 5) & 0x7F; vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); - vfta &= ~(1 << (vid & 0x1F)); + vfta &= ~BIT((vid & 0x1F)); hw->mac.ops.write_vfta(hw, index, vfta); } @@ -2878,7 +2878,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter) /* Enable this decision filter in MANC2H */ if (mdef) - manc2h |= (1 << i); + manc2h |= BIT(i); j |= mdef; } @@ -2891,7 +2891,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter) if (er32(MDEF(i)) == 0) { ew32(MDEF(i), (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)); - manc2h |= (1 << 1); + manc2h |= BIT(1); j++; break; } @@ -2971,7 +2971,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) /* set the speed mode bit, we'll clear it if we're not at * gigabit link later */ -#define SPEED_MODE_BIT (1 << 21) +#define SPEED_MODE_BIT BIT(21) tarc |= SPEED_MODE_BIT; ew32(TARC(0), tarc); } @@ -3071,12 +3071,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) e1e_rphy(hw, PHY_REG(770, 26), &phy_data); 
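/*
 * A note on the BIT() conversions that run through these hunks: BIT(n)
 * is the kernel's bit helper from <linux/bitops.h>, essentially
 *
 *	#define BIT(nr)		(1UL << (nr))
 *
 * so the shift is performed in unsigned arithmetic. An open-coded
 * (1 << 31) shifts a 1 into the sign bit of a signed int, which is
 * undefined behavior in C; BIT(31), the 1u << 16 style shifts, and the
 * 0x...u constants above sidestep that and make the operand width
 * explicit.
 */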
phy_data &= 0xfff8; - phy_data |= (1 << 2); + phy_data |= BIT(2); e1e_wphy(hw, PHY_REG(770, 26), phy_data); e1e_rphy(hw, 22, &phy_data); phy_data &= 0x0fff; - phy_data |= (1 << 14); + phy_data |= BIT(14); e1e_wphy(hw, 0x10, 0x2823); e1e_wphy(hw, 0x11, 0x0003); e1e_wphy(hw, 22, phy_data); @@ -3368,12 +3368,12 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev) * combining */ netdev_for_each_uc_addr(ha, netdev) { - int rval; + int ret_val; if (!rar_entries) break; - rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); - if (rval < 0) + ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); + if (ret_val < 0) return -ENOMEM; count++; } @@ -3503,8 +3503,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { u32 fextnvm7 = er32(FEXTNVM7); - if (!(fextnvm7 & (1 << 0))) { - ew32(FEXTNVM7, fextnvm7 | (1 << 0)); + if (!(fextnvm7 & BIT(0))) { + ew32(FEXTNVM7, fextnvm7 | BIT(0)); e1e_flush(); } } @@ -3580,7 +3580,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, bool is_l4 = false; bool is_l2 = false; u32 regval; - s32 ret_val; if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) return -EINVAL; @@ -3719,16 +3718,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, er32(RXSTMPH); er32(TXSTMPH); - /* Get and set the System Time Register SYSTIM base frequency */ - ret_val = e1000e_get_base_timinca(adapter, ®val); - if (ret_val) - return ret_val; - ew32(TIMINCA, regval); - - /* reset the ns time counter */ - timecounter_init(&adapter->tc, &adapter->cc, - ktime_to_ns(ktime_get_real())); - return 0; } @@ -3839,7 +3828,7 @@ static void e1000_flush_rx_ring(struct e1000_adapter *adapter) /* update thresholds: prefetch threshold to 31, host threshold to 1 * and make sure the granularity is "descriptors" and not "cache lines" */ - rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC); + rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC); ew32(RXDCTL(0), rxdctl); /* momentarily enable the RX ring for the changes to take effect */ @@ -3885,6 +3874,53 @@ static void e1000_flush_desc_rings(struct e1000_adapter *adapter) } /** + * e1000e_systim_reset - reset the timesync registers after a hardware reset + * @adapter: board private structure + * + * When the MAC is reset, all hardware bits for timesync will be reset to the + * default values. This function will restore the settings last in place. + * Since the clock SYSTIME registers are reset, we will simply restore the + * cyclecounter to the kernel real clock time. 
+ **/ +static void e1000e_systim_reset(struct e1000_adapter *adapter) +{ + struct ptp_clock_info *info = &adapter->ptp_clock_info; + struct e1000_hw *hw = &adapter->hw; + unsigned long flags; + u32 timinca; + s32 ret_val; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return; + + if (info->adjfreq) { + /* restore the previous ptp frequency delta */ + ret_val = info->adjfreq(info, adapter->ptp_delta); + } else { + /* set the default base frequency if no adjustment possible */ + ret_val = e1000e_get_base_timinca(adapter, &timinca); + if (!ret_val) + ew32(TIMINCA, timinca); + } + + if (ret_val) { + dev_warn(&adapter->pdev->dev, + "Failed to restore TIMINCA clock rate delta: %d\n", + ret_val); + return; + } + + /* reset the systim ns time counter */ + spin_lock_irqsave(&adapter->systim_lock, flags); + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + /* restore the previous hwtstamp configuration settings */ + e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); +} + +/** * e1000e_reset - bring the hardware into a known good state * * This function boots the hardware and enables some settings that @@ -4063,8 +4099,8 @@ void e1000e_reset(struct e1000_adapter *adapter) e1000e_reset_adaptive(hw); - /* initialize systim and reset the ns time counter */ - e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); + /* restore systim and hwtstamp settings */ + e1000e_systim_reset(adapter); /* Set EEE advertisement as appropriate */ if (adapter->flags2 & FLAG2_HAS_EEE) { @@ -4275,7 +4311,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, cc); struct e1000_hw *hw = &adapter->hw; - u32 systimel_1, systimel_2, systimeh; + u32 systimel, systimeh; cycle_t systim, systim_next; /* SYSTIMH latching upon SYSTIML read does not work well. * This means that if SYSTIML overflows after we read it but before @@ -4283,24 +4319,25 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) * will experience a huge non linear increment in the systime value * to fix that we test for overflow and if true, we re-read systime. */ - systimel_1 = er32(SYSTIML); + systimel = er32(SYSTIML); systimeh = er32(SYSTIMH); - systimel_2 = er32(SYSTIML); - /* Check for overflow. If there was no overflow, use the values */ - if (systimel_1 < systimel_2) { - systim = (cycle_t)systimel_1; - systim |= (cycle_t)systimeh << 32; - } else { - /* There was an overflow, read again SYSTIMH, and use - * systimel_2 - */ - systimeh = er32(SYSTIMH); - systim = (cycle_t)systimel_2; - systim |= (cycle_t)systimeh << 32; + /* Is systimel is so large that overflow is possible? 
*/ + if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { + u32 systimel_2 = er32(SYSTIML); + if (systimel > systimel_2) { + /* There was an overflow, read again SYSTIMH, and use + * systimel_2 + */ + systimeh = er32(SYSTIMH); + systimel = systimel_2; + } } + systim = (cycle_t)systimel; + systim |= (cycle_t)systimeh << 32; if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { - u64 incvalue, time_delta, rem, temp; + u64 time_delta, rem, temp; + u32 incvalue; int i; /* errata for 82574/82583 possible bad bits read from SYSTIMH/L @@ -4495,7 +4532,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter) } /** - * e1000_open - Called when a network interface is made active + * e1000e_open - Called when a network interface is made active * @netdev: network interface device structure * * Returns 0 on success, negative value on failure @@ -4506,7 +4543,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter) * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready. **/ -static int e1000_open(struct net_device *netdev) +int e1000e_open(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; @@ -4604,7 +4641,7 @@ err_setup_tx: } /** - * e1000_close - Disables a network interface + * e1000e_close - Disables a network interface * @netdev: network interface device structure * * Returns 0, this is not allowed to fail @@ -4614,7 +4651,7 @@ err_setup_tx: * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed. **/ -static int e1000_close(struct net_device *netdev) +int e1000e_close(struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = adapter->pdev; @@ -6861,7 +6898,7 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter) ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); le16_to_cpus(&buf); - if (!ret_val && (!(buf & (1 << 0)))) { + if (!ret_val && (!(buf & BIT(0)))) { /* Deep Smart Power Down (DSPD) */ dev_warn(&adapter->pdev->dev, "Warning: detected DSPD enabled in EEPROM\n"); @@ -6920,8 +6957,8 @@ static int e1000_set_features(struct net_device *netdev, } static const struct net_device_ops e1000e_netdev_ops = { - .ndo_open = e1000_open, - .ndo_stop = e1000_close, + .ndo_open = e1000e_open, + .ndo_stop = e1000e_close, .ndo_start_xmit = e1000_xmit_frame, .ndo_get_stats64 = e1000e_get_stats64, .ndo_set_rx_mode = e1000e_set_rx_mode, @@ -6965,7 +7002,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int bars, i, err, pci_using_dac; u16 eeprom_data = 0; u16 eeprom_apme_mask = E1000_EEPROM_APME; - s32 rval = 0; + s32 ret_val = 0; if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) aspm_disable_flag = PCIE_LINK_STATE_L0S; @@ -7200,18 +7237,18 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else if (adapter->flags & FLAG_APME_IN_CTRL3) { if (adapter->flags & FLAG_APME_CHECK_PORT_B && (adapter->hw.bus.func == 1)) - rval = e1000_read_nvm(&adapter->hw, + ret_val = e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); else - rval = e1000_read_nvm(&adapter->hw, + ret_val = e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); } /* fetch WoL from EEPROM */ - if (rval) - e_dbg("NVM read error getting WoL initial values: %d\n", rval); + if (ret_val) + e_dbg("NVM read error getting WoL initial values: %d\n", ret_val); 
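/*
 * The e1000e_cyclecounter_read() rework a few hunks above is the usual
 * split 64-bit counter read: SYSTIML is re-read only when its first
 * value is within one maximum TIMINCA increment of wrapping, i.e. only
 * when SYSTIMH could have ticked between the two register reads. A
 * minimal sketch of the pattern, with hypothetical rd()/LO/HI/GUARD
 * names rather than the driver's exact code:
 *
 *	u32 lo = rd(LO);
 *	u32 hi = rd(HI);
 *	if (lo >= 0xffffffffu - GUARD) {	(wrap possible?)
 *		u32 lo2 = rd(LO);
 *		if (lo > lo2) {			(LO wrapped; HI is stale)
 *			hi = rd(HI);
 *			lo = lo2;
 *		}
 *	}
 *	u64 systim = ((u64)hi << 32) | lo;
 *
 * In the common case this costs two register reads instead of the old
 * unconditional SYSTIML/SYSTIMH/SYSTIML triple read.
 */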
else if (eeprom_data & eeprom_apme_mask) adapter->eeprom_wol |= E1000_WUFC_MAG; @@ -7231,13 +7268,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) device_wakeup_enable(&pdev->dev); /* save off EEPROM version number */ - rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); + ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); - if (rval) { - e_dbg("NVM read error getting EEPROM version: %d\n", rval); + if (ret_val) { + e_dbg("NVM read error getting EEPROM version: %d\n", ret_val); adapter->eeprom_vers = 0; } + /* init PTP hardware clock */ + e1000e_ptp_init(adapter); + /* reset the hardware with the new settings */ e1000e_reset(adapter); @@ -7256,9 +7296,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - /* init PTP hardware clock */ - e1000e_ptp_init(adapter); - e1000_print_device_info(adapter); if (pci_dev_run_wake(pdev)) diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index 49f205c023bf..2efd80dfd88e 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -67,7 +67,7 @@ static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) u32 eecd = er32(EECD); u32 mask; - mask = 0x01 << (count - 1); + mask = BIT(count - 1); if (nvm->type == e1000_nvm_eeprom_spi) eecd |= E1000_EECD_DO; diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index de13aeacae97..d78d47b41a71 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -2894,11 +2894,11 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, if ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision >= 1) && (hw->phy.addr == 2) && - !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { + !(MAX_PHY_REG_ADDRESS & reg) && (data & BIT(11))) { u16 data2 = 0x7EFF; ret_val = e1000_access_phy_debug_regs_hv(hw, - (1 << 6) | 0x3, + BIT(6) | 0x3, &data2, false); if (ret_val) goto out; diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h index 55bfe473514d..3027f63ee793 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.h +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -104,9 +104,9 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw); #define BM_WUC_DATA_OPCODE 0x12 #define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE #define BM_WUC_ENABLE_REG 17 -#define BM_WUC_ENABLE_BIT (1 << 2) -#define BM_WUC_HOST_WU_BIT (1 << 4) -#define BM_WUC_ME_WU_BIT (1 << 5) +#define BM_WUC_ENABLE_BIT BIT(2) +#define BM_WUC_HOST_WU_BIT BIT(4) +#define BM_WUC_ME_WU_BIT BIT(5) #define PHY_UPPER_SHIFT 21 #define BM_PHY_REG(page, reg) \ @@ -124,8 +124,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw); #define I82578_ADDR_REG 29 #define I82577_ADDR_REG 16 #define I82577_CFG_REG 22 -#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) -#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ +#define I82577_CFG_ASSERT_CRS_ON_TX BIT(15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift */ #define I82577_CTRL_REG 23 /* 82577 specific PHY registers */ diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index e2ff3ef75d5d..2e1b17ad52a3 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -79,6 +79,8 @@ static int 
e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) ew32(TIMINCA, timinca); + adapter->ptp_delta = delta; + spin_unlock_irqrestore(&adapter->systim_lock, flags); return 0; diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile index b006ff66d028..cac645329cea 100644 --- a/drivers/net/ethernet/intel/fm10k/Makefile +++ b/drivers/net/ethernet/intel/fm10k/Makefile @@ -1,7 +1,7 @@ ################################################################################ # -# Intel Ethernet Switch Host Interface Driver -# Copyright(c) 2013 - 2015 Intel Corporation. +# Intel(R) Ethernet Switch Host Interface Driver +# Copyright(c) 2013 - 2016 Intel Corporation. # # This program is free software; you can redistribute it and/or modify it # under the terms and conditions of the GNU General Public License, @@ -22,7 +22,7 @@ ################################################################################ # -# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver +# Makefile for the Intel(R) Ethernet Switch Host Interface Driver # obj-$(CONFIG_FM10K) += fm10k.o @@ -30,7 +30,6 @@ obj-$(CONFIG_FM10K) += fm10k.o fm10k-y := fm10k_main.o \ fm10k_common.o \ fm10k_pci.o \ - fm10k_ptp.o \ fm10k_netdev.o \ fm10k_ethtool.o \ fm10k_pf.o \ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index b34bb008b104..fcf106e545c5 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -27,9 +27,6 @@ #include <linux/rtnetlink.h> #include <linux/if_vlan.h> #include <linux/pci.h> -#include <linux/net_tstamp.h> -#include <linux/clocksource.h> -#include <linux/ptp_clock_kernel.h> #include "fm10k_pf.h" #include "fm10k_vf.h" @@ -262,12 +259,12 @@ struct fm10k_intfc { unsigned long state; u32 flags; -#define FM10K_FLAG_RESET_REQUESTED (u32)(1 << 0) -#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(1 << 1) -#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2) -#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3) -#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4) -#define FM10K_FLAG_DEBUG_STATS (u32)(1 << 5) +#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0)) +#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1)) +#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2)) +#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3)) +#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4)) +#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5)) int xcast_mode; /* Tx fast path data */ @@ -333,6 +330,7 @@ struct fm10k_intfc { unsigned long last_reset; unsigned long link_down_event; bool host_ready; + bool lport_map_failed; u32 reta[FM10K_RETA_SIZE]; u32 rssrk[FM10K_RSSRK_SIZE]; @@ -342,22 +340,8 @@ struct fm10k_intfc { #ifdef CONFIG_DEBUG_FS struct dentry *dbg_intfc; - #endif /* CONFIG_DEBUG_FS */ - struct ptp_clock_info ptp_caps; - struct ptp_clock *ptp_clock; - - struct sk_buff_head ts_tx_skb_queue; - u32 tx_hwtstamp_timeouts; - struct hwtstamp_config ts_config; - /* We are unable to actually adjust the clock beyond the frequency - * value. Once the clock is started there is no resetting it. 
As - * such we maintain a separate offset from the actual hardware clock - * to allow for offset adjustment. - */ - s64 ptp_adjust; - rwlock_t systime_lock; #ifdef CONFIG_DCB u8 pfc_en; #endif @@ -510,6 +494,8 @@ int fm10k_close(struct net_device *netdev); /* Ethtool */ void fm10k_set_ethtool_ops(struct net_device *dev); +u32 fm10k_get_reta_size(struct net_device *netdev); +void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir); /* IOV */ s32 fm10k_iov_event(struct fm10k_intfc *interface); @@ -544,21 +530,6 @@ static inline void fm10k_dbg_init(void) {} static inline void fm10k_dbg_exit(void) {} #endif /* CONFIG_DEBUG_FS */ -/* Time Stamping */ -void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface, - struct skb_shared_hwtstamps *hwtstamp, - u64 systime); -void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb); -void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort, - u64 systime); -void fm10k_ts_reset(struct fm10k_intfc *interface); -void fm10k_ts_init(struct fm10k_intfc *interface); -void fm10k_ts_tx_subtask(struct fm10k_intfc *interface); -void fm10k_ptp_register(struct fm10k_intfc *interface); -void fm10k_ptp_unregister(struct fm10k_intfc *interface); -int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr); -int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr); - /* DCB */ #ifdef CONFIG_DCB void fm10k_dcbnl_set_ops(struct net_device *dev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index 6cfae6ac04ea..5bbf19cfe29b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h index 45e4e5b1f20a..50f71e997448 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c index 2be4361839db..db4bd8bf9722 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 5d6137faf7d1..5116fd043630 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 2f6a05b57228..9c0d87503977 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -77,19 +77,6 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = { FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail), FM10K_STAT("tx_hang_count", tx_timeout_count), - - FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), -}; - -static const struct fm10k_stats fm10k_gstrings_debug_stats[] = { - FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full), - FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good), - FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good), - FM10K_STAT("rx_switch_errors", rx_switch_errors), - FM10K_STAT("rx_drops", rx_drops), - FM10K_STAT("rx_pp_errors", rx_pp_errors), - FM10K_STAT("rx_link_errors", rx_link_errors), - FM10K_STAT("rx_length_errors", rx_length_errors), }; static const struct fm10k_stats fm10k_gstrings_pf_stats[] = { @@ -121,13 +108,21 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = { FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed), }; +#define FM10K_QUEUE_STAT(_name, _stat) { \ + .stat_string = _name, \ + .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \ + .stat_offset = offsetof(struct fm10k_ring, _stat) \ +} + +static const struct fm10k_stats fm10k_gstrings_queue_stats[] = { + FM10K_QUEUE_STAT("packets", stats.packets), + FM10K_QUEUE_STAT("bytes", stats.bytes), +}; + #define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats) -#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats) #define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats) #define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats) - -#define FM10K_QUEUE_STATS_LEN(_n) \ - ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64))) +#define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats) #define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \ FM10K_NETDEV_STATS_LEN + \ @@ -145,77 +140,56 @@ enum fm10k_self_test_types { }; enum { - FM10K_PRV_FLAG_DEBUG_STATS, FM10K_PRV_FLAG_LEN, }; static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = { - "debug-statistics", }; +static void fm10k_add_stat_strings(char **p, const char *prefix, + const struct fm10k_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; 
i++) { + snprintf(*p, ETH_GSTRING_LEN, "%s%s", + prefix, stats[i].stat_string); + *p += ETH_GSTRING_LEN; + } +} + static void fm10k_get_stat_strings(struct net_device *dev, u8 *data) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_iov_data *iov_data = interface->iov_data; char *p = (char *)data; unsigned int i; - unsigned int j; - for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_net_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats, + FM10K_NETDEV_STATS_LEN); - for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_global_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats, + FM10K_GLOBAL_STATS_LEN); - if (interface->flags & FM10K_FLAG_DEBUG_STATS) { - for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_debug_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - } + fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats, + FM10K_MBX_STATS_LEN); - for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + if (interface->hw.mac.type != fm10k_mac_vf) + fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats, + FM10K_PF_STATS_LEN); - if (interface->hw.mac.type != fm10k_mac_vf) { - for (i = 0; i < FM10K_PF_STATS_LEN; i++) { - memcpy(p, fm10k_gstrings_pf_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - } + for (i = 0; i < interface->hw.mac.max_queues; i++) { + char prefix[ETH_GSTRING_LEN]; - if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { - for (i = 0; i < iov_data->num_vfs; i++) { - for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { - snprintf(p, - ETH_GSTRING_LEN, - "vf_%u_%s", i, - fm10k_gstrings_mbx_stats[j].stat_string); - p += ETH_GSTRING_LEN; - } - } - } + snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i); + fm10k_add_stat_strings(&p, prefix, + fm10k_gstrings_queue_stats, + FM10K_QUEUE_STATS_LEN); - for (i = 0; i < interface->hw.mac.max_queues; i++) { - snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i); - p += ETH_GSTRING_LEN; + snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i); + fm10k_add_stat_strings(&p, prefix, + fm10k_gstrings_queue_stats, + FM10K_QUEUE_STATS_LEN); } } @@ -242,7 +216,6 @@ static void fm10k_get_strings(struct net_device *dev, static int fm10k_get_sset_count(struct net_device *dev, int sset) { struct fm10k_intfc *interface = netdev_priv(dev); - struct fm10k_iov_data *iov_data = interface->iov_data; struct fm10k_hw *hw = &interface->hw; int stats_len = FM10K_STATIC_STATS_LEN; @@ -250,19 +223,11 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset) case ETH_SS_TEST: return FM10K_TEST_LEN; case ETH_SS_STATS: - stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues); + stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN; if (hw->mac.type != fm10k_mac_vf) stats_len += FM10K_PF_STATS_LEN; - if (interface->flags & FM10K_FLAG_DEBUG_STATS) { - stats_len += FM10K_DEBUG_STATS_LEN; - - if (iov_data) - stats_len += FM10K_MBX_STATS_LEN * - iov_data->num_vfs; - } - return stats_len; case ETH_SS_PRIV_FLAGS: return FM10K_PRV_FLAG_LEN; @@ -271,93 
+236,80 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset) } } -static void fm10k_get_ethtool_stats(struct net_device *netdev, - struct ethtool_stats __always_unused *stats, - u64 *data) +static void fm10k_add_ethtool_stats(u64 **data, void *pointer, + const struct fm10k_stats stats[], + const unsigned int size) { - const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64); - struct fm10k_intfc *interface = netdev_priv(netdev); - struct fm10k_iov_data *iov_data = interface->iov_data; - struct net_device_stats *net_stats = &netdev->stats; + unsigned int i; char *p; - int i, j; - fm10k_update_stats(interface); - - for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) { - p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + if (!pointer) { + /* memory is not zero allocated so we have to clear it */ + for (i = 0; i < size; i++) + *((*data)++) = 0; + return; } - for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) { - p = (char *)interface + - fm10k_gstrings_global_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } + for (i = 0; i < size; i++) { + p = (char *)pointer + stats[i].stat_offset; - if (interface->flags & FM10K_FLAG_DEBUG_STATS) { - for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) { - p = (char *)interface + - fm10k_gstrings_debug_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + switch (stats[i].sizeof_stat) { + case sizeof(u64): + *((*data)++) = *(u64 *)p; + break; + case sizeof(u32): + *((*data)++) = *(u32 *)p; + break; + case sizeof(u16): + *((*data)++) = *(u16 *)p; + break; + case sizeof(u8): + *((*data)++) = *(u8 *)p; + break; + default: + *((*data)++) = 0; } } +} - for (i = 0; i < FM10K_MBX_STATS_LEN; i++) { - p = (char *)&interface->hw.mbx + - fm10k_gstrings_mbx_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } +static void fm10k_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct fm10k_intfc *interface = netdev_priv(netdev); + struct net_device_stats *net_stats = &netdev->stats; + int i; - if (interface->hw.mac.type != fm10k_mac_vf) { - for (i = 0; i < FM10K_PF_STATS_LEN; i++) { - p = (char *)interface + - fm10k_gstrings_pf_stats[i].stat_offset; - *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat == - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; - } - } + fm10k_update_stats(interface); - if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) { - for (i = 0; i < iov_data->num_vfs; i++) { - struct fm10k_vf_info *vf_info; + fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats, + FM10K_NETDEV_STATS_LEN); - vf_info = &iov_data->vf_info[i]; + fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats, + FM10K_GLOBAL_STATS_LEN); - /* skip stats if we don't have a vf info */ - if (!vf_info) { - data += FM10K_MBX_STATS_LEN; - continue; - } + fm10k_add_ethtool_stats(&data, &interface->hw.mbx, + fm10k_gstrings_mbx_stats, + FM10K_MBX_STATS_LEN); - for (j = 0; j < FM10K_MBX_STATS_LEN; j++) { - p = (char *)&vf_info->mbx + - fm10k_gstrings_mbx_stats[j].stat_offset; - *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat == - sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; - } - } + if (interface->hw.mac.type != fm10k_mac_vf) { + fm10k_add_ethtool_stats(&data, interface, + fm10k_gstrings_pf_stats, + FM10K_PF_STATS_LEN); } for (i = 0; i < interface->hw.mac.max_queues; i++) { struct fm10k_ring *ring; - u64 *queue_stat; ring = interface->tx_ring[i]; - if (ring) - queue_stat = (u64 *)&ring->stats; - for (j = 0; j < stat_count; j++) - *(data++) = ring ? queue_stat[j] : 0; + fm10k_add_ethtool_stats(&data, ring, + fm10k_gstrings_queue_stats, + FM10K_QUEUE_STATS_LEN); ring = interface->rx_ring[i]; - if (ring) - queue_stat = (u64 *)&ring->stats; - for (j = 0; j < stat_count; j++) - *(data++) = ring ? queue_stat[j] : 0; + fm10k_add_ethtool_stats(&data, ring, + fm10k_gstrings_queue_stats, + FM10K_QUEUE_STATS_LEN); } } @@ -425,7 +377,7 @@ static void fm10k_get_regs(struct net_device *netdev, u32 *buff = p; u16 i; - regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id; switch (hw->mac.type) { case fm10k_mac_pf: @@ -935,15 +887,15 @@ static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data) struct fm10k_mbx_info *mbx = &hw->mbx; u32 attr_flag, test_msg[6]; unsigned long timeout; - int err; + int err = -EINVAL; /* For now this is a VF only feature */ if (hw->mac.type != fm10k_mac_vf) return 0; /* loop through both nested and unnested attribute types */ - for (attr_flag = (1 << FM10K_TEST_MSG_UNSET); - attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED)); + for (attr_flag = BIT(FM10K_TEST_MSG_UNSET); + attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED); attr_flag += attr_flag) { /* generate message to be tested */ fm10k_tlv_msg_test_create(test_msg, attr_flag); @@ -1001,35 +953,56 @@ static void fm10k_self_test(struct net_device *dev, static u32 fm10k_get_priv_flags(struct net_device *netdev) { - struct fm10k_intfc *interface = netdev_priv(netdev); - u32 priv_flags = 0; - - if (interface->flags & FM10K_FLAG_DEBUG_STATS) - priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS; - - return priv_flags; + return 0; } static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags) { - struct fm10k_intfc *interface = netdev_priv(netdev); - - if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN)) + if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN)) return -EINVAL; - if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS)) - interface->flags |= FM10K_FLAG_DEBUG_STATS; - else - interface->flags &= ~FM10K_FLAG_DEBUG_STATS; - return 0; } -static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) +u32 fm10k_get_reta_size(struct net_device __always_unused *netdev) { return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG; } +void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir) +{ + u16 rss_i = interface->ring_feature[RING_F_RSS].indices; + struct fm10k_hw *hw = &interface->hw; + u32 table[4]; + int i, j; + + /* record entries to reta table */ + for (i = 0; i < FM10K_RETA_SIZE; i++) { + u32 reta, n; + + /* generate a new table if we weren't given one */ + for (j = 0; j < 4; j++) { + if (indir) + n = indir[i + j]; + else + n = ethtool_rxfh_indir_default(i + j, rss_i); + + table[j] = n; + } + + reta = table[0] | + (table[1] << 8) | + (table[2] << 16) | + (table[3] << 24); + + if (interface->reta[i] == reta) + continue; + + interface->reta[i] = reta; + fm10k_write_reg(hw, FM10K_RETA(0, i), reta); + } +} + static int fm10k_get_reta(struct net_device *netdev, u32 *indir) { struct fm10k_intfc *interface = netdev_priv(netdev); @@ -1053,7 +1026,6 @@ static int fm10k_get_reta(struct 
net_device *netdev, u32 *indir) static int fm10k_set_reta(struct net_device *netdev, const u32 *indir) { struct fm10k_intfc *interface = netdev_priv(netdev); - struct fm10k_hw *hw = &interface->hw; int i; u16 rss_i; @@ -1068,19 +1040,7 @@ static int fm10k_set_reta(struct net_device *netdev, const u32 *indir) return -EINVAL; } - /* record entries to reta table */ - for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) { - u32 reta = indir[0] | - (indir[1] << 8) | - (indir[2] << 16) | - (indir[3] << 24); - - if (interface->reta[i] == reta) - continue; - - interface->reta[i] = reta; - fm10k_write_reg(hw, FM10K_RETA(0, i), reta); - } + fm10k_write_reta(interface, indir); return 0; } @@ -1145,7 +1105,7 @@ static unsigned int fm10k_max_channels(struct net_device *dev) /* For QoS report channels per traffic class */ if (tcs > 1) - max_combined = 1 << (fls(max_combined / tcs) - 1); + max_combined = BIT((fls(max_combined / tcs) - 1)); return max_combined; } @@ -1192,33 +1152,6 @@ static int fm10k_set_channels(struct net_device *dev, return fm10k_setup_tc(dev, netdev_get_num_tc(dev)); } -static int fm10k_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) -{ - struct fm10k_intfc *interface = netdev_priv(dev); - - info->so_timestamping = - SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - - if (interface->ptp_clock) - info->phc_index = ptp_clock_index(interface->ptp_clock); - else - info->phc_index = -1; - - info->tx_types = (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); - - info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_ALL); - - return 0; -} - static const struct ethtool_ops fm10k_ethtool_ops = { .get_strings = fm10k_get_strings, .get_sset_count = fm10k_get_sset_count, @@ -1246,7 +1179,6 @@ static const struct ethtool_ops fm10k_ethtool_ops = { .set_rxfh = fm10k_set_rssh, .get_channels = fm10k_get_channels, .set_channels = fm10k_set_channels, - .get_ts_info = fm10k_get_ts_info, }; void fm10k_set_ethtool_ops(struct net_device *dev) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index acfb8b1f88a7..47f0743ec03b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -50,7 +50,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface) s64 vflre; int i; - /* if there is no iov_data then there is no mailboxes to process */ + /* if there is no iov_data then there is no mailbox to process */ if (!ACCESS_ONCE(interface->iov_data)) return 0; @@ -98,7 +98,7 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface) struct fm10k_iov_data *iov_data; int i; - /* if there is no iov_data then there is no mailboxes to process */ + /* if there is no iov_data then there is no mailbox to process */ if (!ACCESS_ONCE(interface->iov_data)) return 0; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 4de17db3808c..0e166e9c90c8 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -29,15 +29,15 @@ #include "fm10k.h" #define DRV_VERSION "0.19.3-k" +#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; -static const char fm10k_driver_string[] = - "Intel(R) Ethernet Switch Host Interface Driver"; +static const char fm10k_driver_string[] = DRV_SUMMARY; static const char fm10k_copyright[] = - "Copyright (c) 2013 Intel Corporation."; + "Copyright (c) 2013 - 2016 Intel Corporation."; MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); -MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver"); +MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); @@ -401,10 +401,10 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring, } #define FM10K_RSS_L4_TYPES_MASK \ - ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \ - (1ul << FM10K_RSSTYPE_IPV4_UDP) | \ - (1ul << FM10K_RSSTYPE_IPV6_TCP) | \ - (1ul << FM10K_RSSTYPE_IPV6_UDP)) + (BIT(FM10K_RSSTYPE_IPV4_TCP) | \ + BIT(FM10K_RSSTYPE_IPV4_UDP) | \ + BIT(FM10K_RSSTYPE_IPV6_TCP) | \ + BIT(FM10K_RSSTYPE_IPV6_UDP)) static inline void fm10k_rx_hash(struct fm10k_ring *ring, union fm10k_rx_desc *rx_desc, @@ -420,23 +420,10 @@ static inline void fm10k_rx_hash(struct fm10k_ring *ring, return; skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss), - (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? + (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ? 
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); } -static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring, - union fm10k_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct fm10k_intfc *interface = rx_ring->q_vector->interface; - - FM10K_CB(skb)->tstamp = rx_desc->q.timestamp; - - if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED)) - fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb), - le64_to_cpu(rx_desc->q.timestamp)); -} - static void fm10k_type_trans(struct fm10k_ring *rx_ring, union fm10k_rx_desc __maybe_unused *rx_desc, struct sk_buff *skb) @@ -486,8 +473,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring, fm10k_rx_checksum(rx_ring, rx_desc, skb); - fm10k_rx_hwtstamp(rx_ring, rx_desc, skb); - FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan; skb_record_rx_queue(skb, rx_ring->queue_index); @@ -835,6 +820,8 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, struct ipv6hdr *ipv6; u8 *raw; } network_hdr; + u8 *transport_hdr; + __be16 frag_off; __be16 protocol; u8 l4_hdr = 0; @@ -852,9 +839,11 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, goto no_csum; } network_hdr.raw = skb_inner_network_header(skb); + transport_hdr = skb_inner_transport_header(skb); } else { protocol = vlan_get_protocol(skb); network_hdr.raw = skb_network_header(skb); + transport_hdr = skb_transport_header(skb); } switch (protocol) { @@ -863,15 +852,17 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, break; case htons(ETH_P_IPV6): l4_hdr = network_hdr.ipv6->nexthdr; + if (likely((transport_hdr - network_hdr.raw) == + sizeof(struct ipv6hdr))) + break; + ipv6_skip_exthdr(skb, network_hdr.raw - skb->data + + sizeof(struct ipv6hdr), + &l4_hdr, &frag_off); + if (unlikely(frag_off)) + l4_hdr = NEXTHDR_FRAGMENT; break; default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but ip version=%x!\n", - protocol); - } - tx_ring->tx_stats.csum_err++; - goto no_csum; + break; } switch (l4_hdr) { @@ -884,9 +875,10 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); + "partial checksum, version=%d l4 proto=%x\n", + protocol, l4_hdr); } + skb_checksum_help(skb); tx_ring->tx_stats.csum_err++; goto no_csum; } @@ -912,11 +904,6 @@ static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags) /* set type for advanced descriptor with frame checksum insertion */ u32 desc_flags = 0; - /* set timestamping bits */ - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) - desc_flags |= FM10K_TXD_FLAG_TIME; - /* set checksum offload bits */ desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM, FM10K_TXD_FLAG_CSUM); @@ -1198,9 +1185,10 @@ void fm10k_tx_timeout_reset(struct fm10k_intfc *interface) * fm10k_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean + * @napi_budget: Used to determine if we are in netpoll **/ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, - struct fm10k_ring *tx_ring) + struct fm10k_ring *tx_ring, int napi_budget) { struct fm10k_intfc *interface = q_vector->interface; struct fm10k_tx_buffer *tx_buffer; @@ -1238,7 +1226,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - dev_consume_skb_any(tx_buffer->skb); + napi_consume_skb(tx_buffer->skb, napi_budget); /* 
unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1409,7 +1397,7 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container) * accounts for changes in the ITR due to PCIe link speed. */ itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8; - avg_wire_size += (1 << itr_round) - 1; + avg_wire_size += BIT(itr_round) - 1; avg_wire_size >>= itr_round; /* write back value and retain adaptive flag */ @@ -1449,8 +1437,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget) int per_ring_budget, work_done = 0; bool clean_complete = true; - fm10k_for_each_ring(ring, q_vector->tx) - clean_complete &= fm10k_clean_tx_irq(q_vector, ring); + fm10k_for_each_ring(ring, q_vector->tx) { + if (!fm10k_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } /* Handle case where we are called by netpoll with a budget of 0 */ if (budget <= 0) @@ -1468,7 +1458,8 @@ static int fm10k_poll(struct napi_struct *napi, int budget) int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget); work_done += work; - clean_complete &= !!(work < per_ring_budget); + if (work >= per_ring_budget) + clean_complete = false; } /* If all work not completed, return budget and keep polling */ @@ -1511,17 +1502,17 @@ static bool fm10k_set_qos_queues(struct fm10k_intfc *interface) /* set QoS mask and indices */ f = &interface->ring_feature[RING_F_QOS]; f->indices = pcs; - f->mask = (1 << fls(pcs - 1)) - 1; + f->mask = BIT(fls(pcs - 1)) - 1; /* determine the upper limit for our current DCB mode */ rss_i = interface->hw.mac.max_queues / pcs; - rss_i = 1 << (fls(rss_i) - 1); + rss_i = BIT(fls(rss_i) - 1); /* set RSS mask and indices */ f = &interface->ring_feature[RING_F_RSS]; rss_i = min_t(u16, rss_i, f->limit); f->indices = rss_i; - f->mask = (1 << fls(rss_i - 1)) - 1; + f->mask = BIT(fls(rss_i - 1)) - 1; /* configure pause class to queue mapping */ for (i = 0; i < pcs; i++) @@ -1551,7 +1542,7 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) /* record indices and power of 2 mask for RSS */ f->indices = rss_i; - f->mask = (1 << fls(rss_i - 1)) - 1; + f->mask = BIT(fls(rss_i - 1)) - 1; interface->num_rx_queues = rss_i; interface->num_tx_queues = rss_i; @@ -1572,17 +1563,29 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface) **/ static void fm10k_set_num_queues(struct fm10k_intfc *interface) { - /* Start with base case */ - interface->num_rx_queues = 1; - interface->num_tx_queues = 1; - + /* Attempt to setup QoS and RSS first */ if (fm10k_set_qos_queues(interface)) return; + /* If we don't have QoS, just fallback to only RSS. */ fm10k_set_rss_queues(interface); } /** + * fm10k_reset_num_queues - Reset the number of queues to zero + * @interface: board private structure + * + * This function should be called whenever we need to reset the number of + * queues after an error condition. 
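The napi_budget plumbing above exists so the Tx cleaner can use napi_consume_skb(): with a positive budget the skb is returned cheaply to the per-CPU NAPI cache, while a budget of 0 signals netpoll context, where napi_consume_skb() falls back to dev_consume_skb_any(). A minimal sketch of the resulting poll shape, assuming hypothetical my_qv/my_clean_tx/my_clean_rx helpers rather than the real fm10k ones:

/* Sketch only: budget-aware NAPI poll feeding napi_consume_skb(). */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_qv *qv = container_of(napi, struct my_qv, napi);
	bool clean_complete = true;
	int work_done;

	/* Tx cleanup takes the budget so the skb free path can tell
	 * NAPI context (budget > 0) apart from netpoll (budget == 0).
	 */
	if (!my_clean_tx(qv, budget))
		clean_complete = false;

	/* netpoll calls with a zero budget; skip Rx work entirely */
	if (budget <= 0)
		return budget;

	work_done = my_clean_rx(qv, budget);
	if (work_done >= budget)
		clean_complete = false;

	if (!clean_complete)
		return budget;		/* more work pending, stay scheduled */

	napi_complete(napi);
	return work_done;
}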
+ */ +static void fm10k_reset_num_queues(struct fm10k_intfc *interface) +{ + interface->num_tx_queues = 0; + interface->num_rx_queues = 0; + interface->num_q_vectors = 0; +} + +/** * fm10k_alloc_q_vector - Allocate memory for a single interrupt vector * @interface: board private structure to initialize * @v_count: q_vectors allocated on interface, used for ring interleaving @@ -1765,9 +1768,7 @@ static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface) return 0; err_out: - interface->num_tx_queues = 0; - interface->num_rx_queues = 0; - interface->num_q_vectors = 0; + fm10k_reset_num_queues(interface); while (v_idx--) fm10k_free_q_vector(interface, v_idx); @@ -1787,9 +1788,7 @@ static void fm10k_free_q_vectors(struct fm10k_intfc *interface) { int v_idx = interface->num_q_vectors; - interface->num_tx_queues = 0; - interface->num_rx_queues = 0; - interface->num_q_vectors = 0; + fm10k_reset_num_queues(interface); while (v_idx--) fm10k_free_q_vector(interface, v_idx); @@ -1935,7 +1934,7 @@ static void fm10k_assign_rings(struct fm10k_intfc *interface) static void fm10k_init_reta(struct fm10k_intfc *interface) { u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; - u32 reta, base; + u32 reta; /* If the Rx flow indirection table has been configured manually, we * need to maintain it when possible. @@ -1960,21 +1959,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) } repopulate_reta: - /* Populate the redirection table 4 entries at a time. To do this - * we are generating the results for n and n+2 and then interleaving - * those with the results with n+1 and n+3. - */ - for (i = FM10K_RETA_SIZE; i--;) { - /* first pass generates n and n+2 */ - base = ((i * 0x00040004) + 0x00020000) * rss_i; - reta = (base & 0x3F803F80) >> 7; - - /* second pass generates n+1 and n+3 */ - base += 0x00010001 * rss_i; - reta |= (base & 0x3F803F80) << 1; - - interface->reta[i] = reta; - } + fm10k_write_reta(interface, NULL); } /** @@ -1997,14 +1982,15 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) if (err) { dev_err(&interface->pdev->dev, "Unable to initialize MSI-X capability\n"); - return err; + goto err_init_msix; } /* Allocate memory for queues */ err = fm10k_alloc_q_vectors(interface); if (err) { - fm10k_reset_msix_capability(interface); - return err; + dev_err(&interface->pdev->dev, + "Unable to allocate queue vectors\n"); + goto err_alloc_q_vectors; } /* Map rings to devices, and map devices to physical queues */ @@ -2014,6 +2000,12 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface) fm10k_init_reta(interface); return 0; + +err_alloc_q_vectors: + fm10k_reset_msix_capability(interface); +err_init_msix: + fm10k_reset_num_queues(interface); + return err; } /** diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 98202c3d591c..c9dfa6564fcf 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. 
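fm10k_init_queueing_scheme() above now unwinds through labeled exits instead of returning early, so each failure point releases exactly what was already set up and fm10k_reset_num_queues() runs on every error path. The general shape of that ladder, with placeholder setup/teardown names standing in for the fm10k calls:

/* Sketch of a goto unwind ladder; all names here are hypothetical. */
static int init_scheme(struct my_intf *ifc)
{
	int err;

	err = setup_msix(ifc);		/* step 1 */
	if (err)
		goto err_init_msix;

	err = alloc_q_vectors(ifc);	/* step 2 */
	if (err)
		goto err_alloc_q_vectors;

	return 0;

err_alloc_q_vectors:
	reset_msix(ifc);		/* undo step 1 only */
err_init_msix:
	reset_num_queues(ifc);		/* common cleanup on any failure */
	return err;
}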
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h index 245a0a3dc32e..b7dbc8a84c05 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index d09a8dd71fc2..2a08d3f5b6df 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -243,9 +243,6 @@ void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface) for (i = 0; i < interface->num_tx_queues; i++) fm10k_clean_tx_ring(interface->tx_ring[i]); - - /* remove any stale timestamp buffers and free them */ - skb_queue_purge(&interface->ts_tx_skb_queue); } /** @@ -440,7 +437,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface) * @sa_family: Address family of new port * @port: port number used for VXLAN * - * This funciton is called when a new VXLAN interface has added a new port + * This function is called when a new VXLAN interface has added a new port * number to the range that is currently in use for VXLAN. The new port * number is always added to the tail so that the port number list should * match the order in which the ports were allocated. The head of the list @@ -484,7 +481,7 @@ insert_tail: * @sa_family: Address family of freed port * @port: port number used for VXLAN * - * This funciton is called when a new VXLAN interface has freed a port + * This function is called when a new VXLAN interface has freed a port * number from the range that is currently in use for VXLAN. The freed * port is removed from the list and the new head is used to determine * the port number for offloads. @@ -660,10 +657,6 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) __skb_put(skb, pad_len); } - /* prepare packet for hardware time stamping */ - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) - fm10k_ts_tx_enqueue(interface, skb); - if (r_idx >= interface->num_tx_queues) r_idx %= interface->num_tx_queues; @@ -884,7 +877,7 @@ static int __fm10k_uc_sync(struct net_device *dev, return -EADDRNOTAVAIL; /* update table with current entries */ - for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { err = hw->mac.ops.update_uc_addr(hw, glort, addr, @@ -947,7 +940,7 @@ static int __fm10k_mc_sync(struct net_device *dev, u16 vid, glort = interface->glort; /* update table with current entries */ - for (vid = hw->mac.default_vid ? 
fm10k_find_next_vlan(interface, 0) : 0; + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync); @@ -1002,11 +995,8 @@ static void fm10k_set_rx_mode(struct net_device *dev) } /* synchronize all of the addresses */ - if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { - __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); - if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) - __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); - } + __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync); + __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync); fm10k_mbx_unlock(interface); } @@ -1044,7 +1034,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) hw->mac.ops.update_vlan(hw, 0, 0, true); /* update table with current entries */ - for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0; + for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { hw->mac.ops.update_vlan(hw, vid, 0, true); @@ -1056,11 +1046,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); /* synchronize all of the addresses */ - if (xcast_mode != FM10K_XCAST_MODE_PROMISC) { - __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); - if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI) - __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync); - } + __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync); + __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync); fm10k_mbx_unlock(interface); @@ -1213,18 +1200,6 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto, return fm10k_setup_tc(dev, tc->tc); } -static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - case SIOCGHWTSTAMP: - return fm10k_get_ts_config(netdev, ifr); - case SIOCSHWTSTAMP: - return fm10k_set_ts_config(netdev, ifr); - default: - return -EOPNOTSUPP; - } -} - static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, struct fm10k_l2_accel *l2_accel) { @@ -1402,7 +1377,6 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_get_vf_config = fm10k_ndo_get_vf_config, .ndo_add_vxlan_port = fm10k_add_vxlan_port, .ndo_del_vxlan_port = fm10k_del_vxlan_port, - .ndo_do_ioctl = fm10k_ioctl, .ndo_dfwd_add_station = fm10k_dfwd_add_station, .ndo_dfwd_del_station = fm10k_dfwd_del_station, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1429,7 +1403,7 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info) /* configure default debug level */ interface = netdev_priv(dev); - interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; /* configure default features */ dev->features |= NETIF_F_IP_CSUM | diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 4eb7a6fa6b0d..e05aca9bef0e 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. 
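The VLAN walks above (unicast sync, multicast sync, and Rx-state restore) change their starting VID from 0 to 1 when no default VID is set, so VLAN 0, i.e. untagged traffic, is no longer programmed as though it were a real table entry; the unicast/multicast syncs also now run regardless of promiscuous state. The loop shape, with find_next_vlan() standing in for fm10k_find_next_vlan() and all other names illustrative:

/* Sketch of the revised iteration; names are placeholders. */
static int sync_addr_all_vlans(struct my_intf *ifc, const u8 *addr, bool sync)
{
	/* begin at the first configured VLAN, or at VLAN 1 (not 0)
	 * when no default VID exists
	 */
	u16 vid = ifc->default_vid ? find_next_vlan(ifc, 0) : 1;
	int err;

	for (; vid < VLAN_N_VID; vid = find_next_vlan(ifc, vid)) {
		err = update_addr(ifc, addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}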
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -99,7 +99,7 @@ void fm10k_service_event_schedule(struct fm10k_intfc *interface) static void fm10k_service_event_complete(struct fm10k_intfc *interface) { - BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state)); + WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state)); /* flush memory to make sure state is correct before next watchog */ smp_mb__before_atomic(); @@ -145,7 +145,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface) WARN_ON(in_interrupt()); /* put off any impending NetWatchDogTimeout */ - netdev->trans_start = jiffies; + netif_trans_update(netdev); while (test_and_set_bit(__FM10K_RESETTING, &interface->state)) usleep_range(1000, 2000); @@ -209,9 +209,6 @@ static void fm10k_reinit(struct fm10k_intfc *interface) netdev->features |= NETIF_F_HW_VLAN_CTAG_RX; } - /* reset clock */ - fm10k_ts_reset(interface); - err = netif_running(netdev) ? fm10k_open(netdev) : 0; if (err) goto err_open; @@ -559,7 +556,6 @@ static void fm10k_service_task(struct work_struct *work) /* tasks only run when interface is up */ fm10k_watchdog_subtask(interface); fm10k_check_hang_subtask(interface); - fm10k_ts_tx_subtask(interface); /* release lock on service events to allow scheduling next event */ fm10k_service_event_complete(interface); @@ -579,7 +575,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface, u64 tdba = ring->dma; u32 size = ring->count * sizeof(struct fm10k_tx_desc); u32 txint = FM10K_INT_MAP_DISABLE; - u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT); + u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE; u8 reg_idx = ring->reg_idx; /* disable queue to avoid issues while updating state */ @@ -730,7 +726,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface, if (interface->pfc_en) rx_pause = interface->pfc_en; #endif - if (!(rx_pause & (1 << ring->qos_pc))) + if (!(rx_pause & BIT(ring->qos_pc))) rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); @@ -779,7 +775,7 @@ void fm10k_update_rx_drop_en(struct fm10k_intfc *interface) u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; u8 reg_idx = ring->reg_idx; - if (!(rx_pause & (1 << ring->qos_pc))) + if (!(rx_pause & BIT(ring->qos_pc))) rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY; fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl); @@ -903,8 +899,8 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data) /* re-enable mailbox interrupt and indicate 20us delay */ fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR), - FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> - hw->mac.itr_scale)); + (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) | + FM10K_ITR_ENABLE); /* service upstream mailbox */ if (fm10k_mbx_trylock(interface)) { @@ -1065,7 +1061,7 @@ static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr) if (maxholdq) fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq); for (q = 255;;) { - if (maxholdq & (1 << 31)) { + if (maxholdq & BIT(31)) { if (q < FM10K_MAX_QUEUES_PF) { interface->rx_overrun_pf++; fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl); @@ -1135,22 +1131,24 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data) /* re-enable mailbox interrupt and indicate 20us delay */ fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR), - FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >> - hw->mac.itr_scale)); + (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) | + 
FM10K_ITR_ENABLE); return IRQ_HANDLED; } void fm10k_mbx_free_irq(struct fm10k_intfc *interface) { - struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR]; struct fm10k_hw *hw = &interface->hw; + struct msix_entry *entry; int itr_reg; /* no mailbox IRQ to free if MSI-X is not enabled */ if (!interface->msix_entries) return; + entry = &interface->msix_entries[FM10K_MBX_VECTOR]; + /* disconnect the mailbox */ hw->mbx.ops.disconnect(hw, &hw->mbx); @@ -1202,25 +1200,6 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results, return 0; } -static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info __always_unused *mbx) -{ - struct fm10k_intfc *interface; - u64 timestamp; - s32 err; - - err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP], - &timestamp); - if (err) - return err; - - interface = container_of(hw, struct fm10k_intfc, hw); - - fm10k_ts_tx_hwtstamp(interface, 0, timestamp); - - return 0; -} - /* generic error handler for mailbox issues */ static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info __always_unused *mbx) @@ -1241,7 +1220,6 @@ static const struct fm10k_msg_data vf_mbx_data[] = { FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr), FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf), - FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf), FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), }; @@ -1253,7 +1231,7 @@ static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface) int err; /* Use timer0 for interrupt moderation on the mailbox */ - u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry; + u32 itr = entry->entry | FM10K_INT_MAP_TIMER0; /* register mailbox handlers */ err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data); @@ -1285,11 +1263,40 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results, u32 dglort_map = hw->mac.dglort_map; s32 err; + interface = container_of(hw, struct fm10k_intfc, hw); + + err = fm10k_msg_err_pf(hw, results, mbx); + if (!err && hw->swapi.status) { + /* force link down for a reasonable delay */ + interface->link_down_event = jiffies + (2 * HZ); + set_bit(__FM10K_LINK_DOWN, &interface->state); + + /* reset dglort_map back to no config */ + hw->mac.dglort_map = FM10K_DGLORTMAP_NONE; + + fm10k_service_event_schedule(interface); + + /* prevent overloading kernel message buffer */ + if (interface->lport_map_failed) + return 0; + + interface->lport_map_failed = true; + + if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED) + dev_warn(&interface->pdev->dev, + "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n"); + dev_warn(&interface->pdev->dev, + "request logical port map failed: %d\n", + hw->swapi.status); + + return 0; + } + err = fm10k_msg_lport_map_pf(hw, results, mbx); if (err) return err; - interface = container_of(hw, struct fm10k_intfc, hw); + interface->lport_map_failed = false; /* we need to reset if port count was just updated */ if (dglort_map != hw->mac.dglort_map) @@ -1339,68 +1346,6 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results, return 0; } -static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results, - struct fm10k_mbx_info __always_unused *mbx) -{ - struct fm10k_swapi_1588_timestamp timestamp; - struct fm10k_iov_data *iov_data; - struct fm10k_intfc *interface; - u16 sglort, vf_idx; - s32 err; - - err = fm10k_tlv_attr_get_le_struct( - results[FM10K_PF_ATTR_ID_1588_TIMESTAMP], - &timestamp, sizeof(timestamp)); - if 
(err) - return err; - - interface = container_of(hw, struct fm10k_intfc, hw); - - if (timestamp.dglort) { - fm10k_ts_tx_hwtstamp(interface, timestamp.dglort, - le64_to_cpu(timestamp.egress)); - return 0; - } - - /* either dglort or sglort must be set */ - if (!timestamp.sglort) - return FM10K_ERR_PARAM; - - /* verify GLORT is at least one of the ones we own */ - sglort = le16_to_cpu(timestamp.sglort); - if (!fm10k_glort_valid_pf(hw, sglort)) - return FM10K_ERR_PARAM; - - if (sglort == interface->glort) { - fm10k_ts_tx_hwtstamp(interface, 0, - le64_to_cpu(timestamp.ingress)); - return 0; - } - - /* if there is no iov_data then there is no mailboxes to process */ - if (!ACCESS_ONCE(interface->iov_data)) - return FM10K_ERR_PARAM; - - rcu_read_lock(); - - /* notify VF if this timestamp belongs to it */ - iov_data = interface->iov_data; - vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort; - - if (!iov_data || vf_idx >= iov_data->num_vfs) { - err = FM10K_ERR_PARAM; - goto err_unlock; - } - - err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx], - le64_to_cpu(timestamp.ingress)); - -err_unlock: - rcu_read_unlock(); - - return err; -} - static const struct fm10k_msg_data pf_mbx_data[] = { FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), @@ -1408,7 +1353,6 @@ static const struct fm10k_msg_data pf_mbx_data[] = { FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid), - FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf), FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error), }; @@ -1420,8 +1364,8 @@ static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface) int err; /* Use timer0 for interrupt moderation on the mailbox */ - u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry; - u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry; + u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0; + u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE; /* register mailbox handlers */ err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data); @@ -1654,6 +1598,7 @@ void fm10k_down(struct fm10k_intfc *interface) { struct net_device *netdev = interface->netdev; struct fm10k_hw *hw = &interface->hw; + int err; /* signal that we are down to the interrupt handler and service task */ set_bit(__FM10K_DOWN, &interface->state); @@ -1678,7 +1623,9 @@ void fm10k_down(struct fm10k_intfc *interface) fm10k_update_stats(interface); /* Disable DMA engine for Tx/Rx */ - hw->mac.ops.stop_hw(hw); + err = hw->mac.ops.stop_hw(hw); + if (err) + dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err); /* free any buffers still on the rings */ fm10k_clean_all_tx_rings(interface); @@ -1776,35 +1723,17 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, netdev->addr_assign_type |= NET_ADDR_RANDOM; } - memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); - memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len); + ether_addr_copy(netdev->dev_addr, hw->mac.addr); + ether_addr_copy(netdev->perm_addr, hw->mac.addr); if (!is_valid_ether_addr(netdev->perm_addr)) { dev_err(&pdev->dev, "Invalid MAC Address\n"); return -EIO; } - /* assign BAR 4 resources for use with PTP */ - if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED) - interface->sw_addr = ioremap(pci_resource_start(pdev, 4), - pci_resource_len(pdev, 4)); - hw->sw_addr = interface->sw_addr; - /* initialize DCBNL interface */ 
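One small but real fix above: fm10k_mbx_free_irq() no longer forms a pointer into interface->msix_entries in its declarations, deferring the address computation until after the NULL check, since indexing a NULL base is undefined behavior even if the result is never dereferenced. The ordering in isolation, with hypothetical names:

/* Sketch: validate the array before computing a pointer into it. */
static void free_mbx_irq(struct my_intf *ifc)
{
	struct msix_entry *entry;

	if (!ifc->msix_entries)		/* MSI-X never enabled */
		return;

	entry = &ifc->msix_entries[MBX_VECTOR];	/* safe only after check */
	free_irq(entry->vector, ifc);
}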
fm10k_dcbnl_set_ops(netdev); - /* Initialize service timer and service task */ - set_bit(__FM10K_SERVICE_DISABLE, &interface->state); - setup_timer(&interface->service_timer, &fm10k_service_timer, - (unsigned long)interface); - INIT_WORK(&interface->service_task, fm10k_service_task); - - /* kick off service timer now, even when interface is down */ - mod_timer(&interface->service_timer, (HZ * 2) + jiffies); - - /* Intitialize timestamp data */ - fm10k_ts_init(interface); - /* set default ring sizes */ interface->tx_ring_count = FM10K_DEFAULT_TXD; interface->rx_ring_count = FM10K_DEFAULT_RXD; @@ -1987,6 +1916,12 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + /* the mbx interrupt might attempt to schedule the service task, so we + * must ensure it is disabled since we haven't yet requested the timer + * or work item. + */ + set_bit(__FM10K_SERVICE_DISABLE, &interface->state); + err = fm10k_mbx_request_irq(interface); if (err) goto err_mbx_interrupt; @@ -2006,8 +1941,15 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* stop all the transmit queues from transmitting until link is up */ netif_tx_stop_all_queues(netdev); - /* Register PTP interface */ - fm10k_ptp_register(interface); + /* Initialize service timer and service task late in order to avoid + * cleanup issues. + */ + setup_timer(&interface->service_timer, &fm10k_service_timer, + (unsigned long)interface); + INIT_WORK(&interface->service_task, fm10k_service_task); + + /* kick off service timer now, even when interface is down */ + mod_timer(&interface->service_timer, (HZ * 2) + jiffies); /* print warning for non-optimal configurations */ fm10k_slot_warn(interface); @@ -2065,9 +2007,6 @@ static void fm10k_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); - /* cleanup timestamp handling */ - fm10k_ptp_unregister(interface); - /* release VFs */ fm10k_iov_disable(pdev); @@ -2140,9 +2079,6 @@ static int fm10k_resume(struct pci_dev *pdev) /* reset statistics starting values */ hw->mac.ops.rebind_hw_stats(hw, &interface->stats); - /* reset clock */ - fm10k_ts_reset(interface); - rtnl_lock(); err = fm10k_init_queueing_scheme(interface); @@ -2259,15 +2195,17 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; + rtnl_lock(); + if (netif_running(netdev)) fm10k_close(netdev); + fm10k_mbx_free_irq(interface); + /* free interrupts */ fm10k_clear_queueing_scheme(interface); - fm10k_mbx_free_irq(interface); - - pci_disable_device(pdev); + rtnl_unlock(); /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; @@ -2337,27 +2275,31 @@ static void fm10k_io_resume(struct pci_dev *pdev) /* reset statistics starting values */ hw->mac.ops.rebind_hw_stats(hw, &interface->stats); + rtnl_lock(); + err = fm10k_init_queueing_scheme(interface); if (err) { dev_err(&interface->pdev->dev, "init_queueing_scheme failed: %d\n", err); - return; + goto unlock; } /* reassociate interrupts */ fm10k_mbx_request_irq(interface); - /* reset clock */ - fm10k_ts_reset(interface); - + rtnl_lock(); if (netif_running(netdev)) err = fm10k_open(netdev); + rtnl_unlock(); /* final check of hardware state before registering the interface */ err = err ? 
: fm10k_hw_ready(interface); if (!err) netif_device_attach(netdev); + +unlock: + rtnl_unlock(); } static const struct pci_error_handlers fm10k_err_handler = { @@ -2382,7 +2324,7 @@ static struct pci_driver fm10k_driver = { /** * fm10k_register_pci_driver - register driver interface * - * This funciton is called on module load in order to register the driver. + * This function is called on module load in order to register the driver. **/ int fm10k_register_pci_driver(void) { @@ -2392,7 +2334,7 @@ int fm10k_register_pci_driver(void) /** * fm10k_unregister_pci_driver - unregister driver interface * - * This funciton is called on module unload in order to remove the driver. + * This function is called on module unload in order to remove the driver. **/ void fm10k_unregister_pci_driver(void) { diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 62ccebc5f728..dc75507c9926 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -219,8 +219,8 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) /* VLAN multi-bit write: * The multi-bit write has several parts to it. - * 3 2 1 0 - * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * 24 16 8 0 + * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | RSVD0 | Length |C|RSVD0| VLAN ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -488,6 +488,10 @@ static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort, if (!fm10k_glort_valid_pf(hw, glort)) return FM10K_ERR_PARAM; + /* reset multicast mode if deleting lport */ + if (!enable) + fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE); + /* construct the lport message from the 2 pieces of data we have */ lport_msg = ((u32)count << 16) | glort; @@ -527,8 +531,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw, return FM10K_ERR_PARAM; /* determine count of VSIs and queues */ - queue_count = 1 << (dglort->rss_l + dglort->pc_l); - vsi_count = 1 << (dglort->vsi_l + dglort->queue_l); + queue_count = BIT(dglort->rss_l + dglort->pc_l); + vsi_count = BIT(dglort->vsi_l + dglort->queue_l); glort = dglort->glort; q_idx = dglort->queue_b; @@ -544,8 +548,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw, } /* determine count of PCs and queues */ - queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l); - pc_count = 1 << dglort->pc_l; + queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l); + pc_count = BIT(dglort->pc_l); /* configure PC for Tx queues */ for (pc = 0; pc < pc_count; pc++) { @@ -711,8 +715,8 @@ static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs, FM10K_RXDCTL_WRITE_BACK_MIN_DELAY | FM10K_RXDCTL_DROP_ON_EMPTY); fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx), - FM10K_RXQCTL_VF | - (i << FM10K_RXQCTL_VF_SHIFT)); + (i << FM10K_RXQCTL_VF_SHIFT) | + FM10K_RXQCTL_VF); /* map queue pair to VF */ fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx); @@ -864,9 +868,13 @@ static s32 
fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw, fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0); fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0); - /* determine correct default VLAN ID */ + /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is + * used here to indicate to the VF that it will not have privilege to + * write VLAN_TABLE. All policy is enforced on the PF but this allows + * the VF to correctly report errors to userspace requests. + */ if (vf_info->pf_vid) - vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR; + vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE; else vf_vid = vf_info->sw_vid; @@ -952,7 +960,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, return FM10K_ERR_PARAM; /* clear event notification of VF FLR */ - fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32)); + fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32)); /* force timeout and then disconnect the mailbox */ vf_info->mbx.timeout = 0; @@ -987,7 +995,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw, txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) | (vf_idx << FM10K_TXQCTL_TC_SHIFT) | FM10K_TXQCTL_VF | vf_idx; - rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT); + rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF; /* stop further DMA and reset queue ownership back to VF */ for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) { @@ -1140,19 +1148,6 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw, fm10k_update_hw_stats_q(hw, q, idx, qpp); } -static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw, - struct fm10k_vf_info *vf_info, - u64 timestamp) -{ - u32 msg[4]; - - /* generate port state response to notify VF it is not ready */ - fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588); - fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp); - - return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg); -} - /** * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF * @hw: Pointer to hardware structure @@ -1223,18 +1218,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results, if (err) return err; - /* verify upper 16 bits are zero */ - if (vid >> 16) - return FM10K_ERR_PARAM; - set = !(vid & FM10K_VLAN_CLEAR); vid &= ~FM10K_VLAN_CLEAR; - err = fm10k_iov_select_vid(vf_info, (u16)vid); - if (err < 0) - return err; + /* if the length field has been set, this is a multi-bit + * update request. For multi-bit requests, simply disallow + * them when the pf_vid has been set. In this case, the PF + * should have already cleared the VLAN_TABLE, and if we + * allowed them, it could allow a rogue VF to receive traffic + * on a VLAN it was not assigned. In the single-bit case, we + * need to modify requests for VLAN 0 to use the default PF or + * SW vid when assigned. 
+ */ - vid = err; + if (vid >> 16) { + /* prevent multi-bit requests when PF has + * administratively set the VLAN for this VF + */ + if (vf_info->pf_vid) + return FM10K_ERR_PARAM; + } else { + err = fm10k_iov_select_vid(vf_info, (u16)vid); + if (err < 0) + return err; + + vid = err; + } /* update VSI info for VF in regards to VLAN table */ err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); @@ -1370,7 +1379,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results, mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode); /* if mode is not currently enabled, enable it */ - if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode))) + if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode))) fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode); /* swap mode back to a bit flag */ @@ -1604,7 +1613,7 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw) * @hw: pointer to hardware structure * @switch_ready: pointer to boolean value that will record switch state * - * This funciton will check the DMA_CTRL2 register and mailbox in order + * This function will check the DMA_CTRL2 register and mailbox in order * to determine if the switch is ready for the PF to begin requesting * addresses and mapping traffic to the local interface. **/ @@ -1633,6 +1642,8 @@ out: /* This structure defines the attibutes to be parsed below */ const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = { + FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR, + sizeof(struct fm10k_swapi_error)), FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP), FM10K_TLV_ATTR_LAST }; @@ -1773,89 +1784,6 @@ s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results, return 0; } -const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = { - FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP, - sizeof(struct fm10k_swapi_1588_timestamp)), - FM10K_TLV_ATTR_LAST -}; - -/* currently there is no shared 1588 timestamp handler */ - -/** - * fm10k_adjust_systime_pf - Adjust systime frequency - * @hw: pointer to hardware structure - * @ppb: adjustment rate in parts per billion - * - * This function will adjust the SYSTIME_CFG register contained in BAR 4 - * if this function is supported for BAR 4 access. The adjustment amount - * is based on the parts per billion value provided and adjusted to a - * value based on parts per 2^48 clock cycles. - * - * If adjustment is not supported or the requested value is too large - * we will return an error. - **/ -static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb) -{ - u64 systime_adjust; - - /* if sw_addr is not set we don't have switch register access */ - if (!hw->sw_addr) - return ppb ? FM10K_ERR_PARAM : 0; - - /* we must convert the value from parts per billion to parts per - * 2^48 cycles. In addition I have opted to only use the 30 most - * significant bits of the adjustment value as the 8 least - * significant bits are located in another register and represent - * a value significantly less than a part per billion, the result - * of dropping the 8 least significant bits is that the adjustment - * value is effectively multiplied by 2^8 when we write it. - * - * As a result of all this the math for this breaks down as follows: - * ppb / 10^9 == adjust * 2^8 / 2^48 - * If we solve this for adjust, and simplify it comes out as: - * ppb * 2^31 / 5^9 == adjust - */ - systime_adjust = (ppb < 0) ? 
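The reworked VLAN message handler above treats a nonzero length field (the upper 16 bits of the request) as a multi-bit table update and refuses it whenever the PF has administratively pinned a VLAN, because honoring it could let a rogue VF receive traffic on VLANs it was never assigned; single-bit requests still go through the VID-selection helper so VLAN 0 maps onto the assigned VID. Its decision logic in isolation, with select_vid() standing in for fm10k_iov_select_vid() and CLEAR_BIT for FM10K_VLAN_CLEAR:

/* Sketch of the guard; error codes differ in the real driver. */
set = !(vid & CLEAR_BIT);
vid &= ~CLEAR_BIT;

if (vid >> 16) {
	/* multi-bit update: reject when the PF pinned a VLAN */
	if (vf_info->pf_vid)
		return -EPERM;
} else {
	/* single-bit update: map VLAN 0 to the assigned VID */
	err = select_vid(vf_info, (u16)vid);
	if (err < 0)
		return err;
	vid = err;
}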
-ppb : ppb; - systime_adjust <<= 31; - do_div(systime_adjust, 1953125); - - /* verify the requested adjustment value is in range */ - if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK) - return FM10K_ERR_PARAM; - - if (ppb > 0) - systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE; - - fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust); - - return 0; -} - -/** - * fm10k_read_systime_pf - Reads value of systime registers - * @hw: pointer to the hardware structure - * - * Function reads the content of 2 registers, combined to represent a 64 bit - * value measured in nanosecods. In order to guarantee the value is accurate - * we check the 32 most significant bits both before and after reading the - * 32 least significant bits to verify they didn't change as we were reading - * the registers. - **/ -static u64 fm10k_read_systime_pf(struct fm10k_hw *hw) -{ - u32 systime_l, systime_h, systime_tmp; - - systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); - - do { - systime_tmp = systime_h; - systime_l = fm10k_read_reg(hw, FM10K_SYSTIME); - systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1); - } while (systime_tmp != systime_h); - - return ((u64)systime_h << 32) | systime_l; -} - static const struct fm10k_msg_data fm10k_msg_data_pf[] = { FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), @@ -1885,8 +1813,6 @@ static const struct fm10k_mac_ops mac_ops_pf = { .set_dma_mask = fm10k_set_dma_mask_pf, .get_fault = fm10k_get_fault_pf, .get_host_state = fm10k_get_host_state_pf, - .adjust_systime = fm10k_adjust_systime_pf, - .read_systime = fm10k_read_systime_pf, }; static const struct fm10k_iov_ops iov_ops_pf = { @@ -1898,7 +1824,6 @@ static const struct fm10k_iov_ops iov_ops_pf = { .set_lport = fm10k_iov_set_lport_pf, .reset_lport = fm10k_iov_reset_lport_pf, .update_stats = fm10k_iov_update_stats_pf, - .report_timestamp = fm10k_iov_report_timestamp_pf, }; static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index b2d96b45ca3c..3336d3c10760 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -42,8 +42,6 @@ enum fm10k_pf_tlv_msg_id_v1 { FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503, FM10K_PF_MSG_ID_DELETE_FLOW = 0x504, FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505, - FM10K_PF_MSG_ID_GET_1588_INFO = 0x506, - FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701, }; enum fm10k_pf_tlv_attr_id_v1 { @@ -61,7 +59,6 @@ enum fm10k_pf_tlv_attr_id_v1 { FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B, FM10K_PF_ATTR_ID_PORT = 0x0C, FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D, - FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10, }; #define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0 @@ -74,6 +71,8 @@ enum fm10k_pf_tlv_attr_id_v1 { #define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16 #define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16 +#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED 280 + /* The following data structures are overlayed directly onto TLV mailbox * messages, and must not break 4 byte alignment. Ensure the structures line * up correctly as per their TLV definition. 
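Two details of the deleted PTP helpers above are worth noting. fm10k_adjust_systime_pf() converted parts per billion into the register's parts-per-2^48 units: from ppb / 10^9 == adjust * 2^8 / 2^48 it follows that adjust = ppb * 2^40 / 10^9 = ppb * 2^31 / 5^9, and 5^9 is exactly the 1953125 passed to do_div(). fm10k_read_systime_pf() used the standard race-free read of a 64-bit counter split across two 32-bit registers; its generic shape:

/* Generic split-counter read: retry if the high word changed while
 * the low word was being sampled (a low-word rollover mid-read).
 */
static u64 read_split_counter(void __iomem *lo, void __iomem *hi)
{
	u32 l, h, prev;

	h = readl(hi);
	do {
		prev = h;
		l = readl(lo);
		h = readl(hi);
	} while (h != prev);	/* high word stable around the low read */

	return ((u64)h << 32) | l;
}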
@@ -100,13 +99,6 @@ struct fm10k_swapi_error { struct fm10k_global_table_data ffu; } __aligned(4) __packed; -struct fm10k_swapi_1588_timestamp { - __le64 egress; - __le64 ingress; - __le16 dglort; - __le16 sglort; -} __aligned(4) __packed; - s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[]; #define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \ @@ -122,11 +114,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[]; #define FM10K_PF_MSG_ERR_HANDLER(msg, func) \ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func) -extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[]; -#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \ - FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \ - fm10k_1588_timestamp_msg_attr, func) - s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c deleted file mode 100644 index b4945e8abe03..000000000000 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c +++ /dev/null @@ -1,462 +0,0 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * The full GNU General Public License is included in this distribution in - * the file called "COPYING". - * - * Contact Information: - * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - */ - -#include <linux/ptp_classify.h> -#include <linux/ptp_clock_kernel.h> - -#include "fm10k.h" - -#define FM10K_TS_TX_TIMEOUT (HZ * 15) - -void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface, - struct skb_shared_hwtstamps *hwtstamp, - u64 systime) -{ - unsigned long flags; - - read_lock_irqsave(&interface->systime_lock, flags); - systime += interface->ptp_adjust; - read_unlock_irqrestore(&interface->systime_lock, flags); - - hwtstamp->hwtstamp = ns_to_ktime(systime); -} - -static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface, - __le16 dglort) -{ - struct sk_buff_head *list = &interface->ts_tx_skb_queue; - struct sk_buff *skb; - - skb_queue_walk(list, skb) { - if (FM10K_CB(skb)->fi.w.dglort == dglort) - return skb; - } - - return NULL; -} - -void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb) -{ - struct sk_buff_head *list = &interface->ts_tx_skb_queue; - struct sk_buff *clone; - unsigned long flags; - - /* create clone for us to return on the Tx path */ - clone = skb_clone_sk(skb); - if (!clone) - return; - - FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT; - spin_lock_irqsave(&list->lock, flags); - - /* attempt to locate any buffers with the same dglort, - * if none are present then insert skb in tail of list - */ - skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort); - if (!skb) { - skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; - __skb_queue_tail(list, clone); - } - - spin_unlock_irqrestore(&list->lock, flags); - - /* if list is already has one then we just free the clone */ - if (skb) - dev_kfree_skb(clone); -} - -void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort, - u64 systime) -{ - struct skb_shared_hwtstamps shhwtstamps; - struct sk_buff_head *list = &interface->ts_tx_skb_queue; - struct sk_buff *skb; - unsigned long flags; - - spin_lock_irqsave(&list->lock, flags); - - /* attempt to locate and pull the sk_buff out of the list */ - skb = fm10k_ts_tx_skb(interface, dglort); - if (skb) - __skb_unlink(skb, list); - - spin_unlock_irqrestore(&list->lock, flags); - - /* if not found do nothing */ - if (!skb) - return; - - /* timestamp the sk_buff and free out copy */ - fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime); - skb_tstamp_tx(skb, &shhwtstamps); - dev_kfree_skb_any(skb); -} - -void fm10k_ts_tx_subtask(struct fm10k_intfc *interface) -{ - struct sk_buff_head *list = &interface->ts_tx_skb_queue; - struct sk_buff *skb, *tmp; - unsigned long flags; - - /* If we're down or resetting, just bail */ - if (test_bit(__FM10K_DOWN, &interface->state) || - test_bit(__FM10K_RESETTING, &interface->state)) - return; - - spin_lock_irqsave(&list->lock, flags); - - /* walk though the list and flush any expired timestamp packets */ - skb_queue_walk_safe(list, skb, tmp) { - if (!time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout)) - continue; - __skb_unlink(skb, list); - kfree_skb(skb); - interface->tx_hwtstamp_timeouts++; - } - - spin_unlock_irqrestore(&list->lock, flags); -} - -static u64 fm10k_systime_read(struct fm10k_intfc *interface) -{ - struct fm10k_hw *hw = &interface->hw; - - return hw->mac.ops.read_systime(hw); -} - -void fm10k_ts_reset(struct fm10k_intfc *interface) -{ - s64 ns = ktime_to_ns(ktime_get_real()); - unsigned long flags; - - /* reinitialize the clock */ - write_lock_irqsave(&interface->systime_lock, flags); - interface->ptp_adjust = fm10k_systime_read(interface) - ns; - write_unlock_irqrestore(&interface->systime_lock, 
flags); -} - -void fm10k_ts_init(struct fm10k_intfc *interface) -{ - /* Initialize lock protecting systime access */ - rwlock_init(&interface->systime_lock); - - /* Initialize skb queue for pending timestamp requests */ - skb_queue_head_init(&interface->ts_tx_skb_queue); - - /* reset the clock to current kernel time */ - fm10k_ts_reset(interface); -} - -/** - * fm10k_get_ts_config - get current hardware timestamping configuration - * @netdev: network interface device structure - * @ifreq: ioctl data - * - * This function returns the current timestamping settings. Rather than - * attempt to deconstruct registers to fill in the values, simply keep a copy - * of the old settings around, and return a copy when requested. - */ -int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr) -{ - struct fm10k_intfc *interface = netdev_priv(netdev); - struct hwtstamp_config *config = &interface->ts_config; - - return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? - -EFAULT : 0; -} - -/** - * fm10k_set_ts_config - control hardware time stamping - * @netdev: network interface device structure - * @ifreq: ioctl data - * - * Outgoing time stamping can be enabled and disabled. Play nice and - * disable it when requested, although it shouldn't cause any overhead - * when no packet needs it. At most one packet in the queue may be - * marked for time stamping, otherwise it would be impossible to tell - * for sure to which packet the hardware time stamp belongs. - * - * Incoming time stamping has to be configured via the hardware - * filters. Not all combinations are supported, in particular event - * type has to be specified. Matching the kind of event packet is - * not supported, with the exception of "all V2 events regardless of - * level 2 or 4". - * - * Since hardware always timestamps Path delay packets when timestamping V2 - * packets, regardless of the type specified in the register, only use V2 - * Event mode. This more accurately tells the user what the hardware is going - * to do anyways. - */ -int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr) -{ - struct fm10k_intfc *interface = netdev_priv(netdev); - struct hwtstamp_config ts_config; - - if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config))) - return -EFAULT; - - /* reserved for future extensions */ - if (ts_config.flags) - return -EINVAL; - - switch (ts_config.tx_type) { - case HWTSTAMP_TX_OFF: - break; - case HWTSTAMP_TX_ON: - /* we likely need some check here to see if this is supported */ - break; - default: - return -ERANGE; - } - - switch (ts_config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - case HWTSTAMP_FILTER_ALL: - interface->flags |= FM10K_FLAG_RX_TS_ENABLED; - ts_config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - return -ERANGE; - } - - /* save these settings for future reference */ - interface->ts_config = ts_config; - - return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ? 
- -EFAULT : 0; -} - -static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) -{ - struct fm10k_intfc *interface; - struct fm10k_hw *hw; - int err; - - interface = container_of(ptp, struct fm10k_intfc, ptp_caps); - hw = &interface->hw; - - err = hw->mac.ops.adjust_systime(hw, ppb); - - /* the only error we should see is if the value is out of range */ - return (err == FM10K_ERR_PARAM) ? -ERANGE : err; -} - -static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) -{ - struct fm10k_intfc *interface; - unsigned long flags; - - interface = container_of(ptp, struct fm10k_intfc, ptp_caps); - - write_lock_irqsave(&interface->systime_lock, flags); - interface->ptp_adjust += delta; - write_unlock_irqrestore(&interface->systime_lock, flags); - - return 0; -} - -static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) -{ - struct fm10k_intfc *interface; - unsigned long flags; - u64 now; - - interface = container_of(ptp, struct fm10k_intfc, ptp_caps); - - read_lock_irqsave(&interface->systime_lock, flags); - now = fm10k_systime_read(interface) + interface->ptp_adjust; - read_unlock_irqrestore(&interface->systime_lock, flags); - - *ts = ns_to_timespec64(now); - - return 0; -} - -static int fm10k_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) -{ - struct fm10k_intfc *interface; - unsigned long flags; - u64 ns = timespec64_to_ns(ts); - - interface = container_of(ptp, struct fm10k_intfc, ptp_caps); - - write_lock_irqsave(&interface->systime_lock, flags); - interface->ptp_adjust = fm10k_systime_read(interface) - ns; - write_unlock_irqrestore(&interface->systime_lock, flags); - - return 0; -} - -static int fm10k_ptp_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, - int __always_unused on) -{ - struct ptp_clock_time *t = &rq->perout.period; - struct fm10k_intfc *interface; - struct fm10k_hw *hw; - u64 period; - u32 step; - - /* we can only support periodic output */ - if (rq->type != PTP_CLK_REQ_PEROUT) - return -EINVAL; - - /* verify the requested channel is there */ - if (rq->perout.index >= ptp->n_per_out) - return -EINVAL; - - /* we cannot enforce start time as there is no - * mechanism for that in the hardware, we can only control - * the period. 
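The file being removed above implemented deferred Tx timestamping: clone the outgoing skb with skb_clone_sk(), flag it SKBTX_IN_PROGRESS, hold the clone on a queue keyed by DGLORT, then complete it with skb_tstamp_tx() once the switch reports the egress time (or expire it from the service task). That pattern reduced to its generic core, with queue matching and timeout handling elided:

/* Sketch of the deferred Tx timestamp pattern; ts_queue is a
 * driver-owned sk_buff_head and lookup-by-flow is omitted.
 */
static void ts_tx_enqueue(struct sk_buff_head *ts_queue, struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone_sk(skb);

	if (!clone)		/* no socket or no timestamp requested */
		return;

	skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
	skb_queue_tail(ts_queue, clone);
}

static void ts_tx_complete(struct sk_buff_head *ts_queue, u64 ns)
{
	struct skb_shared_hwtstamps hwts = { .hwtstamp = ns_to_ktime(ns) };
	struct sk_buff *clone = skb_dequeue(ts_queue);

	if (!clone)
		return;

	skb_tstamp_tx(clone, &hwts);	/* deliver via socket error queue */
	dev_kfree_skb_any(clone);
}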
- */ - - /* we cannot support periods greater than 4 seconds due to reg limit */ - if (t->sec > 4 || t->sec < 0) - return -ERANGE; - - interface = container_of(ptp, struct fm10k_intfc, ptp_caps); - hw = &interface->hw; - - /* we simply cannot support the operation if we don't have BAR4 */ - if (!hw->sw_addr) - return -ENOTSUPP; - - /* convert to unsigned 64b ns, verify we can put it in a 32b register */ - period = t->sec * 1000000000LL + t->nsec; - - /* determine the minimum size for period */ - step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) & - FM10K_SYSTIME_CFG_STEP_MASK); - - /* verify the value is in range supported by hardware */ - if ((period && (period < step)) || (period > U32_MAX)) - return -ERANGE; - - /* notify hardware of request to being sending pulses */ - fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index), - (u32)period); - - return 0; -} - -static struct ptp_pin_desc fm10k_ptp_pd[2] = { - { - .name = "IEEE1588_PULSE0", - .index = 0, - .func = PTP_PF_PEROUT, - .chan = 0 - }, - { - .name = "IEEE1588_PULSE1", - .index = 1, - .func = PTP_PF_PEROUT, - .chan = 1 - } -}; - -static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, - enum ptp_pin_function func, unsigned int chan) -{ - /* verify the requested pin is there */ - if (pin >= ptp->n_pins || !ptp->pin_config) - return -EINVAL; - - /* enforce locked channels, no changing them */ - if (chan != ptp->pin_config[pin].chan) - return -EINVAL; - - /* we want to keep the functions locked as well */ - if (func != ptp->pin_config[pin].func) - return -EINVAL; - - return 0; -} - -void fm10k_ptp_register(struct fm10k_intfc *interface) -{ - struct ptp_clock_info *ptp_caps = &interface->ptp_caps; - struct device *dev = &interface->pdev->dev; - struct ptp_clock *ptp_clock; - - snprintf(ptp_caps->name, sizeof(ptp_caps->name), - "%s", interface->netdev->name); - ptp_caps->owner = THIS_MODULE; - /* This math is simply the inverse of the math in - * fm10k_adjust_systime_pf applied to an adjustment value - * of 2^30 - 1 which is the maximum value of the register: - * max_ppb == ((2^30 - 1) * 5^9) / 2^31 - */ - ptp_caps->max_adj = 976562; - ptp_caps->adjfreq = fm10k_ptp_adjfreq; - ptp_caps->adjtime = fm10k_ptp_adjtime; - ptp_caps->gettime64 = fm10k_ptp_gettime; - ptp_caps->settime64 = fm10k_ptp_settime; - - /* provide pins if BAR4 is accessible */ - if (interface->sw_addr) { - /* enable periodic outputs */ - ptp_caps->n_per_out = 2; - ptp_caps->enable = fm10k_ptp_enable; - - /* enable clock pins */ - ptp_caps->verify = fm10k_ptp_verify; - ptp_caps->n_pins = 2; - ptp_caps->pin_config = fm10k_ptp_pd; - } - - ptp_clock = ptp_clock_register(ptp_caps, dev); - if (IS_ERR(ptp_clock)) { - ptp_clock = NULL; - dev_err(dev, "ptp_clock_register failed\n"); - } else { - dev_info(dev, "registered PHC device %s\n", ptp_caps->name); - } - - interface->ptp_clock = ptp_clock; -} - -void fm10k_ptp_unregister(struct fm10k_intfc *interface) -{ - struct ptp_clock *ptp_clock = interface->ptp_clock; - struct device *dev = &interface->pdev->dev; - - if (!ptp_clock) - return; - - interface->ptp_clock = NULL; - - ptp_clock_unregister(ptp_clock); - dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name); -} diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c index ab01bb30752f..f8e87bf086b9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 
2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -222,7 +222,7 @@ s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len) attr = &msg[FM10K_TLV_DWORD_LEN(*msg)]; if (len < 4) { - attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1); + attr[1] = (u32)value & (BIT(8 * len) - 1); } else { attr[1] = (u32)value; if (len > 4) @@ -481,7 +481,8 @@ static s32 fm10k_tlv_attr_validate(u32 *attr, * up into an array of pointers stored in results. The function will * return FM10K_ERR_PARAM on any input or message error, * FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array - * and 0 on success. + * and 0 on success. Any attributes not found in tlv_attr will be silently + * ignored. **/ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, const struct fm10k_tlv_attr *tlv_attr) @@ -518,14 +519,15 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results, while (offset < len) { attr_id = *attr & FM10K_TLV_ID_MASK; - if (attr_id < FM10K_TLV_RESULTS_MAX) - err = fm10k_tlv_attr_validate(attr, tlv_attr); - else - err = FM10K_NOT_IMPLEMENTED; + if (attr_id >= FM10K_TLV_RESULTS_MAX) + return FM10K_NOT_IMPLEMENTED; - if (err < 0) + err = fm10k_tlv_attr_validate(attr, tlv_attr); + if (err == FM10K_NOT_IMPLEMENTED) + ; /* silently ignore non-implemented attributes */ + else if (err) return err; - if (!err) + else results[attr_id] = attr; /* update offset */ @@ -652,29 +654,29 @@ const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = { **/ static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags) { - if (attr_flags & (1 << FM10K_TEST_MSG_STRING)) + if (attr_flags & BIT(FM10K_TEST_MSG_STRING)) fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING, test_str); - if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR)) + if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR)) fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR, test_mac, test_vlan); - if (attr_flags & (1 << FM10K_TEST_MSG_U8)) + if (attr_flags & BIT(FM10K_TEST_MSG_U8)) fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8); - if (attr_flags & (1 << FM10K_TEST_MSG_U16)) + if (attr_flags & BIT(FM10K_TEST_MSG_U16)) fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16); - if (attr_flags & (1 << FM10K_TEST_MSG_U32)) + if (attr_flags & BIT(FM10K_TEST_MSG_U32)) fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32); - if (attr_flags & (1 << FM10K_TEST_MSG_U64)) + if (attr_flags & BIT(FM10K_TEST_MSG_U64)) fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64); - if (attr_flags & (1 << FM10K_TEST_MSG_S8)) + if (attr_flags & BIT(FM10K_TEST_MSG_S8)) fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8); - if (attr_flags & (1 << FM10K_TEST_MSG_S16)) + if (attr_flags & BIT(FM10K_TEST_MSG_S16)) fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16); - if (attr_flags & (1 << FM10K_TEST_MSG_S32)) + if (attr_flags & BIT(FM10K_TEST_MSG_S32)) fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32); - if (attr_flags & (1 << FM10K_TEST_MSG_S64)) + if (attr_flags & BIT(FM10K_TEST_MSG_S64)) fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64); - if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT)) + if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT)) fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT, test_le, 8); } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h 
b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h index e1845e0a17d8..a1f1027fe184 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h index 854ebb1906bf..b8bc06183720 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -225,11 +225,6 @@ struct fm10k_hw; #define FM10K_STATS_LOOPBACK_DROP 0x3806 #define FM10K_STATS_NODESC_DROP 0x3807 -/* Timesync registers */ -#define FM10K_SYSTIME 0x3814 -#define FM10K_SYSTIME_CFG 0x3818 -#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F - /* PCIe state registers */ #define FM10K_PHYADDR 0x381C @@ -355,6 +350,7 @@ struct fm10k_hw; #define FM10K_VLAN_TABLE_VSI_MAX 64 #define FM10K_VLAN_LENGTH_SHIFT 16 #define FM10K_VLAN_CLEAR BIT(15) +#define FM10K_VLAN_OVERRIDE FM10K_VLAN_CLEAR #define FM10K_VLAN_ALL \ ((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT) @@ -381,12 +377,6 @@ struct fm10k_hw; #define FM10K_VFSYSTIME 0x00040 #define FM10K_VFITR(_n) ((_n) + 0x00060) -/* Registers contained in BAR 4 for Switch management */ -#define FM10K_SW_SYSTIME_ADJUST 0x0224D -#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF -#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000 -#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252) - enum fm10k_int_source { fm10k_int_mailbox = 0, fm10k_int_pcie_fault = 1, @@ -550,8 +540,6 @@ struct fm10k_mac_ops { struct fm10k_dglort_cfg *); void (*set_dma_mask)(struct fm10k_hw *, u64); s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *); - s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb); - u64 (*read_systime)(struct fm10k_hw *); }; enum fm10k_mac_type { @@ -617,10 +605,10 @@ struct fm10k_vf_info { */ }; -#define FM10K_VF_FLAG_ALLMULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_ALLMULTI) -#define FM10K_VF_FLAG_MULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_MULTI) -#define FM10K_VF_FLAG_PROMISC_CAPABLE ((u8)1 << FM10K_XCAST_MODE_PROMISC) -#define FM10K_VF_FLAG_NONE_CAPABLE ((u8)1 << FM10K_XCAST_MODE_NONE) +#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI)) +#define FM10K_VF_FLAG_MULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_MULTI)) +#define FM10K_VF_FLAG_PROMISC_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_PROMISC)) +#define FM10K_VF_FLAG_NONE_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_NONE)) #define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF) #define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4) #define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode)) @@ -643,7 +631,6 @@ struct fm10k_iov_ops { s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8); void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *); void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, 
u16); - s32 (*report_timestamp)(struct fm10k_hw *, struct fm10k_vf_info *, u64); }; struct fm10k_iov_info { @@ -667,7 +654,6 @@ struct fm10k_info { struct fm10k_hw { u32 __iomem *hw_addr; - u32 __iomem *sw_addr; void *back; struct fm10k_mac_info mac; struct fm10k_bus_info bus; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c index 91f8d7311f3b..3b06685ea63b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2015 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -188,7 +188,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) if (vsi) return FM10K_ERR_PARAM; - /* verify upper 4 bits of vid and length are 0 */ + /* clever trick to verify reserved bits in both vid and length */ if ((vid << 16 | vid) >> 28) return FM10K_ERR_PARAM; @@ -228,7 +228,7 @@ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results, ether_addr_copy(hw->mac.perm_addr, perm_addr); hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1); - hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR); + hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE); return 0; } @@ -451,13 +451,6 @@ static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode) return mbx->ops.enqueue_tx(hw, mbx, msg); } -const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = { - FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP), - FM10K_TLV_ATTR_LAST -}; - -/* currently there is no shared 1588 timestamp handler */ - /** * fm10k_update_hw_stats_vf - Updates hardware related statistics of VF * @hw: pointer to hardware structure @@ -509,52 +502,6 @@ static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw, return 0; } -/** - * fm10k_adjust_systime_vf - Adjust systime frequency - * @hw: pointer to hardware structure - * @ppb: adjustment rate in parts per billion - * - * This function takes an adjustment rate in parts per billion and will - * verify that this value is 0 as the VF cannot support adjusting the - * systime clock. - * - * If the ppb value is non-zero the return is ERR_PARAM else success - **/ -static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb) -{ - /* The VF cannot adjust the clock frequency, however it should - * already have a syntonic clock with whichever host interface is - * running as the master for the host interface clock domain so - * there should be no frequency adjustment necessary. - */ - return ppb ? FM10K_ERR_PARAM : 0; -} - -/** - * fm10k_read_systime_vf - Reads value of systime registers - * @hw: pointer to the hardware structure - * - * Function reads the content of 2 registers, combined to represent a 64 bit - * value measured in nanoseconds. In order to guarantee the value is accurate - * we check the 32 most significant bits both before and after reading the - * 32 least significant bits to verify they didn't change as we were reading - * the registers. 
- **/ -static u64 fm10k_read_systime_vf(struct fm10k_hw *hw) -{ - u32 systime_l, systime_h, systime_tmp; - - systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); - - do { - systime_tmp = systime_h; - systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME); - systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1); - } while (systime_tmp != systime_h); - - return ((u64)systime_h << 32) | systime_l; -} - static const struct fm10k_msg_data fm10k_msg_data_vf[] = { FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf), @@ -579,8 +526,6 @@ static const struct fm10k_mac_ops mac_ops_vf = { .rebind_hw_stats = fm10k_rebind_hw_stats_vf, .configure_dglort_map = fm10k_configure_dglort_map_vf, .get_host_state = fm10k_get_host_state_generic, - .adjust_systime = fm10k_adjust_systime_vf, - .read_systime = fm10k_read_systime_vf, }; static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h index c4439f1313a0..2662f33c0c71 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h @@ -1,5 +1,5 @@ -/* Intel Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2014 Intel Corporation. +/* Intel(R) Ethernet Switch Host Interface Driver + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -29,7 +29,6 @@ enum fm10k_vf_tlv_msg_id { FM10K_VF_MSG_ID_MSIX, FM10K_VF_MSG_ID_MAC_VLAN, FM10K_VF_MSG_ID_LPORT_STATE, - FM10K_VF_MSG_ID_1588, FM10K_VF_MSG_ID_MAX, }; @@ -49,11 +48,6 @@ enum fm10k_tlv_lport_state_attr_id { FM10K_LPORT_STATE_MSG_MAX }; -enum fm10k_tlv_1588_attr_id { - FM10K_1588_MSG_TIMESTAMP, - FM10K_1588_MSG_MAX -}; - #define FM10K_VF_MSG_MSIX_HANDLER(func) \ FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func) @@ -70,9 +64,5 @@ extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[]; FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \ fm10k_lport_state_msg_attr, func) -extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[]; -#define FM10K_VF_MSG_1588_HANDLER(func) \ - FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func) - extern const struct fm10k_info fm10k_vf_info; #endif /* _FM10K_VF_H */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 1ce6e9c0427d..9c44739da5e2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -97,12 +97,12 @@ #define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16) /* Ethtool Private Flags */ -#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0) -#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) -#define I40E_PRIV_FLAGS_FD_ATR BIT(2) -#define I40E_PRIV_FLAGS_VEB_STATS BIT(3) -#define I40E_PRIV_FLAGS_PS BIT(4) -#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5) +#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0) +#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1) +#define I40E_PRIV_FLAGS_FD_ATR BIT(2) +#define I40E_PRIV_FLAGS_VEB_STATS BIT(3) +#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4) +#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) @@ -112,7 +112,9 @@ #define I40E_OEM_VER_PATCH_MASK 0xff #define I40E_OEM_VER_BUILD_SHIFT 8 #define I40E_OEM_VER_SHIFT 24 -#define I40E_PHY_DEBUG_PORT BIT(4) +#define I40E_PHY_DEBUG_ALL \ + (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \ + 
I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW) /* The values in here are decimal coded as hex as is the case in the NVM map*/ #define I40E_CURRENT_NVM_VERSION_HI 0x2 @@ -123,10 +125,7 @@ #define XSTRINGIFY(bar) STRINGIFY(bar) #define I40E_RX_DESC(R, i) \ - ((ring_is_16byte_desc_enabled(R)) \ - ? (union i40e_32byte_rx_desc *) \ - (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \ - : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))) + (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])) #define I40E_TX_DESC(R, i) \ (&(((struct i40e_tx_desc *)((R)->desc))[i])) #define I40E_TX_CTXTDESC(R, i) \ @@ -202,6 +201,7 @@ struct i40e_lump_tracking { #define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4) #define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4) +#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4) enum i40e_fd_stat_idx { I40E_FD_STAT_ATR, @@ -244,7 +244,6 @@ struct i40e_fdir_filter { #define I40E_DCB_PRIO_TYPE_STRICT 0 #define I40E_DCB_PRIO_TYPE_ETS 1 #define I40E_DCB_STRICT_PRIO_CREDITS 127 -#define I40E_MAX_USER_PRIORITY 8 /* DCB per TC information data structure */ struct i40e_tc_info { u16 qoffset; /* Queue offset from base queue */ @@ -320,8 +319,6 @@ struct i40e_pf { #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) -#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4) -#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5) #define I40E_FLAG_RSS_ENABLED BIT_ULL(6) #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) #define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8) @@ -330,7 +327,6 @@ struct i40e_pf { #ifdef I40E_FCOE #define I40E_FLAG_FCOE_ENABLED BIT_ULL(11) #endif /* I40E_FCOE */ -#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13) #define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14) #define I40E_FLAG_FILTER_SYNC BIT_ULL(15) #define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16) @@ -363,6 +359,7 @@ struct i40e_pf { #define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47) #define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48) #define I40E_FLAG_PF_MAC BIT_ULL(50) +#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51) /* tracks features that get auto disabled by errors */ u64 auto_disable_flags; @@ -534,9 +531,7 @@ struct i40e_vsi { u8 *rss_lut_user; /* User configured lookup table entries */ u16 max_frame; - u16 rx_hdr_len; u16 rx_buf_len; - u8 dtype; /* List of q_vectors allocated to this VSI */ struct i40e_q_vector **q_vectors; @@ -554,7 +549,7 @@ struct i40e_vsi { u16 num_queue_pairs; /* Used tx and rx pairs */ u16 num_desc; enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ - u16 vf_id; /* Virtual function ID for SRIOV VSIs */ + s16 vf_id; /* Virtual function ID for SRIOV VSIs */ struct i40e_tc_configuration tc_config; struct i40e_aqc_vsi_properties_data info; @@ -811,6 +806,7 @@ int i40e_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid); #endif int i40e_open(struct net_device *netdev); +int i40e_close(struct net_device *netdev); int i40e_vsi_open(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); @@ -823,7 +819,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi); struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); #ifdef I40E_FCOE -int i40e_close(struct net_device *netdev); int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, struct tc_to_netdev *tc); void i40e_netpoll(struct net_device *netdev); diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index df8e2fd6a649..738b42a44f20 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -33,16 +33,6 @@ static void i40e_resume_aq(struct i40e_hw *hw); /** - * i40e_is_nvm_update_op - return true if this is an NVM update operation - * @desc: API request descriptor - **/ -static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) -{ - return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) || - (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update)); -} - -/** * i40e_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure * @@ -624,13 +614,9 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) /* pre-emptive resource lock release */ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); - hw->aq.nvm_release_on_done = false; + hw->nvm_release_on_done = false; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - ret_code = i40e_aq_set_hmc_resource_profile(hw, - I40E_HMC_PROFILE_DEFAULT, - 0, - NULL); ret_code = 0; /* success! */ @@ -1023,26 +1009,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, hw->aq.arq.next_to_clean = ntc; hw->aq.arq.next_to_use = ntu; - if (i40e_is_nvm_update_op(&e->desc)) { - if (hw->aq.nvm_release_on_done) { - i40e_release_nvm(hw); - hw->aq.nvm_release_on_done = false; - } - - switch (hw->nvmupd_state) { - case I40E_NVMUPD_STATE_INIT_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - break; - - case I40E_NVMUPD_STATE_WRITE_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; - break; - - default: - break; - } - } - + i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode)); clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index 12fbbddea299..d92aad38afdc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -97,7 +97,6 @@ struct i40e_adminq_info { u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_release_on_done; struct mutex asq_mutex; /* Send queue lock */ struct mutex arq_mutex; /* Receive queue lock */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 8d5c65ab6267..11cf1a5ebccf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -78,17 +78,17 @@ struct i40e_aq_desc { #define I40E_AQ_FLAG_EI_SHIFT 14 #define I40E_AQ_FLAG_FE_SHIFT 15 -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP 
BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { @@ -205,10 +205,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, - /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, - /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, i40e_aqc_opc_set_phy_config = 0x0601, @@ -429,6 +425,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 #define I40E_AQ_CAP_ID_WSR_PROT 0x0064 +#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 @@ -1585,27 +1582,6 @@ struct i40e_aqc_configure_partition_bw_data { I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); -/* Get and set the active HMC resource profile and status. - * (direct 0x0500) and (direct 0x0501) - */ -struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); - -enum i40e_aq_hmc_profile { - /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, -}; - -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F - /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ @@ -1652,11 +1628,11 @@ enum i40e_aq_phy_type { enum i40e_aq_link_speed { I40E_LINK_SPEED_UNKNOWN = 0, - I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), - I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), - I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), - I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) + I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT) }; struct i40e_aqc_module_desc { @@ -1857,7 +1833,10 @@ struct i40e_aqc_set_phy_debug { #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 +/* Disable link manageability on a single port */ #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 +/* Disable link manageability on all ports */ +#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20 u8 reserved[15]; }; @@ -1927,9 +1906,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - (1 << 
I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) + BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) #define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) +#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 @@ -2226,13 +2205,11 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp); */ struct i40e_aqc_lldp_set_local_mib { #define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0 -#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) -#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \ - SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT) #define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0 #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1) -#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \ - SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) +#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \ + BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT) #define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1 u8 type; u8 reserved0; @@ -2250,7 +2227,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib); struct i40e_aqc_lldp_stop_start_specific_agent { #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 #define I40E_AQC_START_SPECIFIC_AGENT_MASK \ - (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT) + BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT) u8 command; u8 reserved[15]; }; @@ -2303,7 +2280,7 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); struct i40e_aqc_get_set_rss_key { -#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) @@ -2323,14 +2300,13 @@ struct i40e_aqc_get_set_rss_key_data { I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); struct i40e_aqc_get_set_rss_lut { -#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) __le16 vsi_id; #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h index bf6b453d93a1..a4601d97fb24 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.h +++ b/drivers/net/ethernet/intel/i40e/i40e_client.h @@ -217,7 +217,7 @@ struct i40e_client { #define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0) #define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2) enum i40e_client_type type; - struct i40e_client_ops *ops; /* client ops provided by the client */ + const struct i40e_client_ops *ops; /* client ops provided by the client */ }; static inline bool i40e_client_is_registered(struct i40e_client *client) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 4596294c2ab1..422b41d61c9a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -60,6 +60,8 @@ static 
i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: + case I40E_DEV_ID_SFP_I_X722: + case I40E_DEV_ID_QSFP_I_X722: hw->mac.type = I40E_MAC_X722; break; default: @@ -694,7 +696,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = { /* Non Tunneled IPv6 */ I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), I40E_PTT_UNUSED_ENTRY(91), I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), @@ -1901,13 +1903,13 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, * * Reset the external PHY. **/ -enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, - struct i40e_asq_cmd_details *cmd_details) +i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; struct i40e_aqc_set_phy_debug *cmd = (struct i40e_aqc_set_phy_debug *)&desc.params.raw; - enum i40e_status_code status; + i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_phy_debug); @@ -1970,10 +1972,12 @@ aq_add_vsi_exit: * @seid: vsi number * @set: set unicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL + * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc **/ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, - struct i40e_asq_cmd_details *cmd_details) + struct i40e_asq_cmd_details *cmd_details, + bool rx_only_promisc) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = @@ -1986,8 +1990,9 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; - if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1)) + if (rx_only_promisc && + (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1))) flags |= I40E_AQC_SET_VSI_PROMISC_TX; } @@ -2037,6 +2042,76 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, } /** + * i40e_aq_set_vsi_mc_promisc_on_vlan + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN + * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST; + + cmd->promiscuous_flags = cpu_to_le16(flags); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST); + cmd->seid = cpu_to_le16(seid); + cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_set_vsi_uc_promisc_on_vlan + * @hw: pointer to the hw 
struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + enum i40e_status_code status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + + cmd->promiscuous_flags = cpu_to_le16(flags); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + cmd->seid = cpu_to_le16(seid); + cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_aq_set_vsi_broadcast * @hw: pointer to the hw struct * @seid: vsi number @@ -2157,6 +2232,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_aq_desc desc; struct i40e_aqc_add_get_update_vsi *cmd = (struct i40e_aqc_add_get_update_vsi *)&desc.params.raw; + struct i40e_aqc_add_get_update_vsi_completion *resp = + (struct i40e_aqc_add_get_update_vsi_completion *) + &desc.params.raw; i40e_status status; i40e_fill_default_direct_cmd_desc(&desc, @@ -2168,6 +2246,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info, sizeof(vsi_ctx->info), cmd_details); + vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + return status; } @@ -2205,6 +2286,35 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, } /** + * i40e_aq_set_switch_config + * @hw: pointer to the hardware structure + * @flags: bit flag values to set + * @valid_flags: which bit flags to set + * @cmd_details: pointer to command details structure or NULL + * + * Set switch configuration bits + **/ +enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, + u16 valid_flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_switch_config *scfg = + (struct i40e_aqc_set_switch_config *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_switch_config); + scfg->flags = cpu_to_le16(flags); + scfg->valid_flags = cpu_to_le16(valid_flags); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_aq_get_firmware_version * @hw: pointer to the hw struct * @fw_major_version: firmware major version @@ -2660,10 +2770,7 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 *rules_used, u16 *rules_free) { /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ - if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) { - if (!rule_id) - return I40E_ERR_PARAM; - } else { + if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { /* count and mr_list shall be valid for rule_type INGRESS VLAN * mirroring. For other rule_type, count and rule_type should * not matter. 
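A brief usage sketch for the two per-VLAN promiscuous AQ wrappers added in the hunks above. This is not part of the patch: the caller name and the -EIO error mapping are illustrative assumptions; only the wrapper signatures are taken from the new code itself.

/* Hypothetical caller: enable unicast and multicast promiscuous reception
 * for frames tagged with a single VLAN id on one VSI, using the two AQ
 * wrappers introduced above. Passing NULL for cmd_details follows the
 * common pattern elsewhere in i40e_common.c.
 */
static int example_set_vlan_promisc(struct i40e_hw *hw, u16 seid, u16 vid)
{
	enum i40e_status_code status;

	/* capture any unicast packet carrying this VLAN tag */
	status = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid, true, vid, NULL);
	if (status)
		return -EIO;

	/* likewise for multicast packets with the same tag */
	status = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid, true, vid, NULL);
	if (status)
		return -EIO;

	return 0;
}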
@@ -2780,36 +2887,6 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw, } /** - * i40e_aq_set_hmc_resource_profile - * @hw: pointer to the hw struct - * @profile: type of profile the HMC is to be set as - * @pe_vf_enabled_count: the number of PE enabled VFs the system has - * @cmd_details: pointer to command details structure or NULL - * - * set the HMC profile of the device. - **/ -i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, - enum i40e_aq_hmc_profile profile, - u8 pe_vf_enabled_count, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aq_get_set_hmc_resource_profile *cmd = - (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw; - i40e_status status; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_hmc_resource_profile); - - cmd->pm_profile = (u8)profile; - cmd->pe_vf_enabled = pe_vf_enabled_count; - - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - return status; -} - -/** * i40e_aq_request_resource * @hw: pointer to the hw struct * @resource: resource id @@ -3073,6 +3150,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, break; case I40E_AQ_CAP_ID_MSIX: p->num_msix_vectors = number; + i40e_debug(hw, I40E_DEBUG_INIT, + "HW Capability: MSIX vector count = %d\n", + p->num_msix_vectors); break; case I40E_AQ_CAP_ID_VF_MSIX: p->num_msix_vectors_vf = number; @@ -3128,6 +3208,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, p->wr_csr_prot = (u64)number; p->wr_csr_prot |= (u64)logical_id << 32; break; + case I40E_AQ_CAP_ID_NVM_MGMT: + if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) + p->sec_rev_disabled = true; + if (number & I40E_NVM_MGMT_UPDATE_DISABLED) + p->update_disabled = true; + break; default: break; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 0c97733d253c..e6af8c8d7019 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -147,9 +147,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", (unsigned long int)nd->vlan_features); } - if (vsi->active_vlans) - dev_info(&pf->pdev->dev, - " vlgrp: & = %p\n", vsi->active_vlans); + dev_info(&pf->pdev->dev, + " vlgrp: & = %p\n", vsi->active_vlans); dev_info(&pf->pdev->dev, " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", vsi->state, vsi->flags, @@ -269,13 +268,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rx_ring->queue_index, rx_ring->reg_idx); dev_info(&pf->pdev->dev, - " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n", - i, rx_ring->rx_hdr_len, - rx_ring->rx_buf_len, - rx_ring->dtype); + " rx_rings[%i]: rx_buf_len = %d\n", + i, rx_ring->rx_buf_len); dev_info(&pf->pdev->dev, - " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", - i, ring_is_ps_enabled(rx_ring), + " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, rx_ring->next_to_use, rx_ring->next_to_clean, rx_ring->ring_active); @@ -327,9 +324,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) tx_ring->queue_index, tx_ring->reg_idx); dev_info(&pf->pdev->dev, - " tx_rings[%i]: dtype = %d\n", - i, tx_ring->dtype); - dev_info(&pf->pdev->dev, " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", i, tx_ring->next_to_use, @@ -366,8 +360,8 
@@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " work_limit = %d\n", vsi->work_limit); dev_info(&pf->pdev->dev, - " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", - vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); + " max_frame = %d, rx_buf_len = %d dtype = %d\n", + vsi->max_frame, vsi->rx_buf_len, 0); dev_info(&pf->pdev->dev, " num_q_vectors = %i, base_vector = %i\n", vsi->num_q_vectors, vsi->base_vector); @@ -592,13 +586,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, " d[%03x] = 0x%016llx 0x%016llx\n", i, txd->buffer_addr, txd->cmd_type_offset_bsz); - } else if (sizeof(union i40e_rx_desc) == - sizeof(union i40e_16byte_rx_desc)) { - rxd = I40E_RX_DESC(ring, i); - dev_info(&pf->pdev->dev, - " d[%03x] = 0x%016llx 0x%016llx\n", - i, rxd->read.pkt_addr, - rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, @@ -620,13 +607,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, txd->buffer_addr, txd->cmd_type_offset_bsz); - } else if (sizeof(union i40e_rx_desc) == - sizeof(union i40e_16byte_rx_desc)) { - rxd = I40E_RX_DESC(ring, desc_n); - dev_info(&pf->pdev->dev, - "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", - vsi_seid, ring_id, desc_n, - rxd->read.pkt_addr, rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index 99257fcd1ef4..d701861c6e1e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -44,6 +44,8 @@ #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_SFP_I_X722 0x37D3 +#define I40E_DEV_ID_QSFP_I_X722 0x37D4 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 784b1659457a..5e8d84ff7d5f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -230,12 +230,22 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = { #define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN) +static const char i40e_priv_flags_strings_gl[][ETH_GSTRING_LEN] = { + "MFP", + "LinkPolling", + "flow-director-atr", + "veb-stats", + "hw-atr-eviction", + "vf-true-promisc-support", +}; + +#define I40E_PRIV_FLAGS_GL_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings_gl) + static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "NPAR", "LinkPolling", "flow-director-atr", "veb-stats", - "packet-split", "hw-atr-eviction", }; @@ -252,6 +262,110 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) } /** + * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes + * @phy_types: PHY types to convert + * @supported: pointer to the ethtool supported variable to fill in + * @advertising: pointer to the ethtool advertising variable to fill in + * + **/ +static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, + u32 *advertising) +{ + enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types; + + *supported = 0x0; + *advertising = 0x0; + + if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { + *supported |= 
SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + *advertising |= ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { + *supported |= SUPPORTED_100baseT_Full; + *advertising |= ADVERTISED_100baseT_Full; + } + } + if (phy_types & I40E_CAP_PHY_TYPE_XAUI || + phy_types & I40E_CAP_PHY_TYPE_XFI || + phy_types & I40E_CAP_PHY_TYPE_SFI || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) + *supported |= SUPPORTED_10000baseT_Full; + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_T || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { + *supported |= SUPPORTED_Autoneg | + SUPPORTED_10000baseT_Full; + *advertising |= ADVERTISED_Autoneg | + ADVERTISED_10000baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || + phy_types & I40E_CAP_PHY_TYPE_XLPPI || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC) + *supported |= SUPPORTED_40000baseCR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { + *supported |= SUPPORTED_Autoneg | + SUPPORTED_40000baseCR4_Full; + *advertising |= ADVERTISED_Autoneg | + ADVERTISED_40000baseCR4_Full; + } + if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && + !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) { + *supported |= SUPPORTED_Autoneg | + SUPPORTED_100baseT_Full; + *advertising |= ADVERTISED_Autoneg | + ADVERTISED_100baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { + *supported |= SUPPORTED_Autoneg | + SUPPORTED_1000baseT_Full; + *advertising |= ADVERTISED_Autoneg | + ADVERTISED_1000baseT_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) + *supported |= SUPPORTED_40000baseSR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) + *supported |= SUPPORTED_40000baseLR4_Full; + if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) { + *supported |= SUPPORTED_40000baseKR4_Full | + SUPPORTED_Autoneg; + *advertising |= ADVERTISED_40000baseKR4_Full | + ADVERTISED_Autoneg; + } + if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { + *supported |= SUPPORTED_20000baseKR2_Full | + SUPPORTED_Autoneg; + *advertising |= ADVERTISED_20000baseKR2_Full | + ADVERTISED_Autoneg; + } + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { + *supported |= SUPPORTED_10000baseKR_Full | + SUPPORTED_Autoneg; + *advertising |= ADVERTISED_10000baseKR_Full | + ADVERTISED_Autoneg; + } + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { + *supported |= SUPPORTED_10000baseKX4_Full | + SUPPORTED_Autoneg; + *advertising |= ADVERTISED_10000baseKX4_Full | + ADVERTISED_Autoneg; + } + if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { + *supported |= SUPPORTED_1000baseKX_Full | + SUPPORTED_Autoneg; + *advertising |= ADVERTISED_1000baseKX_Full | + ADVERTISED_Autoneg; + } +} + +/** * i40e_get_settings_link_up - Get the Link settings for when link is up * @hw: hw structure * @ecmd: ethtool command to fill in @@ -265,6 +379,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, { struct i40e_link_status *hw_link_info = &hw->phy.link_info; u32 link_speed = hw_link_info->link_speed; + u32 e_advertising = 0x0; + u32 e_supported = 0x0; /* Initialize supported and advertised settings based on phy settings */ switch (hw_link_info->phy_type) { @@ -305,14 +421,18 @@ static void 
i40e_get_settings_link_up(struct i40e_hw *hw, break; case I40E_PHY_TYPE_10GBASE_T: case I40E_PHY_TYPE_1000BASE_T: + case I40E_PHY_TYPE_100BASE_TX: ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full; + SUPPORTED_1000baseT_Full | + SUPPORTED_100baseT_Full; ecmd->advertising = ADVERTISED_Autoneg; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) ecmd->advertising |= ADVERTISED_10000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) + ecmd->advertising |= ADVERTISED_100baseT_Full; break; case I40E_PHY_TYPE_1000BASE_T_OPTICAL: ecmd->supported = SUPPORTED_Autoneg | @@ -320,12 +440,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, ecmd->advertising = ADVERTISED_Autoneg | ADVERTISED_1000baseT_Full; break; - case I40E_PHY_TYPE_100BASE_TX: - ecmd->supported = SUPPORTED_Autoneg | - SUPPORTED_100baseT_Full; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) - ecmd->advertising |= ADVERTISED_100baseT_Full; - break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: ecmd->supported = SUPPORTED_Autoneg | @@ -352,14 +466,23 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, ecmd->advertising |= ADVERTISED_100baseT_Full; } break; - /* Backplane is set based on supported phy types in get_settings - * so don't set anything here but don't warn either - */ case I40E_PHY_TYPE_40GBASE_KR4: case I40E_PHY_TYPE_20GBASE_KR2: case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_1000BASE_KX: + ecmd->supported |= SUPPORTED_40000baseKR4_Full | + SUPPORTED_20000baseKR2_Full | + SUPPORTED_10000baseKR_Full | + SUPPORTED_10000baseKX4_Full | + SUPPORTED_1000baseKX_Full | + SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_40000baseKR4_Full | + ADVERTISED_20000baseKR2_Full | + ADVERTISED_10000baseKR_Full | + ADVERTISED_10000baseKX4_Full | + ADVERTISED_1000baseKX_Full | + ADVERTISED_Autoneg; break; default: /* if we got here and link is up something bad is afoot */ @@ -367,6 +490,16 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, hw_link_info->phy_type); } + /* Now that we've worked out everything that could be supported by the + * current PHY type, get what is supported by the NVM and intersect the + * two to get what is truly supported + */ + i40e_phy_type_to_ethtool(pf, &e_supported, + &e_advertising); + + ecmd->supported = ecmd->supported & e_supported; + ecmd->advertising = ecmd->advertising & e_advertising; + /* Set speed and duplex */ switch (link_speed) { case I40E_LINK_SPEED_40GB: @@ -401,74 +534,11 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw, struct ethtool_cmd *ecmd, struct i40e_pf *pf) { - enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types; - /* link is down and the driver needs to fall back on * supported phy types to figure out what info to display */ - ecmd->supported = 0x0; - ecmd->advertising = 0x0; - if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { - ecmd->supported |= SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; - if (pf->hw.mac.type == I40E_MAC_X722) { - ecmd->supported |= SUPPORTED_100baseT_Full; - ecmd->advertising |= ADVERTISED_100baseT_Full; - if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { - ecmd->supported |= SUPPORTED_100baseT_Full; - ecmd->advertising |= ADVERTISED_100baseT_Full; - } - } - } - if (phy_types & I40E_CAP_PHY_TYPE_XAUI || - 
phy_types & I40E_CAP_PHY_TYPE_XFI || - phy_types & I40E_CAP_PHY_TYPE_SFI || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) - ecmd->supported |= SUPPORTED_10000baseT_Full; - if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_T || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { - ecmd->supported |= SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_10000baseT_Full; - } - if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || - phy_types & I40E_CAP_PHY_TYPE_XLPPI || - phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC) - ecmd->supported |= SUPPORTED_40000baseCR4_Full; - if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || - phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { - ecmd->supported |= SUPPORTED_Autoneg | - SUPPORTED_40000baseCR4_Full; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_40000baseCR4_Full; - } - if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) && - !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) { - ecmd->supported |= SUPPORTED_Autoneg | - SUPPORTED_100baseT_Full; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_100baseT_Full; - } - if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || - phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || - phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || - phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { - ecmd->supported |= SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; - ecmd->advertising |= ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; - } - if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) - ecmd->supported |= SUPPORTED_40000baseSR4_Full; - if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) - ecmd->supported |= SUPPORTED_40000baseLR4_Full; + i40e_phy_type_to_ethtool(pf, &ecmd->supported, + &ecmd->advertising); /* With no link speed and duplex are unknown */ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); @@ -497,38 +567,6 @@ static int i40e_get_settings(struct net_device *netdev, i40e_get_settings_link_down(hw, ecmd, pf); /* Now set the settings that don't rely on link being up/down */ - - /* For backplane, supported and advertised are only reliant on the - * phy types the NVM specifies are supported. - */ - if (hw->device_id == I40E_DEV_ID_KX_B || - hw->device_id == I40E_DEV_ID_KX_C || - hw->device_id == I40E_DEV_ID_20G_KR2 || - hw->device_id == I40E_DEV_ID_20G_KR2_A) { - ecmd->supported = SUPPORTED_Autoneg; - ecmd->advertising = ADVERTISED_Autoneg; - if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) { - ecmd->supported |= SUPPORTED_40000baseKR4_Full; - ecmd->advertising |= ADVERTISED_40000baseKR4_Full; - } - if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { - ecmd->supported |= SUPPORTED_20000baseKR2_Full; - ecmd->advertising |= ADVERTISED_20000baseKR2_Full; - } - if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { - ecmd->supported |= SUPPORTED_10000baseKR_Full; - ecmd->advertising |= ADVERTISED_10000baseKR_Full; - } - if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { - ecmd->supported |= SUPPORTED_10000baseKX4_Full; - ecmd->advertising |= ADVERTISED_10000baseKX4_Full; - } - if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { - ecmd->supported |= SUPPORTED_1000baseKX_Full; - ecmd->advertising |= ADVERTISED_1000baseKX_Full; - } - } - /* Set autoneg settings */ ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); @@ -1143,6 +1181,10 @@ static void i40e_get_drvinfo(struct net_device *netdev, sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(pf->pdev), sizeof(drvinfo->bus_info)); + if (pf->hw.pf_id == 0) + drvinfo->n_priv_flags = I40E_PRIV_FLAGS_GL_STR_LEN; + else + drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; } static void i40e_get_ringparam(struct net_device *netdev, @@ -1259,6 +1301,13 @@ static int i40e_set_ringparam(struct net_device *netdev, } for (i = 0; i < vsi->num_queue_pairs; i++) { + /* this is to allow wr32 to have something to write to + * during early allocation of Rx buffers + */ + u32 __iomem faketail = 0; + struct i40e_ring *ring; + u16 unused; + /* clone ring and setup updated count */ rx_rings[i] = *vsi->rx_rings[i]; rx_rings[i].count = new_rx_count; @@ -1267,12 +1316,22 @@ static int i40e_set_ringparam(struct net_device *netdev, */ rx_rings[i].desc = NULL; rx_rings[i].rx_bi = NULL; + rx_rings[i].tail = (u8 __iomem *)&faketail; err = i40e_setup_rx_descriptors(&rx_rings[i]); + if (err) + goto rx_unwind; + + /* now allocate the Rx buffers to make sure the OS + * has enough memory, any failure here means abort + */ + ring = &rx_rings[i]; + unused = I40E_DESC_UNUSED(ring); + err = i40e_alloc_rx_buffers(ring, unused); +rx_unwind: if (err) { - while (i) { - i--; + do { i40e_free_rx_resources(&rx_rings[i]); - } + } while (i--); kfree(rx_rings); rx_rings = NULL; @@ -1298,6 +1357,17 @@ static int i40e_set_ringparam(struct net_device *netdev, if (rx_rings) { for (i = 0; i < vsi->num_queue_pairs; i++) { i40e_free_rx_resources(vsi->rx_rings[i]); + /* get the real tail offset */ + rx_rings[i].tail = vsi->rx_rings[i]->tail; + /* this is to fake out the allocation routine + * into thinking it has to realloc everything + * but the recycling logic will let us re-use + * the buffers allocated above + */ + rx_rings[i].next_to_use = 0; + rx_rings[i].next_to_clean = 0; + rx_rings[i].next_to_alloc = 0; + /* do a struct copy */ *vsi->rx_rings[i] = rx_rings[i]; } kfree(rx_rings); @@ -1342,7 +1412,10 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) return I40E_VSI_STATS_LEN(netdev); } case ETH_SS_PRIV_FLAGS: - return I40E_PRIV_FLAGS_STR_LEN; + if (pf->hw.pf_id == 0) + return I40E_PRIV_FLAGS_GL_STR_LEN; + else + return I40E_PRIV_FLAGS_STR_LEN; default: return -EOPNOTSUPP; } @@ -1540,10 +1613,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, /* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */ break; case ETH_SS_PRIV_FLAGS: - for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { - memcpy(data, i40e_priv_flags_strings[i], - ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; + if (pf->hw.pf_id == 0) { + for (i = 0; i < I40E_PRIV_FLAGS_GL_STR_LEN; i++) { + memcpy(data, i40e_priv_flags_strings_gl[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + } else { + for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) { + memcpy(data, i40e_priv_flags_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } } break; default: @@ -1714,7 +1795,7 @@ static void i40e_diag_test(struct net_device *netdev, /* If the device is online then take it offline */ if (if_running) /* indicate we're in test mode */ - dev_close(netdev); + i40e_close(netdev); else /* This reset does not affect link - if it is * changed to a type of reset that does affect @@ -1743,7 +1824,7 @@ static void i40e_diag_test(struct net_device *netdev, i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); if (if_running) - dev_open(netdev); + i40e_open(netdev); } 
else { /* Online tests */ netif_info(pf, drv, netdev, "online testing starting\n"); @@ -1837,7 +1918,7 @@ static int i40e_set_phys_id(struct net_device *netdev, if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) { pf->led_status = i40e_led_get(hw); } else { - i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL); + i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL); ret = i40e_led_get_phy(hw, &temp_status, &pf->phy_led_val); pf->led_status = temp_status; @@ -2490,7 +2571,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, if (!vsi) return -EINVAL; - pf = vsi->back; if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) @@ -2548,15 +2628,18 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi, input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; if (ntohl(fsp->m_ext.data[1])) { - if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) { - netif_info(pf, drv, vsi->netdev, "Invalid VF id\n"); + vf_id = ntohl(fsp->h_ext.data[1]); + if (vf_id >= pf->num_alloc_vfs) { + netif_info(pf, drv, vsi->netdev, + "Invalid VF id %d\n", vf_id); goto free_input; } - vf_id = ntohl(fsp->h_ext.data[1]); /* Find vsi id from vf id and override dest vsi */ input->dest_vsi = pf->vf[vf_id].lan_vsi_id; if (input->q_index >= pf->vf[vf_id].num_queue_pairs) { - netif_info(pf, drv, vsi->netdev, "Invalid queue id\n"); + netif_info(pf, drv, vsi->netdev, + "Invalid queue id %d for VF %d\n", + input->q_index, vf_id); goto free_input; } } @@ -2803,18 +2886,18 @@ static u32 i40e_get_priv_flags(struct net_device *dev) struct i40e_pf *pf = vsi->back; u32 ret_flags = 0; - ret_flags |= pf->hw.func_caps.npar_enable ? - I40E_PRIV_FLAGS_NPAR_FLAG : 0; ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ? I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0; ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ? I40E_PRIV_FLAGS_FD_ATR : 0; ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ? I40E_PRIV_FLAGS_VEB_STATS : 0; - ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ? - I40E_PRIV_FLAGS_PS : 0; ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ? 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT; + if (pf->hw.pf_id == 0) { + ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ? + I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0; + } return ret_flags; } @@ -2829,27 +2912,13 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + u16 sw_flags = 0, valid_flags = 0; bool reset_required = false; + bool promisc_change = false; + int ret; /* NOTE: MFP is not settable */ - /* allow the user to control the method of receive - * buffer DMA, whether the packet is split at header - * boundaries into two separate buffers. In some cases - * one routine or the other will perform better. 
- */ - if ((flags & I40E_PRIV_FLAGS_PS) && - !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) { - pf->flags |= I40E_FLAG_RX_PS_ENABLED; - pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED; - reset_required = true; - } else if (!(flags & I40E_PRIV_FLAGS_PS) && - (pf->flags & I40E_FLAG_RX_PS_ENABLED)) { - pf->flags &= ~I40E_FLAG_RX_PS_ENABLED; - pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; - reset_required = true; - } - if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG) pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED; else @@ -2876,6 +2945,33 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) reset_required = true; } + if (pf->hw.pf_id == 0) { + if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) && + !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { + pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT; + promisc_change = true; + } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) && + (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { + pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT; + promisc_change = true; + } + } + if (promisc_change) { + if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) + sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; + valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; + ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags, + NULL); + if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { + dev_info(&pf->pdev->dev, + "couldn't set switch config bits, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + /* not a fatal problem, just keep going */ + } + } + if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) && (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)) pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 8ad162c16f61..58e6c1570335 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -38,16 +38,6 @@ #include "i40e_fcoe.h" /** - * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE - * @ptype: the packet type field from rx descriptor write-back - **/ -static inline bool i40e_rx_is_fcoe(u16 ptype) -{ - return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) && - (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER); -} - -/** * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF * @sof: the FCoE start of frame delimiter **/ @@ -1371,7 +1361,7 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb, if (i40e_chk_linearize(skb, count)) { if (__skb_linearize(skb)) goto out_drop; - count = TXD_USE_COUNT(skb->len); + count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c index 5ebe12d56ebf..a7c7b1d9b7c8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c +++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c @@ -49,7 +49,7 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, struct i40e_hmc_sd_entry *sd_entry; bool dma_mem_alloc_done = false; struct i40e_dma_mem mem; - i40e_status ret_code; + i40e_status ret_code = I40E_SUCCESS; u64 alloc_len; if (NULL == hmc_info->sd_table.sd_entry) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 67006431726a..1cd0ebf7520a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -45,8 +45,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 25 +#define DRV_VERSION_MINOR 5 +#define DRV_VERSION_BUILD 16 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -90,6 +90,8 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0}, /* required last entry */ @@ -326,7 +328,7 @@ static void i40e_tx_timeout(struct net_device *netdev) unsigned long trans_start; q = netdev_get_tx_queue(netdev, i); - trans_start = q->trans_start ? : netdev->trans_start; + trans_start = q->trans_start; if (netif_xmit_stopped(q) && time_after(jiffies, (trans_start + netdev->watchdog_timeo))) { @@ -396,24 +398,6 @@ static void i40e_tx_timeout(struct net_device *netdev) } /** - * i40e_release_rx_desc - Store the new tail and head values - * @rx_ring: ring to bump - * @val: new head index - **/ -static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) -{ - rx_ring->next_to_use = val; - - /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). 
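The i40e_release_rx_desc() helper being deleted from i40e_main.c here (it reappears in i40e_txrx.c later in the patch) documents the publishing rule for descriptor rings: every descriptor store must be visible to the device before the tail register write that invites it to fetch them. A rough user-space model of that wmb() + writel() pairing, using a C11 release store and a stand-in for the MMIO tail register:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t tail_reg; /* stand-in for the MMIO tail register */

/* the caller's descriptor stores must become visible before this tail
 * write; the release ordering plays the role of the wmb() + writel()
 * pair on weakly ordered CPUs such as the IA-64 case cited above */
static void publish_next_to_use(uint32_t val)
{
        atomic_store_explicit(&tail_reg, val, memory_order_release);
}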
- */ - wmb(); - writel(val, rx_ring->tail); -} - -/** * i40e_get_vsi_stats_struct - Get System Network Statistics * @vsi: the VSI we care about * @@ -2097,6 +2081,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } } + /* if the VF is not trusted do not do promisc */ + if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { + clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state); + goto out; + } + /* check for changes in promiscuous modes */ if (changed_flags & IFF_ALLMULTI) { bool cur_multipromisc; @@ -2138,7 +2128,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) aq_ret = i40e_aq_set_vsi_unicast_promiscuous( &vsi->back->hw, vsi->seid, - cur_promisc, NULL); + cur_promisc, NULL, + true); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, @@ -2865,34 +2856,21 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) memset(&rx_ctx, 0, sizeof(rx_ctx)); ring->rx_buf_len = vsi->rx_buf_len; - ring->rx_hdr_len = vsi->rx_hdr_len; rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT; - rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT; rx_ctx.base = (ring->dma / 128); rx_ctx.qlen = ring->count; - if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) { - set_ring_16byte_desc_enabled(ring); - rx_ctx.dsize = 0; - } else { - rx_ctx.dsize = 1; - } + /* use 32 byte descriptors */ + rx_ctx.dsize = 1; - rx_ctx.dtype = vsi->dtype; - if (vsi->dtype) { - set_ring_ps_enabled(ring); - rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 | - I40E_RX_SPLIT_IP | - I40E_RX_SPLIT_TCP_UDP | - I40E_RX_SPLIT_SCTP; - } else { - rx_ctx.hsplit_0 = 0; - } + /* descriptor type is always zero + * rx_ctx.dtype = 0; + */ + rx_ctx.hsplit_0 = 0; - rx_ctx.rxmax = min_t(u16, vsi->max_frame, - (chain_len * ring->rx_buf_len)); + rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); if (hw->revision_id == 0) rx_ctx.lrxqthresh = 0; else @@ -2929,12 +2907,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - if (ring_is_ps_enabled(ring)) { - i40e_alloc_rx_headers(ring); - i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring)); - } else { - i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring)); - } + i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); return 0; } @@ -2973,40 +2946,18 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi) else vsi->max_frame = I40E_RXBUFFER_2048; - /* figure out correct receive buffer length */ - switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED | - I40E_FLAG_RX_PS_ENABLED)) { - case I40E_FLAG_RX_1BUF_ENABLED: - vsi->rx_hdr_len = 0; - vsi->rx_buf_len = vsi->max_frame; - vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; - break; - case I40E_FLAG_RX_PS_ENABLED: - vsi->rx_hdr_len = I40E_RX_HDR_SIZE; - vsi->rx_buf_len = I40E_RXBUFFER_2048; - vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT; - break; - default: - vsi->rx_hdr_len = I40E_RX_HDR_SIZE; - vsi->rx_buf_len = I40E_RXBUFFER_2048; - vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS; - break; - } + vsi->rx_buf_len = I40E_RXBUFFER_2048; #ifdef I40E_FCOE /* setup rx buffer for FCoE */ if ((vsi->type == I40E_VSI_FCOE) && (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) { - vsi->rx_hdr_len = 0; vsi->rx_buf_len = I40E_RXBUFFER_3072; vsi->max_frame = I40E_RXBUFFER_3072; - vsi->dtype = I40E_RX_DTYPE_NO_SPLIT; } #endif /* I40E_FCOE */ /* round up for the chip's needs */ - vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len, - BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT)); vsi->rx_buf_len = ALIGN(vsi->rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); @@ -4164,7 +4115,7 @@ static void 
i40e_clear_interrupt_scheme(struct i40e_pf *pf) int i; i40e_stop_misc_vector(pf); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { synchronize_irq(pf->msix_entries[0].vector); free_irq(pf->msix_entries[0].vector, pf); } @@ -5509,11 +5460,7 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf) * * Returns 0, this is not allowed to fail **/ -#ifdef I40E_FCOE int i40e_close(struct net_device *netdev) -#else -static int i40e_close(struct net_device *netdev) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -5538,8 +5485,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) WARN_ON(in_interrupt()); - if (i40e_check_asq_alive(&pf->hw)) - i40e_vc_notify_reset(pf); /* do the biggest reset indicated */ if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) { @@ -6377,7 +6322,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) break; default: dev_info(&pf->pdev->dev, - "ARQ Error: Unknown event 0x%04x received\n", + "ARQ: Unknown event 0x%04x ignored\n", opcode); break; } @@ -6742,6 +6687,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf) clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) return; + if (i40e_check_asq_alive(&pf->hw)) + i40e_vc_notify_reset(pf); dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); @@ -6862,6 +6809,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) */ ret = i40e_aq_set_phy_int_mask(&pf->hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (ret) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", @@ -7525,10 +7473,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) rx_ring->count = vsi->num_desc; rx_ring->size = 0; rx_ring->dcb_tc = 0; - if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) - set_ring_16byte_desc_enabled(rx_ring); - else - clear_ring_16byte_desc_enabled(rx_ring); rx_ring->rx_itr_setting = pf->rx_itr_default; vsi->rx_rings[i] = rx_ring; } @@ -8084,24 +8028,45 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, { struct i40e_pf *pf = vsi->back; struct i40e_hw *hw = &pf->hw; + u16 vf_id = vsi->vf_id; u8 i; /* Fill out hash function seed */ if (seed) { u32 *seed_dw = (u32 *)seed; - for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + if (vsi->type == I40E_VSI_MAIN) { + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), + seed_dw[i]); + } else if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, + I40E_VFQF_HKEY1(i, vf_id), + seed_dw[i]); + } else { + dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); + } } if (lut) { u32 *lut_dw = (u32 *)lut; - if (lut_size != I40E_HLUT_ARRAY_SIZE) - return -EINVAL; - - for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); + if (vsi->type == I40E_VSI_MAIN) { + if (lut_size != I40E_HLUT_ARRAY_SIZE) + return -EINVAL; + for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) + wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]); + } else if (vsi->type == I40E_VSI_SRIOV) { + if (lut_size != I40E_VF_HLUT_ARRAY_SIZE) + return -EINVAL; + for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) + i40e_write_rx_ctl(hw, + I40E_VFQF_HLUT1(i, vf_id), + lut_dw[i]); + } else { + dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); + } } 
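The i40e_config_rss_reg() change just above dispatches on VSI type: the main VSI programs the PF-indexed HKEY/HLUT registers, while an SRIOV VSI programs the per-VF banks indexed by vf_id. A standalone sketch of the seed half of that dispatch, assuming a 52-byte hash key (13 dwords, i.e. I40E_PFQF_HKEY_MAX_INDEX of 12 plus one) and a hypothetical 32-VF register file:

#include <stdbool.h>
#include <stdint.h>

enum vsi_type { VSI_MAIN, VSI_SRIOV };

#define HKEY_DWORDS 13  /* 52-byte key, matching the sketch's assumption */
#define MAX_VFS     32  /* hypothetical VF count for the sketch */

static uint32_t pf_hkey[HKEY_DWORDS];          /* PF register bank */
static uint32_t vf_hkey[MAX_VFS][HKEY_DWORDS]; /* per-VF register banks */

/* pick the register family by VSI type; seed must point at
 * HKEY_DWORDS * 4 bytes of key material */
static bool write_rss_seed(enum vsi_type type, uint16_t vf_id,
                           const uint8_t *seed)
{
        const uint32_t *seed_dw = (const uint32_t *)seed; /* as the driver casts */
        int i;

        switch (type) {
        case VSI_MAIN:
                for (i = 0; i < HKEY_DWORDS; i++)
                        pf_hkey[i] = seed_dw[i];        /* PFQF_HKEY(i) */
                return true;
        case VSI_SRIOV:
                if (vf_id >= MAX_VFS)
                        return false;
                for (i = 0; i < HKEY_DWORDS; i++)
                        vf_hkey[vf_id][i] = seed_dw[i]; /* VFQF_HKEY1(i, vf_id) */
                return true;
        }
        return false; /* any other VSI type has no RSS seed registers */
}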
i40e_flush(hw); @@ -8440,7 +8405,6 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)); - pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) { if (I40E_DEBUG_USER & debug) pf->hw.debug_mask = debug; @@ -8451,14 +8415,8 @@ static int i40e_sw_init(struct i40e_pf *pf) /* Set default capability flags */ pf->flags = I40E_FLAG_RX_CSUM_ENABLED | I40E_FLAG_MSI_ENABLED | - I40E_FLAG_LINK_POLLING_ENABLED | I40E_FLAG_MSIX_ENABLED; - if (iommu_present(&pci_bus_type)) - pf->flags |= I40E_FLAG_RX_PS_ENABLED; - else - pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; - /* Set default ITR */ pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; @@ -8559,6 +8517,7 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | I40E_FLAG_WB_ON_ITR_CAPABLE | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | + I40E_FLAG_NO_PCI_LINK_CHECK | I40E_FLAG_100M_SGMII_CAPABLE | I40E_FLAG_USE_SET_LLDP_MIB | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; @@ -9073,6 +9032,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_get_vf_config = i40e_ndo_get_vf_config, .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state, .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk, + .ndo_set_vf_trust = i40e_ndo_set_vf_trust, #if IS_ENABLED(CONFIG_VXLAN) .ndo_add_vxlan_port = i40e_add_vxlan_port, .ndo_del_vxlan_port = i40e_del_vxlan_port, @@ -9113,40 +9073,44 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np = netdev_priv(netdev); np->vsi = vsi; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_TSO_ECN | - NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM | + netdev->hw_enc_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_HIGHDMA | + NETIF_F_SOFT_FEATURES | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_IPIP | + NETIF_F_GSO_SIT | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_SCTP_CRC | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | 0; - netdev->features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_SCTP_CRC | - NETIF_F_HIGHDMA | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_GRE | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO_ECN | - NETIF_F_TSO6 | - NETIF_F_RXCSUM | - NETIF_F_RXHASH | - 0; + if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)) + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + + /* record features VLANs can make use of */ + netdev->vlan_features |= netdev->hw_enc_features | + NETIF_F_TSO_MANGLEID; if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) - netdev->features |= NETIF_F_NTUPLE; - if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) - netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + netdev->hw_features |= NETIF_F_NTUPLE; - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features; + netdev->hw_features |= netdev->hw_enc_features | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; if (vsi->type == I40E_VSI_MAIN) { SET_NETDEV_DEV(netdev, &pf->pdev->dev); @@ 
-9162,6 +9126,12 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) I40E_VLAN_ANY, false, true); spin_unlock_bh(&vsi->mac_filter_list_lock); } + } else if ((pf->hw.aq.api_maj_ver > 1) || + ((pf->hw.aq.api_maj_ver == 1) && + (pf->hw.aq.api_min_ver > 4))) { + /* Supported in FW API version higher than 1.4 */ + pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; + pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; } else { /* relate the VSI_VMDQ name to the VSI_MAIN name */ snprintf(netdev->name, IFNAMSIZ, "%sv%%d", @@ -9179,12 +9149,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); - /* vlan gets same features (except vlan offload) - * after any tweaks for specific VSI types - */ - netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER); + netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; /* Setup netdev TC information */ @@ -9397,7 +9362,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= - I40E_AQ_VSI_QUE_OPT_TCP_ENA; + (I40E_AQ_VSI_QUE_OPT_TCP_ENA | + I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI); } ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); @@ -10443,6 +10409,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) **/ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) { + u16 flags = 0; int ret; /* find out what's out there already */ @@ -10456,6 +10423,32 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) } i40e_pf_reset_stats(pf); + /* set the switch config bit for the whole device to + * support limited promisc or true promisc + * when user requests promisc. The default is limited + * promisc. + */ + + if ((pf->hw.pf_id == 0) && + !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) + flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; + + if (pf->hw.pf_id == 0) { + u16 valid_flags; + + valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; + ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, + NULL); + if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { + dev_info(&pf->pdev->dev, + "couldn't set switch config bits, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + /* not a fatal problem, just keep going */ + } + } + /* first time setup */ if (pf->lan_vsi == I40E_NO_VSI || reinit) { struct i40e_vsi *vsi = NULL; @@ -10683,11 +10676,9 @@ static void i40e_print_features(struct i40e_pf *pf) #ifdef CONFIG_PCI_IOV i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif - i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s", + i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs, - pf->flags & I40E_FLAG_RX_PS_ENABLED ? 
"PS" : "1BUF"); - + pf->vsi[pf->lan_vsi]->num_queue_pairs); if (pf->flags & I40E_FLAG_RSS_ENABLED) i += snprintf(&buf[i], REMAIN(i), " RSS"); if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) @@ -10826,6 +10817,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.func = PCI_FUNC(pdev->devfn); pf->instance = pfs_found; + /* set up the locks for the AQ, do this only once in probe + * and destroy them only once in remove + */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + if (debug != -1) { pf->msg_enable = pf->hw.debug_mask; pf->msg_enable = debug; @@ -10871,12 +10868,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* set up a default setting for link flow control */ pf->hw.fc.requested_mode = I40E_FC_NONE; - /* set up the locks for the AQ, do this only once in probe - * and destroy them only once in remove - */ - mutex_init(&hw->aq.asq_mutex); - mutex_init(&hw->aq.arq_mutex); - err = i40e_init_adminq(hw); if (err) { if (err == I40E_ERR_FIRMWARE_API_VERSION) @@ -11068,6 +11059,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) */ err = i40e_aq_set_phy_int_mask(&pf->hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MEDIA_NA | I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (err) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", @@ -11269,7 +11261,6 @@ err_init_lan_hmc: kfree(pf->qp_pile); err_sw_init: err_adminq_setup: - (void)i40e_shutdown_adminq(hw); err_pf_reset: iounmap(hw->hw_addr); err_ioremap: @@ -11311,8 +11302,10 @@ static void i40e_remove(struct pci_dev *pdev) /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); - del_timer_sync(&pf->service_timer); - cancel_work_sync(&pf->service_task); + if (pf->service_timer.data) + del_timer_sync(&pf->service_timer); + if (pf->service_task.func) + cancel_work_sync(&pf->service_task); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 5730f8091e1b..954efe3118db 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -693,10 +693,10 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, /* early check for status command and debug msgs */ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); - i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", i40e_nvm_update_state_str[upd_cmd], hw->nvmupd_state, - hw->aq.nvm_release_on_done, + hw->nvm_release_on_done, hw->nvm_wait_opcode, cmd->command, cmd->config, cmd->offset, cmd->data_size); if (upd_cmd == I40E_NVMUPD_INVALID) { @@ -710,7 +710,18 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, * going into the state machine */ if (upd_cmd == I40E_NVMUPD_STATUS) { + if (!cmd->data_size) { + *perrno = -EFAULT; + return I40E_ERR_BUF_TOO_SHORT; + } + bytes[0] = hw->nvmupd_state; + + if (cmd->data_size >= 4) { + bytes[1] = 0; + *((u16 *)&bytes[2]) = hw->nvm_wait_opcode; + } + return 0; } @@ -729,6 +740,14 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, case I40E_NVMUPD_STATE_INIT_WAIT: case I40E_NVMUPD_STATE_WRITE_WAIT: + /* if we need to stop waiting for an event, clear + * the wait info and return before doing anything else + */ + if 
(cmd->offset == 0xffff) { + i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode); + return 0; + } + status = I40E_ERR_NOT_READY; *perrno = -EBUSY; break; @@ -799,7 +818,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, if (status) { i40e_release_nvm(hw); } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -815,7 +835,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, if (status) { i40e_release_nvm(hw); } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -828,10 +849,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); - if (status) + if (status) { i40e_release_nvm(hw); - else + } else { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } } break; @@ -849,7 +872,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw, -EIO; i40e_release_nvm(hw); } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -940,8 +964,10 @@ retry: switch (upd_cmd) { case I40E_NVMUPD_WRITE_CON: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); - if (!status) + if (!status) { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } break; case I40E_NVMUPD_WRITE_LCB: @@ -953,7 +979,8 @@ retry: -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; @@ -967,6 +994,7 @@ retry: -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; @@ -980,7 +1008,8 @@ retry: -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; @@ -1030,6 +1059,37 @@ retry: } /** + * i40e_nvmupd_check_wait_event - handle NVM update operation events + * @hw: pointer to the hardware structure + * @opcode: the event that just happened + **/ +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode) +{ + if (opcode == hw->nvm_wait_opcode) { + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: clearing wait on opcode 0x%04x\n", opcode); + if (hw->nvm_release_on_done) { + i40e_release_nvm(hw); + hw->nvm_release_on_done = false; + } + hw->nvm_wait_opcode = 0; + + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; + + default: + break; + } + } +} + +/** * i40e_nvmupd_validate_command - Validate given command * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer @@ -1189,6 +1249,12 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw, *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } + /* should we wait for a followup event? 
*/ + if (cmd->offset) { + hw->nvm_wait_opcode = cmd->offset; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + return status; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index d51eee5bf79a..80403c6ee7f0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -130,9 +130,18 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, u16 vsi_id, bool set_filter, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, - u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details, + bool rx_only_promisc); i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw, + u16 seid, bool enable, + u16 vid, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, u16 seid, bool enable, struct i40e_asq_cmd_details *cmd_details); @@ -174,6 +183,10 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, struct i40e_aqc_get_switch_config_resp *buf, u16 buf_size, u16 *start_seid, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, + u16 flags, + u16 valid_flags, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_request_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, enum i40e_aq_resource_access_type access, @@ -228,10 +241,6 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, - enum i40e_aq_hmc_profile profile, - u8 pe_vf_enabled_count, - struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_bw, struct i40e_asq_cmd_details *cmd_details); @@ -308,6 +317,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, i40e_status i40e_nvmupd_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *); +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 565ca7c835bc..ed39cbad24bd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -158,9 +158,10 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec64 now, then = ns_to_timespec64(delta); + struct timespec64 now, then; unsigned long flags; + then = ns_to_timespec64(delta); spin_lock_irqsave(&pf->tmreg_lock, flags); i40e_ptp_read(pf, &now); @@ -288,9 +289,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) rd32(hw, I40E_PRTTSYN_RXTIME_H(3)); pf->last_rx_ptp_check = jiffies; 
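The nvm_wait_opcode/nvm_release_on_done bookkeeping introduced in the NVM-update hunks above forms a small state machine: a write or erase parks the updater in a _WAIT state until the AdminQ completion for the recorded opcode arrives. A compact standalone model of the i40e_nvmupd_check_wait_event() transitions, with the NVM semaphore release mocked by a print:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum nvmupd_state {
        NVMUPD_STATE_INIT,
        NVMUPD_STATE_WRITING,
        NVMUPD_STATE_INIT_WAIT,
        NVMUPD_STATE_WRITE_WAIT,
};

struct nvm_ctx {
        enum nvmupd_state state;
        uint16_t wait_opcode;   /* AdminQ opcode being waited on; 0 = none */
        bool release_on_done;
};

/* called with the opcode of a completed AdminQ event; a match releases
 * the NVM semaphore at most once and drops the state machine back out
 * of its _WAIT state */
static void check_wait_event(struct nvm_ctx *c, uint16_t opcode)
{
        if (opcode != c->wait_opcode)
                return;

        if (c->release_on_done) {
                puts("i40e_release_nvm()"); /* mocked for the sketch */
                c->release_on_done = false;
        }
        c->wait_opcode = 0;

        switch (c->state) {
        case NVMUPD_STATE_INIT_WAIT:
                c->state = NVMUPD_STATE_INIT;
                break;
        case NVMUPD_STATE_WRITE_WAIT:
                c->state = NVMUPD_STATE_WRITING;
                break;
        default:
                break;
        }
}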
pf->rx_hwtstamp_cleared++; - dev_warn(&vsi->back->pdev->dev, - "%s: clearing Rx timestamp hang\n", - __func__); + WARN_ONCE(1, "Detected Rx timestamp register hang\n"); } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 084d0ab316b7..99a524db5560 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -636,19 +636,21 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) /** * i40e_clean_tx_irq - Reclaim resources after transmit completes - * @tx_ring: tx ring to clean - * @budget: how many cleans we're allowed + * @vsi: the VSI we care about + * @tx_ring: Tx ring to clean + * @napi_budget: Used to determine if we are in netpoll * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) +static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, + struct i40e_ring *tx_ring, int napi_budget) { u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; - unsigned int total_packets = 0; - unsigned int total_bytes = 0; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = vsi->work_limit; tx_buf = &tx_ring->tx_bi[i]; tx_desc = I40E_TX_DESC(tx_ring, i); @@ -678,7 +680,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) total_packets += tx_buf->gso_segs; /* free the skb */ - dev_consume_skb_any(tx_buf->skb); + napi_consume_skb(tx_buf->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -749,7 +751,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j != 0) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + !test_bit(__I40E_DOWN, &vsi->state) && (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } @@ -767,7 +769,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { + !test_bit(__I40E_DOWN, &vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; @@ -1022,7 +1024,6 @@ err: void i40e_clean_rx_ring(struct i40e_ring *rx_ring) { struct device *dev = rx_ring->dev; - struct i40e_rx_buffer *rx_bi; unsigned long bi_size; u16 i; @@ -1030,48 +1031,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; - if (ring_is_ps_enabled(rx_ring)) { - int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; - - rx_bi = &rx_ring->rx_bi[0]; - if (rx_bi->hdr_buf) { - dma_free_coherent(dev, - bufsz, - rx_bi->hdr_buf, - rx_bi->dma); - for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - rx_bi->dma = 0; - rx_bi->hdr_buf = NULL; - } - } - } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - if (rx_bi->dma) { - dma_unmap_single(dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; - } + struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + if (rx_bi->skb) { dev_kfree_skb(rx_bi->skb); rx_bi->skb = NULL; } - if (rx_bi->page) { - if (rx_bi->page_dma) { - dma_unmap_page(dev, - rx_bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - rx_bi->page_dma = 0; - } - __free_page(rx_bi->page); - rx_bi->page = NULL; - rx_bi->page_offset = 0; - } + if (!rx_bi->page) + continue; + + 
dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE); + __free_pages(rx_bi->page, 0); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; } bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; @@ -1080,6 +1055,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } @@ -1104,37 +1080,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring) } /** - * i40e_alloc_rx_headers - allocate rx header buffers - * @rx_ring: ring to alloc buffers - * - * Allocate rx header buffers for the entire ring. As these are static, - * this is only called when setting up a new ring. - **/ -void i40e_alloc_rx_headers(struct i40e_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - struct i40e_rx_buffer *rx_bi; - dma_addr_t dma; - void *buffer; - int buf_size; - int i; - - if (rx_ring->rx_bi[0].hdr_buf) - return; - /* Make sure the buffers don't cross cache line boundaries. */ - buf_size = ALIGN(rx_ring->rx_hdr_len, 256); - buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, - &dma, GFP_KERNEL); - if (!buffer) - return; - for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - rx_bi->dma = dma + (i * buf_size); - rx_bi->hdr_buf = buffer + (i * buf_size); - } -} - -/** * i40e_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -1155,9 +1100,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ - rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) - ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) - : rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); @@ -1168,6 +1111,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) goto err; } + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -1186,6 +1130,10 @@ err: static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, @@ -1196,160 +1144,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split - * @rx_ring: ring to place buffers on - * @cleaned_count: number of buffers to replace + * i40e_alloc_mapped_page - recycle or make a new page + * @rx_ring: ring to use + * @bi: rx_buffer struct to modify * - * Returns true if any errors on allocation + * Returns true if the page was successfully allocated or + * reused. 
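i40e_alloc_mapped_page(), introduced next, is the heart of the new single-buffer scheme: a slot that still owns a page is reused outright, and only empty slots pay for allocation and DMA mapping. A user-space approximation, with malloc() standing in for the dev_alloc_page() + dma_map_page() pair:

#include <stdbool.h>
#include <stdlib.h>

struct rx_buffer {
        void *page;               /* stands in for struct page * + DMA handle */
        unsigned int page_offset;
};

static unsigned long page_reuse_count, alloc_page_failed;

/* recycle-first allocation: buffers that kept their page are free to
 * reuse; only empty slots take the slow allocate-and-map path */
static bool alloc_mapped_page(struct rx_buffer *bi)
{
        if (bi->page) {
                page_reuse_count++;
                return true;
        }

        bi->page = malloc(4096);  /* dev_alloc_page() stand-in */
        if (!bi->page) {
                alloc_page_failed++;
                return false;     /* caller retries on the next poll */
        }
        bi->page_offset = 0;
        return true;
}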
**/ -bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi) { - u16 i = rx_ring->next_to_use; - union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; - const int current_node = numa_node_id(); + struct page *page = bi->page; + dma_addr_t dma; - /* do nothing if no valid netdev defined */ - if (!rx_ring->netdev || !cleaned_count) - return false; + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } - while (cleaned_count--) { - rx_desc = I40E_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_bi[i]; + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } - if (bi->skb) /* desc is in use */ - goto no_buffers; + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - /* If we've been moved to a different NUMA node, release the - * page so we can get a new one on the current node. + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use */ - if (bi->page && page_to_nid(bi->page) != current_node) { - dma_unmap_page(rx_ring->dev, - bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - __free_page(bi->page); - bi->page = NULL; - bi->page_dma = 0; - rx_ring->rx_stats.realloc_count++; - } else if (bi->page) { - rx_ring->rx_stats.page_reuse_count++; - } - - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - 0, - PAGE_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - __free_page(bi->page); - bi->page = NULL; - bi->page_dma = 0; - bi->page_offset = 0; - goto no_buffers; - } - bi->page_offset = 0; - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. 
- */ - rx_desc->read.pkt_addr = - cpu_to_le64(bi->page_dma + bi->page_offset); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - i++; - if (i == rx_ring->count) - i = 0; + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, 0); + rx_ring->rx_stats.alloc_page_failed++; + return false; } - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; - return false; + return true; +} -no_buffers: - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); +/** + * i40e_receive_skb - Send a completed packet up the stack + * @rx_ring: rx ring in play + * @skb: packet to send up + * @vlan_tag: vlan tag for packet + **/ +static void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) +{ + struct i40e_q_vector *q_vector = rx_ring->q_vector; - /* make sure to come back via polling to try again after - * allocation failure - */ - return true; + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (vlan_tag & VLAN_VID_MASK)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + napi_gro_receive(&q_vector->napi, skb); } /** - * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer + * i40e_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace * - * Returns true if any errors on allocation + * Returns false if all allocations were successful, true if any fail **/ -bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) { - u16 i = rx_ring->next_to_use; + u16 ntu = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; - struct sk_buff *skb; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; - while (cleaned_count--) { - rx_desc = I40E_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_bi[i]; - skb = bi->skb; - - if (!skb) { - skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len, - GFP_ATOMIC | - __GFP_NOWARN); - if (!skb) { - rx_ring->rx_stats.alloc_buff_failed++; - goto no_buffers; - } - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - bi->skb = skb; - } + rx_desc = I40E_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_bi[ntu]; - if (!bi->dma) { - bi->dma = dma_map_single(rx_ring->dev, - skb->data, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->dma)) { - rx_ring->rx_stats.alloc_buff_failed++; - bi->dma = 0; - dev_kfree_skb(bi->skb); - bi->skb = NULL; - goto no_buffers; - } - } + do { + if (!i40e_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
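The refill loop being added here walks the ring with a pointer/index pair, wraps mid-loop, and zeroes the status word of the descriptor left at next_to_use; because the write-back status overlays the read-format hdr_addr, a zero there guarantees the cleanup path never mistakes a stale DD bit for a completion. A standalone sketch under those assumptions (ring size and buffer stride are illustrative):

#include <stdint.h>

#define RING_COUNT 512
#define BUF_STRIDE 2048

union rx_desc {
        struct { uint64_t pkt_addr; uint64_t hdr_addr; } read; /* SW fills */
        struct { uint64_t qword0;   uint64_t status_len; } wb; /* HW writes back */
};

static union rx_desc ring[RING_COUNT];

/* fill 'count' descriptors starting at next_to_use (ntu); after each
 * advance, clear the status word of the descriptor now at ntu --
 * status_len overlays read.hdr_addr, so this also keeps hdr_addr zero */
static uint16_t refill(uint16_t ntu, uint16_t count, uint64_t dma_base)
{
        union rx_desc *desc = &ring[ntu];

        if (!count)
                return ntu;

        do {
                desc->read.pkt_addr = dma_base + (uint64_t)ntu * BUF_STRIDE;
                desc->read.hdr_addr = 0;

                desc++;
                ntu++;
                if (ntu == RING_COUNT) {
                        desc = ring;
                        ntu = 0;
                }

                desc->wb.status_len = 0;
        } while (--count);

        return ntu; /* caller publishes this via the tail register */
}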
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); rx_desc->read.hdr_addr = 0; - i++; - if (i == rx_ring->count) - i = 0; - } - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); + rx_desc++; + bi++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = I40E_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_bi; + ntu = 0; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.qword1.status_error_len = 0; + + cleaned_count--; + } while (cleaned_count); + + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); return false; no_buffers: - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); /* make sure to come back via polling to try again after * allocation failure @@ -1358,41 +1268,35 @@ no_buffers: } /** - * i40e_receive_skb - Send a completed packet up the stack - * @rx_ring: rx ring in play - * @skb: packet to send up - * @vlan_tag: vlan tag for packet - **/ -static void i40e_receive_skb(struct i40e_ring *rx_ring, - struct sk_buff *skb, u16 vlan_tag) -{ - struct i40e_q_vector *q_vector = rx_ring->q_vector; - - if (vlan_tag & VLAN_VID_MASK) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - - napi_gro_receive(&q_vector->napi, skb); -} - -/** * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum * @vsi: the VSI we care about * @skb: skb currently being received and modified - * @rx_status: status value of last descriptor in packet - * @rx_error: error value of last descriptor in packet - * @rx_ptype: ptype value of last descriptor in packet + * @rx_desc: the receive descriptor + * + * skb->protocol must be set before this function is called **/ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, - u32 rx_status, - u32 rx_error, - u16 rx_ptype) + union i40e_rx_desc *rx_desc) { - struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); - bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; + struct i40e_rx_ptype_decoded decoded; + bool ipv4, ipv6, tunnel = false; + u32 rx_error, rx_status; + u8 ptype; + u64 qword; + + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + decoded = decode_rx_desc_ptype(ptype); skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); + /* Rx csum enabled and ip headers found? */ if (!(vsi->netdev->features & NETIF_F_RXCSUM)) return; @@ -1438,14 +1342,13 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * doesn't make it a hard requirement so if we have validated the * inner checksum report CHECKSUM_UNNECESSARY. */ - - ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP | + I40E_RX_PTYPE_INNER_PROT_UDP | + I40E_RX_PTYPE_INNER_PROT_SCTP)) + tunnel = true; skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = ipv4_tunnel || ipv6_tunnel; + skb->csum_level = tunnel ? 
1 : 0; return; @@ -1459,7 +1362,7 @@ checksum_fail: * * Returns a hash type to be used by skb_set_hash **/ -static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) +static inline int i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -1487,11 +1390,11 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, u8 rx_ptype) { u32 hash; - const __le64 rss_mask = + const __le64 rss_mask = cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (ring->netdev->features & NETIF_F_RXHASH) + if (!(ring->netdev->features & NETIF_F_RXHASH)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { @@ -1501,346 +1404,436 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, } /** - * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split - * @rx_ring: rx ring to clean - * @budget: how many cleans we're allowed + * i40e_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * @rx_ptype: the packet type decoded by hardware * - * Returns true if there's any budget left (e.g. the clean is finished) + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, protocol, and + * other fields within the skb. **/ -static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) +static inline +void i40e_process_skb_fields(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype) { - unsigned int total_rx_bytes = 0, total_rx_packets = 0; - u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; - u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - struct i40e_vsi *vsi = rx_ring->vsi; - u16 i = rx_ring->next_to_clean; - union i40e_rx_desc *rx_desc; - u32 rx_error, rx_status; - bool failure = false; - u8 rx_ptype; - u64 qword; - u32 copysize; + u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT; - if (budget <= 0) - return 0; + if (unlikely(rsyn)) { + i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn); + rx_ring->last_rx_timestamp = jiffies; + } - do { - struct i40e_rx_buffer *rx_bi; - struct sk_buff *skb; - u16 vlan_tag; - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - failure = failure || - i40e_alloc_rx_buffers_ps(rx_ring, - cleaned_count); - cleaned_count = 0; - } + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - i = rx_ring->next_to_clean; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); - if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) - break; + i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * DD bit is set. 
- */ - dma_rmb(); - /* sync header buffer for reading */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); - if (i40e_rx_is_programming_status(qword)) { - i40e_clean_programming_status(rx_ring, rx_desc); - I40E_RX_INCREMENT(rx_ring, i); - continue; - } - rx_bi = &rx_ring->rx_bi[i]; - skb = rx_bi->skb; - if (likely(!skb)) { - skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_hdr_len, - GFP_ATOMIC | - __GFP_NOWARN); - if (!skb) { - rx_ring->rx_stats.alloc_buff_failed++; - failure = true; - break; - } + skb_record_rx_queue(skb, rx_ring->queue_index); +} - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); - } - rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> - I40E_RXD_QW1_LENGTH_HBUF_SHIFT; - rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> - I40E_RXD_QW1_LENGTH_SPH_SHIFT; - - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); - rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); +/** + * i40e_pull_tail - i40e specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an i40e specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; - /* sync half-page for reading */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->page_dma, - rx_bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - prefetch(page_address(rx_bi->page) + rx_bi->page_offset); - rx_bi->skb = NULL; - cleaned_count++; - copysize = 0; - if (rx_hbo || rx_sph) { - int len; + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); - if (rx_hbo) - len = I40E_RX_HDR_SIZE; - else - len = rx_header_len; - memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); - } else if (skb->len == 0) { - int len; - unsigned char *va = page_address(rx_bi->page) + - rx_bi->page_offset; - - len = min(rx_packet_len, rx_ring->rx_hdr_len); - memcpy(__skb_put(skb, len), va, len); - copysize = len; - rx_packet_len -= len; - } - /* Get the rest of the data if this was a header split */ - if (rx_packet_len) { - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - rx_bi->page, - rx_bi->page_offset + copysize, - rx_packet_len, I40E_RXBUFFER_2048); - - /* If the page count is more than 2, then both halves - * of the page are used and we need to free it. Do it - * here instead of in the alloc code. 
Otherwise one - * of the half-pages might be released between now and - * then, and we wouldn't know which one to use. - * Don't call get_page and free_page since those are - * both expensive atomic operations that just change - * the refcount in opposite directions. Just give the - * page to the stack; he can have our refcount. - */ - if (page_count(rx_bi->page) > 2) { - dma_unmap_page(rx_ring->dev, - rx_bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - rx_bi->page = NULL; - rx_bi->page_dma = 0; - rx_ring->rx_stats.realloc_count++; - } else { - get_page(rx_bi->page); - /* switch to the other half-page here; the - * allocation code programs the right addr - * into HW. If we haven't used this half-page, - * the address won't be changed, and HW can - * just use it next time through. - */ - rx_bi->page_offset ^= PAGE_SIZE / 2; - } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); - } - I40E_RX_INCREMENT(rx_ring, i); + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - if (unlikely( - !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { - struct i40e_rx_buffer *next_buffer; + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} - next_buffer = &rx_ring->rx_bi[i]; - next_buffer->skb = skb; - rx_ring->rx_stats.non_eop_descs++; - continue; - } +/** + * i40e_cleanup_headers - Correct empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being fixed + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) +{ + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + i40e_pull_tail(rx_ring, skb); - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { - dev_kfree_skb_any(skb); - continue; - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + return false; +} - if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { - i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & - I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> - I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); - rx_ring->last_rx_timestamp = jiffies; - } +/** + * i40e_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *old_buff) +{ + struct i40e_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - total_rx_packets++; + new_buff = &rx_ring->rx_bi[nta]; - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? 
nta : 0; - i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + /* transfer page from old buffer to new buffer */ + *new_buff = *old_buff; +} - vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) - ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) - : 0; -#ifdef I40E_FCOE - if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { - dev_kfree_skb_any(skb); - continue; - } +/** + * i40e_page_is_reserved - check if reuse is possible + * @page: page struct to check + */ +static inline bool i40e_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} + +/** + * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; +#if (PAGE_SIZE < 8192) + unsigned int truesize = I40E_RXBUFFER_2048; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; #endif - i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_desc->wb.qword1.status_error_len = 0; + /* will the data fit in the skb we allocated? if so, just + * copy it as it is pretty small anyway + */ + if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; - } while (likely(total_rx_packets < budget)); + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - rx_ring->q_vector->rx.total_packets += total_rx_packets; - rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!i40e_page_is_reserved(page))) + return true; - return failure ? budget : total_rx_packets; + /* this page cannot be reused so discard it */ + __free_pages(page, 0); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(i40e_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. 
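The tail of i40e_add_rx_frag() above implements the half-page flip: if the driver is the sole owner of the page it toggles page_offset to the other 2 KB half and takes another reference, otherwise the whole page is surrendered to the stack. A simplified model with a plain counter standing in for the struct page refcount:

#include <stdbool.h>

#define HALF_PAGE 2048 /* assumes 4 KB pages, as the PAGE_SIZE < 8192 branch does */

struct rx_buffer {
        unsigned int page_offset;
        int page_refs; /* plain counter standing in for the page refcount */
};

/* keep the page only if we are its sole owner: flip to the other half
 * and take a reference for it; otherwise let the stack keep the page */
static bool try_flip_half_page(struct rx_buffer *b)
{
        if (b->page_refs != 1)
                return false;

        b->page_offset ^= HALF_PAGE;
        b->page_refs++; /* get_page() stand-in for the half we kept */
        return true;
}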
+ */ + get_page(rx_buffer->page); + + return true; } /** - * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer - * @rx_ring: rx ring to clean - * @budget: how many cleans we're allowed + * i40e_fetch_rx_buffer - Allocate skb and populate it + * @rx_ring: rx descriptor ring to transact packets on + * @rx_desc: descriptor containing info written by hardware * - * Returns number of packets cleaned + * This function allocates an skb on the fly, and populates it with the page + * data from the current receive descriptor, taking care to set up the skb + * correctly, as well as handling calling the page recycle function if + * necessary. + */ +static inline +struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc) +{ + struct i40e_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + I40E_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_buff_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } else { + rx_buffer->skb = NULL; + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + I40E_RXBUFFER_2048, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + i40e_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +/** + * i40e_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. **/ -static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +static bool i40e_is_non_eop(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(I40E_RX_DESC(rx_ring, ntc)); + +#define staterrlen rx_desc->wb.qword1.status_error_len + if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) { + i40e_clean_programming_status(rx_ring, rx_desc); + rx_ring->rx_bi[ntc].skb = skb; + return true; + } + /* if we are the last buffer then there is nothing else to do */ +#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) + if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) + return false; + + /* place skb in next buffer to be received */ + rx_ring->rx_bi[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. + * + * Returns amount of work completed + **/ +static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - struct i40e_vsi *vsi = rx_ring->vsi; - union i40e_rx_desc *rx_desc; - u32 rx_error, rx_status; - u16 rx_packet_len; bool failure = false; - u8 rx_ptype; - u64 qword; - u16 i; - do { - struct i40e_rx_buffer *rx_bi; + while (likely(total_rx_packets < budget)) { + union i40e_rx_desc *rx_desc; struct sk_buff *skb; + u32 rx_status; u16 vlan_tag; + u8 rx_ptype; + u64 qword; + /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { failure = failure || - i40e_alloc_rx_buffers_1buf(rx_ring, - cleaned_count); + i40e_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } - i = rx_ring->next_to_clean; - rx_desc = I40E_RX_DESC(rx_ring, i); + rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + I40E_RXD_QW1_STATUS_SHIFT; if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; + /* status_error_len will always be zero for unused descriptors + * because it's cleared in cleanup, and overlaps with hdr_addr + * which is always zero because packet split isn't used, if the + * hardware wrote DD then it will be non-zero + */ + if (!rx_desc->wb.qword1.status_error_len) + break; + /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we know the * DD bit is set. 
*/ dma_rmb(); - if (i40e_rx_is_programming_status(qword)) { - i40e_clean_programming_status(rx_ring, rx_desc); - I40E_RX_INCREMENT(rx_ring, i); - continue; - } - rx_bi = &rx_ring->rx_bi[i]; - skb = rx_bi->skb; - prefetch(skb->data); - - rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); + skb = i40e_fetch_rx_buffer(rx_ring, rx_desc); + if (!skb) + break; - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; - rx_bi->skb = NULL; cleaned_count++; - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - skb_put(skb, rx_packet_len); - dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; - - I40E_RX_INCREMENT(rx_ring, i); - - if (unlikely( - !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { - rx_ring->rx_stats.non_eop_descs++; + if (i40e_is_non_eop(rx_ring, rx_desc, skb)) continue; - } - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { + /* ERR_MASK will only have valid bits if EOP set, and + * what we are doing here is actually checking + * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in + * the error field + */ + if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { dev_kfree_skb_any(skb); continue; } - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { - i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & - I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> - I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT); - rx_ring->last_rx_timestamp = jiffies; - } + if (i40e_cleanup_headers(rx_ring, skb)) + continue; /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - total_rx_packets++; - - skb->protocol = eth_type_trans(skb, rx_ring->netdev); - i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); + /* populate checksum, VLAN, and protocol */ + i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) - ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) - : 0; #ifdef I40E_FCOE - if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { + if (unlikely( + i40e_rx_is_fcoe(rx_ptype) && + !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) { dev_kfree_skb_any(skb); continue; } #endif + + vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? + le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_desc->wb.qword1.status_error_len = 0; - } while (likely(total_rx_packets < budget)); + /* update budget accounting */ + total_rx_packets++; + } u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; @@ -1849,6 +1842,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + /* guarantee a trip back through this routine if there was a failure */ return failure ? budget : total_rx_packets; } @@ -1975,9 +1969,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. 
*/ i40e_for_each_ring(ring, q_vector->tx) { - clean_complete = clean_complete && - i40e_clean_tx_irq(ring, vsi->work_limit); - arm_wb = arm_wb || ring->arm_wb; + if (!i40e_clean_tx_irq(vsi, ring, budget)) { + clean_complete = false; + continue; + } + arm_wb |= ring->arm_wb; ring->arm_wb = false; } @@ -1991,16 +1987,12 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { - int cleaned; - - if (ring_is_ps_enabled(ring)) - cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); - else - cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; - /* if we didn't clean as many as budgeted, we must be done */ - clean_complete = clean_complete && (budget_per_ring > cleaned); + /* if we clean as many as budgeted, we must not be done */ + if (cleaned >= budget_per_ring) + clean_complete = false; } /* If work not completed, return budget and polling will return */ @@ -2247,15 +2239,13 @@ out: /** * i40e_tso - set up the tso context descriptor - * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u8 *hdr_len, u64 *cd_type_cmd_tso_mss) +static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u64 cd_cmd, cd_tso_len, cd_mss; union { @@ -2292,16 +2282,22 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, ip.v6->payload_len = 0; } - if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_IPIP | + SKB_GSO_SIT | + SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + l4.udp->len = 0; + /* determine offset of outer transport header */ l4_offset = l4.hdr - skb->data; /* remove payload length from outer checksum */ - paylen = (__force u16)l4.udp->check; - paylen += ntohs(1) * (u16)~(skb->len - l4_offset); - l4.udp->check = ~csum_fold((__force __wsum)paylen); + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.udp->check, htonl(paylen)); } /* reset pointers to inner headers */ @@ -2321,9 +2317,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, l4_offset = l4.hdr - skb->data; /* remove payload length from inner checksum */ - paylen = (__force u16)l4.tcp->check; - paylen += ntohs(1) * (u16)~(skb->len - l4_offset); - l4.tcp->check = ~csum_fold((__force __wsum)paylen); + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; @@ -2405,7 +2400,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, unsigned char *hdr; } l4; unsigned char *exthdr; - u32 offset, cmd = 0, tunnel = 0; + u32 offset, cmd = 0; __be16 frag_off; u8 l4_proto = 0; @@ -2419,6 +2414,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { + u32 tunnel = 0; /* define outer network header type */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { tunnel |= 
(*tx_flags & I40E_TX_FLAGS_TSO) ? @@ -2436,13 +2432,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, &l4_proto, &frag_off); } - /* compute outer L3 header size */ - tunnel |= ((l4.hdr - ip.hdr) / 4) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; - - /* switch IP header pointer from outer to inner header */ - ip.hdr = skb_inner_network_header(skb); - /* define outer transport */ switch (l4_proto) { case IPPROTO_UDP: @@ -2453,6 +2442,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, tunnel |= I40E_TXD_CTX_GRE_TUNNELING; *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; + l4.hdr = skb_inner_network_header(skb); + break; default: if (*tx_flags & I40E_TX_FLAGS_TSO) return -1; @@ -2461,12 +2455,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, return 0; } + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + /* compute tunnel header size */ tunnel |= ((ip.hdr - l4.hdr) / 2) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; /* indicate if we need to offload outer UDP header */ if ((*tx_flags & I40E_TX_FLAGS_TSO) && + !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; @@ -2594,35 +2596,34 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) } /** - * __i40e_chk_linearize - Check if there are more than 8 fragments per packet + * __i40e_chk_linearize - Check if there are more than 8 buffers per packet * @skb: send buffer * - * Note: Our HW can't scatter-gather more than 8 fragments to build - * a packet on the wire and so we need to figure out the cases where we - * need to linearize the skb. + * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire + * and so we need to figure out the cases where we need to linearize the skb. + * + * For TSO we need to count the TSO header and segment payload separately. + * As such we need to check cases where we have 7 fragments or more as we + * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for + * the segment payload in the first descriptor, and another 7 for the + * fragments. **/ bool __i40e_chk_linearize(struct sk_buff *skb) { const struct skb_frag_struct *frag, *stale; - int gso_size, nr_frags, sum; + int nr_frags, sum; - /* check to see if TSO is enabled, if so we may get a repreive */ - gso_size = skb_shinfo(skb)->gso_size; - if (unlikely(!gso_size)) - return true; - - /* no need to check if number of frags is less than 8 */ + /* no need to check if number of frags is less than 7 */ nr_frags = skb_shinfo(skb)->nr_frags; - if (nr_frags < I40E_MAX_BUFFER_TXD) + if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) return false; /* We need to walk through the list and validate that each group * of 6 fragments totals at least gso_size. However we don't need - * to perform such validation on the first or last 6 since the first - * 6 cannot inherit any data from a descriptor before them, and the - * last 6 cannot inherit any data from a descriptor after them. + * to perform such validation on the last 6 since the last 6 cannot + * inherit any data from a descriptor after them. 
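+ * + * As a concrete example, a TSO skb with gso_size = 9000 and eight + * 1024-byte fragments fails the walk below (any six consecutive + * fragments cover at most 6144 bytes), so it must be linearized.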
*/ - nr_frags -= I40E_MAX_BUFFER_TXD - 1; + nr_frags -= I40E_MAX_BUFFER_TXD - 2; frag = &skb_shinfo(skb)->frags[0]; /* Initialize size to the negative value of gso_size minus 1. We @@ -2631,21 +2632,21 @@ bool __i40e_chk_linearize(struct sk_buff *skb) * descriptors for a single transmit as the header and previous * fragment are already consuming 2 descriptors. */ - sum = 1 - gso_size; + sum = 1 - skb_shinfo(skb)->gso_size; - /* Add size of frags 1 through 5 to create our initial sum */ - sum += skb_frag_size(++frag); - sum += skb_frag_size(++frag); - sum += skb_frag_size(++frag); - sum += skb_frag_size(++frag); - sum += skb_frag_size(++frag); + /* Add size of frags 0 through 4 to create our initial sum */ + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ stale = &skb_shinfo(skb)->frags[0]; for (;;) { - sum += skb_frag_size(++frag); + sum += skb_frag_size(frag++); /* if sum is negative we failed to make sufficient progress */ if (sum < 0) @@ -2655,7 +2656,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb) if (!--nr_frags) break; - sum -= skb_frag_size(++stale); + sum -= skb_frag_size(stale++); } return false; @@ -2717,6 +2718,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = first; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; + if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; @@ -2724,12 +2727,14 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, dma_unmap_len_set(tx_bi, len, size); dma_unmap_addr_set(tx_bi, dma, dma); + /* align size to end of page */ + max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); tx_desc->buffer_addr = cpu_to_le64(dma); while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, - I40E_MAX_DATA_PER_TXD, td_tag); + max_data, td_tag); tx_desc++; i++; @@ -2740,9 +2745,10 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, i = 0; } - dma += I40E_MAX_DATA_PER_TXD; - size -= I40E_MAX_DATA_PER_TXD; + dma += max_data; + size -= max_data; + max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; tx_desc->buffer_addr = cpu_to_le64(dma); } @@ -2892,7 +2898,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (i40e_chk_linearize(skb, count)) { if (__skb_linearize(skb)) goto out_drop; - count = TXD_USE_COUNT(skb->len); + count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } @@ -2923,7 +2929,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); + tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index cdd5dc00aec5..b78c810d1835 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -102,8 +102,8 @@ enum i40e_dyn_idx_t { (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? 
\ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) -/* Supported Rx Buffer Sizes */ -#define I40E_RXBUFFER_512 512 /* Used for packet split */ +/* Supported Rx Buffer Sizes (a multiple of 128) */ +#define I40E_RXBUFFER_256 256 #define I40E_RXBUFFER_2048 2048 #define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ #define I40E_RXBUFFER_4096 4096 @@ -114,9 +114,28 @@ enum i40e_dyn_idx_t { * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, * this adds up to 512 bytes of extra data meaning the smallest allocation * we could have is 1K. - * i.e. RXBUFFER_512 --> size-1024 slab + * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab) + * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab) */ -#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512 +#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 +#define i40e_rx_desc i40e_32byte_rx_desc + +/** + * i40e_test_staterr - tests bits in Rx descriptor status and error fields + * @rx_desc: pointer to receive descriptor (in le64 format) + * @stat_err_bits: value to mask + * + * This function does some fast chicanery in order to return the + * value of the mask which is really only used for boolean tests. + * The status_error_len doesn't need to be shifted because it begins + * at offset zero. + */ +static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, + const u64 stat_err_bits) +{ + return !!(rx_desc->wb.qword1.status_error_len & + cpu_to_le64(stat_err_bits)); +} /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ @@ -142,14 +161,41 @@ enum i40e_dyn_idx_t { prefetch((n)); \ } while (0) -#define i40e_rx_desc i40e_32byte_rx_desc - #define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 -#define I40E_MAX_DATA_PER_TXD 8192 + +/* The size limit for a transmit buffer in a descriptor is (16K - 1). + * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. + */ +#define I40E_MAX_READ_REQ_SIZE 4096 +#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1) +#define I40E_MAX_DATA_PER_TXD_ALIGNED \ + (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1)) + +/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is + * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact + * that 12K is not a power of 2 and division is expensive. It is used to + * approximate the number of descriptors used per linear buffer. Note + * that this will overestimate in some cases as it doesn't account for the + * fact that we will add up to 4K - 1 in aligning the 12K buffer, however + * the error should not impact things much as large buffers usually mean + * we will use fewer descriptors than there are frags in an skb.
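+ * + * As a quick sanity check with real numbers: max = 12288 yields + * reciprocal = 349525 and leaves adjust = 0xFFFFFFFF, so for + * size = 12289 the result is ((12289 * 349525) + 0xFFFFFFFF) >> 32 = 2, + * which matches DIV_ROUND_UP(12289, 12288).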
+ */ +static inline unsigned int i40e_txd_use_count(unsigned int size) +{ + const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED; + const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max; + unsigned int adjust = ~(u32)0; + + /* if we rounded up on the reciprocal pull down the adjustment */ + if ((max * reciprocal) > adjust) + adjust = ~(u32)(reciprocal - 1); + + return (u32)((((u64)size * reciprocal) + adjust) >> 32); +} /* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 @@ -184,10 +230,8 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; - void *hdr_buf; dma_addr_t dma; struct page *page; - dma_addr_t page_dma; unsigned int page_offset; }; @@ -216,22 +260,18 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, - __I40E_RX_PS_ENABLED, - __I40E_RX_16BYTE_DESC_ENABLED, }; -#define ring_is_ps_enabled(ring) \ - test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define set_ring_ps_enabled(ring) \ - set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define clear_ring_ps_enabled(ring) \ - clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define ring_is_16byte_desc_enabled(ring) \ - test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) -#define set_ring_16byte_desc_enabled(ring) \ - set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) -#define clear_ring_16byte_desc_enabled(ring) \ - clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +/* some useful defines for virtchannel interface, which + * is the only remaining user of header split + */ +#define I40E_RX_DTYPE_NO_SPLIT 0 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 +#define I40E_RX_SPLIT_L2 0x1 +#define I40E_RX_SPLIT_IP 0x2 +#define I40E_RX_SPLIT_TCP_UDP 0x4 +#define I40E_RX_SPLIT_SCTP 0x8 /* struct that defines a descriptor ring, associated with a VSI */ struct i40e_ring { @@ -258,16 +298,7 @@ struct i40e_ring { u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ - u16 rx_hdr_len; u16 rx_buf_len; - u8 dtype; -#define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_HEADER_SPLIT 1 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 -#define I40E_RX_SPLIT_L2 0x1 -#define I40E_RX_SPLIT_IP 0x2 -#define I40E_RX_SPLIT_TCP_UDP 0x4 -#define I40E_RX_SPLIT_SCTP 0x8 /* used in interrupt processing */ u16 next_to_use; @@ -301,6 +332,7 @@ struct i40e_ring { struct i40e_q_vector *q_vector; /* Backreference to associated vector */ struct rcu_head rcu; /* to avoid race on free */ + u16 next_to_alloc; } ____cacheline_internodealigned_in_smp; enum i40e_latency_range { @@ -324,9 +356,7 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); -bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); -void i40e_alloc_rx_headers(struct i40e_ring *rxr); +bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40e_clean_tx_ring(struct i40e_ring *tx_ring); void i40e_clean_rx_ring(struct i40e_ring *rx_ring); @@ -377,7 +407,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) int count = 0, size = skb_headlen(skb); for (;;) { - count += TXD_USE_COUNT(size); + count += i40e_txd_use_count(size); if (!nr_frags--) break; @@ 
-413,10 +443,24 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) **/ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) { - /* we can only support up to 8 data buffers for a single send */ - if (likely(count <= I40E_MAX_BUFFER_TXD)) + /* Both TSO and single send will work if count is less than 8 */ + if (likely(count < I40E_MAX_BUFFER_TXD)) return false; - return __i40e_chk_linearize(skb); + if (skb_is_gso(skb)) + return __i40e_chk_linearize(skb); + + /* we can support up to 8 data buffers for a single send */ + return count != I40E_MAX_BUFFER_TXD; +} + +/** + * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE + * @ptype: the packet type field from Rx descriptor write-back + **/ +static inline bool i40e_rx_is_fcoe(u16 ptype) +{ + return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) && + (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER); } #endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 3335f9d13374..bd5f13bef83c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -36,7 +36,7 @@ #include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) (mask << shift) +#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) #define I40E_MAX_VSI_QP 16 #define I40E_MAX_VF_VSI 3 @@ -275,6 +275,11 @@ struct i40e_hw_capabilities { #define I40E_FLEX10_STATUS_DCC_ERROR 0x1 #define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool sec_rev_disabled; + bool update_disabled; +#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 +#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -549,6 +554,8 @@ struct i40e_hw { enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; struct i40e_virt_mem nvm_buff; + bool nvm_release_on_done; + u16 nvm_wait_opcode; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ @@ -1533,4 +1540,37 @@ struct i40e_lldp_variables { /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 + +/* INPUT SET MASK for RSS, flow director, and flexible payload */ +#define I40E_L3_SRC_SHIFT 47 +#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT) +#define I40E_L3_V6_SRC_SHIFT 43 +#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT) +#define I40E_L3_DST_SHIFT 35 +#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT) +#define I40E_L3_V6_DST_SHIFT 35 +#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT) +#define I40E_L4_SRC_SHIFT 34 +#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT) +#define I40E_L4_DST_SHIFT 33 +#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT) +#define I40E_VERIFY_TAG_SHIFT 31 +#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT) + +#define I40E_FLEX_50_SHIFT 13 +#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT) +#define I40E_FLEX_51_SHIFT 12 +#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT) +#define I40E_FLEX_52_SHIFT 11 +#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT) +#define I40E_FLEX_53_SHIFT 10 +#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT) +#define I40E_FLEX_54_SHIFT 9 +#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT) +#define I40E_FLEX_55_SHIFT 8 +#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT) +#define I40E_FLEX_56_SHIFT 7 +#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT) +#define I40E_FLEX_57_SHIFT 6 +#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT) #endif /* _I40E_TYPE_H_ */ 
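The one-line I40E_MASK change at the top of the i40e_type.h hunk above fixes two classic C macro hazards at once: the old body (mask << shift) shifts in signed int arithmetic and leaves both arguments unparenthesized. A minimal standalone sketch of what can go wrong (the _OLD/_NEW names and the test harness are illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define I40E_MASK_OLD(mask, shift) (mask << shift)
#define I40E_MASK_NEW(mask, shift) ((uint32_t)(mask) << (shift))

int main(void)
{
	/* A composite mask argument: | binds more loosely than <<, so the
	 * old macro expands to (0x1 | (0x2 << 4)) == 0x21 instead of the
	 * intended ((0x1 | 0x2) << 4) == 0x30.
	 */
	printf("old: %#x, new: %#x\n",
	       I40E_MASK_OLD(0x1 | 0x2, 4),
	       I40E_MASK_NEW(0x1 | 0x2, 4));

	/* Shifting 1 into bit 31 of a signed int is undefined behavior;
	 * the (u32) cast makes it a well-defined unsigned shift.
	 */
	printf("bit31: %#x\n", I40E_MASK_NEW(0x1, 31));
	return 0;
}

Register-field macros built on I40E_MASK are used throughout the driver with shifts up to 31, which is presumably why the cast and parentheses were added here.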
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h index ab866cf3dc18..c92a3bdee229 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h @@ -80,10 +80,15 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, - I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ I40E_VIRTCHNL_OP_IWARP = 20, I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21, I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22, + I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + I40E_VIRTCHNL_OP_SET_RSS_HENA = 26, + }; /* Virtual channel message descriptor. This overlays the admin queue @@ -157,6 +162,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000 struct i40e_virtchnl_vf_resource { u16 num_vsis; @@ -165,8 +171,8 @@ struct i40e_virtchnl_vf_resource { u16 max_mtu; u32 vf_offload_flags; - u32 max_fcoe_contexts; - u32 max_fcoe_filters; + u32 rss_key_size; + u32 rss_lut_size; struct i40e_virtchnl_vsi_resource vsi_res[1]; }; @@ -325,6 +331,39 @@ struct i40e_virtchnl_promisc_info { * PF replies with struct i40e_eth_stats in an external buffer. */ +/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY + * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct i40e_virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +struct i40e_virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table */ +}; + +/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS + * I40E_VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h + */ +struct i40e_virtchnl_rss_hena { + u64 hena; +}; + /* I40E_VIRTCHNL_OP_EVENT * PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 816c6bbf7093..1fcafcfa8f14 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -48,7 +48,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, int i; for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; /* Not all vfs are enabled so skip the ones that are not */ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) && !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) @@ -63,7 +63,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf, } /** - * i40e_vc_notify_link_state + * i40e_vc_notify_vf_link_state * @vf: pointer to the VF structure * * send a link status message to a single VF @@ -74,7 +74,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; struct i40e_link_status *ls = &pf->hw.phy.link_info; - int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id; + int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id; pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; @@ -141,7 +141,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) return; - abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; + abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id; pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING; pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM; @@ -590,7 +590,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, } rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; - /* set splitalways mode 10b */ + /* set split mode 10b */ rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; } @@ -665,8 +665,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) goto error_alloc_vsi_res; } if (type == I40E_VSI_SRIOV) { - u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; - vf->lan_vsi_idx = vsi->idx; vf->lan_vsi_id = vsi->id; /* If the port VLAN has been configured and then the @@ -688,12 +686,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type) "Could not add MAC filter %pM for VF %d\n", vf->default_lan_addr.addr, vf->vf_id); } - f = i40e_add_filter(vsi, brdcast, - vf->port_vlan_id ? 
vf->port_vlan_id : -1, - true, false); - if (!f) - dev_info(&pf->pdev->dev, - "Could not allocate VF broadcast filter\n"); spin_unlock_bh(&vsi->mac_filter_list_lock); } @@ -860,7 +852,11 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) if (ret) goto error_alloc; total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; - set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + + if (vf->trusted) + set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); + else + clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); /* store the total qps number for the runtime * VF req validation @@ -917,9 +913,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; + u32 reg, reg_idx, bit_idx; bool rsd = false; int i; - u32 reg; if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) return; @@ -937,6 +933,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg); i40e_flush(hw); } + /* clear the VFLR bit in GLGEN_VFLRSTAT */ + reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32; + bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32; + wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); + i40e_flush(hw); if (i40e_quiesce_vf_pci(vf)) dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", @@ -988,6 +989,7 @@ complete_reset: } /* tell the VF the reset is done */ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); + i40e_flush(hw); clear_bit(__I40E_VF_DISABLE, &pf->state); } @@ -1227,8 +1229,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, /* single place to detect unsuccessful return values */ if (v_retval) { vf->num_invalid_msgs++; - dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n", - vf->vf_id, v_opcode, v_retval); + dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n", + vf->vf_id, v_opcode, v_retval); if (vf->num_invalid_msgs > I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) { dev_err(&pf->pdev->dev, @@ -1246,9 +1248,9 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode, aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval, msg, msglen, NULL); if (aq_ret) { - dev_err(&pf->pdev->dev, - "Unable to send the message to VF %d aq_err %d\n", - vf->vf_id, pf->hw.aq.asq_last_status); + dev_info(&pf->pdev->dev, + "Unable to send the message to VF %d aq_err %d\n", + vf->vf_id, pf->hw.aq.asq_last_status); return -EIO; } @@ -1306,8 +1308,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; struct i40e_vsi *vsi; - int i = 0, len = 0; int num_vsis = 1; + int len = 0; int ret; if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) { @@ -1342,12 +1344,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states); } - if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { - if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) - vfres->vf_offload_flags |= - I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ; + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { + vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF; } else { - vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; + if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) && + (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)) + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ; + else + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; } if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) { @@ -1356,8 +1362,16 @@ static int 
i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; } - if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) { + if (pf->flags & I40E_FLAG_MFP_ENABLED) { + dev_err(&pf->pdev->dev, + "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", + vf->vf_id); + ret = I40E_ERR_PARAM; + goto err; + } vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; + } if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) { if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) @@ -1368,16 +1382,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; + vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE; + vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE; + if (vf->lan_vsi_idx) { - vfres->vsi_res[i].vsi_id = vf->lan_vsi_id; - vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV; - vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs; + vfres->vsi_res[0].vsi_id = vf->lan_vsi_id; + vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV; + vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs; /* VFs only use TC 0 */ - vfres->vsi_res[i].qset_handle + vfres->vsi_res[0].qset_handle = le16_to_cpu(vsi->info.qs_handle[0]); - ether_addr_copy(vfres->vsi_res[i].default_mac_addr, + ether_addr_copy(vfres->vsi_res[0].default_mac_addr, vf->default_lan_addr.addr); - i++; } set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states); @@ -1407,6 +1423,25 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf) } /** + * i40e_getnum_vf_vsi_vlan_filters + * @vsi: pointer to the vsi + * + * called to get the number of VLANs offloaded on this VF + **/ +static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) +{ + struct i40e_mac_filter *f; + int num_vlans = 0; + + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) + num_vlans++; + } + + return num_vlans; +} + +/** * i40e_vc_config_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1422,22 +1457,128 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, (struct i40e_virtchnl_promisc_info *)msg; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; - struct i40e_vsi *vsi; + struct i40e_mac_filter *f; + i40e_status aq_ret = 0; bool allmulti = false; - i40e_status aq_ret; + struct i40e_vsi *vsi; + bool alluni = false; + int aq_err = 0; vsi = i40e_find_vsi_from_id(pf, info->vsi_id); if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || - !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) || - (vsi->type != I40E_VSI_FCOE)) { + !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; } + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + dev_err(&pf->pdev->dev, + "Unprivileged VF %d is attempting to configure promiscuous mode\n", + vf->vf_id); + /* Lie to the VF on purpose. 
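+ * Replying with success while changing nothing means the unprivileged + * VF sees no error to act on; as far as it can tell the request was + * honored.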
*/ + aq_ret = 0; + goto error_param; + } + /* Multicast promiscuous handling */ if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC) allmulti = true; - aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, - allmulti, NULL); + + if (vf->port_vlan_id) { + aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid, + allmulti, + vf->port_vlan_id, + NULL); + } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { + list_for_each_entry(f, &vsi->mac_filter_list, list) { + if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) + continue; + aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, + vsi->seid, + allmulti, + f->vlan, + NULL); + aq_err = pf->hw.aq.asq_last_status; + if (aq_ret) { + dev_err(&pf->pdev->dev, + "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n", + f->vlan, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + break; + } + } + } else { + aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, + allmulti, NULL); + aq_err = pf->hw.aq.asq_last_status; + if (aq_ret) { + dev_err(&pf->pdev->dev, + "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n", + vf->vf_id, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + goto error_param_int; + } + } + + if (!aq_ret) { + dev_info(&pf->pdev->dev, + "VF %d successfully set multicast promiscuous mode\n", + vf->vf_id); + if (allmulti) + set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); + else + clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states); + } + + if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC) + alluni = true; + if (vf->port_vlan_id) { + aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid, + alluni, + vf->port_vlan_id, + NULL); + } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { + list_for_each_entry(f, &vsi->mac_filter_list, list) { + aq_ret = 0; + if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) { + aq_ret = + i40e_aq_set_vsi_uc_promisc_on_vlan(hw, + vsi->seid, + alluni, + f->vlan, + NULL); + aq_err = pf->hw.aq.asq_last_status; + } + if (aq_ret) + dev_err(&pf->pdev->dev, + "Could not add VLAN %d to unicast promiscuous domain err %s aq_err %s\n", + f->vlan, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + } else { + aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, + alluni, NULL, + true); + aq_err = pf->hw.aq.asq_last_status; + if (aq_ret) + dev_err(&pf->pdev->dev, + "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n", + vf->vf_id, info->flags, + i40e_stat_str(&pf->hw, aq_ret), + i40e_aq_str(&pf->hw, aq_err)); + } + +error_param_int: + if (!aq_ret) { + dev_info(&pf->pdev->dev, + "VF %d successfully set unicast promiscuous mode\n", + vf->vf_id); + if (alluni) + set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); + else + clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states); + } error_param: /* send the response to the VF */ @@ -1688,6 +1829,10 @@ error_param: (u8 *)&stats, sizeof(stats)); } +/* If the VF is not trusted restrict the number of MAC/VLAN it can program */ +#define I40E_VC_MAX_MAC_ADDR_PER_VF 8 +#define I40E_VC_MAX_VLAN_PER_VF 8 + /** * i40e_check_vf_permission * @vf: pointer to the VF info @@ -1708,15 +1853,22 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr) dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); ret = I40E_ERR_INVALID_MAC_ADDR; } else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) && + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && !ether_addr_equal(macaddr, vf->default_lan_addr.addr)) { /* If the host VMM administrator has set
the VF MAC address * administratively via the ndo_set_vf_mac command then deny * permission to the VF to add or delete unicast MAC addresses. + * Unless the VF is privileged and then it can do whatever. * The VF may request to set the MAC address filter already * assigned to it so do not return an error in that case. */ dev_err(&pf->pdev->dev, - "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n"); + "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n"); + ret = -EPERM; + } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) && + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + dev_err(&pf->pdev->dev, + "VF is not trusted, switch the VF to trusted to add more functionality\n"); ret = -EPERM; } return ret; @@ -1741,7 +1893,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) int i; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -1780,6 +1931,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ret = I40E_ERR_PARAM; spin_unlock_bh(&vsi->mac_filter_list_lock); goto error_param; + } else { + vf->num_mac++; } } spin_unlock_bh(&vsi->mac_filter_list_lock); @@ -1815,7 +1968,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) int i; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { ret = I40E_ERR_PARAM; goto error_param; @@ -1839,6 +1991,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ret = I40E_ERR_INVALID_MAC_ADDR; spin_unlock_bh(&vsi->mac_filter_list_lock); goto error_param; + } else { + vf->num_mac--; } spin_unlock_bh(&vsi->mac_filter_list_lock); @@ -1873,8 +2027,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) i40e_status aq_ret = 0; int i; + if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) && + !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + dev_err(&pf->pdev->dev, + "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n"); + goto error_param; + } if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -1898,6 +2057,19 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < vfl->num_elements; i++) { /* add new VLAN filter */ int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]); + if (!ret) + vf->num_vlan++; + + if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) + i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, + true, + vfl->vlan_id[i], + NULL); + if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) + i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, + true, + vfl->vlan_id[i], + NULL); if (ret) dev_err(&pf->pdev->dev, @@ -1929,7 +2101,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) int i; if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || - !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) || !i40e_vc_isvalid_vsi_id(vf, vsi_id)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -1950,6 +2121,19 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) for (i = 0; i < 
vfl->num_elements; i++) { int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]); + if (!ret) + vf->num_vlan--; + + if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states)) + i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid, + false, + vfl->vlan_id[i], + NULL); + if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states)) + i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid, + false, + vfl->vlan_id[i], + NULL); if (ret) dev_err(&pf->pdev->dev, @@ -2029,6 +2213,135 @@ error_param: } /** + * i40e_vc_config_rss_key + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Configure the VF's RSS key + **/ +static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_rss_key *vrk = + (struct i40e_virtchnl_rss_key *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = vrk->vsi_id; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id) || + (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0); +err: + /* send the response to the VF */ + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, + aq_ret); +} + +/** + * i40e_vc_config_rss_lut + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Configure the VF's RSS LUT + **/ +static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_rss_lut *vrl = + (struct i40e_virtchnl_rss_lut *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_vsi *vsi = NULL; + u16 vsi_id = vrl->vsi_id; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) || + !i40e_vc_isvalid_vsi_id(vf, vsi_id) || + (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + + vsi = pf->vsi[vf->lan_vsi_idx]; + aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE); + /* send the response to the VF */ +err: + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, + aq_ret); +} + +/** + * i40e_vc_get_rss_hena + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Return the RSS HENA bits allowed by the hardware + **/ +static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_rss_hena *vrh = NULL; + struct i40e_pf *pf = vf->pf; + i40e_status aq_ret = 0; + int len = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; + } + len = sizeof(struct i40e_virtchnl_rss_hena); + + vrh = kzalloc(len, GFP_KERNEL); + if (!vrh) { + aq_ret = I40E_ERR_NO_MEMORY; + len = 0; + goto err; + } + vrh->hena = i40e_pf_get_default_rss_hena(pf); +err: + /* send the response back to the VF */ + aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, + aq_ret, (u8 *)vrh, len); + return aq_ret; +} + +/** + * i40e_vc_set_rss_hena + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * Set the RSS HENA bits for the VF + **/ +static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen) +{ + struct i40e_virtchnl_rss_hena *vrh = + (struct i40e_virtchnl_rss_hena *)msg; + struct i40e_pf *pf = vf->pf; + struct i40e_hw *hw = &pf->hw; + i40e_status aq_ret = 0; + + if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) { + aq_ret = I40E_ERR_PARAM; + goto err; 
+ } + i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena); + i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id), + (u32)(vrh->hena >> 32)); + + /* send the response to the VF */ +err: + return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA, + aq_ret); +} + +/** * i40e_vc_validate_vf_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -2041,7 +2354,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) { bool err_msg_format = false; - int valid_len; + int valid_len = 0; /* Check if VF is disabled. */ if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states)) @@ -2053,13 +2366,10 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, valid_len = sizeof(struct i40e_virtchnl_version_info); break; case I40E_VIRTCHNL_OP_RESET_VF: - valid_len = 0; break; case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: if (VF_IS_V11(vf)) valid_len = sizeof(u32); - else - valid_len = 0; break; case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE: valid_len = sizeof(struct i40e_virtchnl_txq_info); @@ -2149,6 +2459,35 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, sizeof(struct i40e_virtchnl_iwarp_qv_info)); } break; + case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: + valid_len = sizeof(struct i40e_virtchnl_rss_key); + if (msglen >= valid_len) { + struct i40e_virtchnl_rss_key *vrk = + (struct i40e_virtchnl_rss_key *)msg; + if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) { + err_msg_format = true; + break; + } + valid_len += vrk->key_len - 1; + } + break; + case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: + valid_len = sizeof(struct i40e_virtchnl_rss_lut); + if (msglen >= valid_len) { + struct i40e_virtchnl_rss_lut *vrl = + (struct i40e_virtchnl_rss_lut *)msg; + if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) { + err_msg_format = true; + break; + } + valid_len += vrl->lut_entries - 1; + } + break; + case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: + break; + case I40E_VIRTCHNL_OP_SET_RSS_HENA: + valid_len = sizeof(struct i40e_virtchnl_rss_hena); + break; /* These are always errors coming from the VF. 
*/ case I40E_VIRTCHNL_OP_EVENT: case I40E_VIRTCHNL_OP_UNKNOWN: @@ -2175,11 +2514,11 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode, * called from the common aeq/arq handler to * process request from VF **/ -int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, +int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen) { struct i40e_hw *hw = &pf->hw; - unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id; + int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id; struct i40e_vf *vf; int ret; @@ -2247,6 +2586,19 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP: ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false); break; + case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY: + ret = i40e_vc_config_rss_key(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT: + ret = i40e_vc_config_rss_lut(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: + ret = i40e_vc_get_rss_hena(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_SET_RSS_HENA: + ret = i40e_vc_set_rss_hena(vf, msg, msglen); + break; + case I40E_VIRTCHNL_OP_UNKNOWN: default: dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", @@ -2268,9 +2620,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, **/ int i40e_vc_process_vflr_event(struct i40e_pf *pf) { - u32 reg, reg_idx, bit_idx, vf_id; struct i40e_hw *hw = &pf->hw; + u32 reg, reg_idx, bit_idx; struct i40e_vf *vf; + int vf_id; if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) return 0; @@ -2292,13 +2645,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) /* read GLGEN_VFLRSTAT register to find out the flr VFs */ vf = &pf->vf[vf_id]; reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx)); - if (reg & BIT(bit_idx)) { - /* clear the bit in GLGEN_VFLRSTAT */ - wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); - - if (!test_bit(__I40E_DOWN, &pf->state)) - i40e_reset_vf(vf, true); - } + if (reg & BIT(bit_idx)) + /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */ + i40e_reset_vf(vf, true); } return 0; @@ -2762,3 +3111,45 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) out: return ret; } + +/** + * i40e_ndo_set_vf_trust + * @netdev: network interface device structure of the pf + * @vf_id: VF identifier + * @setting: trust setting + * + * Enable or disable VF trust setting + **/ +int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_pf *pf = np->vsi->back; + struct i40e_vf *vf; + int ret = 0; + + /* validate the request */ + if (vf_id >= pf->num_alloc_vfs) { + dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); + return -EINVAL; + } + + if (pf->flags & I40E_FLAG_MFP_ENABLED) { + dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); + return -EINVAL; + } + + vf = &pf->vf[vf_id]; + + if (!vf) + return -EINVAL; + if (setting == vf->trusted) + goto out; + + vf->trusted = setting; + i40e_vc_notify_vf_reset(vf); + i40e_reset_vf(vf, false); + dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", + vf_id, setting ? 
"" : "un"); +out: + return ret; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index e7b2fba0309e..875174141451 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -61,6 +61,8 @@ enum i40e_vf_states { I40E_VF_STAT_IWARPENA, I40E_VF_STAT_FCOEENA, I40E_VF_STAT_DISABLED, + I40E_VF_STAT_MC_PROMISC, + I40E_VF_STAT_UC_PROMISC, }; /* VF capabilities */ @@ -75,7 +77,7 @@ struct i40e_vf { struct i40e_pf *pf; /* VF id in the PF space */ - u16 vf_id; + s16 vf_id; /* all VF vsis connect to the same parent */ enum i40e_switch_element_types parent_type; struct i40e_virtchnl_version_info vf_ver; @@ -88,6 +90,7 @@ struct i40e_vf { struct i40e_virtchnl_ether_addr default_fcoe_addr; u16 port_vlan_id; bool pf_set_mac; /* The VMM admin set the VF MAC address */ + bool trusted; /* VSI indices - actual VSI pointers are maintained in the PF structure * When assigned, these will be non-zero, because VSI 0 is always @@ -108,6 +111,9 @@ struct i40e_vf { bool link_forced; bool link_up; /* only valid if VF link is forced */ bool spoofchk; + u16 num_mac; + u16 num_vlan; + /* RDMA Client */ struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info; }; @@ -115,7 +121,7 @@ struct i40e_vf { void i40e_free_vfs(struct i40e_pf *pf); int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs); int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs); -int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, +int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen); int i40e_vc_process_vflr_event(struct i40e_pf *pf); void i40e_reset_vf(struct i40e_vf *vf, bool flr); @@ -127,6 +133,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos); int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, int max_tx_rate); +int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting); int i40e_ndo_get_vf_config(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h index a3eae5d9a2bd..1f9b3b5d946d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h @@ -97,7 +97,6 @@ struct i40e_adminq_info { u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_release_on_done; struct mutex asq_mutex; /* Send queue lock */ struct mutex arq_mutex; /* Receive queue lock */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index aad8d6277110..3114dcfa1724 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -78,17 +78,17 @@ struct i40e_aq_desc { #define I40E_AQ_FLAG_EI_SHIFT 14 #define I40E_AQ_FLAG_FE_SHIFT 15 -#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ 
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ +#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ /* error codes */ enum i40e_admin_queue_err { @@ -205,10 +205,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, - /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, - /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, i40e_aqc_opc_set_phy_config = 0x0601, @@ -426,6 +422,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 #define I40E_AQ_CAP_ID_WSR_PROT 0x0064 +#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 @@ -1582,27 +1579,6 @@ struct i40e_aqc_configure_partition_bw_data { I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); -/* Get and set the active HMC resource profile and status. 
- * (direct 0x0500) and (direct 0x0501) - */ -struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); - -enum i40e_aq_hmc_profile { - /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, -}; - -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F - /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ @@ -1649,11 +1625,11 @@ enum i40e_aq_phy_type { enum i40e_aq_link_speed { I40E_LINK_SPEED_UNKNOWN = 0, - I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT), - I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), - I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), - I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) + I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT) }; struct i40e_aqc_module_desc { @@ -1924,9 +1900,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) + BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) #define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) +#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 @@ -2195,7 +2171,7 @@ struct i40e_aqc_del_udp_tunnel_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); struct i40e_aqc_get_set_rss_key { -#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) #define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) @@ -2215,14 +2191,14 @@ struct i40e_aqc_get_set_rss_key_data { I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); struct i40e_aqc_get_set_rss_lut { -#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15) +#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) #define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 #define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) __le16 vsi_id; #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \ - I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ + BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 #define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 771ac6ad8cda..8f64204000fb 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -58,6 +58,8 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: + case 
I40E_DEV_ID_SFP_I_X722: + case I40E_DEV_ID_QSFP_I_X722: hw->mac.type = I40E_MAC_X722; break; case I40E_DEV_ID_X722_VF: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h index ca8b58c3d1f5..d34972bab09c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h @@ -44,6 +44,8 @@ #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_SFP_I_X722 0x37D3 +#define I40E_DEV_ID_QSFP_I_X722 0x37D4 #define I40E_DEV_ID_X722_VF 0x37CD #define I40E_DEV_ID_X722_VF_HV 0x37D9 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index ebcc25c05796..fd7dae46c5d8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -155,19 +155,21 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) /** * i40e_clean_tx_irq - Reclaim resources after transmit completes - * @tx_ring: tx ring to clean - * @budget: how many cleans we're allowed + * @vsi: the VSI we care about + * @tx_ring: Tx ring to clean + * @napi_budget: Used to determine if we are in netpoll * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) +static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, + struct i40e_ring *tx_ring, int napi_budget) { u16 i = tx_ring->next_to_clean; struct i40e_tx_buffer *tx_buf; struct i40e_tx_desc *tx_head; struct i40e_tx_desc *tx_desc; - unsigned int total_packets = 0; - unsigned int total_bytes = 0; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int budget = vsi->work_limit; tx_buf = &tx_ring->tx_bi[i]; tx_desc = I40E_TX_DESC(tx_ring, i); @@ -197,7 +199,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) total_packets += tx_buf->gso_segs; /* free the skb */ - dev_kfree_skb_any(tx_buf->skb); + napi_consume_skb(tx_buf->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -267,7 +269,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + !test_bit(__I40E_DOWN, &vsi->state) && (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) tx_ring->arm_wb = true; } @@ -285,7 +287,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) smp_mb(); if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) { + !test_bit(__I40E_DOWN, &vsi->state)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); ++tx_ring->tx_stats.restart_queue; @@ -494,7 +496,6 @@ err: void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) { struct device *dev = rx_ring->dev; - struct i40e_rx_buffer *rx_bi; unsigned long bi_size; u16 i; @@ -502,48 +503,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (!rx_ring->rx_bi) return; - if (ring_is_ps_enabled(rx_ring)) { - int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count; - - rx_bi = &rx_ring->rx_bi[0]; - if (rx_bi->hdr_buf) { - dma_free_coherent(dev, - bufsz, - rx_bi->hdr_buf, - rx_bi->dma); - for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - rx_bi->dma = 0; - rx_bi->hdr_buf = NULL; - } - } - } /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - if 
(rx_bi->dma) { - dma_unmap_single(dev, - rx_bi->dma, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; - } + struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + if (rx_bi->skb) { dev_kfree_skb(rx_bi->skb); rx_bi->skb = NULL; } - if (rx_bi->page) { - if (rx_bi->page_dma) { - dma_unmap_page(dev, - rx_bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - rx_bi->page_dma = 0; - } - __free_page(rx_bi->page); - rx_bi->page = NULL; - rx_bi->page_offset = 0; - } + if (!rx_bi->page) + continue; + + dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE); + __free_pages(rx_bi->page, 0); + + rx_bi->page = NULL; + rx_bi->page_offset = 0; } bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; @@ -552,6 +527,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } @@ -576,37 +552,6 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring) } /** - * i40evf_alloc_rx_headers - allocate rx header buffers - * @rx_ring: ring to alloc buffers - * - * Allocate rx header buffers for the entire ring. As these are static, - * this is only called when setting up a new ring. - **/ -void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring) -{ - struct device *dev = rx_ring->dev; - struct i40e_rx_buffer *rx_bi; - dma_addr_t dma; - void *buffer; - int buf_size; - int i; - - if (rx_ring->rx_bi[0].hdr_buf) - return; - /* Make sure the buffers don't cross cache line boundaries. */ - buf_size = ALIGN(rx_ring->rx_hdr_len, 256); - buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count, - &dma, GFP_KERNEL); - if (!buffer) - return; - for (i = 0; i < rx_ring->count; i++) { - rx_bi = &rx_ring->rx_bi[i]; - rx_bi->dma = dma + (i * buf_size); - rx_bi->hdr_buf = buffer + (i * buf_size); - } -} - -/** * i40evf_setup_rx_descriptors - Allocate Rx descriptors * @rx_ring: Rx descriptor ring (for a specific queue) to setup * @@ -627,9 +572,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ - rx_ring->size = ring_is_16byte_desc_enabled(rx_ring) - ? rx_ring->count * sizeof(union i40e_16byte_rx_desc) - : rx_ring->count * sizeof(union i40e_32byte_rx_desc); + rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); @@ -640,6 +583,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) goto err; } + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; @@ -658,6 +602,10 @@ err: static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; + /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. 
(Only * applicable for weak-ordered memory model archs, @@ -668,160 +616,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) } /** - * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split - * @rx_ring: ring to place buffers on - * @cleaned_count: number of buffers to replace + * i40e_alloc_mapped_page - recycle or make a new page + * @rx_ring: ring to use + * @bi: rx_buffer struct to modify * - * Returns true if any errors on allocation + * Returns true if the page was successfully allocated or + * reused. **/ -bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi) { - u16 i = rx_ring->next_to_use; - union i40e_rx_desc *rx_desc; - struct i40e_rx_buffer *bi; - const int current_node = numa_node_id(); + struct page *page = bi->page; + dma_addr_t dma; - /* do nothing if no valid netdev defined */ - if (!rx_ring->netdev || !cleaned_count) - return false; + /* since we are recycling buffers we should seldom need to alloc */ + if (likely(page)) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } - while (cleaned_count--) { - rx_desc = I40E_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_bi[i]; + /* alloc new page for storage */ + page = dev_alloc_page(); + if (unlikely(!page)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } - if (bi->skb) /* desc is in use */ - goto no_buffers; + /* map page for use */ + dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - /* If we've been moved to a different NUMA node, release the - * page so we can get a new one on the current node. + /* if mapping failed free memory back to system since + * there isn't much point in holding memory we can't use */ - if (bi->page && page_to_nid(bi->page) != current_node) { - dma_unmap_page(rx_ring->dev, - bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - __free_page(bi->page); - bi->page = NULL; - bi->page_dma = 0; - rx_ring->rx_stats.realloc_count++; - } else if (bi->page) { - rx_ring->rx_stats.page_reuse_count++; - } - - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC); - if (!bi->page) { - rx_ring->rx_stats.alloc_page_failed++; - goto no_buffers; - } - bi->page_dma = dma_map_page(rx_ring->dev, - bi->page, - 0, - PAGE_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { - rx_ring->rx_stats.alloc_page_failed++; - __free_page(bi->page); - bi->page = NULL; - bi->page_dma = 0; - bi->page_offset = 0; - goto no_buffers; - } - bi->page_offset = 0; - } - - /* Refresh the desc even if buffer_addrs didn't change - * because each write-back erases this info. 
- */ - rx_desc->read.pkt_addr = - cpu_to_le64(bi->page_dma + bi->page_offset); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - i++; - if (i == rx_ring->count) - i = 0; + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, 0); + rx_ring->rx_stats.alloc_page_failed++; + return false; } - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); + bi->dma = dma; + bi->page = page; + bi->page_offset = 0; - return false; + return true; +} -no_buffers: - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); +/** + * i40e_receive_skb - Send a completed packet up the stack + * @rx_ring: rx ring in play + * @skb: packet to send up + * @vlan_tag: vlan tag for packet + **/ +static void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) +{ + struct i40e_q_vector *q_vector = rx_ring->q_vector; - /* make sure to come back via polling to try again after - * allocation failure - */ - return true; + if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + (vlan_tag & VLAN_VID_MASK)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + + napi_gro_receive(&q_vector->napi, skb); } /** - * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer + * i40evf_alloc_rx_buffers - Replace used receive buffers * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace * - * Returns true if any errors on allocation + * Returns false if all allocations were successful, true if any fail **/ -bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) { - u16 i = rx_ring->next_to_use; + u16 ntu = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; - struct sk_buff *skb; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; - while (cleaned_count--) { - rx_desc = I40E_RX_DESC(rx_ring, i); - bi = &rx_ring->rx_bi[i]; - skb = bi->skb; - - if (!skb) { - skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len, - GFP_ATOMIC | - __GFP_NOWARN); - if (!skb) { - rx_ring->rx_stats.alloc_buff_failed++; - goto no_buffers; - } - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - bi->skb = skb; - } + rx_desc = I40E_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_bi[ntu]; - if (!bi->dma) { - bi->dma = dma_map_single(rx_ring->dev, - skb->data, - rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, bi->dma)) { - rx_ring->rx_stats.alloc_buff_failed++; - bi->dma = 0; - dev_kfree_skb(bi->skb); - bi->skb = NULL; - goto no_buffers; - } - } + do { + if (!i40e_alloc_mapped_page(rx_ring, bi)) + goto no_buffers; - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + /* Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. 
+ */ + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); rx_desc->read.hdr_addr = 0; - i++; - if (i == rx_ring->count) - i = 0; - } - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); + rx_desc++; + bi++; + ntu++; + if (unlikely(ntu == rx_ring->count)) { + rx_desc = I40E_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_bi; + ntu = 0; + } + + /* clear the status bits for the next_to_use descriptor */ + rx_desc->wb.qword1.status_error_len = 0; + + cleaned_count--; + } while (cleaned_count); + + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); return false; no_buffers: - if (rx_ring->next_to_use != i) - i40e_release_rx_desc(rx_ring, i); + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); /* make sure to come back via polling to try again after * allocation failure @@ -830,41 +740,35 @@ no_buffers: } /** - * i40e_receive_skb - Send a completed packet up the stack - * @rx_ring: rx ring in play - * @skb: packet to send up - * @vlan_tag: vlan tag for packet - **/ -static void i40e_receive_skb(struct i40e_ring *rx_ring, - struct sk_buff *skb, u16 vlan_tag) -{ - struct i40e_q_vector *q_vector = rx_ring->q_vector; - - if (vlan_tag & VLAN_VID_MASK) - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - - napi_gro_receive(&q_vector->napi, skb); -} - -/** * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum * @vsi: the VSI we care about * @skb: skb currently being received and modified - * @rx_status: status value of last descriptor in packet - * @rx_error: error value of last descriptor in packet - * @rx_ptype: ptype value of last descriptor in packet + * @rx_desc: the receive descriptor + * + * skb->protocol must be set before this function is called **/ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, - u32 rx_status, - u32 rx_error, - u16 rx_ptype) + union i40e_rx_desc *rx_desc) { - struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); - bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; + struct i40e_rx_ptype_decoded decoded; + bool ipv4, ipv6, tunnel = false; + u32 rx_error, rx_status; + u8 ptype; + u64 qword; + + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; + rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> + I40E_RXD_QW1_ERROR_SHIFT; + rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> + I40E_RXD_QW1_STATUS_SHIFT; + decoded = decode_rx_desc_ptype(ptype); skb->ip_summed = CHECKSUM_NONE; + skb_checksum_none_assert(skb); + /* Rx csum enabled and ip headers found? */ if (!(vsi->netdev->features & NETIF_F_RXCSUM)) return; @@ -910,14 +814,13 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * doesn't make it a hard requirement so if we have validated the * inner checksum report CHECKSUM_UNNECESSARY. */ - - ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP | + I40E_RX_PTYPE_INNER_PROT_UDP | + I40E_RX_PTYPE_INNER_PROT_SCTP)) + tunnel = true; skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = ipv4_tunnel || ipv6_tunnel; + skb->csum_level = tunnel ? 
1 : 0; return; @@ -931,7 +834,7 @@ checksum_fail: * * Returns a hash type to be used by skb_set_hash **/ -static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype) +static inline int i40e_ptype_to_htype(u8 ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); @@ -959,7 +862,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, u8 rx_ptype) { u32 hash; - const __le64 rss_mask = + const __le64 rss_mask = cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); @@ -973,313 +876,411 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, } /** - * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split - * @rx_ring: rx ring to clean - * @budget: how many cleans we're allowed + * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_desc: pointer to the EOP Rx descriptor + * @skb: pointer to current skb being populated + * @rx_ptype: the packet type decoded by hardware * - * Returns true if there's any budget left (e.g. the clean is finished) + * This function checks the ring, descriptor, and packet information in + * order to populate the hash, checksum, VLAN, protocol, and + * other fields within the skb. **/ -static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) +static inline +void i40evf_process_skb_fields(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype) { - unsigned int total_rx_bytes = 0, total_rx_packets = 0; - u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; - u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - struct i40e_vsi *vsi = rx_ring->vsi; - u16 i = rx_ring->next_to_clean; - union i40e_rx_desc *rx_desc; - u32 rx_error, rx_status; - bool failure = false; - u8 rx_ptype; - u64 qword; - u32 copysize; + i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); - do { - struct i40e_rx_buffer *rx_bi; - struct sk_buff *skb; - u16 vlan_tag; - /* return some buffers to hardware, one at a time is too slow */ - if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - failure = failure || - i40evf_alloc_rx_buffers_ps(rx_ring, - cleaned_count); - cleaned_count = 0; - } + /* modifies the skb - consumes the enet header */ + skb->protocol = eth_type_trans(skb, rx_ring->netdev); - i = rx_ring->next_to_clean; - rx_desc = I40E_RX_DESC(rx_ring, i); - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); - if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) - break; + skb_record_rx_queue(skb, rx_ring->queue_index); +} - /* This memory barrier is needed to keep us from reading - * any other fields out of the rx_desc until we know the - * DD bit is set. - */ - dma_rmb(); - /* sync header buffer for reading */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); - rx_bi = &rx_ring->rx_bi[i]; - skb = rx_bi->skb; - if (likely(!skb)) { - skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_hdr_len, - GFP_ATOMIC | - __GFP_NOWARN); - if (!skb) { - rx_ring->rx_stats.alloc_buff_failed++; - failure = true; - break; - } +/** + * i40e_pull_tail - i40e specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an i40e specific version of __pskb_pull_tail. 
The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; - /* initialize queue mapping */ - skb_record_rx_queue(skb, rx_ring->queue_index); - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); - } - rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >> - I40E_RXD_QW1_LENGTH_HBUF_SHIFT; - rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >> - I40E_RXD_QW1_LENGTH_SPH_SHIFT; - - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); - rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); + /* it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; - /* sync half-page for reading */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->page_dma, - rx_bi->page_offset, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - prefetch(page_address(rx_bi->page) + rx_bi->page_offset); - rx_bi->skb = NULL; - cleaned_count++; - copysize = 0; - if (rx_hbo || rx_sph) { - int len; - - if (rx_hbo) - len = I40E_RX_HDR_SIZE; - else - len = rx_header_len; - memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); - } else if (skb->len == 0) { - int len; - unsigned char *va = page_address(rx_bi->page) + - rx_bi->page_offset; - - len = min(rx_packet_len, rx_ring->rx_hdr_len); - memcpy(__skb_put(skb, len), va, len); - copysize = len; - rx_packet_len -= len; - } - /* Get the rest of the data if this was a header split */ - if (rx_packet_len) { - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - rx_bi->page, - rx_bi->page_offset + copysize, - rx_packet_len, I40E_RXBUFFER_2048); - - /* If the page count is more than 2, then both halves - * of the page are used and we need to free it. Do it - * here instead of in the alloc code. Otherwise one - * of the half-pages might be released between now and - * then, and we wouldn't know which one to use. - * Don't call get_page and free_page since those are - * both expensive atomic operations that just change - * the refcount in opposite directions. Just give the - * page to the stack; he can have our refcount. - */ - if (page_count(rx_bi->page) > 2) { - dma_unmap_page(rx_ring->dev, - rx_bi->page_dma, - PAGE_SIZE, - DMA_FROM_DEVICE); - rx_bi->page = NULL; - rx_bi->page_dma = 0; - rx_ring->rx_stats.realloc_count++; - } else { - get_page(rx_bi->page); - /* switch to the other half-page here; the - * allocation code programs the right addr - * into HW. If we haven't used this half-page, - * the address won't be changed, and HW can - * just use it next time through. - */ - rx_bi->page_offset ^= PAGE_SIZE / 2; - } + /* we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ + pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE); - } - I40E_RX_INCREMENT(rx_ring, i); + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} - if (unlikely( - !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { - struct i40e_rx_buffer *next_buffer; +/** + * i40e_cleanup_headers - Correct empty headers + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being fixed + * + * Also address the case where we are pulling data in on pages only + * and as such no data is present in the skb header. + * + * In addition if skb is not at least 60 bytes we need to pad it so that + * it is large enough to qualify as a valid Ethernet frame. + * + * Returns true if an error was encountered and skb was freed. + **/ +static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb) +{ + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + i40e_pull_tail(rx_ring, skb); - next_buffer = &rx_ring->rx_bi[i]; - next_buffer->skb = skb; - rx_ring->rx_stats.non_eop_descs++; - continue; - } + /* if eth_skb_pad returns an error the skb was freed */ + if (eth_skb_pad(skb)) + return true; - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { - dev_kfree_skb_any(skb); - continue; - } + return false; +} + +/** + * i40e_reuse_rx_page - page flip buffer and store it back on the ring + * @rx_ring: rx descriptor ring to store buffers on + * @old_buff: donor buffer to have page reused + * + * Synchronizes page for reuse by the adapter + **/ +static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *old_buff) +{ + struct i40e_rx_buffer *new_buff; + u16 nta = rx_ring->next_to_alloc; - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + new_buff = &rx_ring->rx_bi[nta]; - /* probably a little skewed due to removing CRC */ - total_rx_bytes += skb->len; - total_rx_packets++; + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + /* transfer page from old buffer to new buffer */ + *new_buff = *old_buff; +} - i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); +/** + * i40e_page_is_reserved - check if reuse is possible + * @page: page struct to check + */ +static inline bool i40e_page_is_reserved(struct page *page) +{ + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); +} - vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) - ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) - : 0; -#ifdef I40E_FCOE - if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) { - dev_kfree_skb_any(skb); - continue; - } +/** + * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff + * @rx_ring: rx descriptor ring to transact packets on + * @rx_buffer: buffer containing page to add + * @rx_desc: descriptor containing length of buffer written by hardware + * @skb: sk_buff to place the data into + * + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. 
+ * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +static bool i40e_add_rx_frag(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *rx_buffer, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct page *page = rx_buffer->page; + u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; +#if (PAGE_SIZE < 8192) + unsigned int truesize = I40E_RXBUFFER_2048; +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048; #endif - i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_desc->wb.qword1.status_error_len = 0; + /* will the data fit in the skb we allocated? if so, just + * copy it as it is pretty small anyway + */ + if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; - } while (likely(total_rx_packets < budget)); + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - rx_ring->q_vector->rx.total_packets += total_rx_packets; - rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + /* page is not reserved, we can reuse buffer as-is */ + if (likely(!i40e_page_is_reserved(page))) + return true; - return failure ? budget : total_rx_packets; + /* this page cannot be reused so discard it */ + __free_pages(page, 0); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(i40e_page_is_reserved(page))) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; +#endif + + /* Even if we own the page, we are not allowed to use atomic_set() + * This would break get_page_unless_zero() users. + */ + get_page(rx_buffer->page); + + return true; } /** - * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer - * @rx_ring: rx ring to clean - * @budget: how many cleans we're allowed + * i40evf_fetch_rx_buffer - Allocate skb and populate it + * @rx_ring: rx descriptor ring to transact packets on + * @rx_desc: descriptor containing info written by hardware * - * Returns number of packets cleaned + * This function allocates an skb on the fly, and populates it with the page + * data from the current receive descriptor, taking care to set up the skb + * correctly, as well as handling calling the page recycle function if + * necessary. 
+ */ +static inline +struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc) +{ + struct i40e_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + I40E_RX_HDR_SIZE, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_buff_failed++; + return NULL; + } + + /* we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + } else { + rx_buffer->skb = NULL; + } + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + I40E_RXBUFFER_2048, + DMA_FROM_DEVICE); + + /* pull page into skb */ + if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + i40e_reuse_rx_page(rx_ring, rx_buffer); + rx_ring->rx_stats.page_reuse_count++; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->page = NULL; + + return skb; +} + +/** + * i40e_is_non_eop - process handling of non-EOP buffers + * @rx_ring: Rx ring being processed + * @rx_desc: Rx descriptor for current buffer + * @skb: Current socket buffer containing buffer in progress + * + * This function updates next to clean. If the buffer is an EOP buffer + * this function exits returning false, otherwise it will place the + * sk_buff in the next buffer to be chained and return true indicating + * that this is in fact a non-EOP buffer. + **/ +static bool i40e_is_non_eop(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + struct sk_buff *skb) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ + ntc = (ntc < rx_ring->count) ? ntc : 0; + rx_ring->next_to_clean = ntc; + + prefetch(I40E_RX_DESC(rx_ring, ntc)); + + /* if we are the last buffer then there is nothing else to do */ +#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT) + if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF))) + return false; + + /* place skb in next buffer to be received */ + rx_ring->rx_bi[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; + + return true; +} + +/** + * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * + * This function provides a "bounce buffer" approach to Rx interrupt + * processing. The advantage to this is that on systems that have + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the system. 
+ * + * Returns amount of work completed **/ -static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - struct i40e_vsi *vsi = rx_ring->vsi; - union i40e_rx_desc *rx_desc; - u32 rx_error, rx_status; - u16 rx_packet_len; bool failure = false; - u8 rx_ptype; - u64 qword; - u16 i; - do { - struct i40e_rx_buffer *rx_bi; + while (likely(total_rx_packets < budget)) { + union i40e_rx_desc *rx_desc; struct sk_buff *skb; + u32 rx_status; u16 vlan_tag; + u8 rx_ptype; + u64 qword; + /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { failure = failure || - i40evf_alloc_rx_buffers_1buf(rx_ring, - cleaned_count); + i40evf_alloc_rx_buffers(rx_ring, cleaned_count); cleaned_count = 0; } - i = rx_ring->next_to_clean; - rx_desc = I40E_RX_DESC(rx_ring, i); + rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >> - I40E_RXD_QW1_STATUS_SHIFT; + I40E_RXD_QW1_STATUS_SHIFT; if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT))) break; + /* status_error_len will always be zero for unused descriptors + * because it's cleared in cleanup, and overlaps with hdr_addr + * which is always zero because packet split isn't used, if the + * hardware wrote DD then it will be non-zero + */ + if (!rx_desc->wb.qword1.status_error_len) + break; + /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc until we know the * DD bit is set. 
*/ dma_rmb(); - rx_bi = &rx_ring->rx_bi[i]; - skb = rx_bi->skb; - prefetch(skb->data); - - rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> - I40E_RXD_QW1_LENGTH_PBUF_SHIFT; - - rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >> - I40E_RXD_QW1_ERROR_SHIFT; - rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT); + skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc); + if (!skb) + break; - rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> - I40E_RXD_QW1_PTYPE_SHIFT; - rx_bi->skb = NULL; cleaned_count++; - /* Get the header and possibly the whole packet - * If this is an skb from previous receive dma will be 0 - */ - skb_put(skb, rx_packet_len); - dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len, - DMA_FROM_DEVICE); - rx_bi->dma = 0; - - I40E_RX_INCREMENT(rx_ring, i); - - if (unlikely( - !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) { - rx_ring->rx_stats.non_eop_descs++; + if (i40e_is_non_eop(rx_ring, rx_desc, skb)) continue; - } - /* ERR_MASK will only have valid bits if EOP set */ - if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) { + /* ERR_MASK will only have valid bits if EOP set, and + * what we are doing here is actually checking + * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in + * the error field + */ + if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) { dev_kfree_skb_any(skb); continue; } - i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype); + if (i40e_cleanup_headers(rx_ring, skb)) + continue; + /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; - total_rx_packets++; - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + /* populate checksum, VLAN, and protocol */ + i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); - i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype); - vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) - ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) - : 0; + vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? + le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); - rx_desc->wb.qword1.status_error_len = 0; - } while (likely(total_rx_packets < budget)); + /* update budget accounting */ + total_rx_packets++; + } u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; @@ -1288,6 +1289,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + /* guarantee a trip back through this routine if there was a failure */ return failure ? budget : total_rx_packets; } @@ -1411,9 +1413,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. 
*/ i40e_for_each_ring(ring, q_vector->tx) { - clean_complete = clean_complete && - i40e_clean_tx_irq(ring, vsi->work_limit); - arm_wb = arm_wb || ring->arm_wb; + if (!i40e_clean_tx_irq(vsi, ring, budget)) { + clean_complete = false; + continue; + } + arm_wb |= ring->arm_wb; ring->arm_wb = false; } @@ -1427,16 +1431,12 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { - int cleaned; - - if (ring_is_ps_enabled(ring)) - cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring); - else - cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring); + int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; - /* if we didn't clean as many as budgeted, we must be done */ - clean_complete = clean_complete && (budget_per_ring > cleaned); + /* if we clean as many as budgeted, we must not be done */ + if (cleaned >= budget_per_ring) + clean_complete = false; } /* If work not completed, return budget and polling will return */ @@ -1514,15 +1514,13 @@ out: /** * i40e_tso - set up the tso context descriptor - * @tx_ring: ptr to the ring to send * @skb: ptr to the skb we're sending * @hdr_len: ptr to the size of the packet header * @cd_type_cmd_tso_mss: Quad Word 1 * * Returns 0 if no TSO can happen, 1 if tso is going, or error **/ -static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, - u8 *hdr_len, u64 *cd_type_cmd_tso_mss) +static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u64 cd_cmd, cd_tso_len, cd_mss; union { @@ -1559,16 +1557,22 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, ip.v6->payload_len = 0; } - if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_IPIP | + SKB_GSO_SIT | + SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)) { - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { + l4.udp->len = 0; + /* determine offset of outer transport header */ l4_offset = l4.hdr - skb->data; /* remove payload length from outer checksum */ - paylen = (__force u16)l4.udp->check; - paylen += ntohs(1) * (u16)~(skb->len - l4_offset); - l4.udp->check = ~csum_fold((__force __wsum)paylen); + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.udp->check, htonl(paylen)); } /* reset pointers to inner headers */ @@ -1588,9 +1592,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, l4_offset = l4.hdr - skb->data; /* remove payload length from inner checksum */ - paylen = (__force u16)l4.tcp->check; - paylen += ntohs(1) * (u16)~(skb->len - l4_offset); - l4.tcp->check = ~csum_fold((__force __wsum)paylen); + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* compute length of segmentation header */ *hdr_len = (l4.tcp->doff * 4) + l4_offset; @@ -1630,7 +1633,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, unsigned char *hdr; } l4; unsigned char *exthdr; - u32 offset, cmd = 0, tunnel = 0; + u32 offset, cmd = 0; __be16 frag_off; u8 l4_proto = 0; @@ -1644,6 +1647,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; if (skb->encapsulation) { + u32 tunnel = 0; /* define outer network header type */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { tunnel |= 
(*tx_flags & I40E_TX_FLAGS_TSO) ? @@ -1661,13 +1665,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, &l4_proto, &frag_off); } - /* compute outer L3 header size */ - tunnel |= ((l4.hdr - ip.hdr) / 4) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; - - /* switch IP header pointer from outer to inner header */ - ip.hdr = skb_inner_network_header(skb); - /* define outer transport */ switch (l4_proto) { case IPPROTO_UDP: @@ -1678,6 +1675,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, tunnel |= I40E_TXD_CTX_GRE_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; + case IPPROTO_IPIP: + case IPPROTO_IPV6: + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + l4.hdr = skb_inner_network_header(skb); + break; default: if (*tx_flags & I40E_TX_FLAGS_TSO) return -1; @@ -1686,12 +1688,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, return 0; } + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + /* compute tunnel header size */ tunnel |= ((ip.hdr - l4.hdr) / 2) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; /* indicate if we need to offload outer UDP header */ if ((*tx_flags & I40E_TX_FLAGS_TSO) && + !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; @@ -1796,35 +1806,34 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, } /** - * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet + * __i40evf_chk_linearize - Check if there are more than 8 buffers per packet * @skb: send buffer * - * Note: Our HW can't scatter-gather more than 8 fragments to build - * a packet on the wire and so we need to figure out the cases where we - * need to linearize the skb. + * Note: Our HW can't DMA more than 8 buffers to build a packet on the wire + * and so we need to figure out the cases where we need to linearize the skb. + * + * For TSO we need to count the TSO header and segment payload separately. + * As such we need to check cases where we have 7 fragments or more as we + * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for + * the segment payload in the first descriptor, and another 7 for the + * fragments. **/ bool __i40evf_chk_linearize(struct sk_buff *skb) { const struct skb_frag_struct *frag, *stale; - int gso_size, nr_frags, sum; - - /* check to see if TSO is enabled, if so we may get a repreive */ - gso_size = skb_shinfo(skb)->gso_size; - if (unlikely(!gso_size)) - return true; + int nr_frags, sum; - /* no need to check if number of frags is less than 8 */ + /* no need to check if number of frags is less than 7 */ nr_frags = skb_shinfo(skb)->nr_frags; - if (nr_frags < I40E_MAX_BUFFER_TXD) + if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) return false; /* We need to walk through the list and validate that each group * of 6 fragments totals at least gso_size. However we don't need - * to perform such validation on the first or last 6 since the first - * 6 cannot inherit any data from a descriptor before them, and the - * last 6 cannot inherit any data from a descriptor after them. + * to perform such validation on the last 6 since the last 6 cannot + * inherit any data from a descriptor after them. 
*/ - nr_frags -= I40E_MAX_BUFFER_TXD - 1; + nr_frags -= I40E_MAX_BUFFER_TXD - 2; frag = &skb_shinfo(skb)->frags[0]; /* Initialize size to the negative value of gso_size minus 1. We @@ -1833,21 +1842,21 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) * descriptors for a single transmit as the header and previous * fragment are already consuming 2 descriptors. */ - sum = 1 - gso_size; + sum = 1 - skb_shinfo(skb)->gso_size; - /* Add size of frags 1 through 5 to create our initial sum */ - sum += skb_frag_size(++frag); - sum += skb_frag_size(++frag); - sum += skb_frag_size(++frag); - sum += skb_frag_size(++frag); - sum += skb_frag_size(++frag); + /* Add size of frags 0 through 4 to create our initial sum */ + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); + sum += skb_frag_size(frag++); /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ stale = &skb_shinfo(skb)->frags[0]; for (;;) { - sum += skb_frag_size(++frag); + sum += skb_frag_size(frag++); /* if sum is negative we failed to make sufficient progress */ if (sum < 0) @@ -1857,7 +1866,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb) if (!--nr_frags) break; - sum -= skb_frag_size(++stale); + sum -= skb_frag_size(stale++); } return false; @@ -1936,6 +1945,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_bi = first; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; + if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; @@ -1943,12 +1954,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, dma_unmap_len_set(tx_bi, len, size); dma_unmap_addr_set(tx_bi, dma, dma); + /* align size to end of page */ + max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); tx_desc->buffer_addr = cpu_to_le64(dma); while (unlikely(size > I40E_MAX_DATA_PER_TXD)) { tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, - I40E_MAX_DATA_PER_TXD, td_tag); + max_data, td_tag); tx_desc++; i++; @@ -1959,9 +1972,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, i = 0; } - dma += I40E_MAX_DATA_PER_TXD; - size -= I40E_MAX_DATA_PER_TXD; + dma += max_data; + size -= max_data; + max_data = I40E_MAX_DATA_PER_TXD_ALIGNED; tx_desc->buffer_addr = cpu_to_le64(dma); } @@ -2110,7 +2124,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (i40e_chk_linearize(skb, count)) { if (__skb_linearize(skb)) goto out_drop; - count = TXD_USE_COUNT(skb->len); + count = i40e_txd_use_count(skb->len); tx_ring->tx_stats.tx_linearize++; } @@ -2141,7 +2155,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (protocol == htons(ETH_P_IPV6)) tx_flags |= I40E_TX_FLAGS_IPV6; - tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss); + tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss); if (tso < 0) goto out_drop; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index c1dd8c5c9666..0112277e5882 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -102,8 +102,8 @@ enum i40e_dyn_idx_t { (((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? 
\ I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA) -/* Supported Rx Buffer Sizes */ -#define I40E_RXBUFFER_512 512 /* Used for packet split */ +/* Supported Rx Buffer Sizes (a multiple of 128) */ +#define I40E_RXBUFFER_256 256 #define I40E_RXBUFFER_2048 2048 #define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */ #define I40E_RXBUFFER_4096 4096 @@ -114,9 +114,28 @@ enum i40e_dyn_idx_t { * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, * this adds up to 512 bytes of extra data meaning the smallest allocation * we could have is 1K. - * i.e. RXBUFFER_512 --> size-1024 slab + * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab) + * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab) */ -#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512 +#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256 +#define i40e_rx_desc i40e_32byte_rx_desc + +/** + * i40e_test_staterr - tests bits in Rx descriptor status and error fields + * @rx_desc: pointer to receive descriptor (in le64 format) + * @stat_err_bits: value to mask + * + * This function does some fast chicanery in order to return the + * value of the mask which is really only used for boolean tests. + * The status_error_len doesn't need to be shifted because it begins + * at offset zero. + */ +static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, + const u64 stat_err_bits) +{ + return !!(rx_desc->wb.qword1.status_error_len & + cpu_to_le64(stat_err_bits)); +} /* How many Rx Buffers do we bundle into one write to the hardware ? */ #define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ @@ -142,14 +161,41 @@ enum i40e_dyn_idx_t { prefetch((n)); \ } while (0) -#define i40e_rx_desc i40e_32byte_rx_desc - #define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 -#define I40E_MAX_DATA_PER_TXD 8192 + +/* The size limit for a transmit buffer in a descriptor is (16K - 1). + * In order to align with the read requests we will align the value to + * the nearest 4K which represents our maximum read request size. + */ +#define I40E_MAX_READ_REQ_SIZE 4096 +#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1) +#define I40E_MAX_DATA_PER_TXD_ALIGNED \ + (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1)) + +/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is + * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact + * that 12K is not a power of 2 and division is expensive. It is used to + * approximate the number of descriptors used per linear buffer. Note + * that this will overestimate in some cases as it doesn't account for the + * fact that we will add up to 4K - 1 in aligning the 12K buffer, however + * the error should not impact things much as large buffers usually mean + * we will use fewer descriptors than there are frags in an skb.
+ */ +static inline unsigned int i40e_txd_use_count(unsigned int size) +{ + const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED; + const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max; + unsigned int adjust = ~(u32)0; + + /* if we rounded up on the reciprocal pull down the adjustment */ + if ((max * reciprocal) > adjust) + adjust = ~(u32)(reciprocal - 1); + + return (u32)((((u64)size * reciprocal) + adjust) >> 32); +} /* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD) #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 @@ -183,10 +229,8 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { struct sk_buff *skb; - void *hdr_buf; dma_addr_t dma; struct page *page; - dma_addr_t page_dma; unsigned int page_offset; }; @@ -215,22 +259,18 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, - __I40E_RX_PS_ENABLED, - __I40E_RX_16BYTE_DESC_ENABLED, }; -#define ring_is_ps_enabled(ring) \ - test_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define set_ring_ps_enabled(ring) \ - set_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define clear_ring_ps_enabled(ring) \ - clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state) -#define ring_is_16byte_desc_enabled(ring) \ - test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) -#define set_ring_16byte_desc_enabled(ring) \ - set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) -#define clear_ring_16byte_desc_enabled(ring) \ - clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state) +/* some useful defines for virtchannel interface, which + * is the only remaining user of header split + */ +#define I40E_RX_DTYPE_NO_SPLIT 0 +#define I40E_RX_DTYPE_HEADER_SPLIT 1 +#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 +#define I40E_RX_SPLIT_L2 0x1 +#define I40E_RX_SPLIT_IP 0x2 +#define I40E_RX_SPLIT_TCP_UDP 0x4 +#define I40E_RX_SPLIT_SCTP 0x8 /* struct that defines a descriptor ring, associated with a VSI */ struct i40e_ring { @@ -249,16 +289,7 @@ struct i40e_ring { u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ - u16 rx_hdr_len; u16 rx_buf_len; - u8 dtype; -#define I40E_RX_DTYPE_NO_SPLIT 0 -#define I40E_RX_DTYPE_HEADER_SPLIT 1 -#define I40E_RX_DTYPE_SPLIT_ALWAYS 2 -#define I40E_RX_SPLIT_L2 0x1 -#define I40E_RX_SPLIT_IP 0x2 -#define I40E_RX_SPLIT_TCP_UDP 0x4 -#define I40E_RX_SPLIT_SCTP 0x8 /* used in interrupt processing */ u16 next_to_use; @@ -290,6 +321,7 @@ struct i40e_ring { struct i40e_q_vector *q_vector; /* Backreference to associated vector */ struct rcu_head rcu; /* to avoid race on free */ + u16 next_to_alloc; } ____cacheline_internodealigned_in_smp; enum i40e_latency_range { @@ -313,9 +345,7 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); -bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); -void i40evf_alloc_rx_headers(struct i40e_ring *rxr); +bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); void i40evf_clean_rx_ring(struct i40e_ring *rx_ring); @@ -359,7 +389,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) int count = 0, size = skb_headlen(skb); for (;;) { - count += TXD_USE_COUNT(size); + count += i40e_txd_use_count(size); if (!nr_frags--) 
break; @@ -395,10 +425,24 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) **/ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) { - /* we can only support up to 8 data buffers for a single send */ - if (likely(count <= I40E_MAX_BUFFER_TXD)) + /* Both TSO and single send will work if count is less than 8 */ + if (likely(count < I40E_MAX_BUFFER_TXD)) return false; - return __i40evf_chk_linearize(skb); + if (skb_is_gso(skb)) + return __i40evf_chk_linearize(skb); + + /* we can support up to 8 data buffers for a single send */ + return count != I40E_MAX_BUFFER_TXD; +} + +/** + * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE + * @ptype: the packet type field from Rx descriptor write-back + **/ +static inline bool i40e_rx_is_fcoe(u16 ptype) +{ + return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) && + (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER); } #endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 301fe2b6dd03..97f96e0d9c4c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -36,7 +36,7 @@ #include "i40e_devids.h" /* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) (mask << shift) +#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) #define I40E_MAX_VSI_QP 16 #define I40E_MAX_VF_VSI 3 @@ -258,6 +258,11 @@ struct i40e_hw_capabilities { #define I40E_FLEX10_STATUS_DCC_ERROR 0x1 #define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool sec_rev_disabled; + bool update_disabled; +#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 +#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -522,6 +527,8 @@ struct i40e_hw { enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; struct i40e_virt_mem nvm_buff; + bool nvm_release_on_done; + u16 nvm_wait_opcode; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ @@ -1329,4 +1336,46 @@ enum i40e_reset_type { /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 + +/* INPUT SET MASK for RSS, flow director and flexible payload */ +#define I40E_FD_INSET_L3_SRC_SHIFT 47 +#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_L3_SRC_SHIFT) +#define I40E_FD_INSET_L3_DST_SHIFT 35 +#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_L3_DST_SHIFT) +#define I40E_FD_INSET_L4_SRC_SHIFT 34 +#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \ + I40E_FD_INSET_L4_SRC_SHIFT) +#define I40E_FD_INSET_L4_DST_SHIFT 33 +#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \ + I40E_FD_INSET_L4_DST_SHIFT) +#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31 +#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \ + I40E_FD_INSET_VERIFY_TAG_SHIFT) + +#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17 +#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD50_SHIFT) +#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16 +#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD51_SHIFT) +#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15 +#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD52_SHIFT) +#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14 +#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD53_SHIFT) +#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13 +#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD54_SHIFT) +#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12 +#define 
I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD55_SHIFT) +#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11 +#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD56_SHIFT) +#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10 +#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ + I40E_FD_INSET_FLEX_WORD57_SHIFT) #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h index 3b9d2037456c..f04ce6cb70dc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h @@ -80,7 +80,12 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, - I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ + I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + I40E_VIRTCHNL_OP_SET_RSS_HENA = 26, + }; /* Virtual channel message descriptor. This overlays the admin queue @@ -154,6 +159,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000 struct i40e_virtchnl_vf_resource { u16 num_vsis; @@ -162,8 +168,8 @@ struct i40e_virtchnl_vf_resource { u16 max_mtu; u32 vf_offload_flags; - u32 max_fcoe_contexts; - u32 max_fcoe_filters; + u32 rss_key_size; + u32 rss_lut_size; struct i40e_virtchnl_vsi_resource vsi_res[1]; }; @@ -322,6 +328,39 @@ struct i40e_virtchnl_promisc_info { * PF replies with struct i40e_eth_stats in an external buffer. */ +/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY + * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the RSS fields in + * the VF resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct i40e_virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +struct i40e_virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table */ +}; + +/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS + * I40E_VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h + */ +struct i40e_virtchnl_rss_hena { + u64 hena; +}; + /* I40E_VIRTCHNL_OP_EVENT * PF sends this message to inform the VF driver of events that may affect it.
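Note the one-element trailing arrays in the two RSS structs above: the VF driver later sizes the real message as sizeof(struct) + n - 1 (see i40evf_set_rss_key() and i40evf_set_rss_lut() in i40evf_virtchnl.c further down). A self-contained userspace sketch of that sizing idiom, with hypothetical field values:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct rss_key_msg {
	uint16_t vsi_id;
	uint16_t key_len;
	uint8_t key[1];		/* one byte of the key is already inside sizeof() */
};

int main(void)
{
	uint8_t key[40] = { 0xaa };	/* assumed 40-byte VF RSS key */
	size_t len = sizeof(struct rss_key_msg) + sizeof(key) - 1;
	struct rss_key_msg *msg = calloc(1, len);

	assert(msg);
	msg->vsi_id = 3;		/* hypothetical VSI id */
	msg->key_len = sizeof(key);
	memcpy(msg->key, key, sizeof(key));	/* fills key[0] plus the extra bytes */
	free(msg);
	return 0;
}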
* No direct response is expected from the VF, though it may generate other diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index e657eccd232c..76ed97db28e2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -67,8 +67,6 @@ struct i40e_vsi { u16 rx_itr_setting; u16 tx_itr_setting; u16 qs_handle; - u8 *rss_hkey_user; /* User configured hash keys */ - u8 *rss_lut_user; /* User configured lookup table entries */ }; /* How many Rx Buffers do we bundle into one write to the hardware ? */ @@ -82,9 +80,6 @@ struct i40e_vsi { #define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32 /* Supported Rx Buffer Sizes */ -#define I40EVF_RXBUFFER_64 64 /* Used for packet split */ -#define I40EVF_RXBUFFER_128 128 /* Used for packet split */ -#define I40EVF_RXBUFFER_256 256 /* Used for packet split */ #define I40EVF_RXBUFFER_2048 2048 #define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ #define I40EVF_MAX_AQ_BUF_SIZE 4096 @@ -210,9 +205,6 @@ struct i40evf_adapter { u32 flags; #define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1) -#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2) -#define I40EVF_FLAG_RX_PS_ENABLED BIT(3) #define I40EVF_FLAG_IMIR_ENABLED BIT(5) #define I40EVF_FLAG_MQ_CAPABLE BIT(6) #define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7) @@ -222,6 +214,8 @@ struct i40evf_adapter { #define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11) #define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) #define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) +#define I40EVF_FLAG_PROMISC_ON BIT(15) +#define I40EVF_FLAG_ALLMULTI_ON BIT(16) /* duplicates for common code */ #define I40E_FLAG_FDIR_ATR_ENABLED 0 #define I40E_FLAG_DCB_ENABLED 0 @@ -239,8 +233,17 @@ struct i40evf_adapter { #define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6) #define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7) #define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8) -#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) +#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */ #define I40EVF_FLAG_AQ_GET_CONFIG BIT(10) +/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */ +#define I40EVF_FLAG_AQ_GET_HENA BIT(11) +#define I40EVF_FLAG_AQ_SET_HENA BIT(12) +#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13) +#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14) +#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15) +#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16) +#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17) +#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18) /* OS defined structs */ struct net_device *netdev; @@ -256,10 +259,18 @@ struct i40evf_adapter { bool netdev_registered; bool link_up; enum i40e_virtchnl_ops current_op; -#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \ - I40E_VIRTCHNL_VF_OFFLOAD_IWARP) +#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \ + (_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \ + 0) +/* RSS by the PF should be preferred over RSS via other methods. */ +#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \ + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) #define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ) +#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \ + (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \ + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF))) #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \ I40E_VIRTCHNL_VF_OFFLOAD_VLAN) struct i40e_virtchnl_vf_resource *vf_res; /* incl. 
all VSIs */ @@ -271,11 +282,16 @@ struct i40evf_adapter { struct i40e_eth_stats current_stats; struct i40e_vsi vsi; u32 aq_wait_count; + /* RSS stuff */ + u64 hena; + u16 rss_key_size; + u16 rss_lut_size; + u8 *rss_key; + u8 *rss_lut; }; /* Ethtool Private Flags */ -#define I40EVF_PRIV_FLAGS_PS BIT(0) /* needed by i40evf_ethtool.c */ extern char i40evf_driver_name[]; @@ -314,11 +330,12 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter); void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); void i40evf_request_stats(struct i40evf_adapter *adapter); void i40evf_request_reset(struct i40evf_adapter *adapter); +void i40evf_get_hena(struct i40evf_adapter *adapter); +void i40evf_set_hena(struct i40evf_adapter *adapter); +void i40evf_set_rss_key(struct i40evf_adapter *adapter); +void i40evf_set_rss_lut(struct i40evf_adapter *adapter); void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, enum i40e_virtchnl_ops v_opcode, i40e_status v_retval, u8 *msg, u16 msglen); -int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, - u16 lut_size); -int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, - u16 lut_size); +int i40evf_config_rss(struct i40evf_adapter *adapter); #endif /* _I40EVF_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index dd4430aae7fa..c9c202f6c521 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -63,12 +63,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { #define I40EVF_STATS_LEN(_dev) \ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) -static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = { - "packet-split", -}; - -#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings) - /** * i40evf_get_settings - Get Link Speed and Duplex settings * @netdev: network interface device structure @@ -103,8 +97,6 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset) { if (sset == ETH_SS_STATS) return I40EVF_STATS_LEN(netdev); - else if (sset == ETH_SS_PRIV_FLAGS) - return I40EVF_PRIV_FLAGS_STR_LEN; else return -EINVAL; } @@ -170,12 +162,6 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); p += ETH_GSTRING_LEN; } - } else if (sset == ETH_SS_PRIV_FLAGS) { - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - memcpy(data, i40evf_priv_flags_strings[i], - ETH_GSTRING_LEN); - data += ETH_GSTRING_LEN; - } } } @@ -225,7 +211,6 @@ static void i40evf_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, i40evf_driver_version, 32); strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); - drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN; } /** @@ -378,63 +363,6 @@ static int i40evf_set_coalesce(struct net_device *netdev, } /** - * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type - * @adapter: board private structure - * @cmd: ethtool rxnfc command - * - * Returns Success if the flow is supported, else Invalid Input. 
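The (since removed) ethtool hash-opts helpers below rebuild the 64-bit HENA bitmap from the two 32-bit VFQF_HENA registers before testing per-packet-type bits. The split and recombine step, modelled in plain C:

#include <assert.h>
#include <stdint.h>

#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	uint64_t hena = BIT_ULL(31) | BIT_ULL(33);	/* two pctype bits */
	uint32_t reg0 = (uint32_t)hena;			/* models wr32(HENA(0), ...) */
	uint32_t reg1 = (uint32_t)(hena >> 32);		/* models wr32(HENA(1), ...) */
	uint64_t readback = (uint64_t)reg0 | ((uint64_t)reg1 << 32);

	assert(readback == hena);
	assert(readback & BIT_ULL(33));	/* e.g. "is this pctype hashed?" */
	return 0;
}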
- **/ -static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter, - struct ethtool_rxnfc *cmd) -{ - struct i40e_hw *hw = &adapter->hw; - u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | - ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); - - /* We always hash on IP src and dest addresses */ - cmd->data = RXH_IP_SRC | RXH_IP_DST; - - switch (cmd->flow_type) { - case TCP_V4_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case UDP_V4_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - - case SCTP_V4_FLOW: - case AH_ESP_V4_FLOW: - case AH_V4_FLOW: - case ESP_V4_FLOW: - case IPV4_FLOW: - break; - - case TCP_V6_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case UDP_V6_FLOW: - if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) - cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - - case SCTP_V6_FLOW: - case AH_ESP_V6_FLOW: - case AH_V6_FLOW: - case ESP_V6_FLOW: - case IPV6_FLOW: - break; - default: - cmd->data = 0; - return -EINVAL; - } - - return 0; -} - -/** * i40evf_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -454,145 +382,8 @@ static int i40evf_get_rxnfc(struct net_device *netdev, ret = 0; break; case ETHTOOL_GRXFH: - ret = i40evf_get_rss_hash_opts(adapter, cmd); - break; - default: - break; - } - - return ret; -} - -/** - * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash - * @adapter: board private structure - * @cmd: ethtool rxnfc command - * - * Returns Success if the flow input set is supported. - **/ -static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, - struct ethtool_rxnfc *nfc) -{ - struct i40e_hw *hw = &adapter->hw; - u32 flags = adapter->vf_res->vf_offload_flags; - - u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | - ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); - - /* RSS does not support anything other than hashing - * to queues on src and dst IPs and ports - */ - if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3)) - return -EINVAL; - - /* We need at least the IP SRC and DEST fields for hashing */ - if (!(nfc->data & RXH_IP_SRC) || - !(nfc->data & RXH_IP_DST)) - return -EINVAL; - - switch (nfc->flow_type) { - case TCP_V4_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); - - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - } else { - return -EINVAL; - } - break; - case TCP_V6_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); - - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - } else { - return -EINVAL; - } - break; - case UDP_V4_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - } else { - return -EINVAL; - } - break; - case UDP_V6_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | - 
BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - } else { - return -EINVAL; - } - break; - case AH_ESP_V4_FLOW: - case AH_V4_FLOW: - case ESP_V4_FLOW: - case SCTP_V4_FLOW: - if ((nfc->data & RXH_L4_B_0_1) || - (nfc->data & RXH_L4_B_2_3)) - return -EINVAL; - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); - break; - case AH_ESP_V6_FLOW: - case AH_V6_FLOW: - case ESP_V6_FLOW: - case SCTP_V6_FLOW: - if ((nfc->data & RXH_L4_B_0_1) || - (nfc->data & RXH_L4_B_2_3)) - return -EINVAL; - hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); - break; - case IPV4_FLOW: - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; - case IPV6_FLOW: - hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; - default: - return -EINVAL; - } - - wr32(hw, I40E_VFQF_HENA(0), (u32)hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); - i40e_flush(hw); - - return 0; -} - -/** - * i40evf_set_rxnfc - command to set RX flow classification rules - * @netdev: network interface device structure - * @cmd: ethtool rxnfc command - * - * Returns Success if the command is supported. - **/ -static int i40evf_set_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *cmd) -{ - struct i40evf_adapter *adapter = netdev_priv(netdev); - int ret = -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_SRXFH: - ret = i40evf_set_rss_hash_opt(adapter, cmd); + netdev_info(netdev, + "RSS hash info is not available to the VF, use the PF.\n"); break; default: break; @@ -600,7 +391,6 @@ static int i40evf_set_rxnfc(struct net_device *netdev, return ret; } - /** * i40evf_get_channels: get the number of channels supported by the device * @netdev: network interface device structure @@ -624,6 +414,19 @@ static void i40evf_get_channels(struct net_device *netdev, } /** + * i40evf_get_rxfh_key_size - get the RSS hash key size + * @netdev: network interface device structure + * + * Returns the key size.
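The rxfh handlers that follow only convert between the u8 LUT the driver keeps and the u32 'indir' array ethtool expects, one entry per slot. A userspace sketch of that round trip, together with the modulo fill that i40evf_fill_rss_lut() uses later in this patch:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static void fill_default_lut(uint8_t *lut, size_t lut_size, unsigned int nqueues)
{
	size_t i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % nqueues;	/* spread entries across active queues */
}

int main(void)
{
	uint8_t lut[64];
	uint32_t indir[64];
	size_t i;

	fill_default_lut(lut, 64, 4);	/* assumed 64-entry LUT, 4 queues */

	for (i = 0; i < 64; i++)	/* export: each u8 entry -> one u32 slot */
		indir[i] = (uint32_t)lut[i];
	for (i = 0; i < 64; i++)	/* import: narrow back */
		lut[i] = (uint8_t)indir[i];

	assert(lut[5] == 1 && indir[63] == 3);
	return 0;
}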
+ **/ +static u32 i40evf_get_rxfh_key_size(struct net_device *netdev) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_key_size; +} + +/** * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size * @netdev: network interface device structure * @@ -631,7 +434,9 @@ static void i40evf_get_channels(struct net_device *netdev, **/ static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev) { - return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4; + struct i40evf_adapter *adapter = netdev_priv(netdev); + + return adapter->rss_lut_size; } /** @@ -646,9 +451,6 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - u8 *seed = NULL, *lut; - int ret; u16 i; if (hfunc) @@ -656,24 +458,13 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, if (!indir) return 0; - seed = key; - - lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); - if (!lut) - return -ENOMEM; - - ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); - if (ret) - goto out; + memcpy(key, adapter->rss_key, adapter->rss_key_size); /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) - indir[i] = (u32)lut[i]; + for (i = 0; i < adapter->rss_lut_size; i++) + indir[i] = (u32)adapter->rss_lut[i]; -out: - kfree(lut); - - return ret; + return 0; } /** @@ -689,8 +480,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { struct i40evf_adapter *adapter = netdev_priv(netdev); - struct i40e_vsi *vsi = &adapter->vsi; - u8 *seed = NULL; u16 i; /* We do not allow change in unsupported parameters */ @@ -701,76 +490,14 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, return 0; if (key) { - if (!vsi->rss_hkey_user) { - vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE, - GFP_KERNEL); - if (!vsi->rss_hkey_user) - return -ENOMEM; - } - memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE); - seed = vsi->rss_hkey_user; - } - if (!vsi->rss_lut_user) { - vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE, - GFP_KERNEL); - if (!vsi->rss_lut_user) - return -ENOMEM; + memcpy(adapter->rss_key, key, adapter->rss_key_size); } /* Each 32 bits pointed by 'indir' is stored with a lut entry */ - for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++) - vsi->rss_lut_user[i] = (u8)(indir[i]); - - return i40evf_config_rss(vsi, seed, vsi->rss_lut_user, - I40EVF_HLUT_ARRAY_SIZE); -} - -/** - * i40evf_get_priv_flags - report device private flags - * @dev: network interface device structure - * - * The get string set count and the string set should be matched for each - * flag returned. Add new strings for each flag to the i40e_priv_flags_strings - * array. - * - * Returns a u32 bitmap of flags. - **/ -static u32 i40evf_get_priv_flags(struct net_device *dev) -{ - struct i40evf_adapter *adapter = netdev_priv(dev); - u32 ret_flags = 0; - - ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ? 
- I40EVF_PRIV_FLAGS_PS : 0; - - return ret_flags; -} + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = (u8)(indir[i]); -/** - * i40evf_set_priv_flags - set private flags - * @dev: network interface device structure - * @flags: bit flags to be set - **/ -static int i40evf_set_priv_flags(struct net_device *dev, u32 flags) -{ - struct i40evf_adapter *adapter = netdev_priv(dev); - bool reset_required = false; - - if ((flags & I40EVF_PRIV_FLAGS_PS) && - !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { - adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; - reset_required = true; - } else if (!(flags & I40EVF_PRIV_FLAGS_PS) && - (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; - reset_required = true; - } - - /* if needed, issue reset to cause things to take effect */ - if (reset_required) - i40evf_schedule_reset(adapter); - - return 0; + return i40evf_config_rss(adapter); } static const struct ethtool_ops i40evf_ethtool_ops = { @@ -782,18 +509,16 @@ static const struct ethtool_ops i40evf_ethtool_ops = { .get_strings = i40evf_get_strings, .get_ethtool_stats = i40evf_get_ethtool_stats, .get_sset_count = i40evf_get_sset_count, - .get_priv_flags = i40evf_get_priv_flags, - .set_priv_flags = i40evf_set_priv_flags, .get_msglevel = i40evf_get_msglevel, .set_msglevel = i40evf_set_msglevel, .get_coalesce = i40evf_get_coalesce, .set_coalesce = i40evf_set_coalesce, .get_rxnfc = i40evf_get_rxnfc, - .set_rxnfc = i40evf_set_rxnfc, .get_rxfh_indir_size = i40evf_get_rxfh_indir_size, .get_rxfh = i40evf_get_rxfh, .set_rxfh = i40evf_set_rxfh, .get_channels = i40evf_get_channels, + .get_rxfh_key_size = i40evf_get_rxfh_key_size, }; /** diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 4b70aae2fa84..642bb45ed906 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -37,8 +37,8 @@ static const char i40evf_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 1 -#define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 15 +#define DRV_VERSION_MINOR 5 +#define DRV_VERSION_BUILD 10 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) \ @@ -641,28 +641,11 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) static void i40evf_configure_rx(struct i40evf_adapter *adapter) { struct i40e_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; int i; - int rx_buf_len; - - - /* Set the RX buffer length according to the mode */ - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED || - netdev->mtu <= ETH_DATA_LEN) - rx_buf_len = I40EVF_RXBUFFER_2048; - else - rx_buf_len = ALIGN(max_frame, 1024); for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); - adapter->rx_rings[i].rx_buf_len = rx_buf_len; - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - set_ring_ps_enabled(&adapter->rx_rings[i]); - adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE; - } else { - clear_ring_ps_enabled(&adapter->rx_rings[i]); - } + adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048; } } @@ -943,6 +926,21 @@ static void i40evf_set_rx_mode(struct net_device *netdev) bottom_of_search_loop: continue; } + + if (netdev->flags & IFF_PROMISC && + !(adapter->flags & I40EVF_FLAG_PROMISC_ON)) + adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC; + else if (!(netdev->flags & IFF_PROMISC) && + adapter->flags & I40EVF_FLAG_PROMISC_ON) + adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC; + + if (netdev->flags & IFF_ALLMULTI && + !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON)) + adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + else if (!(netdev->flags & IFF_ALLMULTI) && + adapter->flags & I40EVF_FLAG_ALLMULTI_ON) + adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI; + clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); } @@ -999,14 +997,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = &adapter->rx_rings[i]; - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - i40evf_alloc_rx_headers(ring); - i40evf_alloc_rx_buffers_ps(ring, ring->count); - } else { - i40evf_alloc_rx_buffers_1buf(ring, ring->count); - } - ring->next_to_use = ring->count - 1; - writel(ring->next_to_use, ring->tail); + i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); } } @@ -1224,24 +1215,18 @@ out: } /** - * i40e_config_rss_aq - Prepare for RSS using AQ commands - * @vsi: vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size + * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands + * @adapter: board private structure * * Return 0 on success, negative on failure **/ -static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, - u8 *lut, u16 lut_size) +static int i40evf_config_rss_aq(struct i40evf_adapter *adapter) { - struct i40evf_adapter *adapter = vsi->back; + struct i40e_aqc_get_set_rss_key_data *rss_key = + (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key; struct i40e_hw *hw = &adapter->hw; int ret = 0; - if (!vsi->id) - return -EINVAL; - if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n", @@ -1249,198 +1234,82 @@ static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, return -EBUSY; } - if (seed) { - struct i40e_aqc_get_set_rss_key_data *rss_key = - (struct i40e_aqc_get_set_rss_key_data *)seed; - ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key); - if (ret) { - 
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } + ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key); + if (ret) { + dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } - if (lut) { - ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size); - if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } + ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false, + adapter->rss_lut, adapter->rss_lut_size); + if (ret) { + dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n", + i40evf_stat_str(hw, ret), + i40evf_aq_str(hw, hw->aq.asq_last_status)); } return ret; + } /** * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size + * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, - const u8 *lut, u16 lut_size) +static int i40evf_config_rss_reg(struct i40evf_adapter *adapter) { - struct i40evf_adapter *adapter = vsi->back; struct i40e_hw *hw = &adapter->hw; + u32 *dw; u16 i; - if (seed) { - u32 *seed_dw = (u32 *)seed; - - for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]); - } - - if (lut) { - u32 *lut_dw = (u32 *)lut; + dw = (u32 *)adapter->rss_key; + for (i = 0; i < adapter->rss_key_size / 4; i++) + wr32(hw, I40E_VFQF_HKEY(i), dw[i]); - if (lut_size != I40EVF_HLUT_ARRAY_SIZE) - return -EINVAL; + dw = (u32 *)adapter->rss_lut; + for (i = 0; i < adapter->rss_lut_size / 4; i++) + wr32(hw, I40E_VFQF_HLUT(i), dw[i]); - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) - wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]); - } i40e_flush(hw); return 0; } /** - * * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size - * - * Return 0 on success, negative on failure - **/ -static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, - u8 *lut, u16 lut_size) -{ - struct i40evf_adapter *adapter = vsi->back; - struct i40e_hw *hw = &adapter->hw; - int ret = 0; - - if (seed) { - ret = i40evf_aq_get_rss_key(hw, vsi->id, - (struct i40e_aqc_get_set_rss_key_data *)seed); - if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot get RSS key, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } - } - - if (lut) { - ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size); - if (ret) { - dev_err(&adapter->pdev->dev, - "Cannot get RSS lut, err %s aq_err %s\n", - i40evf_stat_str(hw, ret), - i40evf_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } - } - - return ret; -} - -/** - * * i40evf_get_rss_reg - Get RSS keys and lut by reading registers - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size - * - * Returns 0 on success, negative on failure - **/ -static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed, - const u8 *lut, u16 lut_size) -{ - struct i40evf_adapter *adapter = vsi->back; - struct i40e_hw *hw = &adapter->hw; -
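The register path above walks the key and LUT one dword at a time, so each loop must run exactly size/4 iterations (a 64-byte LUT is 16 dwords, indices 0 through 15); an inclusive bound would read past the buffer and write one register too many. A quick userspace check of that bound, modelling the register writes with an array:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t lut[64];
	uint32_t regs[16];	/* stands in for I40E_VFQF_HLUT(0..15) */
	uint32_t dw[16];
	unsigned int i, writes = 0;

	memset(lut, 0xab, sizeof(lut));
	memcpy(dw, lut, sizeof(dw));	/* the driver casts in place instead */

	for (i = 0; i < sizeof(lut) / 4; i++) {	/* 16 iterations, 0..15 */
		regs[i] = dw[i];	/* models wr32(hw, I40E_VFQF_HLUT(i), dw[i]) */
		writes++;
	}
	assert(writes == sizeof(lut) / 4);
	return 0;
}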
u16 i; - - if (seed) { - u32 *seed_dw = (u32 *)seed; - - for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) - seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i)); - } - - if (lut) { - u32 *lut_dw = (u32 *)lut; - - if (lut_size != I40EVF_HLUT_ARRAY_SIZE) - return -EINVAL; - - for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) - lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i)); - } - - return 0; -} - -/** * i40evf_config_rss - Configure RSS keys and lut - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size - * - * Returns 0 on success, negative on failure - **/ -int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, - u8 *lut, u16 lut_size) -{ - struct i40evf_adapter *adapter = vsi->back; - - if (RSS_AQ(adapter)) - return i40evf_config_rss_aq(vsi, seed, lut, lut_size); - else - return i40evf_config_rss_reg(vsi, seed, lut, lut_size); -} - -/** - * i40evf_get_rss - Get RSS keys and lut - * @vsi: Pointer to vsi structure - * @seed: RSS hash seed - * @lut: Lookup table - * @lut_size: Lookup table size + * @adapter: board private structure * * Returns 0 on success, negative on failure **/ -int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size) +int i40evf_config_rss(struct i40evf_adapter *adapter) { - struct i40evf_adapter *adapter = vsi->back; - if (RSS_AQ(adapter)) - return i40evf_get_rss_aq(vsi, seed, lut, lut_size); - else - return i40evf_get_rss_reg(vsi, seed, lut, lut_size); + if (RSS_PF(adapter)) { + adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT | + I40EVF_FLAG_AQ_SET_RSS_KEY; + return 0; + } else if (RSS_AQ(adapter)) { + return i40evf_config_rss_aq(adapter); + } else { + return i40evf_config_rss_reg(adapter); + } } /** * i40evf_fill_rss_lut - Fill the lut with default values - * @lut: Lookup table to be filled with - * @rss_table_size: Lookup table size - * @rss_size: Range of queue number for hashing + * @adapter: board private structure **/ -static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) +static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter) { u16 i; - for (i = 0; i < rss_table_size; i++) - lut[i] = i % rss_size; + for (i = 0; i < adapter->rss_lut_size; i++) + adapter->rss_lut[i] = i % adapter->num_active_queues; } /** @@ -1451,42 +1320,25 @@ static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size) **/ static int i40evf_init_rss(struct i40evf_adapter *adapter) { - struct i40e_vsi *vsi = &adapter->vsi; struct i40e_hw *hw = &adapter->hw; - u8 seed[I40EVF_HKEY_ARRAY_SIZE]; - u64 hena; - u8 *lut; int ret; - /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ - if (adapter->vf_res->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) - hena = I40E_DEFAULT_RSS_HENA_EXPANDED; - else - hena = I40E_DEFAULT_RSS_HENA; - wr32(hw, I40E_VFQF_HENA(0), (u32)hena); - wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); + if (!RSS_PF(adapter)) { + /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ + if (adapter->vf_res->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED; + else + adapter->hena = I40E_DEFAULT_RSS_HENA; - lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL); - if (!lut) - return -ENOMEM; + wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena); + wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32)); + } - /* Use user configured lut if there is one, otherwise use default */ - if (vsi->rss_lut_user) - memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE); - else - i40evf_fill_rss_lut(lut, 
I40EVF_HLUT_ARRAY_SIZE, - adapter->num_active_queues); + i40evf_fill_rss_lut(adapter); - /* Use user configured hash key if there is one, otherwise - * user default. - */ - if (vsi->rss_hkey_user) - memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE); - else - netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE); - ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE); - kfree(lut); + netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size); + ret = i40evf_config_rss(adapter); return ret; } @@ -1507,7 +1359,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector), GFP_KERNEL); if (!adapter->q_vectors) - goto err_out; + return -ENOMEM; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { q_vector = &adapter->q_vectors[q_idx]; @@ -1519,15 +1371,6 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) } return 0; - -err_out: - while (q_idx) { - q_idx--; - q_vector = &adapter->q_vectors[q_idx]; - netif_napi_del(&q_vector->napi); - } - kfree(adapter->q_vectors); - return -ENOMEM; } /** @@ -1610,19 +1453,16 @@ err_set_interrupt: } /** - * i40evf_clear_rss_config_user - Clear user configurations of RSS - * @vsi: Pointer to VSI structure + * i40evf_free_rss - Free memory used by RSS structs + * @adapter: board private structure **/ -static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi) +static void i40evf_free_rss(struct i40evf_adapter *adapter) { - if (!vsi) - return; - - kfree(vsi->rss_hkey_user); - vsi->rss_hkey_user = NULL; + kfree(adapter->rss_key); + adapter->rss_key = NULL; - kfree(vsi->rss_lut_user); - vsi->rss_lut_user = NULL; + kfree(adapter->rss_lut); + adapter->rss_lut = NULL; } /** @@ -1756,6 +1596,39 @@ static void i40evf_watchdog_task(struct work_struct *work) adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS; goto watchdog_done; } + if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) { + i40evf_get_hena(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) { + i40evf_set_hena(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) { + i40evf_set_rss_key(adapter); + goto watchdog_done; + } + if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) { + i40evf_set_rss_lut(adapter); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) { + i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC | + I40E_FLAG_VF_MULTICAST_PROMISC); + goto watchdog_done; + } + + if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) { + i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC); + goto watchdog_done; + } + + if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) && + (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) { + i40evf_set_promiscuous(adapter, 0); + goto watchdog_done; + } if (adapter->state == __I40EVF_RUNNING) i40evf_request_stats(adapter); @@ -2003,6 +1876,8 @@ static void i40evf_adminq_task(struct work_struct *work) /* check for error indications */ val = rd32(hw, hw->aq.arq.len); + if (val == 0xdeadbeef) /* indicates device in reset */ + goto freedom; oldval = val; if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) { dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n"); @@ -2259,6 +2134,28 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) return 0; } +#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\ + NETIF_F_HW_VLAN_CTAG_RX |\ + NETIF_F_HW_VLAN_CTAG_FILTER) + +/** + * i40evf_fix_features - 
fix up the netdev feature bits + * @netdev: our net device + * @features: desired feature bits + * + * Returns fixed-up features bits + **/ +static netdev_features_t i40evf_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + + features &= ~I40EVF_VLAN_FEATURES; + if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) + features |= I40EVF_VLAN_FEATURES; + return features; +} + static const struct net_device_ops i40evf_netdev_ops = { .ndo_open = i40evf_open, .ndo_stop = i40evf_close, @@ -2271,6 +2168,7 @@ static const struct net_device_ops i40evf_netdev_ops = { .ndo_tx_timeout = i40evf_tx_timeout, .ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid, + .ndo_fix_features = i40evf_fix_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40evf_netpoll, #endif @@ -2307,57 +2205,61 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) **/ int i40evf_process_config(struct i40evf_adapter *adapter) { + struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res; struct net_device *netdev = adapter->netdev; + struct i40e_vsi *vsi = &adapter->vsi; int i; /* got VF config message back from PF, now we can parse it */ - for (i = 0; i < adapter->vf_res->num_vsis; i++) { - if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) - adapter->vsi_res = &adapter->vf_res->vsi_res[i]; + for (i = 0; i < vfres->num_vsis; i++) { + if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV) + adapter->vsi_res = &vfres->vsi_res[i]; } if (!adapter->vsi_res) { dev_err(&adapter->pdev->dev, "No LAN VSI found\n"); return -ENODEV; } - if (adapter->vf_res->vf_offload_flags - & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) { - netdev->vlan_features = netdev->features & - ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER); - netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; - } - netdev->features |= NETIF_F_HIGHDMA | - NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_SCTP_CRC | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_TSO_ECN | - NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_RXCSUM | - NETIF_F_GRO; - - netdev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_TSO_ECN | - NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE) - netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; - - /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features; - netdev->hw_features &= ~NETIF_F_RXCSUM; + netdev->hw_enc_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_HIGHDMA | + NETIF_F_SOFT_FEATURES | + NETIF_F_TSO | + NETIF_F_TSO_ECN | + NETIF_F_TSO6 | + NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | + NETIF_F_GSO_IPIP | + NETIF_F_GSO_SIT | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL | + NETIF_F_SCTP_CRC | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + 0; + + if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)) + netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + + /* record features VLANs can make use of */ + netdev->vlan_features |= netdev->hw_enc_features | + NETIF_F_TSO_MANGLEID; + + /* Write features and hw_features separately to avoid polluting + * with, or dropping, features that are 
set when we registered. + */ + netdev->hw_features |= netdev->hw_enc_features; + + netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES; + netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; + + /* disable VLAN features if not supported */ + if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)) + netdev->features ^= I40EVF_VLAN_FEATURES; adapter->vsi.id = adapter->vsi_res->vsi_id; @@ -2368,8 +2270,16 @@ int i40evf_process_config(struct i40evf_adapter *adapter) ITR_REG_TO_USEC(I40E_ITR_RX_DEF)); adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC | ITR_REG_TO_USEC(I40E_ITR_TX_DEF)); - adapter->vsi.netdev = adapter->netdev; - adapter->vsi.qs_handle = adapter->vsi_res->qset_handle; + vsi->netdev = adapter->netdev; + vsi->qs_handle = adapter->vsi_res->qset_handle; + if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) { + adapter->rss_key_size = vfres->rss_key_size; + adapter->rss_lut_size = vfres->rss_lut_size; + } else { + adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE; + adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE; + } + return 0; } @@ -2502,11 +2412,6 @@ static void i40evf_init_task(struct work_struct *work) adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; - adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; - adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE; - - /* Default to single buffer rx, can be changed through ethtool. */ - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; netdev->netdev_ops = &i40evf_netdev_ops; i40evf_set_ethtool_ops(netdev); @@ -2565,6 +2470,11 @@ static void i40evf_init_task(struct work_struct *work) set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_misc_irq_enable(adapter); + adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL); + adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL); + if (!adapter->rss_key || !adapter->rss_lut) + goto err_mem; + if (RSS_AQ(adapter)) { adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); @@ -2575,7 +2485,8 @@ static void i40evf_init_task(struct work_struct *work) restart: schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30)); return; - +err_mem: + i40evf_free_rss(adapter); err_register: i40evf_free_misc_irq(adapter); err_sw_init: @@ -2838,11 +2749,11 @@ static void i40evf_remove(struct pci_dev *pdev) adapter->state = __I40EVF_REMOVE; adapter->aq_required = 0; i40evf_request_reset(adapter); - msleep(20); + msleep(50); /* If the FW isn't responding, kick it once, but only once.
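i40evf_fix_features() above takes the blunt approach of clearing the whole VLAN offload group and granting it back only when the PF negotiated I40E_VIRTCHNL_VF_OFFLOAD_VLAN, so userspace can never switch on an offload the PF would ignore. The same shape in a self-contained form:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define VLAN_FEATURES	(0x1u | 0x2u | 0x4u)	/* stands in for CTAG TX/RX/FILTER */

static uint64_t fix_features(uint64_t requested, bool pf_allows_vlan)
{
	requested &= ~(uint64_t)VLAN_FEATURES;	/* start from "not supported" */
	if (pf_allows_vlan)
		requested |= VLAN_FEATURES;	/* grant the whole group back */
	return requested;
}

int main(void)
{
	assert(fix_features(0xff, false) == (0xff & ~(uint64_t)VLAN_FEATURES));
	assert(fix_features(0xff, true) == 0xff);
	return 0;
}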
*/ if (!i40evf_asq_done(hw)) { i40evf_request_reset(adapter); - msleep(20); + msleep(50); } if (adapter->msix_entries) { @@ -2857,8 +2768,7 @@ static void i40evf_remove(struct pci_dev *pdev) flush_scheduled_work(); - /* Clear user configurations for RSS */ - i40evf_clear_rss_config_user(&adapter->vsi); + i40evf_free_rss(adapter); if (hw->aq.asq.count) i40evf_shutdown_adminq(hw); @@ -2869,7 +2779,6 @@ static void i40evf_remove(struct pci_dev *pdev) iounmap(hw->hw_addr); pci_release_regions(pdev); - i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); i40evf_free_queues(adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 488e738f76c6..f13445691507 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -270,10 +270,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) vqpi->rxq.max_pkt_size = adapter->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - vqpi->rxq.splithdr_enabled = true; - vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE; - } vqpi++; } @@ -645,6 +641,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter) void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) { struct i40e_virtchnl_promisc_info vpi; + int promisc_all; if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -652,6 +649,27 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags) adapter->current_op); return; } + + promisc_all = I40E_FLAG_VF_UNICAST_PROMISC | + I40E_FLAG_VF_MULTICAST_PROMISC; + if ((flags & promisc_all) == promisc_all) { + adapter->flags |= I40EVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC; + dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n"); + } + + if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) { + adapter->flags |= I40EVF_FLAG_ALLMULTI_ON; + adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI; + dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n"); + } + + if (!flags) { + adapter->flags &= ~I40EVF_FLAG_PROMISC_ON; + adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC; + dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n"); + } + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE; vpi.vsi_id = adapter->vsi_res->vsi_id; vpi.flags = flags; @@ -681,6 +699,115 @@ void i40evf_request_stats(struct i40evf_adapter *adapter) /* if the request failed, don't lock out others */ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; } + +/** + * i40evf_get_hena + * @adapter: adapter structure + * + * Request hash enable capabilities from PF + **/ +void i40evf_get_hena(struct i40evf_adapter *adapter) +{ + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n", + adapter->current_op); + return; + } + adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS; + adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS, + NULL, 0); +} + +/** + * i40evf_set_hena + * @adapter: adapter structure + * + * Request the PF to set our RSS hash capabilities + **/ +void i40evf_set_hena(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_rss_hena vrh; + + if 
(adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n", + adapter->current_op); + return; + } + vrh.hena = adapter->hena; + adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA; + adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA, + (u8 *)&vrh, sizeof(vrh)); +} + +/** + * i40evf_set_rss_key + * @adapter: adapter structure + * + * Request the PF to set our RSS hash key + **/ +void i40evf_set_rss_key(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_rss_key *vrk; + int len; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n", + adapter->current_op); + return; + } + len = sizeof(struct i40e_virtchnl_rss_key) + + (adapter->rss_key_size * sizeof(u8)) - 1; + vrk = kzalloc(len, GFP_KERNEL); + if (!vrk) + return; + vrk->vsi_id = adapter->vsi.id; + vrk->key_len = adapter->rss_key_size; + memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size); + + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY; + adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY, + (u8 *)vrk, len); + kfree(vrk); +} + +/** + * i40evf_set_rss_lut + * @adapter: adapter structure + * + * Request the PF to set our RSS lookup table + **/ +void i40evf_set_rss_lut(struct i40evf_adapter *adapter) +{ + struct i40e_virtchnl_rss_lut *vrl; + int len; + + if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n", + adapter->current_op); + return; + } + len = sizeof(struct i40e_virtchnl_rss_lut) + + (adapter->rss_lut_size * sizeof(u8)) - 1; + vrl = kzalloc(len, GFP_KERNEL); + if (!vrl) + return; + vrl->vsi_id = adapter->vsi.id; + vrl->lut_entries = adapter->rss_lut_size; + memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size); + adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT; + adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT; + i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT, + (u8 *)vrl, len); + kfree(vrl); +} + /** * i40evf_request_reset * @adapter: adapter structure @@ -820,6 +947,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, if (v_opcode != adapter->current_op) return; break; + case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: { + struct i40e_virtchnl_rss_hena *vrh = + (struct i40e_virtchnl_rss_hena *)msg; + if (msglen == sizeof(*vrh)) + adapter->hena = vrh->hena; + else + dev_warn(&adapter->pdev->dev, + "Invalid message %d from PF\n", v_opcode); + } + break; default: if (v_opcode != adapter->current_op) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index a23aa6704394..a61447fd778e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -361,7 +361,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) if (size > 15) size = 15; - nvm->word_size = 1 << size; + nvm->word_size = BIT(size); nvm->opcode_bits = 8; nvm->delay_usec = 1; @@ -380,7 +380,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) 16 : 8; break; } - if (nvm->word_size == (1 << 15)) + if (nvm->word_size 
== BIT(15)) nvm->page_size = 128; nvm->type = e1000_nvm_eeprom_spi; @@ -391,7 +391,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw) nvm->ops.write = igb_write_nvm_spi; nvm->ops.validate = igb_validate_nvm_checksum; nvm->ops.update = igb_update_nvm_checksum; - if (nvm->word_size < (1 << 15)) + if (nvm->word_size < BIT(15)) nvm->ops.read = igb_read_nvm_eerd; else nvm->ops.read = igb_read_nvm_spi; @@ -2107,7 +2107,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) /* The PF can spoof - it has to in order to * support emulation mode NICs */ - reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS)); } else { reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | E1000_DTXSWC_VLAN_SPOOF_MASK); diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index de8805a2a2fe..199ff98209cf 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -168,16 +168,16 @@ struct e1000_adv_tx_context_desc { #define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ #define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ -#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ -#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ -#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ -#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ +#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ #define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ -#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ /* Additional DCA related definitions, note change in position of CPUID */ #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ @@ -186,8 +186,8 @@ struct e1000_adv_tx_context_desc { #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ /* ETQF register bit definitions */ -#define E1000_ETQF_FILTER_ENABLE (1 << 26) -#define E1000_ETQF_1588 (1 << 30) +#define E1000_ETQF_FILTER_ENABLE BIT(26) +#define E1000_ETQF_1588 BIT(30) /* FTQF register bit definitions */ #define E1000_FTQF_VF_BP 0x00008000 @@ -203,16 +203,16 @@ struct e1000_adv_tx_context_desc { #define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ #define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ #define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 -#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */ /* Easy defines for setting default pool, would normally be left a zero */ #define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 #define 
E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) /* Other useful VMD_CTL register defines */ -#define E1000_VT_CTL_IGNORE_MAC (1 << 28) -#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) -#define E1000_VT_CTL_VM_REPL_EN (1 << 30) +#define E1000_VT_CTL_IGNORE_MAC BIT(28) +#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29) +#define E1000_VT_CTL_VM_REPL_EN BIT(30) /* Per VM Offload register setup */ #define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ @@ -252,7 +252,7 @@ struct e1000_adv_tx_context_desc { #define E1000_DTXCTL_MDP_EN 0x0020 #define E1000_DTXCTL_SPOOF_INT 0x0040 -#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT BIT(14) #define ALL_QUEUES 0xFFFF diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index e9f23ee8f15e..2997c443c5dc 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -530,65 +530,65 @@ /* Time Sync Interrupt Cause/Mask Register Bits */ -#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ -#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ -#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */ -#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ -#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ -#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ -#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ -#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */ +#define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */ +#define TSINTR_RXTS BIT(2) /* Receive Timestamp. */ +#define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */ +#define TSINTR_TADJ BIT(7) /* Time Adjust Done. */ #define TSYNC_INTERRUPTS TSINTR_TXTS #define E1000_TSICR_TXTS TSINTR_TXTS /* TSAUXC Configuration Bits */ -#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ -#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ -#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */ -#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */ -#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ -#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ -#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */ -#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ -#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ -#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */ -#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */ -#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */ -#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */ -#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */ +#define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ +#define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +#define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. 
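Unlike the BIT() conversions elsewhere in this patch, the SDP selectors in this block are multi-bit field values (0 through 3 shifted into place), which is why they become (2u << 6) and so on rather than BIT(): the unsigned suffix keeps the shift well defined while preserving the field encoding. A minimal sketch of composing one of these registers, with TS_SDP0_SEL_MASK as an assumed helper name:

#include <assert.h>
#include <stdint.h>

#define TS_SDP0_SEL_FC0		(2u << 6)	/* a 2-bit field value, not a flag */
#define TS_SDP0_SEL_MASK	(3u << 6)	/* assumed name for the field mask */
#define TS_SDP0_EN		(1u << 8)

int main(void)
{
	uint32_t tssdp = 0;

	tssdp |= TS_SDP0_SEL_FC0;	/* route freq clock 0 to SDP0 */
	tssdp |= TS_SDP0_EN;		/* hand SDP0 to the Tsync logic */

	assert((tssdp & TS_SDP0_SEL_MASK) == TS_SDP0_SEL_FC0);
	return 0;
}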
*/ +#define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */ +#define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp Taken. */ +#define TSAUXC_PLSG BIT(17) /* Generate a pulse. */ +#define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */ /* SDP Configuration Bits */ -#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */ -#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */ -#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */ -#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */ -#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */ -#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */ -#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */ -#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */ -#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */ -#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */ -#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */ -#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */ -#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */ -#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */ -#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */ -#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */ -#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */ -#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */ -#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */ -#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */ -#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */ -#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */ -#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */ -#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */ -#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */ -#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */ -#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */ -#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */ -#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */ -#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */ +#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. 
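
The TS_SDP* selects that follow encode two-bit multiplexer fields, which is why entries that are numerically zero, such as TS_SDP0_SEL_TT0 (0u << 6), are kept: each names one of four selectable functions rather than a flag. A hedged sketch of programming such a field (E1000_TSSDP is the SDP control register used by igb's PTP code; the explicit clear mask is this sketch's, not the driver's):

        u32 tssdp = rd32(E1000_TSSDP);
        tssdp &= ~(3u << 9);                    /* clear the SDP1 function select */
        tssdp |= TS_SDP1_SEL_FC0 | TS_SDP1_EN;  /* route freq clock 0 to SDP1 */
        wr32(E1000_TSSDP, tssdp);
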
*/ +#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */ +#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */ +#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */ +#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */ #define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ #define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ @@ -997,8 +997,8 @@ #define E1000_M88E1543_FIBER_CTRL 0x0 #define E1000_EEE_ADV_DEV_I354 7 #define E1000_EEE_ADV_ADDR_I354 60 -#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ -#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +#define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */ #define E1000_PCS_STATUS_DEV_I354 3 #define E1000_PCS_STATUS_ADDR_I354 1 #define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */ diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 07cf4fe58338..5010e2232c50 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -212,7 +212,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, * bits[4-0]: which bit in the register */ regidx = vlan / 32; - vfta_delta = 1 << (vlan % 32); + vfta_delta = BIT(vlan % 32); vfta = adapter->shadow_vfta[regidx]; /* vfta_delta represents the difference between the current value @@ -243,12 +243,12 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, bits = rd32(E1000_VLVF(vlvf_index)); /* set the pool bit */ - bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); + bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); if (vlan_on) goto vlvf_update; /* clear the pool bit */ - bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); + bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind); if (!(bits & E1000_VLVF_POOLSEL_MASK)) { /* Clear VFTA first, then disable VLVF. 
Otherwise @@ -427,7 +427,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value) mta = array_rd32(E1000_MTA, hash_reg); - mta |= (1 << hash_bit); + mta |= BIT(hash_bit); array_wr32(E1000_MTA, hash_reg, mta); wrfl(); @@ -527,7 +527,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw, hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); hash_bit = hash_value & 0x1F; - hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit); mc_addr_list += (ETH_ALEN); } diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 10f5c9e016a9..00e263f0c030 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -302,9 +302,9 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) u32 vflre = rd32(E1000_VFLRE); s32 ret_val = -E1000_ERR_MBX; - if (vflre & (1 << vf_number)) { + if (vflre & BIT(vf_number)) { ret_val = 0; - wr32(E1000_VFLRE, (1 << vf_number)); + wr32(E1000_VFLRE, BIT(vf_number)); hw->mbx.stats.rsts++; } diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c index e8280d0d7f02..3582c5cf8843 100644 --- a/drivers/net/ethernet/intel/igb/e1000_nvm.c +++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c @@ -72,7 +72,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) u32 eecd = rd32(E1000_EECD); u32 mask; - mask = 0x01 << (count - 1); + mask = 1u << (count - 1); if (nvm->type == e1000_nvm_eeprom_spi) eecd |= E1000_EECD_DO; diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 969a6ddafa3b..9b622b33bb5a 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -91,10 +91,10 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw); #define I82580_ADDR_REG 16 #define I82580_CFG_REG 22 -#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15) -#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +#define I82580_CFG_ASSERT_CRS_ON_TX BIT(15) +#define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */ #define I82580_CTRL_REG 23 -#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10) +#define I82580_CTRL_DOWNSHIFT_MASK (7u << 10) /* 82580 specific PHY registers */ #define I82580_PHY_CTRL_2 18 diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 9413fa61392f..b9609afa5ca3 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -91,6 +91,14 @@ struct igb_adapter; #define NVM_COMB_VER_OFF 0x0083 #define NVM_COMB_VER_PTR 0x003d +/* Transmit and receive latency (for PTP timestamps) */ +#define IGB_I210_TX_LATENCY_10 9542 +#define IGB_I210_TX_LATENCY_100 1024 +#define IGB_I210_TX_LATENCY_1000 178 +#define IGB_I210_RX_LATENCY_10 20662 +#define IGB_I210_RX_LATENCY_100 2213 +#define IGB_I210_RX_LATENCY_1000 448 + struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; @@ -169,7 +177,7 @@ enum igb_tx_flags { * maintain a power of two alignment we have to limit ourselves to 32K. 
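
For the defines that follow: IGB_MAX_DATA_PER_TXD works out to 1u << 15 = 32768 bytes, and TXD_USE_COUNT(S) is DIV_ROUND_UP(S, 32768). Two worked cases:

        TXD_USE_COUNT(1500);    /* standard MTU frame   -> 1 descriptor  */
        TXD_USE_COUNT(65536);   /* 64 KiB TSO fragment  -> 2 descriptors */
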
*/ #define IGB_MAX_TXD_PWR 15 -#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) +#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) @@ -466,21 +474,21 @@ struct igb_adapter { u16 eee_advert; }; -#define IGB_FLAG_HAS_MSI (1 << 0) -#define IGB_FLAG_DCA_ENABLED (1 << 1) -#define IGB_FLAG_QUAD_PORT_A (1 << 2) -#define IGB_FLAG_QUEUE_PAIRS (1 << 3) -#define IGB_FLAG_DMAC (1 << 4) -#define IGB_FLAG_PTP (1 << 5) -#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6) -#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7) -#define IGB_FLAG_WOL_SUPPORTED (1 << 8) -#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9) -#define IGB_FLAG_MEDIA_RESET (1 << 10) -#define IGB_FLAG_MAS_CAPABLE (1 << 11) -#define IGB_FLAG_MAS_ENABLE (1 << 12) -#define IGB_FLAG_HAS_MSIX (1 << 13) -#define IGB_FLAG_EEE (1 << 14) +#define IGB_FLAG_HAS_MSI BIT(0) +#define IGB_FLAG_DCA_ENABLED BIT(1) +#define IGB_FLAG_QUAD_PORT_A BIT(2) +#define IGB_FLAG_QUEUE_PAIRS BIT(3) +#define IGB_FLAG_DMAC BIT(4) +#define IGB_FLAG_PTP BIT(5) +#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6) +#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7) +#define IGB_FLAG_WOL_SUPPORTED BIT(8) +#define IGB_FLAG_NEED_LINK_UPDATE BIT(9) +#define IGB_FLAG_MEDIA_RESET BIT(10) +#define IGB_FLAG_MAS_CAPABLE BIT(11) +#define IGB_FLAG_MAS_ENABLE BIT(12) +#define IGB_FLAG_HAS_MSIX BIT(13) +#define IGB_FLAG_EEE BIT(14) #define IGB_FLAG_VLAN_PROMISC BIT(15) /* Media Auto Sense */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 7982243d1f9b..64e91c575a39 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -466,7 +466,7 @@ static void igb_get_regs(struct net_device *netdev, memset(p, 0, IGB_REGS_LEN * sizeof(u32)); - regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; /* General Registers */ regs_buff[0] = rd32(E1000_CTRL); @@ -1448,7 +1448,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data) /* Test each interrupt */ for (; i < 31; i++) { /* Interrupt to test */ - mask = 1 << i; + mask = BIT(i); if (!(mask & ics_mask)) continue; @@ -2411,19 +2411,19 @@ static int igb_get_ts_info(struct net_device *dev, SOF_TIMESTAMPING_RAW_HARDWARE; info->tx_types = - (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); - info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE); /* 82576 does not support timestamping all packets. 
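
These BIT(HWTSTAMP_*) values form the bitmaps that ethtool --show-time-stamping reports, so the conversion must not move any bit position. A sketch of how a consumer of struct ethtool_ts_info would read the result:

        if (info->rx_filters & BIT(HWTSTAMP_FILTER_ALL))
                ; /* 82580 and later: every received packet can be stamped */
        else if (info->rx_filters & BIT(HWTSTAMP_FILTER_PTP_V2_EVENT))
                ; /* 82576: only PTP event packets are stamped */
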
*/ if (adapter->hw.mac.type >= e1000_82580) - info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL; + info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL); else info->rx_filters |= - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; default: @@ -2831,7 +2831,8 @@ static int igb_get_module_eeprom(struct net_device *netdev, /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ for (i = 0; i < last_word - first_word + 1; i++) { - status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); + status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2, + &dataword[i]); if (status) { /* Error occurred while reading module */ kfree(dataword); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 55a1405cb2a1..21727692bef6 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -50,6 +50,7 @@ #include <linux/aer.h> #include <linux/prefetch.h> #include <linux/pm_runtime.h> +#include <linux/etherdevice.h> #ifdef CONFIG_IGB_DCA #include <linux/dca.h> #endif @@ -150,7 +151,7 @@ static void igb_update_dca(struct igb_q_vector *); static void igb_setup_dca(struct igb_adapter *); #endif /* CONFIG_IGB_DCA */ static int igb_poll(struct napi_struct *, int); -static bool igb_clean_tx_irq(struct igb_q_vector *); +static bool igb_clean_tx_irq(struct igb_q_vector *, int); static int igb_clean_rx_irq(struct igb_q_vector *, int); static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); static void igb_tx_timeout(struct net_device *); @@ -382,7 +383,7 @@ static void igb_dump(struct igb_adapter *adapter) dev_info(&adapter->pdev->dev, "Net device Info\n"); pr_info("Device Name state trans_start last_rx\n"); pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, - netdev->state, netdev->trans_start, netdev->last_rx); + netdev->state, dev_trans_start(netdev), netdev->last_rx); } /* Print Registers */ @@ -835,7 +836,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) igb_write_ivar(hw, msix_vector, tx_queue & 0x7, ((tx_queue & 0x8) << 1) + 8); - q_vector->eims_value = 1 << msix_vector; + q_vector->eims_value = BIT(msix_vector); break; case e1000_82580: case e1000_i350: @@ -856,7 +857,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector) igb_write_ivar(hw, msix_vector, tx_queue >> 1, ((tx_queue & 0x1) << 4) + 8); - q_vector->eims_value = 1 << msix_vector; + q_vector->eims_value = BIT(msix_vector); break; default: BUG(); @@ -918,7 +919,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) E1000_GPIE_NSICR); /* enable msix_other interrupt */ - adapter->eims_other = 1 << vector; + adapter->eims_other = BIT(vector); tmp = (vector++ | E1000_IVAR_VALID) << 8; wr32(E1000_IVAR_MISC, tmp); @@ -2086,6 +2087,40 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); } +#define IGB_MAX_MAC_HDR_LEN 127 +#define IGB_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +igb_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) + return 
features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, @@ -2110,7 +2145,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, .ndo_fdb_add = igb_ndo_fdb_add, - .ndo_features_check = passthru_features_check, + .ndo_features_check = igb_features_check, }; /** @@ -2376,38 +2411,43 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_TSO6 | NETIF_F_RXHASH | NETIF_F_RXCSUM | - NETIF_F_HW_CSUM | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_TX; + NETIF_F_HW_CSUM; if (hw->mac.type >= e1000_82576) netdev->features |= NETIF_F_SCTP_CRC; +#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPIP | \ + NETIF_F_GSO_SIT | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES; + /* copy netdev features into list of user selectable features */ - netdev->hw_features |= netdev->features; - netdev->hw_features |= NETIF_F_RXALL; + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXALL; if (hw->mac.type >= e1000_i350) netdev->hw_features |= NETIF_F_NTUPLE; - /* set this bit last since it cannot be part of hw_features */ - netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - - netdev->vlan_features |= NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_HW_CSUM | - NETIF_F_SCTP_CRC; + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; netdev->mpls_features |= NETIF_F_HW_CSUM; - netdev->hw_enc_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; - netdev->priv_flags |= IFF_SUPP_NOFCS; + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } + netdev->priv_flags |= IFF_SUPP_NOFCS; netdev->priv_flags |= IFF_UNICAST_FLT; @@ -2442,9 +2482,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } - /* copy the MAC address out of the NVM */ - if (hw->mac.ops.read_mac_addr(hw)) - dev_err(&pdev->dev, "NVM Read Error\n"); + if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { + /* copy the MAC address out of the NVM */ + if (hw->mac.ops.read_mac_addr(hw)) + dev_err(&pdev->dev, "NVM Read Error\n"); + } memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len); @@ -4061,7 +4103,7 @@ static int igb_vlan_promisc_enable(struct igb_adapter *adapter) for (i = E1000_VLVF_ARRAY_SIZE; --i;) { u32 vlvf = rd32(E1000_VLVF(i)); - vlvf |= 1 << pf_id; + vlvf |= BIT(pf_id); wr32(E1000_VLVF(i), 
vlvf); } @@ -4088,7 +4130,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) /* guarantee that we don't scrub out management VLAN */ vid = adapter->mng_vlan_id; if (vid >= vid_start && vid < vid_end) - vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); if (!adapter->vfs_allocated_count) goto set_vfta; @@ -4107,7 +4149,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) if (vlvf & E1000_VLVF_VLANID_ENABLE) { /* record VLAN ID in VFTA */ - vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); /* if PF is part of this then continue */ if (test_bit(vid, adapter->active_vlans)) @@ -4115,7 +4157,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) } /* remove PF from the pool */ - bits = ~(1 << pf_id); + bits = ~BIT(pf_id); bits &= rd32(E1000_VLVF(i)); wr32(E1000_VLVF(i), bits); } @@ -4273,13 +4315,13 @@ static void igb_spoof_check(struct igb_adapter *adapter) return; for (j = 0; j < adapter->vfs_allocated_count; j++) { - if (adapter->wvbr & (1 << j) || - adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) { + if (adapter->wvbr & BIT(j) || + adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) { dev_warn(&adapter->pdev->dev, "Spoof event(s) detected on VF %d\n", j); adapter->wvbr &= - ~((1 << j) | - (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))); + ~(BIT(j) | + BIT(j + IGB_STAGGERED_QUEUE_OFFSET)); } } } @@ -4839,9 +4881,18 @@ static int igb_tso(struct igb_ring *tx_ring, struct igb_tx_buffer *first, u8 *hdr_len) { + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; - u32 vlan_macip_lens, type_tucmd; - u32 mss_l4len_idx, l4len; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -4854,45 +4905,52 @@ static int igb_tso(struct igb_ring *tx_ring, if (err < 0) return err; + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_add(lco_csum(skb), + csum_unfold(l4.tcp->check))); type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; first->tx_flags |= IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_IPV4; - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + } else { + ip.v6->payload_len = 0; first->tx_flags |= IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_CSUM; } - /* compute header lengths */ - l4len = tcp_hdrlen(skb); - *hdr_len = skb_transport_offset(skb) + l4len; + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* update gso size and bytecount with 
header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* MSS L4LEN IDX */ - mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; /* VLAN MACLEN IPLEN */ - vlan_macip_lens = skb_network_header_len(skb); - vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); @@ -5960,11 +6018,11 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) /* create mask for VF and other pools */ pool_mask = E1000_VLVF_POOLSEL_MASK; - vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf); /* drop PF from pool bits */ - pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + - adapter->vfs_allocated_count)); + pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count); /* Find the vlan filter for this id */ for (i = E1000_VLVF_ARRAY_SIZE; i--;) { @@ -5987,7 +6045,7 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) goto update_vlvf; vid = vlvf & E1000_VLVF_VLANID_MASK; - vfta_mask = 1 << (vid % 32); + vfta_mask = BIT(vid % 32); /* clear bit from VFTA */ vfta = adapter->shadow_vfta[vid / 32]; @@ -6024,7 +6082,7 @@ static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan) return idx; } -void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) +static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) { struct e1000_hw *hw = &adapter->hw; u32 bits, pf_id; @@ -6038,13 +6096,13 @@ void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) * entry other than the PF. */ pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; - bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK; + bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK; bits &= rd32(E1000_VLVF(idx)); /* Disable the filter so this falls into the default pool. 
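
The pattern here recurs throughout the patch: a VLVF entry pairs one VLAN id with per-pool enable bits, and the entry is only released once no pool references it. A condensed sketch of the read-modify-write, ignoring the VLAN-promisc special case handled just below:

        u32 bits = rd32(E1000_VLVF(idx));
        bits &= ~BIT(pf_id);                    /* drop the PF's pool reference */
        if (!(bits & E1000_VLVF_POOLSEL_MASK))
                wr32(E1000_VLVF(idx), 0);       /* no pool left: free the entry */
        else
                wr32(E1000_VLVF(idx), bits);
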
*/ if (!bits) { if (adapter->flags & IGB_FLAG_VLAN_PROMISC) - wr32(E1000_VLVF(idx), 1 << pf_id); + wr32(E1000_VLVF(idx), BIT(pf_id)); else wr32(E1000_VLVF(idx), 0); } @@ -6228,9 +6286,9 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) /* enable transmit and receive for vf */ reg = rd32(E1000_VFTE); - wr32(E1000_VFTE, reg | (1 << vf)); + wr32(E1000_VFTE, reg | BIT(vf)); reg = rd32(E1000_VFRE); - wr32(E1000_VFRE, reg | (1 << vf)); + wr32(E1000_VFRE, reg | BIT(vf)); adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS; @@ -6522,13 +6580,14 @@ static int igb_poll(struct napi_struct *napi, int budget) igb_update_dca(q_vector); #endif if (q_vector->tx.ring) - clean_complete = igb_clean_tx_irq(q_vector); + clean_complete = igb_clean_tx_irq(q_vector, budget); if (q_vector->rx.ring) { int cleaned = igb_clean_rx_irq(q_vector, budget); work_done += cleaned; - clean_complete &= (cleaned < budget); + if (cleaned >= budget) + clean_complete = false; } /* If all work not completed, return budget and keep polling */ @@ -6545,10 +6604,11 @@ static int igb_poll(struct napi_struct *napi, int budget) /** * igb_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: pointer to q_vector containing needed info + * @napi_budget: Used to determine if we are in netpoll * * returns true if ring is completely cleaned **/ -static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) +static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) { struct igb_adapter *adapter = q_vector->adapter; struct igb_ring *tx_ring = q_vector->tx.ring; @@ -6587,7 +6647,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) total_packets += tx_buffer->gso_segs; /* free the skb */ - dev_consume_skb_any(tx_buffer->skb); + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -7574,7 +7634,6 @@ static int igb_resume(struct device *dev) if (igb_init_interrupt_scheme(adapter, true)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); - rtnl_unlock(); return -ENOMEM; } @@ -7845,11 +7904,13 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, struct e1000_hw *hw = &adapter->hw; u32 rar_low, rar_high; - /* HW expects these in little endian so we reverse the byte order - * from network order (big endian) to CPU endian + /* HW expects these to be in network order when they are plugged + * into the registers which are little endian. In order to guarantee + * that ordering we need to do an leXX_to_cpup here in order to be + * ready for the byteswap that occurs with writel */ - rar_low = le32_to_cpup((__be32 *)(addr)); - rar_high = le16_to_cpup((__be16 *)(addr + 4)); + rar_low = le32_to_cpup((__le32 *)(addr)); + rar_high = le16_to_cpup((__le16 *)(addr + 4)); /* Indicate to hardware the Address is Valid. */ rar_high |= E1000_RAH_AV; @@ -7921,7 +7982,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, /* Calculate the rate factor values to set */ rf_int = link_speed / tx_rate; rf_dec = (link_speed - (rf_int * tx_rate)); - rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) / + rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; bcnrc_val = E1000_RTTBCNRC_RS_ENA; @@ -8011,11 +8072,11 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, reg_offset = (hw->mac.type == e1000_82576) ? 
E1000_DTXSWC : E1000_TXSWC; reg_val = rd32(reg_offset); if (setting) - reg_val |= ((1 << vf) | - (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); + reg_val |= (BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); else - reg_val &= ~((1 << vf) | - (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT))); + reg_val &= ~(BIT(vf) | + BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)); wr32(reg_offset, reg_val); adapter->vf_data[vf].spoofchk_enabled = setting; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 22a8a29895b4..f097c5a8ab93 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -69,9 +69,9 @@ #define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) #define IGB_PTP_TX_TIMEOUT (HZ * 15) -#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) -#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1) -#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) +#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT) +#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0) +#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT) #define IGB_NBITS_82580 40 static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); @@ -722,11 +722,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct skb_shared_hwtstamps shhwtstamps; u64 regval; + int adjust = 0; regval = rd32(E1000_TXSTMPL); regval |= (u64)rd32(E1000_TXSTMPH) << 32; igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + /* adjust timestamp for the TX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_TX_LATENCY_1000; + break; + } + } + + shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; @@ -771,6 +789,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct igb_adapter *adapter = q_vector->adapter; struct e1000_hw *hw = &adapter->hw; u64 regval; + int adjust = 0; /* If this bit is set, then the RX registers contain the time stamp. No * other packet will be time stamped until we read these registers, so @@ -790,6 +809,23 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGB_I210_RX_LATENCY_10; + break; + case SPEED_100: + adjust = IGB_I210_RX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGB_I210_RX_LATENCY_1000; + break; + } + } + skb_hwtstamps(skb)->hwtstamp = + ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + /* Update the last_rx_timestamp timer in order to enable watchdog check * for error case of latched timestamp on a dropped packet. 
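
To put the new igb.h latency constants in perspective: on an i210 at 1000 Mb/s the receive stamp is shifted by IGB_I210_RX_LATENCY_1000 = 448 ns, while at 10 Mb/s the correction grows to 20662 ns, roughly 20.7 us, so the link-speed switch above matters most on slow links. A compressed sketch of the same adjustment, ignoring the 100 Mb/s case and the mac-type check:

        int adjust = (adapter->link_speed == SPEED_1000) ?
                     IGB_I210_RX_LATENCY_1000 : IGB_I210_RX_LATENCY_10;
        skb_hwtstamps(skb)->hwtstamp =
                ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
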
*/ diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h index ae3f28332fa0..ee1ef08d7fc4 100644 --- a/drivers/net/ethernet/intel/igbvf/defines.h +++ b/drivers/net/ethernet/intel/igbvf/defines.h @@ -113,7 +113,7 @@ #define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */ /* Direct Cache Access (DCA) definitions */ -#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ #define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index b74ce53d7b52..8dea1b1367ef 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -154,7 +154,8 @@ static void igbvf_get_regs(struct net_device *netdev, memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); - regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + regs->version = (1u << 24) | + (adapter->pdev->revision << 16) | adapter->pdev->device; regs_buff[0] = er32(CTRL); diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h index f166baab8d7e..6f4290d6dc9f 100644 --- a/drivers/net/ethernet/intel/igbvf/igbvf.h +++ b/drivers/net/ethernet/intel/igbvf/igbvf.h @@ -287,8 +287,8 @@ struct igbvf_info { }; /* hardware capability, feature, and workaround flags */ -#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0) -#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1) +#define IGBVF_FLAG_RX_CSUM_DISABLED BIT(0) +#define IGBVF_FLAG_RX_LB_VLAN_BSWAP BIT(1) #define IGBVF_RX_DESC_ADV(R, i) \ (&((((R).desc))[i].rx_desc)) #define IGBVF_TX_DESC_ADV(R, i) \ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index c12442252adb..322a2d7828a5 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -964,7 +964,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, ivar = ivar & 0xFFFFFF00; ivar |= msix_vector | E1000_IVAR_VALID; } - adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector; + adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector); array_ew32(IVAR0, index, ivar); } if (tx_queue > IGBVF_NO_QUEUE) { @@ -979,7 +979,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, ivar = ivar & 0xFFFF00FF; ivar |= (msix_vector | E1000_IVAR_VALID) << 8; } - adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector; + adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector); array_ew32(IVAR0, index, ivar); } } @@ -1014,8 +1014,8 @@ static void igbvf_configure_msix(struct igbvf_adapter *adapter) ew32(IVAR_MISC, tmp); - adapter->eims_enable_mask = (1 << (vector)) - 1; - adapter->eims_other = 1 << (vector - 1); + adapter->eims_enable_mask = GENMASK(vector - 1, 0); + adapter->eims_other = BIT(vector - 1); e1e_flush(); } @@ -1367,7 +1367,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct igbvf_ring *rx_ring = adapter->rx_ring; u64 rdba; - u32 rdlen, rxdctl; + u32 rxdctl; /* disable receives */ rxdctl = er32(RXDCTL(0)); @@ -1375,8 +1375,6 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter) e1e_flush(); msleep(10); - rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); - /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring */ @@ -1933,83 +1931,74 @@ static 
void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens, buffer_info->dma = 0; } -static int igbvf_tso(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, - __be16 protocol) -{ - struct e1000_adv_tx_context_desc *context_desc; - struct igbvf_buffer *buffer_info; - u32 info = 0, tu_cmd = 0; - u32 mss_l4len_idx, l4len; - unsigned int i; +static int igbvf_tso(struct igbvf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; - *hdr_len = 0; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (!skb_is_gso(skb)) + return 0; err = skb_cow_head(skb, 0); - if (err < 0) { - dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n"); + if (err < 0) return err; - } - l4len = tcp_hdrlen(skb); - *hdr_len += l4len; - - if (protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - } + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); - i = tx_ring->next_to_use; + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; - buffer_info = &tx_ring->buffer_info[i]; - context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); - /* VLAN MACLEN IPLEN */ - if (tx_flags & IGBVF_TX_FLAGS_VLAN) - info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); - info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); - *hdr_len += skb_network_offset(skb); - info |= (skb_transport_header(skb) - skb_network_header(skb)); - *hdr_len += (skb_transport_header(skb) - skb_network_header(skb)); - context_desc->vlan_macip_lens = cpu_to_le32(info); + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_add(lco_csum(skb), + csum_unfold(l4.tcp->check))); + type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; - /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + ip.v4->tot_len = 0; + } else { + ip.v6->payload_len = 0; + } - if (protocol == htons(ETH_P_IP)) - tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; - context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; - /* MSS L4LEN IDX */ - mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); - mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); - context_desc->seqnum_seed = 0; + /* MSS L4LEN IDX */ + mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT; - buffer_info->time_stamp = jiffies; - buffer_info->dma = 0; - i++; - if (i == 
tx_ring->count) - i = 0; + /* VLAN MACLEN IPLEN */ + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK; - tx_ring->next_to_use = i; + igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); - return true; + return 1; } static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb) @@ -2091,7 +2080,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) } #define IGBVF_MAX_TXD_PWR 16 -#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) +#define IGBVF_MAX_DATA_PER_TXD (1u << IGBVF_MAX_TXD_PWR) static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, @@ -2271,8 +2260,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, first = tx_ring->next_to_use; - tso = skb_is_gso(skb) ? - igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0; + tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len); if (unlikely(tso < 0)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -2615,6 +2603,40 @@ static int igbvf_set_features(struct net_device *netdev, return 0; } +#define IGBVF_MAX_MAC_HDR_LEN 127 +#define IGBVF_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +igbvf_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. 
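
Both the igb and igbvf copies of this features_check hook enforce the same hardware limits: the advanced context descriptor's MACLEN field is 7 bits wide (at most a 127-byte L2 header) and its IPLEN field 9 bits (at most 511 bytes of L3 header). A sketch of the offsets for a plain Ethernet/IPv4/TCP frame:

        mac_hdr_len     = skb_network_header(skb) - skb->data;               /* 14 */
        network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); /* 20 */
        /* both are well inside 127/511, so no offload features are stripped */
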
+ */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + static const struct net_device_ops igbvf_netdev_ops = { .ndo_open = igbvf_open, .ndo_stop = igbvf_close, @@ -2631,7 +2653,7 @@ static const struct net_device_ops igbvf_netdev_ops = { .ndo_poll_controller = igbvf_netpoll, #endif .ndo_set_features = igbvf_set_features, - .ndo_features_check = passthru_features_check, + .ndo_features_check = igbvf_features_check, }; /** @@ -2739,22 +2761,30 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC; - netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; +#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPIP | \ + NETIF_F_GSO_SIT | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) + + netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES; + netdev->hw_features |= NETIF_F_GSO_PARTIAL | + IGBVF_GSO_PARTIAL_FEATURES; + + netdev->features = netdev->hw_features; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_SG | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_HW_CSUM | - NETIF_F_SCTP_CRC; - + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; netdev->mpls_features |= NETIF_F_HW_CSUM; - netdev->hw_enc_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; /*reset the controller to put the device in a known good state */ err = hw->mac.ops.reset_hw(hw); diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index a13baa90ae20..335ba6642145 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -266,7 +266,7 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) msgbuf[1] = vid; /* Setting the 8 bit field MSG INFO to true indicates "add" */ if (set) - msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT; + msgbuf[0] |= BIT(E1000_VT_MSGINFO_SHIFT); mbx->ops.write_posted(hw, msgbuf, 2); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index e4949af7dd6b..9f2db1855412 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -143,14 +143,11 @@ struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; - u16 default_vf_vlan_id; - u16 vlans_enabled; bool clear_to_send; bool pf_set_mac; u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ u16 pf_qos; u16 tx_rate; - u16 vlan_count; u8 spoofchk_enabled; bool rss_query_enabled; u8 trusted; @@ -173,7 +170,7 @@ struct vf_macvlans { }; #define IXGBE_MAX_TXD_PWR 14 -#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) +#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) @@ -456,7 +453,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) IXGBE_QV_STATE_POLL); #ifdef BP_EXTENDED_STATS if (rc != IXGBE_QV_STATE_IDLE) - q_vector->tx.ring->stats.yields++; + q_vector->rx.ring->stats.yields++; #endif return rc == IXGBE_QV_STATE_IDLE; } @@ -623,44 +620,45 @@ struct ixgbe_adapter { * thus the additional *_CAPABLE flags. */ u32 flags; -#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) -#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) -#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) -#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6) -#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) -#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9) -#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11) -#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12) -#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13) -#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14) -#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15) -#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16) -#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17) -#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18) -#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19) -#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20) -#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21) -#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22) -#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23) +#define IXGBE_FLAG_MSI_ENABLED BIT(1) +#define IXGBE_FLAG_MSIX_ENABLED BIT(3) +#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4) +#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5) +#define IXGBE_FLAG_RX_PS_ENABLED BIT(6) +#define IXGBE_FLAG_DCA_ENABLED BIT(8) +#define IXGBE_FLAG_DCA_CAPABLE BIT(9) +#define IXGBE_FLAG_IMIR_ENABLED BIT(10) +#define IXGBE_FLAG_MQ_CAPABLE BIT(11) +#define IXGBE_FLAG_DCB_ENABLED BIT(12) +#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13) +#define IXGBE_FLAG_VMDQ_ENABLED BIT(14) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15) +#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16) +#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19) +#define IXGBE_FLAG_FCOE_CAPABLE BIT(20) +#define IXGBE_FLAG_FCOE_ENABLED BIT(21) +#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22) +#define IXGBE_FLAG_SRIOV_ENABLED BIT(23) #define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24) #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25) #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26) +#define IXGBE_FLAG_DCB_CAPABLE BIT(27) u32 flags2; -#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) -#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) -#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) -#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3) -#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4) -#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5) -#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6) -#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) -#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) -#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) -#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) +#define IXGBE_FLAG2_RSC_CAPABLE BIT(0) 
+#define IXGBE_FLAG2_RSC_ENABLED BIT(1) +#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2) +#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3) +#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4) +#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5) +#define IXGBE_FLAG2_RESET_REQUESTED BIT(6) +#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7) +#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8) +#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9) +#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10) +#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11) #define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12) #define IXGBE_FLAG2_VLAN_PROMISC BIT(13) @@ -795,7 +793,7 @@ struct ixgbe_adapter { unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ #define IXGBE_MAX_LINK_HANDLE 10 - struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE]; + struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE]; unsigned long tables; /* maximum number of RETA entries among all devices supported by ixgbe @@ -806,6 +804,8 @@ struct ixgbe_adapter { #define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; + + bool need_crosstalk_fix; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -817,6 +817,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) return IXGBE_MAX_RSS_INDICES; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: return IXGBE_MAX_RSS_INDICES_X550; default: return 0; @@ -827,7 +828,7 @@ struct ixgbe_fdir_filter { struct hlist_node fdir_node; union ixgbe_atr_input filter; u16 sw_idx; - u16 action; + u64 action; }; enum ixgbe_state_t { @@ -860,13 +861,15 @@ enum ixgbe_boards { board_X540, board_X550, board_X550EM_x, + board_x550em_a, }; -extern struct ixgbe_info ixgbe_82598_info; -extern struct ixgbe_info ixgbe_82599_info; -extern struct ixgbe_info ixgbe_X540_info; -extern struct ixgbe_info ixgbe_X550_info; -extern struct ixgbe_info ixgbe_X550EM_x_info; +extern const struct ixgbe_info ixgbe_82598_info; +extern const struct ixgbe_info ixgbe_82599_info; +extern const struct ixgbe_info ixgbe_X540_info; +extern const struct ixgbe_info ixgbe_X550_info; +extern const struct ixgbe_info ixgbe_X550EM_x_info; +extern const struct ixgbe_info ixgbe_x550em_a_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops dcbnl_ops; #endif @@ -893,8 +896,8 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); void ixgbe_update_stats(struct ixgbe_adapter *adapter); int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); -int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, - u16 subdevice_id); +bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id); #ifdef CONFIG_PCI_IOV void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index d8a9fb8a59e2..fb51be74dd4c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -792,7 +792,7 @@ mac_reset_top: } gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); - gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); + gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6)); IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); /* @@ -914,10 +914,10 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); if (vlan_on) /* Turn on this VLAN id */ - bits |= (1 << bitindex); + bits |= BIT(bitindex); else /* Turn off this VLAN id */ - bits &= ~(1 << bitindex); + bits &= ~BIT(bitindex); IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); return 0; @@ -1160,7 +1160,7 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); } -static struct ixgbe_mac_operations mac_ops_82598 = { +static const struct ixgbe_mac_operations mac_ops_82598 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82598, .start_hw = &ixgbe_start_hw_82598, @@ -1192,9 +1192,11 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .clear_vfta = &ixgbe_clear_vfta_82598, .set_vfta = &ixgbe_set_vfta_82598, .fc_enable = &ixgbe_fc_enable_82598, + .setup_fc = ixgbe_setup_fc_generic, .set_fw_drv_ver = NULL, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, + .init_swfw_sync = NULL, .get_thermal_sensor_data = NULL, .init_thermal_sensor_thresh = NULL, .prot_autoc_read = &prot_autoc_read_generic, @@ -1203,7 +1205,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .disable_rx = &ixgbe_disable_rx_generic, }; -static struct ixgbe_eeprom_operations eeprom_ops_82598 = { +static const struct ixgbe_eeprom_operations eeprom_ops_82598 = { .init_params = &ixgbe_init_eeprom_params_generic, .read = &ixgbe_read_eerd_generic, .write = &ixgbe_write_eeprom_generic, @@ -1214,7 +1216,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = { .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; -static struct ixgbe_phy_operations phy_ops_82598 = { +static const struct ixgbe_phy_operations phy_ops_82598 = { .identify = &ixgbe_identify_phy_generic, .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82598, @@ -1230,7 +1232,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = { .check_overtemp = &ixgbe_tn_check_overtemp, }; -struct ixgbe_info ixgbe_82598_info = { +const struct ixgbe_info ixgbe_82598_info = { .mac = ixgbe_mac_82598EB, .get_invariants = &ixgbe_get_invariants_82598, .mac_ops = &mac_ops_82598, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index fa8d4f40ac2a..47afed74a54d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -1296,17 +1296,17 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \ common_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ bucket_hash ^= lo_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \ sig_hash ^= lo_hash_dword << (16 - n); \ - if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \ common_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ bucket_hash ^= hi_hash_dword >> n; \ - else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \ sig_hash ^= hi_hash_dword << (16 - n); \ } while (0) @@ -1440,9 +1440,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ - if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \ bucket_hash ^= lo_hash_dword >> n; \ - if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \ bucket_hash ^= hi_hash_dword >> n; \ } while (0) @@ -1633,6 +1633,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); break; default: @@ -2181,7 +2182,7 @@ release_i2c_access: return status; } -static struct ixgbe_mac_operations mac_ops_82599 = { +static const struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, .start_hw = &ixgbe_start_hw_82599, @@ -2220,6 +2221,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .clear_vfta = &ixgbe_clear_vfta_generic, .set_vfta = &ixgbe_set_vfta_generic, .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, .init_uta_tables = &ixgbe_init_uta_tables_generic, .setup_sfp = &ixgbe_setup_sfp_modules_82599, @@ -2227,6 +2229,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, + .init_swfw_sync = NULL, .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, .prot_autoc_read = &prot_autoc_read_82599, @@ -2235,7 +2238,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .disable_rx = &ixgbe_disable_rx_generic, }; -static struct ixgbe_eeprom_operations eeprom_ops_82599 = { +static const struct ixgbe_eeprom_operations eeprom_ops_82599 = { .init_params = &ixgbe_init_eeprom_params_generic, .read = &ixgbe_read_eeprom_82599, .read_buffer = &ixgbe_read_eeprom_buffer_82599, @@ -2246,7 +2249,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = { .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; -static struct ixgbe_phy_operations phy_ops_82599 = { +static const struct 
ixgbe_phy_operations phy_ops_82599 = { .identify = &ixgbe_identify_phy_82599, .identify_sfp = &ixgbe_identify_module_generic, .init = &ixgbe_init_phy_ops_82599, @@ -2263,7 +2266,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = { .check_overtemp = &ixgbe_tn_check_overtemp, }; -struct ixgbe_info ixgbe_82599_info = { +const struct ixgbe_info ixgbe_82599_info = { .mac = ixgbe_mac_82599EB, .get_invariants = &ixgbe_get_invariants_82599, .mac_ops = &mac_ops_82599, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 64045053e874..902d2061ce73 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -97,6 +97,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550T1: case IXGBE_DEV_ID_X550EM_X_10G_T: supported = true; break; @@ -111,12 +112,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) } /** - * ixgbe_setup_fc - Set up flow control + * ixgbe_setup_fc_generic - Set up flow control * @hw: pointer to hardware structure * * Called at init time to set up flow control. **/ -static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) { s32 ret_val = 0; u32 reg = 0, reg_bp = 0; @@ -296,7 +297,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) IXGBE_WRITE_FLUSH(hw); /* Setup flow control */ - ret_val = ixgbe_setup_fc(hw); + ret_val = hw->mac.ops.setup_fc(hw); if (ret_val) return ret_val; @@ -681,6 +682,7 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; + u16 ee_ctrl_4; u32 reg; reg = IXGBE_READ_REG(hw, IXGBE_STATUS); @@ -691,6 +693,13 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); if (reg & IXGBE_FACTPS_LFS) bus->func ^= 0x1; + + /* Get MAC instance from EEPROM for configuring CS4227 */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); + bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> + IXGBE_EE_CTRL_4_INST_ID_SHIFT; + } } /** @@ -816,8 +825,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) */ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); } if (eec & IXGBE_EEC_ADDR_SIZE) @@ -1493,7 +1502,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, * Mask is used to shift "count" bits of "data" out to the EEPROM * one bit at a time. 
Determine the starting bit based on count */ - mask = 0x01 << (count - 1); + mask = BIT(count - 1); for (i = 0; i < count; i++) { /* @@ -1982,7 +1991,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) */ vector_reg = (vector >> 5) & 0x7F; vector_bit = vector & 0x1F; - hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); + hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit); } /** @@ -2854,6 +2863,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; break; @@ -2911,10 +2921,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) mpsar_hi = 0; } } else if (vmdq < 32) { - mpsar_lo &= ~(1 << vmdq); + mpsar_lo &= ~BIT(vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); } else { - mpsar_hi &= ~(1 << (vmdq - 32)); + mpsar_hi &= ~BIT(vmdq - 32); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); } @@ -2943,11 +2953,11 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) if (vmdq < 32) { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); - mpsar |= 1 << vmdq; + mpsar |= BIT(vmdq); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); } else { mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); - mpsar |= 1 << (vmdq - 32); + mpsar |= BIT(vmdq - 32); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); } return 0; @@ -2968,11 +2978,11 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) u32 rar = hw->mac.san_mac_rar_index; if (vmdq < 32) { - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq)); IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); } else { IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); - IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32)); } return 0; @@ -3072,7 +3082,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * bits[4-0]: which bit in the register */ regidx = vlan / 32; - vfta_delta = 1 << (vlan % 32); + vfta_delta = BIT(vlan % 32); vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); /* vfta_delta represents the difference between the current value @@ -3103,12 +3113,12 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); /* set the pool bit */ - bits |= 1 << (vind % 32); + bits |= BIT(vind % 32); if (vlan_on) goto vlvf_update; /* clear the pool bit */ - bits ^= 1 << (vind % 32); + bits ^= BIT(vind % 32); if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { @@ -3300,43 +3310,25 @@ wwn_prefix_err: /** * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing * @hw: pointer to hardware structure - * @enable: enable or disable switch for anti-spoofing - * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * @enable: enable or disable switch for MAC anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing * **/ -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) { - int j; - int pf_target_reg = pf >> 3; - int pf_target_shift = pf % 8; - u32 pfvfspoof = 0; + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8; + u32 pfvfspoof; if (hw->mac.type == ixgbe_mac_82598EB) return; + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); 
if (enable) - pfvfspoof = IXGBE_SPOOF_MACAS_MASK; - - /* - * PFVFSPOOF register array is size 8 with 8 bits assigned to - * MAC anti-spoof enables in each register array element. - */ - for (j = 0; j < pf_target_reg; j++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); - - /* - * The PF should be allowed to spoof so that it can support - * emulation mode NICs. Do not set the bits assigned to the PF - */ - pfvfspoof &= (1 << pf_target_shift) - 1; - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); - - /* - * Remaining pools belong to the PF so they do not need to have - * anti-spoofing enabled. - */ - for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0); + pfvfspoof |= BIT(vf_target_shift); + else + pfvfspoof &= ~BIT(vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** @@ -3357,9 +3349,9 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - pfvfspoof |= (1 << vf_target_shift); + pfvfspoof |= BIT(vf_target_shift); else - pfvfspoof &= ~(1 << vf_target_shift); + pfvfspoof &= ~BIT(vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } @@ -3483,18 +3475,27 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * Communicates with the manageability block. On success return 0 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. **/ -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer, u32 length, u32 timeout, bool return_data) { - u32 hicr, i, bi, fwsts; u32 hdr_size = sizeof(struct ixgbe_hic_hdr); + u32 hicr, i, bi, fwsts; u16 buf_len, dword_len; + union { + struct ixgbe_hic_hdr hdr; + u32 u32arr[1]; + } *bp = buffer; + s32 status; - if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Take management host interface semaphore */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + if (status) + return status; /* Set bit 9 of FWSTS clearing FW reset indication */ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); @@ -3502,26 +3503,27 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, /* Check that the host interface is enabled. */ hicr = IXGBE_READ_REG(hw, IXGBE_HICR); - if ((hicr & IXGBE_HICR_EN) == 0) { + if (!(hicr & IXGBE_HICR_EN)) { hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } /* Calculate length in DWORDs. We must be DWORD aligned */ - if ((length % (sizeof(u32))) != 0) { + if (length % sizeof(u32)) { hw_dbg(hw, "Buffer length failure, not aligned to dword"); - return IXGBE_ERR_INVALID_ARGUMENT; + status = IXGBE_ERR_INVALID_ARGUMENT; + goto rel_out; } dword_len = length >> 2; - /* - * The device driver writes the relevant command block + /* The device driver writes the relevant command block * into the ram area. */ for (i = 0; i < dword_len; i++) IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, - i, cpu_to_le32(buffer[i])); + i, cpu_to_le32(bp->u32arr[i])); /* Setting this bit tells the ARC that a new command is pending. 
*/ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); @@ -3534,44 +3536,49 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, } /* Check command successful completion. */ - if ((timeout != 0 && i == timeout) || - (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { + if ((timeout && i == timeout) || + !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { hw_dbg(hw, "Command has failed with no status valid.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } if (!return_data) - return 0; + goto rel_out; /* Calculate length in DWORDs */ dword_len = hdr_size >> 2; /* first pull in the header so we know the buffer length */ for (bi = 0; bi < dword_len; bi++) { - buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - le32_to_cpus(&buffer[bi]); + bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + le32_to_cpus(&bp->u32arr[bi]); } /* If there is any thing in data position pull it in */ - buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; - if (buf_len == 0) - return 0; + buf_len = bp->hdr.buf_len; + if (!buf_len) + goto rel_out; - if (length < (buf_len + hdr_size)) { + if (length < round_up(buf_len, 4) + hdr_size) { hw_dbg(hw, "Buffer not large enough for reply message.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } /* Calculate length in DWORDs, add 3 for odd lengths */ dword_len = (buf_len + 3) >> 2; - /* Pull in the rest of the buffer (bi is where we left off)*/ + /* Pull in the rest of the buffer (bi is where we left off) */ for (; bi <= dword_len; bi++) { - buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); - le32_to_cpus(&buffer[bi]); + bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); + le32_to_cpus(&bp->u32arr[bi]); } - return 0; +rel_out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + + return status; } /** @@ -3594,13 +3601,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, int i; s32 ret_val; - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)) - return IXGBE_ERR_SWFW_SYNC; - fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; - fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.port_num = hw->bus.func; fw_cmd.ver_maj = maj; fw_cmd.ver_min = min; fw_cmd.ver_build = build; @@ -3612,7 +3616,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, fw_cmd.pad2 = 0; for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { - ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + ret_val = ixgbe_host_interface_command(hw, &fw_cmd, sizeof(fw_cmd), IXGBE_HI_COMMAND_TIMEOUT, true); @@ -3628,7 +3632,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, break; } - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); return ret_val; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 2b9563137fd8..6d4c260d0cbd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
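In the ixgbe_common.c changes above, ixgbe_host_interface_command() now takes void *buffer instead of u32 *, views the memory through a local union as either the generic command header or an array of 32-bit words, and acquires the IXGBE_GSSR_SW_MNG_SM semaphore itself, releasing it on every exit path via the rel_out label; that is what lets ixgbe_set_fw_drv_ver_generic() drop its own acquire/release pair. A rough standalone sketch of the union-view idea, using simplified stand-in types rather than the driver's real struct ixgbe_hic_hdr:

#include <stdint.h>
#include <stdio.h>

struct hic_hdr {			/* stand-in for struct ixgbe_hic_hdr */
	uint8_t cmd;
	uint8_t buf_len;
	uint8_t cmd_or_resp;
	uint8_t checksum;
};

/* One buffer, two views: header fields, or dwords for register I/O. */
static void host_interface_command(void *buffer, uint32_t length)
{
	union {
		struct hic_hdr hdr;
		uint32_t u32arr[1];
	} *bp = buffer;
	uint32_t i, dword_len = length / sizeof(uint32_t);

	printf("cmd 0x%02x, buf_len %u\n", bp->hdr.cmd, bp->hdr.buf_len);
	for (i = 0; i < dword_len; i++)	/* stands in for FLEX_MNG writes */
		printf("word[%u] = 0x%08x\n", i, bp->u32arr[i]);
}

int main(void)
{
	struct {
		struct hic_hdr hdr;
		uint32_t payload;
	} cmd = {
		.hdr = { .cmd = 0xDD, .buf_len = 4 },
		.payload = 0x12345678,
	};

	host_interface_command(&cmd, sizeof(cmd));
	return 0;
}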
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -81,6 +81,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_fc_generic(struct ixgbe_hw *); bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); void ixgbe_fc_autoneg(struct ixgbe_hw *hw); @@ -105,13 +106,13 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, u8 ver); -s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, - u32 length, u32 timeout, bool return_data); +s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length, + u32 timeout, bool return_data); void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); bool ixgbe_mng_present(struct ixgbe_hw *hw); bool ixgbe_mng_enabled(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 02c7333a9c83..072ef3b5fc61 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
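Promoting the file-local ixgbe_setup_fc() to an exported ixgbe_setup_fc_generic() and calling it through hw->mac.ops.setup_fc (see the ixgbe_start_hw_generic() hunk earlier) lets each MAC family override flow-control setup without touching the shared path; the same series const-qualifies the ops tables so they land in read-only data. A compressed illustration of that indirection, with names only loosely mirroring the driver's:

#include <stdio.h>

struct hw;

struct mac_operations {
	int (*setup_fc)(struct hw *hw);
};

struct hw {
	const struct mac_operations *ops;
};

static int setup_fc_generic(struct hw *hw)
{
	(void)hw;
	printf("generic flow-control setup\n");
	return 0;
}

/* const table, as in the patch, so the pointers are immutable */
static const struct mac_operations mac_ops = {
	.setup_fc = setup_fc_generic,
};

static int start_hw(struct hw *hw)
{
	return hw->ops->setup_fc(hw);	/* was a direct static call */
}

int main(void)
{
	struct hw hw = { .ops = &mac_ops };

	return start_hw(&hw);
}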
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -186,7 +186,7 @@ void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { if (tc_config[tc].dcb_pfc != pfc_disabled) - *pfc_en |= 1 << tc; + *pfc_en |= BIT(tc); } } @@ -232,7 +232,7 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) { struct tc_configuration *tc_config = &cfg->tc_config[0]; - u8 prio_mask = 1 << up; + u8 prio_mask = BIT(up); u8 tc = cfg->num_tcs.pg_tcs; /* If tc is 0 then DCB is likely not enabled or supported */ @@ -293,6 +293,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, bwgid, ptype, prio_tc); default: @@ -311,6 +312,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); default: break; @@ -368,6 +370,7 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, @@ -398,6 +401,7 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: ixgbe_dcb_read_rtrup2tc_82599(hw, map); break; default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c index d3ba63f9ad37..b79e93a5b699 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c @@ -210,7 +210,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) /* Configure PFC Tx thresholds per TC */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - if (!(pfc_en & (1 << i))) { + if (!(pfc_en & BIT(i))) { IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); continue; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c index b5cc989a3d23..1011d644978f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c @@ -248,7 +248,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) int enabled = 0; for (j = 0; j < MAX_USER_PRIORITY; j++) { - if ((prio_tc[j] == i) && (pfc_en & (1 << j))) { + if ((prio_tc[j] == i) && (pfc_en & BIT(j))) { enabled = 1; break; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 2707bda37418..b8fc3cfec831 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -62,7 +62,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) }; u8 up = dcb_getapp(adapter->netdev, &app); - if (up && !(up & (1 << adapter->fcoe.up))) + if (up && !(up & BIT(adapter->fcoe.up))) changes |= BIT_APP_UPCHG; #endif @@ -657,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, app->protocol == ETH_P_FCOE) { u8 app_mask 
= dcb_ieee_getapp_mask(dev, app); - if (app_mask & (1 << adapter->fcoe.up)) + if (app_mask & BIT(adapter->fcoe.up)) return 0; adapter->fcoe.up = app->priority; @@ -700,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, app->protocol == ETH_P_FCOE) { u8 app_mask = dcb_ieee_getapp_mask(dev, app); - if (app_mask & (1 << adapter->fcoe.up)) + if (app_mask & BIT(adapter->fcoe.up)) return 0; adapter->fcoe.up = app_mask ? diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index b3530e1e3ce1..59b771b9b354 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -533,10 +533,8 @@ static void ixgbe_get_regs(struct net_device *netdev, /* Flow Control */ regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); - regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); - regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); - regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); - regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); + for (i = 0; i < 4; i++) + regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i)); for (i = 0; i < 8; i++) { switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -547,6 +545,7 @@ static void ixgbe_get_regs(struct net_device *netdev, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); break; @@ -660,6 +659,7 @@ static void ixgbe_get_regs(struct net_device *netdev, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); for (i = 0; i < 8; i++) @@ -718,8 +718,10 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[939] = IXGBE_GET_STAT(adapter, bprc); regs_buff[940] = IXGBE_GET_STAT(adapter, mprc); regs_buff[941] = IXGBE_GET_STAT(adapter, gptc); - regs_buff[942] = IXGBE_GET_STAT(adapter, gorc); - regs_buff[944] = IXGBE_GET_STAT(adapter, gotc); + regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc); + regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32); + regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc); + regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32); for (i = 0; i < 8; i++) regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]); regs_buff[954] = IXGBE_GET_STAT(adapter, ruc); @@ -729,7 +731,8 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc); regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc); regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc); - regs_buff[961] = IXGBE_GET_STAT(adapter, tor); + regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor); + regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32); regs_buff[963] = IXGBE_GET_STAT(adapter, tpr); regs_buff[964] = IXGBE_GET_STAT(adapter, tpt); regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64); @@ -801,15 +804,11 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); regs_buff[1100] = 
IXGBE_READ_REG(hw, IXGBE_TDPROBE); regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); - regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); - regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); - regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); - regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); + for (i = 0; i < 4; i++) + regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i)); regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); - regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); - regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); - regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); - regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); + for (i = 0; i < 4; i++) + regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i)); for (i = 0; i < 8; i++) regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); @@ -1443,6 +1442,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: toggle = 0x7FFFF30F; test = reg_test_82599; break; @@ -1583,7 +1583,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) /* Test each interrupt */ for (; i < 10; i++) { /* Interrupt to test */ - mask = 1 << i; + mask = BIT(i); if (!shared_int) { /* @@ -1681,6 +1681,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); reg_ctl &= ~IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); @@ -1720,6 +1721,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); reg_data |= IXGBE_DMATXCTL_TE; IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); @@ -1780,6 +1782,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); reg_data |= IXGBE_MACC_FLU; IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); @@ -2991,6 +2994,7 @@ static int ixgbe_get_ts_info(struct net_device *dev, switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: case ixgbe_mac_X540: case ixgbe_mac_82599EB: info->so_timestamping = @@ -3007,14 +3011,14 @@ static int ixgbe_get_ts_info(struct net_device *dev, info->phc_index = -1; info->tx_types = - (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); + BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); info->rx_filters = - (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT); break; default: return ethtool_op_get_ts_info(dev, info); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index e771e764daa3..bcdc88444ceb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - 
Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -128,6 +128,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (num_tcs > 4) { /* * TCs : TC0/1 TC2/3 TC4-7 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7df3fe29b210..9f3677c7e96f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -53,15 +53,7 @@ #include <net/vxlan.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> - -#ifdef CONFIG_OF -#include <linux/of_net.h> -#endif - -#ifdef CONFIG_SPARC -#include <asm/idprom.h> -#include <asm/prom.h> -#endif +#include <net/tc_act/tc_mirred.h> #include "ixgbe.h" #include "ixgbe_common.h" @@ -79,10 +71,10 @@ char ixgbe_default_device_descr[] = static char ixgbe_default_device_descr[] = "Intel(R) 10 Gigabit Network Connection"; #endif -#define DRV_VERSION "4.2.1-k" +#define DRV_VERSION "4.4.0-k" const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = - "Copyright (c) 1999-2015 Intel Corporation."; + "Copyright (c) 1999-2016 Intel Corporation."; static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. 
If the problem persists, power off the system and replace the adapter"; @@ -92,6 +84,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_X540] = &ixgbe_X540_info, [board_X550] = &ixgbe_X550_info, [board_X550EM_x] = &ixgbe_X550EM_x_info, + [board_x550em_a] = &ixgbe_x550em_a_info, }; /* ixgbe_pci_tbl - PCI Device ID Table @@ -134,10 +127,17 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a }, /* required last entry */ {0, } }; @@ -372,6 +372,27 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) if (ixgbe_removed(reg_addr)) return IXGBE_FAILED_READ_REG; + if (unlikely(hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) { + struct ixgbe_adapter *adapter; + int i; + + for (i = 0; i < 200; ++i) { + value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY); + if (likely(!value)) + goto writes_completed; + if (value == IXGBE_FAILED_READ_REG) { + ixgbe_remove_adapter(hw); + return IXGBE_FAILED_READ_REG; + } + udelay(5); + } + + adapter = hw->back; + e_warn(hw, "register writes incomplete %08x\n", value); + } + +writes_completed: value = readl(reg_addr + reg); if (unlikely(value == IXGBE_FAILED_READ_REG)) ixgbe_check_remove(hw, reg); @@ -588,7 +609,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter) pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, netdev->state, - netdev->trans_start, + dev_trans_start(netdev), netdev->last_rx); } @@ -869,6 +890,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (direction == -1) { /* other causes */ msix_vector |= IXGBE_IVAR_ALLOC_VAL; @@ -907,6 +929,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask = (qmask & 0xFFFFFFFF); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); mask = (qmask >> 32); @@ -1087,9 +1110,40 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) } /** + * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate + **/ +static int ixgbe_tx_maxrate(struct net_device *netdev, + int queue_index, u32 maxrate) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 bcnrc_val = ixgbe_link_mbps(adapter); + + if (!maxrate) + return 0; + + /* Calculate the rate factor values to set */ + bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; + bcnrc_val /= maxrate; + + /* clear everything but the rate factor */ + bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | + IXGBE_RTTBCNRC_RF_DEC_MASK; + + /* enable the rate scheduler */ + 
bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + + return 0; +} + +/** * ixgbe_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: structure containing interrupt and ring information * @tx_ring: tx ring to clean + * @napi_budget: Used to determine if we are in netpoll **/ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, struct ixgbe_ring *tx_ring, int napi_budget) @@ -2192,7 +2246,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) /* Populate MSIX to EITR Select */ if (adapter->num_vfs > 32) { - u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; + u32 eitrsel = BIT(adapter->num_vfs - 32) - 1; IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); } @@ -2222,6 +2276,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: ixgbe_set_ivar(adapter, -1, 1, v_idx); break; default: @@ -2333,6 +2388,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* * set the WDIS bit to not clear the timer bits and cause an * immediate assertion of the interrupt @@ -2494,6 +2550,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) return false; case ixgbe_mac_82599EB: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: switch (hw->mac.ops.get_media_type(hw)) { case ixgbe_media_type_fiber: case ixgbe_media_type_fiber_qsfp: @@ -2568,6 +2625,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); @@ -2596,6 +2654,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask = (qmask & 0xFFFFFFFF); if (mask) IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); @@ -2631,6 +2690,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: mask |= IXGBE_EIMS_TS; break; default: @@ -2646,7 +2706,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: - if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP) + case ixgbe_mac_x550em_a: + if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || + adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || + adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw); if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) mask |= IXGBE_EICR_GPI_SDP0_X540; @@ -2704,6 +2767,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (hw->phy.type == ixgbe_phy_x550em_ext_t && (eicr & IXGBE_EICR_GPI_SDP0_X540)) { adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; @@ -2786,8 +2850,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget) ixgbe_update_dca(q_vector); #endif - ixgbe_for_each_ring(ring, q_vector->tx) - clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget); + ixgbe_for_each_ring(ring, q_vector->tx) { + if (!ixgbe_clean_tx_irq(q_vector, ring, budget)) + clean_complete = 
false; + } /* Exit if we are called by netpoll or busy polling is active */ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) @@ -2805,7 +2871,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget) per_ring_budget); work_done += cleaned; - clean_complete &= (cleaned < per_ring_budget); + if (cleaned >= per_ring_budget) + clean_complete = false; } ixgbe_qv_unlock_napi(q_vector); @@ -2818,7 +2885,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) if (adapter->rx_itr_setting & 1) ixgbe_set_itr(q_vector); if (!test_bit(__IXGBE_DOWN, &adapter->state)) - ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); + ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx)); return 0; } @@ -2937,6 +3004,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; @@ -3033,6 +3101,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); @@ -3109,15 +3178,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, * currently 40. */ if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) - txdctl |= (1 << 16); /* WTHRESH = 1 */ + txdctl |= 1u << 16; /* WTHRESH = 1 */ else - txdctl |= (8 << 16); /* WTHRESH = 8 */ + txdctl |= 8u << 16; /* WTHRESH = 8 */ /* * Setting PTHRESH to 32 both improves performance * and avoids a TX hang with DFP enabled */ - txdctl |= (1 << 8) | /* HTHRESH = 1 */ + txdctl |= (1u << 8) | /* HTHRESH = 1 */ 32; /* PTHRESH = 32 */ /* reinitialize flowdirector state */ @@ -3669,9 +3738,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) return; if (rss_i > 3) - psrtype |= 2 << 29; + psrtype |= 2u << 29; else if (rss_i > 1) - psrtype |= 1 << 29; + psrtype |= 1u << 29; for_each_set_bit(pool, &adapter->fwd_bitmask, 32) IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); @@ -3698,9 +3767,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0; /* Enable only the PF's pool for Tx/Rx */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift)); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift)); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); if (adapter->bridge_mode == BRIDGE_MODE_VEB) IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); @@ -3729,34 +3798,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); - - /* Enable MAC Anti-Spoofing */ - hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), - adapter->num_vfs); - - /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be - * calling set_ethertype_anti_spoofing for each VF in loop below - */ - if (hw->mac.ops.set_ethertype_anti_spoofing) { - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), - (IXGBE_ETQF_FILTER_EN | - IXGBE_ETQF_TX_ANTISPOOF | - IXGBE_ETH_P_LLDP)); - - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), - (IXGBE_ETQF_FILTER_EN | - IXGBE_ETQF_TX_ANTISPOOF | - ETH_P_PAUSE)); - } - - /* For VFs that have spoof checking turned off */ for (i = 0; i < adapter->num_vfs; i++) { - if (!adapter->vfinfo[i].spoofchk_enabled) - ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); - - /* enable ethertype anti spoofing if hw supports it */ - if (hw->mac.ops.set_ethertype_anti_spoofing) - hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i); + /* configure spoof checking */ + ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, + adapter->vfinfo[i].spoofchk_enabled); /* Enable/Disable RSS query feature */ ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, @@ -3832,6 +3877,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: if (adapter->num_vfs) rdrxctl |= IXGBE_RDRXCTL_PSP; /* fall through for older HW */ @@ -3908,7 +3954,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* add VID to filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true); + if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid); + set_bit(vid, adapter->active_vlans); return 0; @@ -3947,7 +3995,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) * entry other than the PF. */ word = idx * 2 + (VMDQ_P(0) / 32); - bits = ~(1 << (VMDQ_P(0)) % 32); + bits = ~BIT(VMDQ_P(0) % 32); bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); /* Disable the filter so this falls into the default pool. 
*/ @@ -3965,9 +4013,7 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, struct ixgbe_hw *hw = &adapter->hw; /* remove VID from filter table */ - if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) - ixgbe_update_pf_promisc_vlvf(adapter, vid); - else + if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true); clear_bit(vid, adapter->active_vlans); @@ -3995,6 +4041,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -4031,6 +4078,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *ring = adapter->rx_ring[i]; @@ -4057,6 +4105,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: default: if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) break; @@ -4081,7 +4130,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); - vlvfb |= 1 << (VMDQ_P(0) % 32); + vlvfb |= BIT(VMDQ_P(0) % 32); IXGBE_WRITE_REG(hw, reg_offset, vlvfb); } @@ -4111,7 +4160,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) if (vlvf) { /* record VLAN ID in VFTA */ - vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + vfta[(vid - vid_start) / 32] |= BIT(vid % 32); /* if PF is part of this then continue */ if (test_bit(vid, adapter->active_vlans)) @@ -4120,7 +4169,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) /* remove PF from the pool */ word = i * 2 + VMDQ_P(0) / 32; - bits = ~(1 << (VMDQ_P(0) % 32)); + bits = ~BIT(VMDQ_P(0) % 32); bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); } @@ -4147,6 +4196,7 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: default: if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) break; @@ -4172,11 +4222,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) { - u16 vid; + u16 vid = 1; ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } @@ -4426,6 +4476,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; + netdev_features_t features = netdev->features; int count; /* Check for Promiscuous and All Multicast modes */ @@ -4443,14 +4494,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev) hw->addr_ctrl.user_set_promisc = true; fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); vmolr |= IXGBE_VMOLR_MPE; - ixgbe_vlan_promisc_enable(adapter); + features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; } else { if (netdev->flags & IFF_ALLMULTI) { fctrl |= IXGBE_FCTRL_MPE; vmolr |= IXGBE_VMOLR_MPE; } 
hw->addr_ctrl.user_set_promisc = false; - ixgbe_vlan_promisc_disable(adapter); } /* @@ -4483,7 +4533,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev) } /* This is useful for sniffing bad packets. */ - if (adapter->netdev->features & NETIF_F_RXALL) { + if (features & NETIF_F_RXALL) { /* UPE and MPE will be handled by normal PROMISC logic * in e1000e_set_rx_mode */ fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ @@ -4496,10 +4546,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev) IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + if (features & NETIF_F_HW_VLAN_CTAG_RX) ixgbe_vlan_strip_enable(adapter); else ixgbe_vlan_strip_disable(adapter); + + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) + ixgbe_vlan_promisc_disable(adapter); + else + ixgbe_vlan_promisc_enable(adapter); } static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) @@ -4530,6 +4585,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter) switch (adapter->hw.mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0); adapter->vxlan_port = 0; break; @@ -4630,6 +4686,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: dv_id = IXGBE_DV_X540(link, tc); break; default: @@ -4690,6 +4747,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: dv_id = IXGBE_LOW_DV_X540(tc); break; default: @@ -4805,9 +4863,9 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) return; if (rss_i > 3) - psrtype |= 2 << 29; + psrtype |= 2u << 29; else if (rss_i > 1) - psrtype |= 1 << 29; + psrtype |= 1u << 29; IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); } @@ -4871,7 +4929,7 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, /* shutdown specific queue receive and wait for dma to settle */ ixgbe_disable_rx_queue(adapter, rx_ring); usleep_range(10000, 20000); - ixgbe_irq_disable_queues(adapter, ((u64)1 << index)); + ixgbe_irq_disable_queues(adapter, BIT_ULL(index)); ixgbe_clean_rx_ring(rx_ring); rx_ring->l2_accel_priv = NULL; } @@ -5106,6 +5164,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: default: IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); @@ -5156,6 +5215,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X; break; case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: gpie |= IXGBE_SDP0_GPIEN_X540; break; default: @@ -5228,7 +5288,7 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) { WARN_ON(in_interrupt()); /* put off any impending NetWatchDogTimeout */ - adapter->netdev->trans_start = jiffies; + netif_trans_update(adapter->netdev); while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); @@ -5467,6 +5527,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & ~IXGBE_DMATXCTL_TE)); @@ -5498,6 +5559,58 @@ static void ixgbe_tx_timeout(struct net_device *netdev) ixgbe_tx_timeout_reset(adapter); } 
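In the ixgbe_set_rx_mode() hunks above, promiscuous mode no longer calls ixgbe_vlan_promisc_enable() directly; it clears NETIF_F_HW_VLAN_CTAG_FILTER in a local copy of the feature flags, and a single check at the end of the function decides between ixgbe_vlan_promisc_disable() and ixgbe_vlan_promisc_enable(). A toy model of that control flow, with made-up flag values standing in for the kernel's:

#include <stdio.h>

#define IFF_PROMISC		0x1	/* stand-in values, not the kernel's */
#define F_HW_VLAN_FILTER	0x2	/* for NETIF_F_HW_VLAN_CTAG_FILTER */

static void set_rx_mode(unsigned int if_flags, unsigned int features)
{
	/* promiscuous mode implies no hardware VLAN filtering */
	if (if_flags & IFF_PROMISC)
		features &= ~F_HW_VLAN_FILTER;

	/* one decision point instead of enable/disable in two branches */
	if (features & F_HW_VLAN_FILTER)
		printf("vlan_promisc_disable(): hardware filters VLANs\n");
	else
		printf("vlan_promisc_enable(): all VLANs pass\n");
}

int main(void)
{
	set_rx_mode(0, F_HW_VLAN_FILTER);		/* normal operation */
	set_rx_mode(IFF_PROMISC, F_HW_VLAN_FILTER);	/* promiscuous */
	return 0;
}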
+#ifdef CONFIG_IXGBE_DCB +static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct tc_configuration *tc; + int j; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + case ixgbe_mac_82599EB: + adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; + break; + case ixgbe_mac_X540: + case ixgbe_mac_X550: + adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; + break; + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + default: + adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS; + break; + } + + /* Configure DCB traffic classes */ + for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { + tc = &adapter->dcb_cfg.tc_config[j]; + tc->path[DCB_TX_CONFIG].bwg_id = 0; + tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); + tc->path[DCB_RX_CONFIG].bwg_id = 0; + tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); + tc->dcb_pfc = pfc_disabled; + } + + /* Initialize default user to priority mapping, UPx->TC0 */ + tc = &adapter->dcb_cfg.tc_config[0]; + tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; + tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; + + adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; + adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; + adapter->dcb_cfg.pfc_mode_enable = false; + adapter->dcb_set_bitmap = 0x00; + if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; + memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, + sizeof(adapter->temp_dcb_cfg)); +} +#endif + /** * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) * @adapter: board private structure to initialize @@ -5512,10 +5625,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) struct pci_dev *pdev = adapter->pdev; unsigned int rss, fdir; u32 fwsm; -#ifdef CONFIG_IXGBE_DCB - int j; - struct tc_configuration *tc; -#endif + u16 device_caps; + int i; /* PCI config space info */ @@ -5537,6 +5648,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCA adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; #endif +#ifdef CONFIG_IXGBE_DCB + adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; +#endif #ifdef IXGBE_FCOE adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; @@ -5547,7 +5662,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #endif /* IXGBE_FCOE */ /* initialize static ixgbe jump table entries */ - adapter->jump_tables[0] = ixgbe_ipv4_fields; + adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]), + GFP_KERNEL); + if (!adapter->jump_tables[0]) + return -ENOMEM; + adapter->jump_tables[0]->mat = ixgbe_ipv4_fields; + + for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) + adapter->jump_tables[i] = NULL; adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * hw->mac.num_rar_entries, @@ -5585,6 +5707,17 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: +#ifdef CONFIG_IXGBE_DCB + adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; +#endif +#ifdef IXGBE_FCOE + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; +#ifdef CONFIG_IXGBE_DCB + adapter->fcoe.up = 0; +#endif /* IXGBE_DCB */ +#endif /* IXGBE_FCOE */ + /* Fall Through */ case ixgbe_mac_X550: #ifdef CONFIG_IXGBE_DCA adapter->flags &= 
~IXGBE_FLAG_DCA_CAPABLE; @@ -5606,42 +5739,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) spin_lock_init(&adapter->fdir_perfect_lock); #ifdef CONFIG_IXGBE_DCB - switch (hw->mac.type) { - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; - break; - default: - adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; - adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; - break; - } - - /* Configure DCB traffic classes */ - for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { - tc = &adapter->dcb_cfg.tc_config[j]; - tc->path[DCB_TX_CONFIG].bwg_id = 0; - tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); - tc->path[DCB_RX_CONFIG].bwg_id = 0; - tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); - tc->dcb_pfc = pfc_disabled; - } - - /* Initialize default user to priority mapping, UPx->TC0 */ - tc = &adapter->dcb_cfg.tc_config[0]; - tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; - tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; - - adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; - adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; - adapter->dcb_cfg.pfc_mode_enable = false; - adapter->dcb_set_bitmap = 0x00; - adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; - memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, - sizeof(adapter->temp_dcb_cfg)); - + ixgbe_init_dcb(adapter); #endif /* default flow control settings */ @@ -5675,6 +5773,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->tx_ring_count = IXGBE_DEFAULT_TXD; adapter->rx_ring_count = IXGBE_DEFAULT_RXD; + /* Cache bit indicating need for crosstalk fix */ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + hw->mac.ops.get_device_caps(hw, &device_caps); + if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) + adapter->need_crosstalk_fix = false; + else + adapter->need_crosstalk_fix = true; + break; + default: + adapter->need_crosstalk_fix = false; + break; + } + /* set default work limits */ adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; @@ -6217,6 +6331,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: pci_wake_from_d3(pdev, !!wufc); break; default: @@ -6352,6 +6467,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: hwstats->pxonrxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); break; @@ -6367,7 +6483,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) if ((hw->mac.type == ixgbe_mac_82599EB) || (hw->mac.type == ixgbe_mac_X540) || (hw->mac.type == ixgbe_mac_X550) || - (hw->mac.type == ixgbe_mac_X550EM_x)) { + (hw->mac.type == ixgbe_mac_X550EM_x) || + (hw->mac.type == ixgbe_mac_x550em_a)) { hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); @@ -6392,6 +6509,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* OS2BMC stats are X540 and later */ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); @@ -6562,7 +6680,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) for (i = 0; i < 
adapter->num_q_vectors; i++) { struct ixgbe_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) - eics |= ((u64)1 << i); + eics |= BIT_ULL(i); } } @@ -6593,6 +6711,18 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) link_up = true; } + /* If Crosstalk fix enabled do the sanity check of making sure + * the SFP+ cage is empty. + */ + if (adapter->need_crosstalk_fix) { + u32 sfp_cage_full; + + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full) + link_up = false; + } + if (adapter->ixgbe_ieee_pfc) pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); @@ -6662,6 +6792,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: case ixgbe_mac_82599EB: { u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); @@ -6938,6 +7069,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; s32 err; + /* If crosstalk fix enabled verify the SFP+ cage is full */ + if (adapter->need_crosstalk_fix) { + u32 sfp_cage_full; + + sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & + IXGBE_ESDP_SDP2; + if (!sfp_cage_full) + return; + } + /* not searching for SFP so there is nothing to do here */ if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) @@ -7122,10 +7263,12 @@ static void ixgbe_service_task(struct work_struct *work) return; } #ifdef CONFIG_IXGBE_VXLAN + rtnl_lock(); if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) { adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED; vxlan_get_rx_port(adapter->netdev); } + rtnl_unlock(); #endif /* CONFIG_IXGBE_VXLAN */ ixgbe_reset_subtask(adapter); ixgbe_phy_interrupt_subtask(adapter); @@ -7148,9 +7291,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, u8 *hdr_len) { + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; - u32 vlan_macip_lens, type_tucmd; - u32 mss_l4len_idx, l4len; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -7163,46 +7315,52 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, if (err < 0) return err; + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_add(lco_csum(skb), + csum_unfold(l4.tcp->check))); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM | IXGBE_TX_FLAGS_IPV4; - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + } else { + ip.v6->payload_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; } - /* compute header lengths */ - l4len = 
tcp_hdrlen(skb); - *hdr_len = skb_transport_offset(skb) + l4len; + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); /* update gso size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* mss_l4len_id: use 0 as index for TSO */ - mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ - vlan_macip_lens = skb_network_header_len(skb); - vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, @@ -7211,103 +7369,61 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, return 1; } +static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb) +{ + unsigned int offset = 0; + + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); + + return offset == skb_checksum_start_offset(skb); +} + static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; - u32 mss_l4len_idx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && - !(first->tx_flags & IXGBE_TX_FLAGS_CC)) +csum_failed: + if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | + IXGBE_TX_FLAGS_CC))) return; - vlan_macip_lens = skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } else { - u8 l4_hdr = 0; - union { - struct iphdr *ipv4; - struct ipv6hdr *ipv6; - u8 *raw; - } network_hdr; - union { - struct tcphdr *tcphdr; - u8 *raw; - } transport_hdr; - __be16 frag_off; - - if (skb->encapsulation) { - network_hdr.raw = skb_inner_network_header(skb); - transport_hdr.raw = skb_inner_transport_header(skb); - vlan_macip_lens = skb_inner_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } else { - network_hdr.raw = skb_network_header(skb); - transport_hdr.raw = skb_transport_header(skb); - vlan_macip_lens = skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT; - } - - /* use first 4 bits to determine IP version */ - switch (network_hdr.ipv4->version) { - case IPVERSION: - vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; - type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - l4_hdr = network_hdr.ipv4->protocol; - break; - case 6: - vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; - l4_hdr = network_hdr.ipv6->nexthdr; - if (likely((transport_hdr.raw - network_hdr.raw) == - sizeof(struct ipv6hdr))) - break; - ipv6_skip_exthdr(skb, network_hdr.raw - skb->data + - sizeof(struct ipv6hdr), - &l4_hdr, &frag_off); - if (unlikely(frag_off)) - l4_hdr = NEXTHDR_FRAGMENT; - break; - default: - break; - } + goto no_csum; + } - switch (l4_hdr) { - case IPPROTO_TCP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_SCTP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; - mss_l4len_idx = sizeof(struct sctphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; - 
break; - case IPPROTO_UDP: - mss_l4len_idx = sizeof(struct udphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (((first->protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + ((first->protocol == htons(ETH_P_IPV6)) && + ixgbe_ipv6_csum_is_sctp(skb))) { + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; break; - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum, version=%d, l4 proto=%x\n", - network_hdr.ipv4->version, l4_hdr); - } - skb_checksum_help(skb); - goto no_csum; } - - /* update TX checksum flag */ - first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + /* fall through */ + default: + skb_checksum_help(skb); + goto csum_failed; } + /* update TX checksum flag */ + first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, - type_tucmd, mss_l4len_idx); + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); } #define IXGBE_SET_FLAG(_input, _flag, _result) \ @@ -8238,6 +8354,134 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, return 0; } +#ifdef CONFIG_NET_CLS_ACT +static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex, + u8 *queue, u64 *action) +{ + unsigned int num_vfs = adapter->num_vfs, vf; + struct net_device *upper; + struct list_head *iter; + + /* redirect to an SRIOV VF */ + for (vf = 0; vf < num_vfs; ++vf) { + upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev); + if (upper->ifindex == ifindex) { + if (adapter->num_rx_pools > 1) + *queue = vf * 2; + else + *queue = vf * adapter->num_rx_queues_per_pool; + + *action = vf + 1; + *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; + return 0; + } + } + + /* redirect to an offloaded macvlan netdev */ + netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { + if (netif_is_macvlan(upper)) { + struct macvlan_dev *dfwd = netdev_priv(upper); + struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; + + if (vadapter && vadapter->netdev->ifindex == ifindex) { + *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx; + *action = *queue; + return 0; + } + } + } + + return -EINVAL; +} + +static int parse_tc_actions(struct ixgbe_adapter *adapter, + struct tcf_exts *exts, u64 *action, u8 *queue) +{ + const struct tc_action *a; + int err; + + if (tc_no_actions(exts)) + return -EINVAL; + + tc_for_each_action(a, exts) { + + /* Drop action */ + if (is_tcf_gact_shot(a)) { + *action = IXGBE_FDIR_DROP_QUEUE; + *queue = IXGBE_FDIR_DROP_QUEUE; + return 0; + } + + /* Redirect to a VF or an offloaded macvlan */ + if (is_tcf_mirred_redirect(a)) { + int ifindex = tcf_mirred_ifindex(a); + + err = handle_redirect_action(adapter, ifindex, queue, + action); + if (err == 0) + return err; + } + } + + return -EINVAL; +} +#else +static int parse_tc_actions(struct ixgbe_adapter *adapter, + struct tcf_exts *exts, u64 *action, u8 *queue) +{ + return -EINVAL; +} +#endif /* CONFIG_NET_CLS_ACT */ + +static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + 
struct tc_cls_u32_offload *cls, + struct ixgbe_mat_field *field_ptr, + struct ixgbe_nexthdr *nexthdr) +{ + int i, j, off; + __be32 val, m; + bool found_entry = false, found_jump_field = false; + + for (i = 0; i < cls->knode.sel->nkeys; i++) { + off = cls->knode.sel->keys[i].off; + val = cls->knode.sel->keys[i].val; + m = cls->knode.sel->keys[i].mask; + + for (j = 0; field_ptr[j].val; j++) { + if (field_ptr[j].off == off) { + field_ptr[j].val(input, mask, val, m); + input->filter.formatted.flow_type |= + field_ptr[j].type; + found_entry = true; + break; + } + } + if (nexthdr) { + if (nexthdr->off == cls->knode.sel->keys[i].off && + nexthdr->val == cls->knode.sel->keys[i].val && + nexthdr->mask == cls->knode.sel->keys[i].mask) + found_jump_field = true; + else + continue; + } + } + + if (nexthdr && !found_jump_field) + return -EINVAL; + + if (!found_entry) + return 0; + + mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + + return 0; +} + static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, __be16 protocol, struct tc_cls_u32_offload *cls) @@ -8245,16 +8489,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, u32 loc = cls->knode.handle & 0xfffff; struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_mat_field *field_ptr; - struct ixgbe_fdir_filter *input; - union ixgbe_atr_input mask; -#ifdef CONFIG_NET_CLS_ACT - const struct tc_action *a; -#endif - int i, err = 0; + struct ixgbe_fdir_filter *input = NULL; + union ixgbe_atr_input *mask = NULL; + struct ixgbe_jump_table *jump = NULL; + int i, err = -EINVAL; u8 queue; u32 uhtid, link_uhtid; - memset(&mask, 0, sizeof(union ixgbe_atr_input)); uhtid = TC_U32_USERHTID(cls->knode.handle); link_uhtid = TC_U32_USERHTID(cls->knode.link_handle); @@ -8266,38 +8507,11 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, * headers when needed. */ if (protocol != htons(ETH_P_IP)) - return -EINVAL; - - if (link_uhtid) { - struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; - - if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) - return -EINVAL; - - if (!test_bit(link_uhtid - 1, &adapter->tables)) - return -EINVAL; - - for (i = 0; nexthdr[i].jump; i++) { - if (nexthdr->o != cls->knode.sel->offoff || - nexthdr->s != cls->knode.sel->offshift || - nexthdr->m != cls->knode.sel->offmask || - /* do not support multiple key jumps its just mad */ - cls->knode.sel->nkeys > 1) - return -EINVAL; - - if (nexthdr->off != cls->knode.sel->keys[0].off || - nexthdr->val != cls->knode.sel->keys[0].val || - nexthdr->mask != cls->knode.sel->keys[0].mask) - return -EINVAL; - - adapter->jump_tables[link_uhtid] = nexthdr->jump; - } - return 0; - } + return err; if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { e_err(drv, "Location out of range\n"); - return -EINVAL; + return err; } /* cls u32 is a graph starting at root node 0x800. The driver tracks @@ -8308,87 +8522,123 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, * this function _should_ be generic try not to hardcode values here. 
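* (uhtid 0x800 is the root table and maps to jump_tables[0]; any other uhtid must already have a jump table allocated by an earlier link request, otherwise the rule is rejected below.)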
*/ if (uhtid == 0x800) { - field_ptr = adapter->jump_tables[0]; + field_ptr = (adapter->jump_tables[0])->mat; } else { if (uhtid >= IXGBE_MAX_LINK_HANDLE) - return -EINVAL; - - field_ptr = adapter->jump_tables[uhtid]; + return err; + if (!adapter->jump_tables[uhtid]) + return err; + field_ptr = (adapter->jump_tables[uhtid])->mat; } if (!field_ptr) - return -EINVAL; + return err; - input = kzalloc(sizeof(*input), GFP_KERNEL); - if (!input) - return -ENOMEM; + /* At this point we know the field_ptr is valid and need to either + * build cls_u32 link or attach filter. Because adding a link to + * a handle that does not exist is invalid and the same for adding + * rules to handles that don't exist. + */ - for (i = 0; i < cls->knode.sel->nkeys; i++) { - int off = cls->knode.sel->keys[i].off; - __be32 val = cls->knode.sel->keys[i].val; - __be32 m = cls->knode.sel->keys[i].mask; - bool found_entry = false; - int j; + if (link_uhtid) { + struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; - for (j = 0; field_ptr[j].val; j++) { - if (field_ptr[j].off == off) { - field_ptr[j].val(input, &mask, val, m); - input->filter.formatted.flow_type |= - field_ptr[j].type; - found_entry = true; + if (link_uhtid >= IXGBE_MAX_LINK_HANDLE) + return err; + + if (!test_bit(link_uhtid - 1, &adapter->tables)) + return err; + + for (i = 0; nexthdr[i].jump; i++) { + if (nexthdr[i].o != cls->knode.sel->offoff || + nexthdr[i].s != cls->knode.sel->offshift || + nexthdr[i].m != cls->knode.sel->offmask) + return err; + + jump = kzalloc(sizeof(*jump), GFP_KERNEL); + if (!jump) + return -ENOMEM; + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) { + err = -ENOMEM; + goto free_jump; + } + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + err = -ENOMEM; + goto free_input; + } + jump->input = input; + jump->mask = mask; + err = ixgbe_clsu32_build_input(input, mask, cls, + field_ptr, &nexthdr[i]); + if (!err) { + jump->mat = nexthdr[i].jump; + adapter->jump_tables[link_uhtid] = jump; break; } } - - if (!found_entry) - goto err_out; + return 0; } - mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | - IXGBE_ATR_L4TYPE_MASK; - - if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) - mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + mask = kzalloc(sizeof(*mask), GFP_KERNEL); + if (!mask) { + err = -ENOMEM; + goto free_input; + } -#ifdef CONFIG_NET_CLS_ACT - if (list_empty(&cls->knode.exts->actions)) + if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) { + if ((adapter->jump_tables[uhtid])->input) + memcpy(input, (adapter->jump_tables[uhtid])->input, + sizeof(*input)); + if ((adapter->jump_tables[uhtid])->mask) + memcpy(mask, (adapter->jump_tables[uhtid])->mask, + sizeof(*mask)); + } + err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL); + if (err) goto err_out; - list_for_each_entry(a, &cls->knode.exts->actions, list) { - if (!is_tcf_gact_shot(a)) - goto err_out; - } -#endif + err = parse_tc_actions(adapter, cls->knode.exts, &input->action, + &queue); + if (err < 0) + goto err_out; - input->action = IXGBE_FDIR_DROP_QUEUE; - queue = IXGBE_FDIR_DROP_QUEUE; input->sw_idx = loc; spin_lock(&adapter->fdir_perfect_lock); if (hlist_empty(&adapter->fdir_filter_list)) { - memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); - err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + memcpy(&adapter->fdir_mask, mask, sizeof(*mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, mask); if (err) goto err_out_w_lock; - } 
else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) { err = -EINVAL; goto err_out_w_lock; } - ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask); err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, input->sw_idx, queue); if (!err) ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); spin_unlock(&adapter->fdir_perfect_lock); + kfree(mask); return err; err_out_w_lock: spin_unlock(&adapter->fdir_perfect_lock); err_out: + kfree(mask); +free_input: kfree(input); - return -EINVAL; +free_jump: + kfree(jump); + return err; } static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, @@ -8515,11 +8765,6 @@ static int ixgbe_set_features(struct net_device *netdev, adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; } - if (features & NETIF_F_HW_VLAN_CTAG_RX) - ixgbe_vlan_strip_enable(adapter); - else - ixgbe_vlan_strip_disable(adapter); - if (changed & NETIF_F_RXALL) need_reset = true; @@ -8536,6 +8781,9 @@ static int ixgbe_set_features(struct net_device *netdev, if (need_reset) ixgbe_do_reset(netdev); + else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER)) + ixgbe_set_rx_mode(netdev); return 0; } @@ -8833,17 +9081,36 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) kfree(fwd_adapter); } -#define IXGBE_MAX_TUNNEL_HDR_LEN 80 +#define IXGBE_MAX_MAC_HDR_LEN 127 +#define IXGBE_MAX_NETWORK_HDR_LEN 511 + static netdev_features_t ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { - if (!skb->encapsulation) - return features; - - if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > - IXGBE_MAX_TUNNEL_HDR_LEN)) - return features & ~NETIF_F_CSUM_MASK; + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. 
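+ * IPv6 TSO (NETIF_F_TSO6) is unaffected, since IPv6 headers carry no + * IP ID field to mangle.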
+ */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; return features; } @@ -8858,6 +9125,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_mac_address = ixgbe_set_mac, .ndo_change_mtu = ixgbe_change_mtu, .ndo_tx_timeout = ixgbe_tx_timeout, + .ndo_set_tx_maxrate = ixgbe_tx_maxrate, .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, .ndo_do_ioctl = ixgbe_ioctl, @@ -8943,7 +9211,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) /** * ixgbe_wol_supported - Check whether device supports WoL - * @hw: hw specific details + * @adapter: the adapter private structure * @device_id: the device ID * @subdev_id: the subsystem device ID * @@ -8951,19 +9219,33 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) * which devices have WoL support * **/ -int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, - u16 subdevice_id) +bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + u16 subdevice_id) { struct ixgbe_hw *hw = &adapter->hw; u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; - int is_wol_supported = 0; + /* WOL not supported on 82598 */ + if (hw->mac.type == ixgbe_mac_82598EB) + return false; + + /* check eeprom to see if WOL is enabled for X540 and newer */ + if (hw->mac.type >= ixgbe_mac_X540) { + if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || + ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && + (hw->bus.func == 0))) + return true; + } + + /* WOL is determined based on device IDs for 82599 MACs */ switch (device_id) { case IXGBE_DEV_ID_82599_SFP: /* Only these subdevices could support WOL */ switch (subdevice_id) { - case IXGBE_SUBDEV_ID_82599_SFP_WOL0: case IXGBE_SUBDEV_ID_82599_560FLR: + case IXGBE_SUBDEV_ID_82599_LOM_SNAP6: + case IXGBE_SUBDEV_ID_82599_SFP_WOL0: + case IXGBE_SUBDEV_ID_82599_SFP_2OCP: /* only support first port */ if (hw->bus.func != 0) break; @@ -8971,66 +9253,31 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, case IXGBE_SUBDEV_ID_82599_SFP: case IXGBE_SUBDEV_ID_82599_RNDC: case IXGBE_SUBDEV_ID_82599_ECNA_DP: - case IXGBE_SUBDEV_ID_82599_LOM_SFP: - is_wol_supported = 1; - break; + case IXGBE_SUBDEV_ID_82599_SFP_1OCP: + case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1: + case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2: + return true; } break; case IXGBE_DEV_ID_82599EN_SFP: - /* Only this subdevice supports WOL */ + /* Only these subdevices support WOL */ switch (subdevice_id) { case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: - is_wol_supported = 1; - break; + return true; } break; case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: /* All except this subdevice support WOL */ if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) - is_wol_supported = 1; + return true; break; case IXGBE_DEV_ID_82599_KX4: - is_wol_supported = 1; - break; - case IXGBE_DEV_ID_X540T: - case IXGBE_DEV_ID_X540T1: - case IXGBE_DEV_ID_X550T: - case IXGBE_DEV_ID_X550EM_X_KX4: - case IXGBE_DEV_ID_X550EM_X_KR: - case IXGBE_DEV_ID_X550EM_X_10G_T: - /* check eeprom to see if enabled wol */ - if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || - ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && - (hw->bus.func == 0))) { - is_wol_supported = 1; - } + return true; + default: break; } - return is_wol_supported; -} - -/** - * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM - * @adapter: Pointer to adapter struct - */ -static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter) -{ -#ifdef 
CONFIG_OF - struct device_node *dp = pci_device_to_OF_node(adapter->pdev); - struct ixgbe_hw *hw = &adapter->hw; - const unsigned char *addr; - - addr = of_get_mac_address(dp); - if (addr) { - ether_addr_copy(hw->mac.perm_addr, addr); - return; - } -#endif /* CONFIG_OF */ - -#ifdef CONFIG_SPARC - ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr); -#endif /* CONFIG_SPARC */ + return false; } /** @@ -9136,23 +9383,23 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); /* Setup hw api */ - memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); + hw->mac.ops = *ii->mac_ops; hw->mac.type = ii->mac; hw->mvals = ii->mvals; /* EEPROM */ - memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); + hw->eeprom.ops = *ii->eeprom_ops; eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (ixgbe_removed(hw->hw_addr)) { err = -EIO; goto err_ioremap; } /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ - if (!(eec & (1 << 8))) + if (!(eec & BIT(8))) hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; /* PHY */ - memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); + hw->phy.ops = *ii->phy_ops; hw->phy.sfp_type = ixgbe_sfp_type_unknown; /* ixgbe_identify_phy_generic will set prtad and mmds properly */ hw->phy.mdio.prtad = MDIO_PRTAD_NONE; @@ -9169,12 +9416,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; + /* Make sure the SWFW semaphore is in a valid state */ + if (hw->mac.ops.init_swfw_sync) + hw->mac.ops.init_swfw_sync(hw); + /* Make it possible for the adapter to be woken up via WOL */ switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); break; default: @@ -9215,54 +9467,62 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto skip_sriov; /* Mailbox */ ixgbe_init_mbx_params_pf(hw); - memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); + hw->mbx.ops = ii->mbx_ops; pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); ixgbe_enable_sriov(adapter); skip_sriov: #endif netdev->features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | - NETIF_F_RXCSUM; + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM; - netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD; +#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPIP | \ + NETIF_F_GSO_SIT | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: + netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; + netdev->features |= NETIF_F_GSO_PARTIAL | + IXGBE_GSO_PARTIAL_FEATURES; + + if (hw->mac.type >= ixgbe_mac_82599EB) netdev->features |= NETIF_F_SCTP_CRC; - netdev->hw_features |= NETIF_F_SCTP_CRC | - NETIF_F_NTUPLE | + + /* copy netdev features into list of user selectable features */ + netdev->hw_features |= netdev->features | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXALL | + NETIF_F_HW_L2FW_DOFFLOAD; + + if (hw->mac.type >= ixgbe_mac_82599EB) + netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; - break; - default: - break; - } - netdev->hw_features |= NETIF_F_RXALL; - 
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_TSO6; - netdev->vlan_features |= NETIF_F_IP_CSUM; - netdev->vlan_features |= NETIF_F_IPV6_CSUM; - netdev->vlan_features |= NETIF_F_SG; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->hw_enc_features |= netdev->vlan_features; + netdev->mpls_features |= NETIF_F_HW_CSUM; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; #ifdef CONFIG_IXGBE_DCB - netdev->dcbnl_ops = &dcbnl_ops; + if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) + netdev->dcbnl_ops = &dcbnl_ops; #endif #ifdef IXGBE_FCOE @@ -9287,10 +9547,6 @@ skip_sriov: NETIF_F_FCOE_MTU; } #endif /* IXGBE_FCOE */ - if (pci_using_dac) { - netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_HIGHDMA; - } if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) netdev->hw_features |= NETIF_F_LRO; @@ -9304,7 +9560,8 @@ skip_sriov: goto err_sw_init; } - ixgbe_get_platform_mac_addr(adapter); + eth_platform_get_mac_address(&adapter->pdev->dev, + adapter->hw.mac.perm_addr); memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); @@ -9455,6 +9712,7 @@ err_sw_init: ixgbe_disable_sriov(adapter); adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; iounmap(adapter->io_addr); + kfree(adapter->jump_tables[0]); kfree(adapter->mac_table); err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); @@ -9483,6 +9741,7 @@ static void ixgbe_remove(struct pci_dev *pdev) struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev; bool disable_dev; + int i; /* if !adapter then we already cleaned up in probe */ if (!adapter) @@ -9532,6 +9791,14 @@ static void ixgbe_remove(struct pci_dev *pdev) e_dev_info("complete\n"); + for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) { + if (adapter->jump_tables[i]) { + kfree(adapter->jump_tables[i]->input); + kfree(adapter->jump_tables[i]->mask); + } + kfree(adapter->jump_tables[i]); + } + kfree(adapter->mac_table); disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); @@ -9612,6 +9879,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, case ixgbe_mac_X550EM_x: device_id = IXGBE_DEV_ID_X550EM_X_VF; break; + case ixgbe_mac_x550em_a: + device_id = IXGBE_DEV_ID_X550EM_A_VF; + break; default: device_id = 0; break; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c index 9993a471d668..a0cb84381cd0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -48,10 +48,10 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) if (size > mbx->size) size = mbx->size; - if (!mbx->ops.read) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.read(hw, msg, size, mbx_id); + return mbx->ops->read(hw, msg, size, mbx_id); } /** @@ -70,10 +70,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) if (size > mbx->size) return IXGBE_ERR_MBX; - if (!mbx->ops.write) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.write(hw, msg, size, mbx_id); + return mbx->ops->write(hw, msg, size, mbx_id); } /** @@ -87,10 +87,10 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - if (!mbx->ops.check_for_msg) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.check_for_msg(hw, mbx_id); + return mbx->ops->check_for_msg(hw, mbx_id); } /** @@ -104,10 +104,10 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - if (!mbx->ops.check_for_ack) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.check_for_ack(hw, mbx_id); + return mbx->ops->check_for_ack(hw, mbx_id); } /** @@ -121,10 +121,10 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) { struct ixgbe_mbx_info *mbx = &hw->mbx; - if (!mbx->ops.check_for_rst) + if (!mbx->ops) return IXGBE_ERR_MBX; - return mbx->ops.check_for_rst(hw, mbx_id); + return mbx->ops->check_for_rst(hw, mbx_id); } /** @@ -139,10 +139,10 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; - if (!countdown || !mbx->ops.check_for_msg) + if (!countdown || !mbx->ops) return IXGBE_ERR_MBX; - while (mbx->ops.check_for_msg(hw, mbx_id)) { + while (mbx->ops->check_for_msg(hw, mbx_id)) { countdown--; if (!countdown) return IXGBE_ERR_MBX; @@ -164,10 +164,10 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) struct ixgbe_mbx_info *mbx = &hw->mbx; int countdown = mbx->timeout; - if (!countdown || !mbx->ops.check_for_ack) + if (!countdown || !mbx->ops) return IXGBE_ERR_MBX; - while (mbx->ops.check_for_ack(hw, mbx_id)) { + while (mbx->ops->check_for_ack(hw, mbx_id)) { countdown--; if (!countdown) return IXGBE_ERR_MBX; @@ -193,7 +193,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, struct ixgbe_mbx_info *mbx = &hw->mbx; s32 ret_val; - if (!mbx->ops.read) + if (!mbx->ops) return IXGBE_ERR_MBX; ret_val = ixgbe_poll_for_msg(hw, mbx_id); @@ -201,7 +201,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, return ret_val; /* if ack received read message */ - return mbx->ops.read(hw, msg, size, mbx_id); + return mbx->ops->read(hw, msg, size, mbx_id); } /** @@ -221,11 +221,11 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, s32 ret_val; /* exit if either we can't write or there isn't a defined timeout */ - if (!mbx->ops.write || !mbx->timeout) + if (!mbx->ops || !mbx->timeout) return IXGBE_ERR_MBX; /* send msg */ - ret_val = mbx->ops.write(hw, msg, size, mbx_id); + ret_val = mbx->ops->write(hw, msg, size, mbx_id); if (ret_val) return ret_val; @@ -307,14 +307,15 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); break; 
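/* X540 and newer MACs, including x550em_a, read VF reset status from VFLREC */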
default: break; } - if (vflre & (1 << vf_shift)) { - IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); + if (vflre & BIT(vf_shift)) { + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift)); hw->mbx.stats.rsts++; return 0; } @@ -430,6 +431,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) if (hw->mac.type != ixgbe_mac_82599EB && hw->mac.type != ixgbe_mac_X550 && hw->mac.type != ixgbe_mac_X550EM_x && + hw->mac.type != ixgbe_mac_x550em_a && hw->mac.type != ixgbe_mac_X540) return; @@ -446,7 +448,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) } #endif /* CONFIG_PCI_IOV */ -struct ixgbe_mbx_operations mbx_ops_generic = { +const struct ixgbe_mbx_operations mbx_ops_generic = { .read = ixgbe_read_mbx_pf, .write = ixgbe_write_mbx_pf, .read_posted = ixgbe_read_posted_mbx, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index 8daa95f74548..01c2667c0f92 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2013 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -123,6 +123,6 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); #endif /* CONFIG_PCI_IOV */ -extern struct ixgbe_mbx_operations mbx_ops_generic; +extern const struct ixgbe_mbx_operations mbx_ops_generic; #endif /* _IXGBE_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index 74c53ad9d268..a8bed3d887f7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -38,6 +38,12 @@ struct ixgbe_mat_field { unsigned int type; }; +struct ixgbe_jump_table { + struct ixgbe_mat_field *mat; + struct ixgbe_fdir_filter *input; + union ixgbe_atr_input *mask; +}; + static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, u32 val, u32 m) @@ -82,6 +88,12 @@ static struct ixgbe_mat_field ixgbe_tcp_fields[] = { { .val = NULL } /* terminal node */ }; +static struct ixgbe_mat_field ixgbe_udp_fields[] = { + {.off = 0, .val = ixgbe_mat_prgm_ports, + .type = IXGBE_ATR_FLOW_TYPE_UDPV4}, + { .val = NULL } /* terminal node */ +}; + struct ixgbe_nexthdr { /* offset, shift, and mask of position to next header */ unsigned int o; @@ -98,6 +110,8 @@ struct ixgbe_nexthdr { static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = { { .o = 0, .s = 6, .m = 0xf, .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields}, + { .o = 0, .s = 6, .m = 0xf, + .off = 8, .val = 0x1100, .mask = 0xff00, .jump = ixgbe_udp_fields}, { .jump = NULL } /* terminal node */ }; #endif /* _IXGBE_MODEL_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 5abd66c84d00..cc735ec3e045 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -81,7 +81,11 @@ #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 #define IXGBE_CS4227 0xBE /* CS4227 address */ +#define IXGBE_CS4227_GLOBAL_ID_LSB 0 +#define IXGBE_CS4227_GLOBAL_ID_MSB 1 #define IXGBE_CS4227_SCRATCH 2 +#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */ +#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */ #define IXGBE_CS4227_RESET_PENDING 0x1357 #define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 #define IXGBE_CS4227_RETRIES 15 @@ -103,7 +107,7 @@ #define IXGBE_PE 0xE0 /* Port expander addr */ #define IXGBE_PE_OUTPUT 1 /* Output reg offset */ #define IXGBE_PE_CONFIG 3 /* Config reg offset */ -#define IXGBE_PE_BIT1 (1 << 1) +#define IXGBE_PE_BIT1 BIT(1) /* Flow control defines */ #define IXGBE_TAF_SYM_PAUSE 0x400 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index ef1504d41890..e5431bfe3339 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -333,6 +333,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, */ case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* Upper 32 bits represent billions of cycles, lower 32 bits * represent cycles. However, we use timespec64_to_ns for the * correct math even though the units haven't been corrected @@ -395,7 +396,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) if (incval > 0x00FFFFFFULL) e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | + BIT(IXGBE_INCPER_SHIFT_82599) | ((u32)incval & 0x00FFFFFFUL)); break; default: @@ -921,6 +922,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, switch (hw->mac.type) { case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: /* enable timestamping all packets only if at least some * packets were requested. 
Otherwise, play nice and disable * timestamping @@ -1083,6 +1085,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) cc.shift = 2; } /* fallthrough */ + case ixgbe_mac_x550em_a: case ixgbe_mac_X550: cc.read = ixgbe_ptp_read_X550; @@ -1111,7 +1114,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) incval >>= IXGBE_INCVAL_SHIFT_82599; cc.shift -= IXGBE_INCVAL_SHIFT_82599; IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, - (1 << IXGBE_INCPER_SHIFT_82599) | incval); + BIT(IXGBE_INCPER_SHIFT_82599) | incval); break; default: /* other devices aren't supported */ @@ -1223,6 +1226,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) break; case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); adapter->ptp_caps.owner = THIS_MODULE; adapter->ptp_caps.max_adj = 30000000; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 8025a3f93598..c5caacdd193d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -406,7 +406,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F; mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= (1 << vector_bit); + mta_reg |= BIT(vector_bit); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); } vmolr |= IXGBE_VMOLR_ROMPE; @@ -433,7 +433,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F; vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F; mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg)); - mta_reg |= (1 << vector_bit); + mta_reg |= BIT(vector_bit); IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); } @@ -536,9 +536,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) /* enable or disable receive depending on error */ vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); if (err) - vfre &= ~(1 << vf_shift); + vfre &= ~BIT(vf_shift); else - vfre |= 1 << vf_shift; + vfre |= BIT(vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre); if (err) { @@ -589,47 +589,47 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) { struct ixgbe_hw *hw = &adapter->hw; - u32 i; + u32 vlvfb_mask, pool_mask, i; + + /* create mask for VF and other pools */ + pool_mask = ~BIT(VMDQ_P(0) % 32); + vlvfb_mask = BIT(vf % 32); /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ for (i = IXGBE_VLVF_ENTRIES; i--;) { u32 bits[2], vlvfb, vid, vfta, vlvf; u32 word = i * 2 + vf / 32; - u32 mask = 1 << (vf % 32); + u32 mask; vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); /* if our bit isn't set we can skip it */ - if (!(vlvfb & mask)) + if (!(vlvfb & vlvfb_mask)) continue; /* clear our bit from vlvfb */ - vlvfb ^= mask; + vlvfb ^= vlvfb_mask; /* create 64b mask to check to see if we should clear VLVF */ bits[word % 2] = vlvfb; bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); - /* if promisc is enabled, PF will be present, leave VFTA */ - if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) { - bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32)); - - if (bits[0] || bits[1]) - goto update_vlvfb; - goto update_vlvf; - } - /* if other pools are present, just remove ourselves */ - if (bits[0] || bits[1]) + if (bits[(VMDQ_P(0) / 32) ^ 1] || + (bits[VMDQ_P(0) 
/ 32] & pool_mask)) goto update_vlvfb; + /* if PF is present, leave VFTA */ + if (bits[0] || bits[1]) + goto update_vlvf; + /* if we cannot determine VLAN just remove ourselves */ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); if (!vlvf) goto update_vlvfb; vid = vlvf & VLAN_VID_MASK; - mask = 1 << (vid % 32); + mask = BIT(vid % 32); /* clear bit from VFTA */ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); @@ -638,6 +638,9 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) update_vlvf: /* clear POOL selection enable */ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); + + if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) + vlvfb = 0; update_vlvfb: /* clear pool bits */ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); @@ -810,7 +813,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* enable transmit for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset)); - reg |= 1 << vf_shift; + reg |= BIT(vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); /* force drop enable for all VF Rx queues */ @@ -818,7 +821,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* enable receive for vf */ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); - reg |= 1 << vf_shift; + reg |= BIT(vf_shift); /* * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs. * For more info take a look at ixgbe_set_vf_lpe @@ -834,7 +837,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) #endif /* CONFIG_FCOE */ if (pf_max_frame > ETH_FRAME_LEN) - reg &= ~(1 << vf_shift); + reg &= ~BIT(vf_shift); } IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); @@ -843,7 +846,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) /* Enable counting of spoofed packets in the SSVPC register */ reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); - reg |= (1 << vf_shift); + reg |= BIT(vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); /* @@ -887,7 +890,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, return -1; } - if (adapter->vfinfo[vf].pf_set_mac && + if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { e_warn(drv, "VF %d attempted to override administratively set MAC address\n" @@ -905,8 +908,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); u8 tcs = netdev_get_num_tc(adapter->netdev); - struct ixgbe_hw *hw = &adapter->hw; - int err; if (adapter->vfinfo[vf].pf_vlan || tcs) { e_warn(drv, @@ -920,19 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, if (!vid && !add) return 0; - err = ixgbe_set_vf_vlan(adapter, add, vid, vf); - if (err) - return err; - - if (adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - - if (add) - adapter->vfinfo[vf].vlan_count++; - else if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; - - return 0; + return ixgbe_set_vf_vlan(adapter, add, vid, vf); } static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, @@ -961,8 +950,11 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, * If the VF is allowed to set MAC filters then turn off * anti-spoofing to avoid false positives. 
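* Note that only MAC anti-spoofing is relaxed here; the VLAN and * Ethertype anti-spoofing configuration is left untouched.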
*/ - if (adapter->vfinfo[vf].spoofchk_enabled) - ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); + if (adapter->vfinfo[vf].spoofchk_enabled) { + struct ixgbe_hw *hw = &adapter->hw; + + hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); + } } err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); @@ -1318,9 +1310,6 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf, ixgbe_set_vmvir(adapter, vlan, qos, vf); ixgbe_set_vmolr(hw, vf, false); - if (adapter->vfinfo[vf].spoofchk_enabled) - hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); - adapter->vfinfo[vf].vlan_count++; /* enable hide vlan on X550 */ if (hw->mac.type >= ixgbe_mac_X550) @@ -1353,9 +1342,6 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) ixgbe_set_vf_vlan(adapter, true, 0, vf); ixgbe_clear_vmvir(adapter, vf); ixgbe_set_vmolr(hw, vf, true); - hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); - if (adapter->vfinfo[vf].vlan_count) - adapter->vfinfo[vf].vlan_count--; /* disable hide VLAN on X550 */ if (hw->mac.type >= ixgbe_mac_X550) @@ -1395,7 +1381,7 @@ out: return err; } -static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) +int ixgbe_link_mbps(struct ixgbe_adapter *adapter) { switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: @@ -1522,27 +1508,34 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - int vf_target_reg = vf >> 3; - int vf_target_shift = vf % 8; struct ixgbe_hw *hw = &adapter->hw; - u32 regval; if (vf >= adapter->num_vfs) return -EINVAL; adapter->vfinfo[vf].spoofchk_enabled = setting; - regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); - regval &= ~(1 << vf_target_shift); - regval |= (setting << vf_target_shift); - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval); - - if (adapter->vfinfo[vf].vlan_count) { - vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT; - regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); - regval &= ~(1 << vf_target_shift); - regval |= (setting << vf_target_shift); - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval); + /* configure MAC spoofing */ + hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf); + + /* configure VLAN spoofing */ + hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); + + /* Ensure LLDP and FC are set for Ethertype Antispoofing if we will be + * calling set_ethertype_anti_spoofing for each VF in the loop below + */ + if (hw->mac.ops.set_ethertype_anti_spoofing) { + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + IXGBE_ETH_P_LLDP)); + + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), + (IXGBE_ETQF_FILTER_EN | + IXGBE_ETQF_TX_ANTISPOOF | + ETH_P_PAUSE)); + + hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); } return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index dad925706f4c..47e65e2f886a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -44,6 +44,7 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, u8 qos); +int ixgbe_link_mbps(struct ixgbe_adapter *adapter); int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int 
max_tx_rate); int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index bf7367a08716..da3d8358fee0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2015 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -59,8 +59,12 @@ #define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 #define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B +#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 +#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D +#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976 +#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE #define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 -#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D #define IXGBE_DEV_ID_82599EN_SFP 0x1557 @@ -75,21 +79,25 @@ #define IXGBE_DEV_ID_X540T1 0x1560 #define IXGBE_DEV_ID_X550T 0x1563 +#define IXGBE_DEV_ID_X550T1 0x15D1 #define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA #define IXGBE_DEV_ID_X550EM_X_KR 0x15AB #define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC #define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD #define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE -#define IXGBE_DEV_ID_X550_VF_HV 0x1564 -#define IXGBE_DEV_ID_X550_VF 0x1565 -#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 -#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 +#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 +#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 +#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 +#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 +#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 +#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE /* VF Device IDs */ -#define IXGBE_DEV_ID_82599_VF 0x10ED -#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 #define IXGBE_CAT(r, m) IXGBE_##r##_##m @@ -128,7 +136,7 @@ #define IXGBE_FLA_X540 IXGBE_FLA_8259X #define IXGBE_FLA_X550 IXGBE_FLA_8259X #define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X -#define IXGBE_FLA_X550EM_a 0x15F6C +#define IXGBE_FLA_X550EM_a 0x15F68 #define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA) #define IXGBE_EEMNGCTL 0x10110 #define IXGBE_EEMNGDATA 0x10114 @@ -143,13 +151,6 @@ #define IXGBE_GRC_X550EM_a 0x15F64 #define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC) -#define IXGBE_SRAMREL_8259X 0x10210 -#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X -#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X -#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X -#define IXGBE_SRAMREL_X550EM_a 0x15F6C -#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL) - /* General Receive Control */ #define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ #define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ @@ -375,6 +376,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) #define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) #define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ +#define 
IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */ #define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ #define IXGBE_RXFECCERR0 0x051B8 #define IXGBE_LLITHRESH 0x0EC90 @@ -446,6 +449,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ #define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ #define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ +#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ #define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ #define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ @@ -543,6 +548,7 @@ struct ixgbe_thermal_sensor_data { /* DCB registers */ #define MAX_TRAFFIC_CLASS 8 #define X540_TRAFFIC_CLASS 4 +#define DEF_TRAFFIC_CLASS 1 #define IXGBE_RMCS 0x03D00 #define IXGBE_DPMCS 0x07F40 #define IXGBE_PDPMCS 0x0CD00 @@ -554,7 +560,6 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ - /* Security Control Registers */ #define IXGBE_SECTXCTRL 0x08800 #define IXGBE_SECTXSTAT 0x08804 @@ -693,16 +698,16 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ #define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ #define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4)) -#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ -#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ -#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_VALID BIT(0) /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE (3u << 3) /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX BIT(7) /* 0: Initiator, 1: Target */ #define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ #define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ #define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 #define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 #define IXGBE_FCBUFF_OFFSET_SHIFT 16 -#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ -#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ +#define IXGBE_FCDMARW_WE BIT(14) /* Write enable */ +#define IXGBE_FCDMARW_RE BIT(15) /* Read enable */ #define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ #define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ #define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 @@ -719,23 +724,23 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FCFLT 0x05108 /* FC FLT Context */ #define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ #define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ -#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ -#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ +#define IXGBE_FCFLT_VALID BIT(0) /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST BIT(1) /* Filter First */ #define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ #define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ -#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ -#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ -#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ +#define IXGBE_FCFLTRW_RVALDT BIT(13) /* Fast Re-Validation */ +#define IXGBE_FCFLTRW_WE BIT(14) /* Write Enable */ +#define IXGBE_FCFLTRW_RE BIT(15) /* Read Enable */ /* FCoE Receive Control */ #define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ -#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ -#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad 
Frames */ -#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ -#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ -#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ -#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */ -#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ -#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOELLI BIT(0) /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD BIT(1) /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH BIT(2) /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH BIT(3) /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH BIT(4) /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5) /* EN 1st Seq. Header */ +#define IXGBE_FCRXCTRL_ICRC BIT(6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO BIT(7) /* FC CRC Byte Ordering */ #define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ #define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 /* FCoE Redirection */ @@ -1056,15 +1061,9 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) #define IXGBE_TDPROBE 0x07F20 #define IXGBE_TXBUFCTRL 0x0C600 -#define IXGBE_TXBUFDATA0 0x0C610 -#define IXGBE_TXBUFDATA1 0x0C614 -#define IXGBE_TXBUFDATA2 0x0C618 -#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */ #define IXGBE_RXBUFCTRL 0x03600 -#define IXGBE_RXBUFDATA0 0x03610 -#define IXGBE_RXBUFDATA1 0x03614 -#define IXGBE_RXBUFDATA2 0x03618 -#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */ #define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ #define IXGBE_RFVAL 0x050A4 #define IXGBE_MDFTC1 0x042B8 @@ -1127,6 +1126,7 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_XPCSS 0x04290 #define IXGBE_MFLCN 0x04294 #define IXGBE_SERDESC 0x04298 +#define IXGBE_MAC_SGMII_BUSY 0x04298 #define IXGBE_MACS 0x0429C #define IXGBE_AUTOC 0x042A0 #define IXGBE_LINKS 0x042A4 @@ -1203,6 +1203,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ #define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ #define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ +#define IXGBE_RDRXCTL_MBINTEN 0x10000000 +#define IXGBE_RDRXCTL_MDP_EN 0x20000000 /* RQTC Bit Masks and Shifts */ #define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) @@ -1249,20 +1251,20 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ #define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ #define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ -#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ -#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ -#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ -#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ -#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */ +#define 
IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */ #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ #define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ #define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ -#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ -#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ /* MSCA Bit Masks */ @@ -1309,6 +1311,7 @@ struct ixgbe_thermal_sensor_data { /* MDIO definitions */ +#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0 #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 #define IXGBE_MDIO_PCS_DEV_TYPE 0x3 #define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 @@ -1740,7 +1743,7 @@ enum { #define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ #define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ #define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ -#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ +#define IXGBE_ETQF_POOL_ENABLE BIT(26) /* bit 26 */ #define IXGBE_ETQF_POOL_SHIFT 20 #define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ @@ -1866,20 +1869,20 @@ enum { #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 #define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 -#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) -#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_XAUI (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) #define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 #define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 -#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) -#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_KR (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) #define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 #define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 @@ -1957,7 +1960,9 @@ enum { #define IXGBE_GSSR_PHY1_SM 0x0004 #define IXGBE_GSSR_MAC_CSR_SM 0x0008 
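The (1 << n) to BIT(n) and 0x1/0x2/0x3 to 1u/2u/3u conversions that dominate these hunks are not cosmetic: in C, shifting the signed constant 1 into bit 31 is undefined behavior, and the negative value it produces in practice sign-extends when widened to a 64-bit type. BIT(n) expands to (1UL << (n)) in include/linux/bitops.h, so bit-31 register masks stay well defined and zero-extend. A minimal user-space sketch of the difference (the printed values are what common LP64 compilers produce; the signed shift itself is formally undefined):

#include <stdio.h>

#define BIT(nr) (1UL << (nr))	/* as in include/linux/bitops.h */

int main(void)
{
	/* Old style: signed shift into the sign bit, then implicit
	 * widening sign-extends the result.
	 */
	unsigned long long old_mask = 1 << 31;	/* 0xffffffff80000000 */
	/* New style: unsigned shift, zero-extends as expected. */
	unsigned long long new_mask = BIT(31);	/* 0x0000000080000000 */

	printf("%llx %llx\n", old_mask, new_mask);
	return 0;
}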
#define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200 #define IXGBE_GSSR_SW_MNG_SM 0x0400 +#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */ #define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */ #define IXGBE_GSSR_I2C_MASK 0x1800 #define IXGBE_GSSR_NVM_PHY_MASK 0xF @@ -1997,6 +2002,9 @@ enum { #define IXGBE_PBANUM_PTR_GUARD 0xFAFA #define IXGBE_EEPROM_CHECKSUM 0x3F #define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_EEPROM_CTRL_4 0x45 +#define IXGBE_EE_CTRL_4_INST_ID 0x10 +#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4 #define IXGBE_PCIE_ANALOG_PTR 0x03 #define IXGBE_ATLAS0_CONFIG_PTR 0x04 #define IXGBE_PHY_PTR 0x04 @@ -2111,6 +2119,7 @@ enum { #define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 #define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 #define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR BIT(7) #define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 #define IXGBE_FW_LESM_STATE_1 0x1 #define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ @@ -2530,6 +2539,10 @@ enum ixgbe_fdir_pballoc_type { #define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 #define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 #define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000 +#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 +#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */ +#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */ #define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 #define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 #define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 @@ -2620,6 +2633,20 @@ enum ixgbe_fdir_pballoc_type { #define FW_MAX_READ_BUFFER_SIZE 1024 #define FW_DISABLE_RXEN_CMD 0xDE #define FW_DISABLE_RXEN_LEN 0x1 +#define FW_PHY_MGMT_REQ_CMD 0x20 +#define FW_PHY_TOKEN_REQ_CMD 0x0A +#define FW_PHY_TOKEN_REQ_LEN 2 +#define FW_PHY_TOKEN_REQ 0 +#define FW_PHY_TOKEN_REL 1 +#define FW_PHY_TOKEN_OK 1 +#define FW_PHY_TOKEN_RETRY 0x80 +#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */ +#define FW_PHY_TOKEN_WAIT 5 /* seconds */ +#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY) +#define FW_INT_PHY_REQ_CMD 0xB +#define FW_INT_PHY_REQ_LEN 10 +#define FW_INT_PHY_REQ_READ 0 +#define FW_INT_PHY_REQ_WRITE 1 /* Host Interface Command Structures */ struct ixgbe_hic_hdr { @@ -2688,6 +2715,28 @@ struct ixgbe_hic_disable_rxen { u16 pad3; }; +struct ixgbe_hic_phy_token_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + u16 pad; +}; + +struct ixgbe_hic_internal_phy_req { + struct ixgbe_hic_hdr hdr; + u8 port_number; + u8 command_type; + __be16 address; + u16 rsv1; + __be32 write_data; + u16 pad; +} __packed; + +struct ixgbe_hic_internal_phy_resp { + struct ixgbe_hic_hdr hdr; + __be32 read_data; +}; + /* Transmit Descriptor - Advanced */ union ixgbe_adv_tx_desc { struct { @@ -2786,15 +2835,15 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ #define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ -#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ -#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ -#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ -#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */ -#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */ -#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ -#define 
IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ -#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ -#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_FCOEF_SOF (BIT(2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC (BIT(3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE (BIT(4) << 10) /* Orientation: End */ +#define IXGBE_ADVTXD_FCOEF_ORIS (BIT(5) << 10) /* Orientation: Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0u << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (1u << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (2u << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (3u << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */ #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ @@ -2948,7 +2997,6 @@ union ixgbe_atr_hash_dword { IXGBE_CAT(EEC, m), \ IXGBE_CAT(FLA, m), \ IXGBE_CAT(GRC, m), \ - IXGBE_CAT(SRAMREL, m), \ IXGBE_CAT(FACTPS, m), \ IXGBE_CAT(SWSM, m), \ IXGBE_CAT(SWFW_SYNC, m), \ @@ -2989,6 +3037,7 @@ enum ixgbe_mac_type { ixgbe_mac_X540, ixgbe_mac_X550, ixgbe_mac_X550EM_x, + ixgbe_mac_x550em_a, ixgbe_num_macs }; @@ -3017,6 +3066,7 @@ enum ixgbe_phy_type { ixgbe_phy_qsfp_intel, ixgbe_phy_qsfp_unknown, ixgbe_phy_sfp_unsupported, + ixgbe_phy_sgmii, ixgbe_phy_generic }; @@ -3130,8 +3180,9 @@ struct ixgbe_bus_info { enum ixgbe_bus_width width; enum ixgbe_bus_type type; - u16 func; - u16 lan_id; + u8 func; + u8 lan_id; + u8 instance_id; }; /* Flow control parameters */ @@ -3266,6 +3317,7 @@ struct ixgbe_mac_operations { s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); void (*release_swfw_sync)(struct ixgbe_hw *, u32); + void (*init_swfw_sync)(struct ixgbe_hw *); s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); @@ -3308,6 +3360,7 @@ struct ixgbe_mac_operations { /* Flow Control */ s32 (*fc_enable)(struct ixgbe_hw *); + s32 (*setup_fc)(struct ixgbe_hw *); /* Manageability interface */ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); @@ -3323,6 +3376,8 @@ struct ixgbe_mac_operations { s32 (*dmac_config)(struct ixgbe_hw *hw); s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); + s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); + s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); }; struct ixgbe_phy_operations { @@ -3442,7 +3497,7 @@ struct ixgbe_mbx_stats { }; struct ixgbe_mbx_info { - struct ixgbe_mbx_operations ops; + const struct ixgbe_mbx_operations *ops; struct ixgbe_mbx_stats stats; u32 timeout; u32 usec_delay; @@ -3475,10 +3530,10 @@ struct ixgbe_hw { struct ixgbe_info { enum ixgbe_mac_type mac; s32 (*get_invariants)(struct ixgbe_hw *); - struct ixgbe_mac_operations *mac_ops; - struct ixgbe_eeprom_operations *eeprom_ops; - struct ixgbe_phy_operations *phy_ops; - struct ixgbe_mbx_operations *mbx_ops; + const struct ixgbe_mac_operations *mac_ops; + const struct ixgbe_eeprom_operations *eeprom_ops; + const struct ixgbe_phy_operations *phy_ops; + const struct ixgbe_mbx_operations *mbx_ops; const u32 *mvals; }; @@ -3517,14 +3572,19 @@ struct ixgbe_info { #define IXGBE_ERR_INVALID_ARGUMENT -32 #define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 #define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 +#define IXGBE_ERR_FW_RESP_INVALID -39 +#define IXGBE_ERR_TOKEN_RETRY -40 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF #define 
IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) #define IXGBE_FUSES0_300MHZ BIT(5) -#define IXGBE_FUSES0_REV_MASK (3 << 6) +#define IXGBE_FUSES0_REV_MASK (3u << 6) #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) +#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) +#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) +#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) #define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) #define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) @@ -3532,43 +3592,54 @@ struct ixgbe_info { #define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) #define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00) -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) -#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9) +#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11) + +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24) +#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31) + +#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28) +#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29) + +#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0) +#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) -#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) -#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19) -#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) -#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) -#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) +#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6) +#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15) +#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) -#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4) +#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2) -#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) +#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) -#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) -#define 
IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) -#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2) +#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3) +#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31) #define IXGBE_KX4_LINK_CNTL_1 0x4C -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17) -#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24) -#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29) -#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30) -#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24) +#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29) +#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30) +#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31) #define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 #define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 @@ -3584,12 +3655,17 @@ struct ixgbe_info { #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 #define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 #define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 -#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) +#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) #define IXGBE_SB_IOSF_TARGET_KR_PHY 0 #define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1 #define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 #define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 #define IXGBE_NW_MNG_IF_SEL 0x00011178 +#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1) +#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23) #define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ + (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) #endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 2358c1b7d586..f2b1d48a16c3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -1,7 +1,7 @@ /******************************************************************************* Intel 10 Gigabit PCI Express Linux driver - Copyright(c) 1999 - 2014 Intel Corporation. + Copyright(c) 1999 - 2016 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -214,8 +214,8 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); @@ -747,6 +747,25 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) } /** + * ixgbe_init_swfw_sync_X540 - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function resets hardware semaphore bits for a semaphore that may + * have been left locked due to a catastrophic failure.
+ **/ +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) +{ + /* First try to grab the semaphore but we don't need to bother + * looking to see whether we got the lock or not since we do + * the same thing regardless of whether we got the lock or not. + * We got the lock - we release it. + * We timeout trying to get the lock - we force its release. + */ + ixgbe_get_swfw_sync_semaphore(hw); + ixgbe_release_swfw_sync_semaphore(hw); +} + +/** * ixgbe_blink_led_start_X540 - Blink LED based on index. * @hw: pointer to hardware structure * @index: led number to blink @@ -810,7 +829,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) return 0; } -static struct ixgbe_mac_operations mac_ops_X540 = { +static const struct ixgbe_mac_operations mac_ops_X540 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_X540, .start_hw = &ixgbe_start_hw_X540, @@ -846,6 +865,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .clear_vfta = &ixgbe_clear_vfta_generic, .set_vfta = &ixgbe_set_vfta_generic, .fc_enable = &ixgbe_fc_enable_generic, + .setup_fc = ixgbe_setup_fc_generic, .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, .init_uta_tables = &ixgbe_init_uta_tables_generic, .setup_sfp = NULL, @@ -853,6 +873,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, .release_swfw_sync = &ixgbe_release_swfw_sync_X540, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, .disable_rx_buff = &ixgbe_disable_rx_buff_generic, .enable_rx_buff = &ixgbe_enable_rx_buff_generic, .get_thermal_sensor_data = NULL, @@ -863,7 +884,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .disable_rx = &ixgbe_disable_rx_generic, }; -static struct ixgbe_eeprom_operations eeprom_ops_X540 = { +static const struct ixgbe_eeprom_operations eeprom_ops_X540 = { .init_params = &ixgbe_init_eeprom_params_X540, .read = &ixgbe_read_eerd_X540, .read_buffer = &ixgbe_read_eerd_buffer_X540, @@ -874,7 +895,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_X540 = { .update_checksum = &ixgbe_update_eeprom_checksum_X540, }; -static struct ixgbe_phy_operations phy_ops_X540 = { +static const struct ixgbe_phy_operations phy_ops_X540 = { .identify = &ixgbe_identify_phy_generic, .identify_sfp = &ixgbe_identify_sfp_module_generic, .init = NULL, @@ -897,7 +918,7 @@ static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X540) }; -struct ixgbe_info ixgbe_X540_info = { +const struct ixgbe_info ixgbe_X540_info = { .mac = ixgbe_mac_X540, .get_invariants = &ixgbe_get_invariants_X540, .mac_ops = &mac_ops_X540, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h index a1468b1f4d8a..e21cd48491d3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h @@ -36,4 +36,5 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 68a9c646498e..19b75cd98682 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1,7 +1,7 @@ 
/******************************************************************************* * * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 1999 - 2015 Intel Corporation. + * Copyright(c) 1999 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -27,6 +27,7 @@ #include "ixgbe_phy.h" static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed); +static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *); static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw) { @@ -272,16 +273,26 @@ out: static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) { switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP: + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; + return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_SFP: /* set up for CS4227 usage */ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); ixgbe_check_cs4227(hw); + /* Fallthrough */ + case IXGBE_DEV_ID_X550EM_A_SFP_N: return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_KX4: hw->phy.type = ixgbe_phy_x550em_kx4; break; case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: hw->phy.type = ixgbe_phy_x550em_kr; break; case IXGBE_DEV_ID_X550EM_X_1G_T: @@ -324,8 +335,8 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); - eeprom->word_size = 1 << (eeprom_size + - IXGBE_EEPROM_WORD_SIZE_SHIFT); + eeprom->word_size = BIT(eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type, eeprom->word_size); @@ -412,6 +423,121 @@ out: return ret; } +/** + * ixgbe_get_phy_token - Get the token for shared PHY access + * @hw: Pointer to hardware structure + */ +static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REQ; + token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return 0; + if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) + return IXGBE_ERR_FW_RESP_INVALID; + + return IXGBE_ERR_TOKEN_RETRY; +} + +/** + * ixgbe_put_phy_token - Put the token for shared PHY access + * @hw: Pointer to hardware structure + */ +static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) +{ + struct ixgbe_hic_phy_token_req token_cmd; + s32 status; + + token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; + token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; + token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + token_cmd.port_number = hw->bus.lan_id; + token_cmd.command_type = FW_PHY_TOKEN_REL; + token_cmd.pad = 0; + status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd), + IXGBE_HI_COMMAND_TIMEOUT, + true); + if (status) + return status; + if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) + return 0; + return IXGBE_ERR_FW_RESP_INVALID; +} 
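ixgbe_get_phy_token()/ixgbe_put_phy_token() above are the firmware half of the shared-MDIO arbitration on x550em_a: a FW_PHY_TOKEN_RETRY response means the other port currently holds the token, so the caller is expected to back off and try again, which is what ixgbe_acquire_swfw_sync_x550em_a() does further down for up to FW_PHY_TOKEN_RETRIES attempts ((5 s * 1000) / 5 ms = 1000). A hedged sketch of that retry pattern in isolation, using only the constants and helpers defined in this patch (the wrapper name is illustrative, not part of the driver):

/* Sketch only: poll ixgbe_get_phy_token() while firmware reports the
 * token busy, sleeping FW_PHY_TOKEN_DELAY ms between attempts.
 */
static s32 ixgbe_get_phy_token_retry(struct ixgbe_hw *hw)
{
	int retries = FW_PHY_TOKEN_RETRIES;
	s32 status = IXGBE_ERR_TOKEN_RETRY;

	while (--retries) {
		status = ixgbe_get_phy_token(hw);
		if (!status)
			return 0;		/* token acquired */
		if (status != IXGBE_ERR_TOKEN_RETRY)
			return status;		/* hard failure */
		msleep(FW_PHY_TOKEN_DELAY);	/* other port busy */
	}
	return status;
}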
+ +/** + * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 3 bit device type + * @data: Data to write to the register + **/ +static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + __always_unused u32 device_type, + u32 data) +{ + struct ixgbe_hic_internal_phy_req write_cmd; + + memset(&write_cmd, 0, sizeof(write_cmd)); + write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + write_cmd.port_number = hw->bus.lan_id; + write_cmd.command_type = FW_INT_PHY_REQ_WRITE; + write_cmd.address = cpu_to_be16(reg_addr); + write_cmd.write_data = cpu_to_be32(data); + + return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd), + IXGBE_HI_COMMAND_TIMEOUT, false); +} + +/** + * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to read + * @device_type: 3 bit device type + * @data: Pointer to read data from the register + **/ +static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + __always_unused u32 device_type, + u32 *data) +{ + union { + struct ixgbe_hic_internal_phy_req cmd; + struct ixgbe_hic_internal_phy_resp rsp; + } hic; + s32 status; + + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.command_type = FW_INT_PHY_REQ_READ; + hic.cmd.address = cpu_to_be16(reg_addr); + + status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, true); + + /* Extract the register value from the response. */ + *data = be32_to_cpu(hic.rsp.read_data); + + return status; +} + /** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface * command assuming that the semaphore is already obtained.
* @hw: pointer to hardware structure @@ -436,8 +562,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, /* one word */ buffer.length = cpu_to_be16(sizeof(u16)); - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); if (status) return status; @@ -487,7 +612,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, buffer.address = cpu_to_be32((offset + current_word) * 2); buffer.length = cpu_to_be16(words_to_read * 2); - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); @@ -770,8 +895,7 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, buffer.data = data; buffer.address = cpu_to_be32(offset * 2); - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); return status; } @@ -813,8 +937,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; buffer.req.checksum = FW_DEFAULT_CHECKSUM; - status = ixgbe_host_interface_command(hw, (u32 *)&buffer, - sizeof(buffer), + status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT, false); return status; } @@ -861,9 +984,9 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; - fw_cmd.port_number = (u8)hw->bus.lan_id; + fw_cmd.port_number = hw->bus.lan_id; - status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, + status = ixgbe_host_interface_command(hw, &fw_cmd, sizeof(struct ixgbe_hic_disable_rxen), IXGBE_HI_COMMAND_TIMEOUT, true); @@ -1248,6 +1371,117 @@ i2c_err: } /** + * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP + * @hw: pointer to hardware structure + * + * Configure the integrated PHY for native SFP support. + */ +static s32 +ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + bool setup_linear = false; + u32 reg_phy_int; + s32 rc; + + /* Check if SFP module is supported and linear */ + rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not accepted in the setup MAC link flow. + */ + if (rc == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + + if (rc) + return rc; + + /* Configure internal PHY for native SFI */ + rc = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + &reg_phy_int); + if (rc) + return rc; + + if (setup_linear) { + reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING; + reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR; + } else { + reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING; + reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR; + } + + rc = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + reg_phy_int); + if (rc) + return rc; + + /* Setup XFI/SFI internal link */ + return ixgbe_setup_ixfi_x550em(hw, &speed); +} + +/** + * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP + * @hw: pointer to hardware structure + * + * Configure the integrated PHY for SFP support.
+ */ +static s32 +ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + u32 reg_slice, slice_offset; + bool setup_linear = false; + u16 reg_phy_ext; + s32 rc; + + /* Check if SFP module is supported and linear */ + rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not accepted in the setup MAC link flow. + */ + if (rc == IXGBE_ERR_SFP_NOT_PRESENT) + return 0; + + if (rc) + return rc; + + /* Configure internal PHY for KR/KX. */ + ixgbe_setup_kr_speed_x550em(hw, speed); + + if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF) + return IXGBE_ERR_PHY_ADDR_INVALID; + + /* Get external PHY device id */ + rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB, + IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext); + if (rc) + return rc; + + /* When configuring quad port CS4223, the MAC instance is part + * of the slice offset. + */ + if (reg_phy_ext == IXGBE_CS4223_PHY_ID) + slice_offset = (hw->bus.lan_id + + (hw->bus.instance_id << 1)) << 12; + else + slice_offset = hw->bus.lan_id << 12; + + /* Configure CS4227/CS4223 LINE side to proper mode. */ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; + if (setup_linear) + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1; + else + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1; + return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE, + reg_phy_ext); +} + +/** * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed * @hw: pointer to hardware structure * @speed: new link speed @@ -1326,6 +1560,57 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, return 0; } +/** + * ixgbe_setup_sgmii - Set up link for sgmii + * @hw: pointer to hardware structure + */ +static s32 +ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed, + __always_unused bool autoneg_wait_to_complete) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval; + s32 rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + + return rc; +} + /** ixgbe_init_mac_link_ops_X550em - init mac link function pointers * @hw: pointer to hardware structure **/ @@ -1342,15 +1627,35 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; - mac->ops.setup_mac_link =
ixgbe_setup_mac_link_sfp_x550em; + mac->ops.setup_fc = ixgbe_setup_fc_x550em; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP_N: + mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n; + break; + case IXGBE_DEV_ID_X550EM_A_SFP: + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550a; + break; + default: + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550em; + break; + } mac->ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed; break; case ixgbe_media_type_copper: mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.setup_fc = ixgbe_setup_fc_generic; mac->ops.check_link = ixgbe_check_link_t_X550em; + return; + case ixgbe_media_type_backplane: + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) + mac->ops.setup_link = ixgbe_setup_sgmii; break; default: + mac->ops.setup_fc = ixgbe_setup_fc_x550em; break; } } @@ -1614,7 +1919,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, s32 status; u32 reg_val; - status = ixgbe_read_iosf_sb_reg_x550(hw, + status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) @@ -1636,7 +1941,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, /* Restart auto-negotiation. */ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - status = ixgbe_write_iosf_sb_reg_x550(hw, + status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); @@ -1653,9 +1958,9 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) s32 status; u32 reg_val; - status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, - IXGBE_SB_IOSF_TARGET_KX4_PCS0 + - hw->bus.lan_id, &reg_val); + status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, &reg_val); if (status) return status; @@ -1674,20 +1979,24 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) /* Restart auto-negotiation. */ reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART; - status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1, - IXGBE_SB_IOSF_TARGET_KX4_PCS0 + - hw->bus.lan_id, reg_val); + status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1, + IXGBE_SB_IOSF_TARGET_KX4_PCS0 + + hw->bus.lan_id, reg_val); return status; } -/** ixgbe_setup_kr_x550em - Configure the KR PHY. - * @hw: pointer to hardware structure +/** + * ixgbe_setup_kr_x550em - Configure the KR PHY + * @hw: pointer to hardware structure - * Configures the integrated KR PHY. + * Configures the integrated KR PHY for X550EM_x. **/ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { + if (hw->mac.type != ixgbe_mac_X550EM_x) + return 0; + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); } @@ -1842,6 +2151,86 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, return status; } +/** + * ixgbe_setup_fc_x550em - Set up flow control + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) +{ + bool pause, asm_dir; + u32 reg_val; + s32 rc; + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + /* 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full.
+ */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Determine PAUSE and ASM_DIR bits. */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + pause = false; + asm_dir = false; + break; + case ixgbe_fc_tx_pause: + pause = false; + asm_dir = true; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + /* Fallthrough */ + case ixgbe_fc_full: + pause = true; + asm_dir = true; + break; + default: + hw_err(hw, "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR && + hw->device_id != IXGBE_DEV_ID_X550EM_A_KR && + hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L) + return 0; + + rc = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + &reg_val); + if (rc) + return rc; + + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + rc = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + reg_val); + + /* This device does not fully support AN. */ + hw->fc.disable_fc_autoneg = true; + + return rc; +} + /** ixgbe_enter_lplu_x550em - Transition to low power states * @hw: pointer to hardware structure * @@ -1939,6 +2328,36 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register + * @hw: pointer to hardware structure + * + * Read NW_MNG_IF_SEL register and save field values. + */ +static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) +{ + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode. + */ + hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set + * PHY address. This register field has only been used for X552. + */ + if (!hw->phy.nw_mng_if_sel) { + if (hw->mac.type == ixgbe_mac_x550em_a) { + struct ixgbe_adapter *adapter = hw->back; + + e_warn(drv, "nw_mng_if_sel not set\n"); + } + return; + } + + hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; +} + /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init * @hw: pointer to hardware structure * @@ -1953,14 +2372,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) hw->mac.ops.set_lan_id(hw); + ixgbe_read_mng_if_sel_x550em(hw); + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); - - /* Save NW management interface connected on board. This is used - * to determine internal PHY mode. - */ - phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); } /* Identify the PHY or SFP module */ @@ -2023,16 +2439,24 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) /* Detect if there is a copper PHY attached.
*/ switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + hw->phy.type = ixgbe_phy_sgmii; + /* Fallthrough */ case IXGBE_DEV_ID_X550EM_X_KR: case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_X550EM_X_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: - media_type = ixgbe_media_type_copper; + media_type = ixgbe_media_type_copper; break; default: media_type = ixgbe_media_type_unknown; @@ -2080,6 +2504,27 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) return status; } +/** + * ixgbe_set_mdio_speed - Set MDIO clock speed + * @hw: pointer to hardware structure + */ +static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) +{ + u32 hlreg0; + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_SFP: + /* Config MDIO clock speed before the first MDIO PHY access */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 &= ~IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + break; + default: + break; + } +} + /** ixgbe_reset_hw_X550em - Perform hardware reset ** @hw: pointer to hardware structure ** @@ -2093,7 +2538,6 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) s32 status; u32 ctrl = 0; u32 i; - u32 hlreg0; bool link_up = false; /* Call adapter stop to disable Tx/Rx and clear interrupts */ @@ -2179,11 +2623,7 @@ mac_reset_top: hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { - hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); - hlreg0 &= ~IXGBE_HLREG0_MDCSPD; - IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); - } + ixgbe_set_mdio_speed(hw); if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) ixgbe_setup_mux_ctl(hw); @@ -2206,9 +2646,9 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - pfvfspoof |= (1 << vf_target_shift); + pfvfspoof |= BIT(vf_target_shift); else - pfvfspoof &= ~(1 << vf_target_shift); + pfvfspoof &= ~BIT(vf_target_shift); IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } @@ -2296,6 +2736,110 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) ixgbe_release_swfw_sync_X540(hw, mask); } +/** + * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore and gets the shared PHY token as needed + */ +static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +{ + u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + int retries = FW_PHY_TOKEN_RETRIES; + s32 status; + + while (--retries) { + status = 0; + if (hmask) + status = ixgbe_acquire_swfw_sync_X540(hw, hmask); + if (status) + return status; + if (!(mask & IXGBE_GSSR_TOKEN_SM)) + return 0; + + status = ixgbe_get_phy_token(hw); + if (!status) + return 0; + if (hmask) + ixgbe_release_swfw_sync_X540(hw, hmask); + if (status != IXGBE_ERR_TOKEN_RETRY) + return status; + msleep(FW_PHY_TOKEN_DELAY); + } + + return status; +} + +/** + * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore and puts the shared PHY token as
needed + */ +static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask) +{ + u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; + + if (mask & IXGBE_GSSR_TOKEN_SM) + ixgbe_put_phy_token(hw); + + if (hmask) + ixgbe_release_swfw_sync_X540(hw, hmask); +} + +/** + * ixgbe_read_phy_reg_x550a - Reads specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register using the SWFW lock and PHY + * Token. The PHY Token is needed since the MDIO is shared between two MAC + * instances. + */ +static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, mask)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); + + hw->mac.ops.release_swfw_sync(hw, mask); + + return status; +} + +/** + * ixgbe_write_phy_reg_x550a - Writes specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register using the SWFW lock and PHY Token. + * The PHY Token is needed since the MDIO is shared between two MAC instances. + */ +static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + s32 status; + + if (hw->mac.ops.acquire_swfw_sync(hw, mask)) + return IXGBE_ERR_SWFW_SYNC; + + status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data); + hw->mac.ops.release_swfw_sync(hw, mask); + + return status; +} + #define X550_COMMON_MAC \ .init_hw = &ixgbe_init_hw_generic, \ .start_hw = &ixgbe_start_hw_X540, \ @@ -2337,12 +2881,10 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ .get_thermal_sensor_data = NULL, \ .init_thermal_sensor_thresh = NULL, \ - .prot_autoc_read = &prot_autoc_read_generic, \ - .prot_autoc_write = &prot_autoc_write_generic, \ .enable_rx = &ixgbe_enable_rx_generic, \ .disable_rx = &ixgbe_disable_rx_x550, \ -static struct ixgbe_mac_operations mac_ops_X550 = { +static const struct ixgbe_mac_operations mac_ops_X550 = { X550_COMMON_MAC .reset_hw = &ixgbe_reset_hw_X540, .get_media_type = &ixgbe_get_media_type_X540, @@ -2354,20 +2896,45 @@ static struct ixgbe_mac_operations mac_ops_X550 = { .setup_sfp = NULL, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, .release_swfw_sync = &ixgbe_release_swfw_sync_X540, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, + .prot_autoc_read = prot_autoc_read_generic, + .prot_autoc_write = prot_autoc_write_generic, + .setup_fc = ixgbe_setup_fc_generic, }; -static struct ixgbe_mac_operations mac_ops_X550EM_x = { +static const struct ixgbe_mac_operations mac_ops_X550EM_x = { X550_COMMON_MAC .reset_hw = &ixgbe_reset_hw_X550em, .get_media_type = &ixgbe_get_media_type_X550em, .get_san_mac_addr = NULL, .get_wwn_prefix = NULL, - .setup_link = NULL, /* defined later */ + .setup_link = &ixgbe_setup_mac_link_X540, .get_link_capabilities = &ixgbe_get_link_capabilities_X550em, .get_bus_info = &ixgbe_get_bus_info_X550em, .setup_sfp = ixgbe_setup_sfp_modules_X550em, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em, .release_swfw_sync =
&ixgbe_release_swfw_sync_X550em, + .init_swfw_sync = &ixgbe_init_swfw_sync_X540, + .setup_fc = NULL, /* defined later */ + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, +}; + +static struct ixgbe_mac_operations mac_ops_x550em_a = { + X550_COMMON_MAC + .reset_hw = ixgbe_reset_hw_X550em, + .get_media_type = ixgbe_get_media_type_X550em, + .get_san_mac_addr = NULL, + .get_wwn_prefix = NULL, + .setup_link = NULL, /* defined later */ + .get_link_capabilities = ixgbe_get_link_capabilities_X550em, + .get_bus_info = ixgbe_get_bus_info_X550em, + .setup_sfp = ixgbe_setup_sfp_modules_X550em, + .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a, + .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a, + .setup_fc = ixgbe_setup_fc_x550em, + .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a, + .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, }; #define X550_COMMON_EEP \ @@ -2379,12 +2946,12 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = { .update_checksum = &ixgbe_update_eeprom_checksum_X550, \ .calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \ -static struct ixgbe_eeprom_operations eeprom_ops_X550 = { +static const struct ixgbe_eeprom_operations eeprom_ops_X550 = { X550_COMMON_EEP .init_params = &ixgbe_init_eeprom_params_X550, }; -static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { +static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { X550_COMMON_EEP .init_params = &ixgbe_init_eeprom_params_X540, }; @@ -2398,23 +2965,25 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ - .read_reg = &ixgbe_read_phy_reg_generic, \ - .write_reg = &ixgbe_write_phy_reg_generic, \ .setup_link = &ixgbe_setup_phy_link_generic, \ .set_phy_power = NULL, \ .check_overtemp = &ixgbe_tn_check_overtemp, \ .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, -static struct ixgbe_phy_operations phy_ops_X550 = { +static const struct ixgbe_phy_operations phy_ops_X550 = { X550_COMMON_PHY .init = NULL, .identify = &ixgbe_identify_phy_generic, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, }; -static struct ixgbe_phy_operations phy_ops_X550EM_x = { +static const struct ixgbe_phy_operations phy_ops_X550EM_x = { X550_COMMON_PHY .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, + .read_reg = &ixgbe_read_phy_reg_generic, + .write_reg = &ixgbe_write_phy_reg_generic, .read_i2c_combined = &ixgbe_read_i2c_combined_generic, .write_i2c_combined = &ixgbe_write_i2c_combined_generic, .read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked, @@ -2422,6 +2991,14 @@ static struct ixgbe_phy_operations phy_ops_X550EM_x = { &ixgbe_write_i2c_combined_generic_unlocked, }; +static const struct ixgbe_phy_operations phy_ops_x550em_a = { + X550_COMMON_PHY + .init = &ixgbe_init_phy_ops_X550em, + .identify = &ixgbe_identify_phy_x550em, + .read_reg = &ixgbe_read_phy_reg_x550a, + .write_reg = &ixgbe_write_phy_reg_x550a, +}; + static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550) }; @@ -2430,7 +3007,11 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { IXGBE_MVALS_INIT(X550EM_x) }; -struct ixgbe_info ixgbe_X550_info = { +static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X550EM_a) +}; + +const struct ixgbe_info ixgbe_X550_info = 
{ .mac = ixgbe_mac_X550, .get_invariants = &ixgbe_get_invariants_X540, .mac_ops = &mac_ops_X550, @@ -2440,7 +3021,7 @@ struct ixgbe_info ixgbe_X550_info = { .mvals = ixgbe_mvals_X550, }; -struct ixgbe_info ixgbe_X550EM_x_info = { +const struct ixgbe_info ixgbe_X550EM_x_info = { .mac = ixgbe_mac_X550EM_x, .get_invariants = &ixgbe_get_invariants_X550_x, .mac_ops = &mac_ops_X550EM_x, @@ -2449,3 +3030,13 @@ struct ixgbe_info ixgbe_X550EM_x_info = { .mbx_ops = &mbx_ops_generic, .mvals = ixgbe_mvals_X550EM_x, }; + +const struct ixgbe_info ixgbe_x550em_a_info = { + .mac = ixgbe_mac_x550em_a, + .get_invariants = &ixgbe_get_invariants_X550_x, + .mac_ops = &mac_ops_x550em_a, + .eeprom_ops = &eeprom_ops_X550EM_x, + .phy_ops = &phy_ops_x550em_a, + .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_x550em_a, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 58434584b16d..ae09d60e7b67 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -33,6 +33,11 @@ #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_DEV_ID_82599_VF_HV 0x152E +#define IXGBE_DEV_ID_X540_VF_HV 0x1530 +#define IXGBE_DEV_ID_X550_VF_HV 0x1564 +#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9 + #define IXGBE_VF_IRQ_CLEAR_MASK 7 #define IXGBE_VF_MAX_TX_QUEUES 8 #define IXGBE_VF_MAX_RX_QUEUES 8 @@ -74,7 +79,7 @@ typedef u32 ixgbe_link_speed; #define IXGBE_RXDCTL_RLPML_EN 0x00008000 /* DCA Control */ -#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */ /* PSRTYPE bit definitions */ #define IXGBE_PSRTYPE_TCPHDR 0x00000010 @@ -296,16 +301,16 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
wr-bk flushing */ #define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ -#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */ -#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */ -#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */ -#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */ -#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ -#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ - -#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ -#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ -#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ -#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* Rx Desc header ena */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* Rx Desc payload ena */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */ +#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */ + +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */ +#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */ #endif /* _IXGBEVF_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index d7aa4b203f40..508e72c5f1c2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -42,65 +42,54 @@ #define IXGBE_ALL_RAR_ENTRIES 16 +enum {NETDEV_STATS, IXGBEVF_STATS}; + struct ixgbe_stats { char stat_string[ETH_GSTRING_LEN]; - struct { - int sizeof_stat; - int stat_offset; - int base_stat_offset; - int saved_reset_offset; - }; + int type; + int sizeof_stat; + int stat_offset; }; -#define IXGBEVF_STAT(m, b, r) { \ - .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \ - .stat_offset = offsetof(struct ixgbevf_adapter, m), \ - .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \ - .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \ +#define IXGBEVF_STAT(_name, _stat) { \ + .stat_string = _name, \ + .type = IXGBEVF_STATS, \ + .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \ + .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \ } -#define IXGBEVF_ZSTAT(m) { \ - .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \ - .stat_offset = offsetof(struct ixgbevf_adapter, m), \ - .base_stat_offset = -1, \ - .saved_reset_offset = -1 \ +#define IXGBEVF_NETDEV_STAT(_net_stat) { \ + .stat_string = #_net_stat, \ + .type = NETDEV_STATS, \ + .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ + .stat_offset = offsetof(struct net_device_stats, _net_stat) \ } -static const struct ixgbe_stats ixgbe_gstrings_stats[] = { - {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc, - stats.saved_reset_vfgprc)}, - {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc, - stats.saved_reset_vfgptc)}, - {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc, - stats.saved_reset_vfgorc)}, - {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc, - stats.saved_reset_vfgotc)}, - {"tx_busy", IXGBEVF_ZSTAT(tx_busy)}, - {"tx_restart_queue", 
IXGBEVF_ZSTAT(restart_queue)}, - {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)}, - {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc, - stats.saved_reset_vfmprc)}, - {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)}, -#ifdef BP_EXTENDED_STATS - {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)}, - {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)}, - {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)}, - {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)}, - {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)}, - {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)}, -#endif +static struct ixgbe_stats ixgbevf_gstrings_stats[] = { + IXGBEVF_NETDEV_STAT(rx_packets), + IXGBEVF_NETDEV_STAT(tx_packets), + IXGBEVF_NETDEV_STAT(rx_bytes), + IXGBEVF_NETDEV_STAT(tx_bytes), + IXGBEVF_STAT("tx_busy", tx_busy), + IXGBEVF_STAT("tx_restart_queue", restart_queue), + IXGBEVF_STAT("tx_timeout_count", tx_timeout_count), + IXGBEVF_NETDEV_STAT(multicast), + IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error), }; -#define IXGBE_QUEUE_STATS_LEN 0 -#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) +#define IXGBEVF_QUEUE_STATS_LEN ( \ + (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \ + ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \ + (sizeof(struct ixgbe_stats) / sizeof(u64))) +#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats) -#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) +#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN) static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Link test (on/offline)" }; -#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) +#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) static int ixgbevf_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) @@ -177,7 +166,8 @@ static void ixgbevf_get_regs(struct net_device *netdev, memset(p, 0, regs_len); - regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id; + /* generate a number suitable for ethtool's register version */ + regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id; /* General Registers */ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL); @@ -392,13 +382,13 @@ clear_reset: return err; } -static int ixgbevf_get_sset_count(struct net_device *dev, int stringset) +static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset) { switch (stringset) { case ETH_SS_TEST: - return IXGBE_TEST_LEN; + return IXGBEVF_TEST_LEN; case ETH_SS_STATS: - return IXGBE_GLOBAL_STATS_LEN; + return IXGBEVF_STATS_LEN; default: return -EINVAL; } @@ -408,70 +398,138 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - char *base = (char *)adapter; - int i; -#ifdef BP_EXTENDED_STATS - u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, - tx_yields = 0, tx_cleaned = 0, tx_missed = 0; + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *net_stats; + unsigned int start; + struct ixgbevf_ring *ring; + int i, j; + char *p; - for (i = 0; i < adapter->num_rx_queues; i++) { - rx_yields += adapter->rx_ring[i]->stats.yields; - rx_cleaned += adapter->rx_ring[i]->stats.cleaned; - rx_yields += adapter->rx_ring[i]->stats.yields; - } + ixgbevf_update_stats(adapter); + net_stats = dev_get_stats(netdev, &temp); + for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) { + switch 
(ixgbevf_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *)net_stats + + ixgbevf_gstrings_stats[i].stat_offset; + break; + case IXGBEVF_STATS: + p = (char *)adapter + + ixgbevf_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } - for (i = 0; i < adapter->num_tx_queues; i++) { - tx_yields += adapter->tx_ring[i]->stats.yields; - tx_cleaned += adapter->tx_ring[i]->stats.cleaned; - tx_yields += adapter->tx_ring[i]->stats.yields; + data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; } - adapter->bp_rx_yields = rx_yields; - adapter->bp_rx_cleaned = rx_cleaned; - adapter->bp_rx_missed = rx_missed; + /* populate Tx queue data */ + for (j = 0; j < adapter->num_tx_queues; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; + } - adapter->bp_tx_yields = tx_yields; - adapter->bp_tx_cleaned = tx_cleaned; - adapter->bp_tx_missed = tx_missed; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i + 1] = ring->stats.misses; + data[i + 2] = ring->stats.cleaned; + i += 3; #endif + } - ixgbevf_update_stats(adapter); - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - char *p = base + ixgbe_gstrings_stats[i].stat_offset; - char *b = base + ixgbe_gstrings_stats[i].base_stat_offset; - char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset; - - if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) { - if (ixgbe_gstrings_stats[i].base_stat_offset >= 0) - data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r; - else - data[i] = *(u64 *)p; - } else { - if (ixgbe_gstrings_stats[i].base_stat_offset >= 0) - data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r; - else - data[i] = *(u32 *)p; + /* populate Rx queue data */ + for (j = 0; j < adapter->num_rx_queues; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; +#ifdef BP_EXTENDED_STATS + data[i++] = 0; + data[i++] = 0; + data[i++] = 0; +#endif + continue; } + + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + i += 2; +#ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; + data[i + 1] = ring->stats.misses; + data[i + 2] = ring->stats.cleaned; + i += 3; +#endif } } static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct ixgbevf_adapter *adapter = netdev_priv(netdev); char *p = (char *)data; int i; switch (stringset) { case ETH_SS_TEST: memcpy(data, *ixgbe_gstrings_test, - IXGBE_TEST_LEN * ETH_GSTRING_LEN); + IXGBEVF_TEST_LEN * ETH_GSTRING_LEN); break; case ETH_SS_STATS: - for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { - memcpy(p, ixgbe_gstrings_stats[i].stat_string, + for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) { + memcpy(p, ixgbevf_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + + for (i = 0; i < adapter->num_tx_queues; i++) { + sprintf(p, "tx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "tx_queue_%u_bp_napi_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, 
"tx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } + for (i = 0; i < adapter->num_rx_queues; i++) { + sprintf(p, "rx_queue_%u_packets", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bytes", i); + p += ETH_GSTRING_LEN; +#ifdef BP_EXTENDED_STATS + sprintf(p, "rx_queue_%u_bp_poll_yield", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_misses", i); + p += ETH_GSTRING_LEN; + sprintf(p, "rx_queue_%u_bp_cleaned", i); + p += ETH_GSTRING_LEN; +#endif /* BP_EXTENDED_STATS */ + } break; } } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 991eeae81473..d5944c391cbb 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -166,10 +166,10 @@ struct ixgbevf_ring { #define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) -#define IXGBE_TX_FLAGS_CSUM (u32)(1) -#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1) -#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2) -#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3) +#define IXGBE_TX_FLAGS_CSUM BIT(0) +#define IXGBE_TX_FLAGS_VLAN BIT(1) +#define IXGBE_TX_FLAGS_TSO BIT(2) +#define IXGBE_TX_FLAGS_IPV4 BIT(3) #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 @@ -403,13 +403,6 @@ struct ixgbevf_adapter { u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; - /* Some features need tri-state capability, - * thus the additional *_CAPABLE flags. - */ - u32 flags; -#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1) -#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2) - struct msix_entry *msix_entries; /* OS defined structs */ @@ -429,16 +422,6 @@ struct ixgbevf_adapter { unsigned int tx_ring_count; unsigned int rx_ring_count; -#ifdef BP_EXTENDED_STATS - u64 bp_rx_yields; - u64 bp_rx_cleaned; - u64 bp_rx_missed; - - u64 bp_tx_yields; - u64 bp_tx_cleaned; - u64 bp_tx_missed; -#endif - u8 __iomem *io_addr; /* Mainly for iounmap use */ u32 link_speed; bool link_up; @@ -461,13 +444,19 @@ enum ixbgevf_state_t { __IXGBEVF_REMOVING, __IXGBEVF_SERVICE_SCHED, __IXGBEVF_SERVICE_INITED, + __IXGBEVF_RESET_REQUESTED, + __IXGBEVF_QUEUE_RESET_REQUESTED, }; enum ixgbevf_boards { board_82599_vf, + board_82599_vf_hv, board_X540_vf, + board_X540_vf_hv, board_X550_vf, + board_X550_vf_hv, board_X550EM_x_vf, + board_X550EM_x_vf_hv, }; enum ixgbevf_xcast_modes { @@ -482,6 +471,12 @@ extern const struct ixgbevf_info ixgbevf_X550_vf_info; extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info; extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops; +extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info; +extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info; +extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info; +extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info; +extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops; + /* needed by ethtool.c */ extern const char ixgbevf_driver_name[]; extern const char ixgbevf_driver_version[]; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index b0edae94d73d..5e348b125090 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -62,10 +62,14 @@ static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2015 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { - [board_82599_vf] = &ixgbevf_82599_vf_info, - [board_X540_vf] = 
&ixgbevf_X540_vf_info, - [board_X550_vf] = &ixgbevf_X550_vf_info, - [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, + [board_82599_vf] = &ixgbevf_82599_vf_info, + [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info, + [board_X540_vf] = &ixgbevf_X540_vf_info, + [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info, + [board_X550_vf] = &ixgbevf_X550_vf_info, + [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info, + [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info, + [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info, }; /* ixgbevf_pci_tbl - PCI Device ID Table @@ -78,9 +82,13 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = { */ static const struct pci_device_id ixgbevf_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv}, /* required last entry */ {0, } }; @@ -268,7 +276,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter) { /* Do the reset outside of interrupt context */ if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { - adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; + set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); ixgbevf_service_event_schedule(adapter); } } @@ -288,9 +296,10 @@ static void ixgbevf_tx_timeout(struct net_device *netdev) * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: board private structure * @tx_ring: tx ring to clean + * @napi_budget: Used to determine if we are in netpoll **/ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, - struct ixgbevf_ring *tx_ring) + struct ixgbevf_ring *tx_ring, int napi_budget) { struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_tx_buffer *tx_buffer; @@ -328,7 +337,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - dev_kfree_skb_any(tx_buffer->skb); + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -1013,8 +1022,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) int per_ring_budget, work_done = 0; bool clean_complete = true; - ixgbevf_for_each_ring(ring, q_vector->tx) - clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); + ixgbevf_for_each_ring(ring, q_vector->tx) { + if (!ixgbevf_clean_tx_irq(q_vector, ring, budget)) + clean_complete = false; + } if (budget <= 0) return budget; @@ -1035,7 +1046,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) int cleaned = ixgbevf_clean_rx_irq(q_vector, ring, per_ring_budget); work_done += cleaned; - clean_complete &= (cleaned < per_ring_budget); + if (cleaned >= per_ring_budget) + clean_complete = false; } #ifdef CONFIG_NET_RX_BUSY_POLL @@ -1052,7 +1064,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && !test_bit(__IXGBEVF_REMOVING, &adapter->state)) ixgbevf_irq_enable_queues(adapter, - 1 << q_vector->v_idx); + BIT(q_vector->v_idx)); return 0; } @@ -1154,14 +1166,14 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) } /* add q_vector eims value to global eims_enable_mask */ 
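/* [Editor's aside -- illustrative sketch, not part of the patch.] The
 * (1 << n) -> BIT(n) conversions running through this ixgbevf series
 * (including the eims_enable_mask update just below) are more than style:
 * shifting a signed 1 into bit 31 overflows int, which is undefined
 * behavior in C, while BIT() expands to an unsigned long shift as in
 * include/linux/bitops.h. A minimal user-space demonstration, assuming
 * only standard C:
 */
#include <stdio.h>

#define BIT(nr)	(1UL << (nr))	/* mirrors the kernel definition */

int main(void)
{
	unsigned long eims_enable_mask = 0;
	int v_idx = 31;

	/* well defined for any v_idx in [0, BITS_PER_LONG) */
	eims_enable_mask |= BIT(v_idx);
	printf("eims_enable_mask = 0x%lx\n", eims_enable_mask);
	return 0;
}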
- adapter->eims_enable_mask |= 1 << v_idx; + adapter->eims_enable_mask |= BIT(v_idx); ixgbevf_write_eitr(q_vector); } ixgbevf_set_ivar(adapter, -1, 1, v_idx); /* setup eims_other and add value to global eims_enable_mask */ - adapter->eims_other = 1 << v_idx; + adapter->eims_other = BIT(v_idx); adapter->eims_enable_mask |= adapter->eims_other; } @@ -1585,8 +1597,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, txdctl |= (8 << 16); /* WTHRESH = 8 */ /* Setting PTHRESH to 32 both improves performance */ - txdctl |= (1 << 8) | /* HTHRESH = 1 */ - 32; /* PTHRESH = 32 */ + txdctl |= (1u << 8) | /* HTHRESH = 1 */ + 32; /* PTHRESH = 32 */ clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); @@ -1642,7 +1654,7 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter) IXGBE_PSRTYPE_L2HDR; if (adapter->num_rx_queues > 1) - psrtype |= 1 << 29; + psrtype |= BIT(29); IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); } @@ -1748,9 +1760,15 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx), ring->count * sizeof(union ixgbe_adv_rx_desc)); +#ifndef CONFIG_SPARC /* enable relaxed ordering */ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), IXGBE_DCA_RXCTRL_DESC_RRO_EN); +#else + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx), + IXGBE_DCA_RXCTRL_DESC_RRO_EN | + IXGBE_DCA_RXCTRL_DATA_WRO_EN); +#endif /* reset head and tail pointers */ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0); @@ -1791,7 +1809,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) ixgbevf_setup_vfmrqc(adapter); /* notify the PF of our intent to use this size of frame */ - ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); + hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); /* Setup the HW Rx Head and Tail Descriptor Pointers and * the Base and Length of the Rx Descriptor Ring @@ -1904,7 +1922,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) spin_lock_bh(&adapter->mbx_lock); - hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode); + hw->mac.ops.update_xcast_mode(hw, xcast_mode); /* reprogram multicast list */ hw->mac.ops.update_mc_addr_list(hw, netdev); @@ -1984,7 +2002,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter) hw->mbx.timeout = 0; /* wait for watchdog to come around and bail us out */ - adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; + set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state); } return 0; @@ -2052,7 +2070,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) spin_lock_bh(&adapter->mbx_lock); while (api[idx] != ixgbe_mbox_api_unknown) { - err = ixgbevf_negotiate_api_version(hw, api[idx]); + err = hw->mac.ops.negotiate_api_version(hw, api[idx]); if (!err) break; idx++; @@ -2749,11 +2767,9 @@ static void ixgbevf_service_timer(unsigned long data) static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) { - if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED)) + if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state)) return; - adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED; - /* If we're already down or resetting, just bail */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) @@ -2795,7 +2811,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) struct ixgbevf_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) - eics |= 1 << i; + eics |= BIT(i); } /* Cause software interrupt to ensure rings are cleaned */ @@ 
-2821,7 +2837,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) /* if check for link returns error we will need to reset */ if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { - adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED; + set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); link_up = false; } @@ -3222,11 +3238,10 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) { struct net_device *dev = adapter->netdev; - if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED)) + if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, + &adapter->state)) return; - adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED; - /* if interface is down do nothing */ if (test_bit(__IXGBEVF_DOWN, &adapter->state) || test_bit(__IXGBEVF_RESETTING, &adapter->state)) @@ -3271,9 +3286,18 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, u8 *hdr_len) { + u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; - u32 vlan_macip_lens, type_tucmd; - u32 mss_l4len_idx, l4len; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3286,49 +3310,53 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, if (err < 0) return err; + ip.hdr = skb_network_header(skb); + l4.hdr = skb_checksum_start(skb); + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; - if (first->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + /* IP header will have to cancel out any data that + * is not a part of the outer IP header + */ + ip.v4->check = csum_fold(csum_add(lco_csum(skb), + csum_unfold(l4.tcp->check))); type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + + ip.v4->tot_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM | IXGBE_TX_FLAGS_IPV4; - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); + } else { + ip.v6->payload_len = 0; first->tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; } - /* compute header lengths */ - l4len = tcp_hdrlen(skb); - *hdr_len += l4len; - *hdr_len = skb_transport_offset(skb) + l4len; + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; - /* update GSO size and bytecount with header size */ + /* remove payload length from inner checksum */ + paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); + + /* update gso size and bytecount with header size */ first->gso_segs = skb_shinfo(skb)->gso_segs; first->bytecount += (first->gso_segs - 1) * *hdr_len; /* mss_l4len_id: use 1 as index for TSO */ - mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; - mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; + mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT); /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ - vlan_macip_lens = skb_network_header_len(skb); 
- vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens = l4.hdr - ip.hdr; + vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, @@ -3337,76 +3365,55 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, return 1; } +static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb) +{ + unsigned int offset = 0; + + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); + + return offset == skb_checksum_start_offset(skb); +} + static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; - u32 mss_l4len_idx = 0; u32 type_tucmd = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL) { - u8 l4_hdr = 0; - __be16 frag_off; - - switch (first->protocol) { - case htons(ETH_P_IP): - vlan_macip_lens |= skb_network_header_len(skb); - type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; - l4_hdr = ip_hdr(skb)->protocol; - break; - case htons(ETH_P_IPV6): - vlan_macip_lens |= skb_network_header_len(skb); - l4_hdr = ipv6_hdr(skb)->nexthdr; - if (likely(skb_network_header_len(skb) == - sizeof(struct ipv6hdr))) - break; - ipv6_skip_exthdr(skb, skb_network_offset(skb) + - sizeof(struct ipv6hdr), - &l4_hdr, &frag_off); - if (unlikely(frag_off)) - l4_hdr = NEXTHDR_FRAGMENT; - break; - default: - break; - } + if (skb->ip_summed != CHECKSUM_PARTIAL) + goto no_csum; - switch (l4_hdr) { - case IPPROTO_TCP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - mss_l4len_idx = tcp_hdrlen(skb) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_SCTP: - type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; - mss_l4len_idx = sizeof(struct sctphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_UDP: - mss_l4len_idx = sizeof(struct udphdr) << - IXGBE_ADVTXD_L4LEN_SHIFT; + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (((first->protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + ((first->protocol == htons(ETH_P_IPV6)) && + ixgbevf_ipv6_csum_is_sctp(skb))) { + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; break; - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum, l3 proto=%x, l4 proto=%x\n", - first->protocol, l4_hdr); - } - skb_checksum_help(skb); - goto no_csum; } - - /* update TX checksum flag */ - first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + /* fall through */ + default: + skb_checksum_help(skb); + goto no_csum; } - + /* update TX checksum flag */ + first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); no_csum: /* vlan_macip_lens: MACLEN, VLAN tag */ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, - type_tucmd, mss_l4len_idx); + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); } static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) @@ -3442,7 +3449,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, /* use index 1 context for TSO/FSO/FCOE */ if (tx_flags & IXGBE_TX_FLAGS_TSO) - olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT); + olinfo_status |= cpu_to_le32(1u << 
IXGBE_ADVTXD_IDX_SHIFT); /* Check Context must be set if Tx switch is enabled, which it * always is for case where virtual functions are running @@ -3747,7 +3754,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) netdev->mtu = new_mtu; /* notify the PF of our intent to use this size of frame */ - ixgbevf_rlpml_set_vf(hw, max_frame); + hw->mac.ops.set_rlpml(hw, max_frame); return 0; } @@ -3890,6 +3897,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, return stats; } +#define IXGBEVF_MAX_MAC_HDR_LEN 127 +#define IXGBEVF_MAX_NETWORK_HDR_LEN 511 + +static netdev_features_t +ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int network_hdr_len, mac_hdr_len; + + /* Make certain the headers can be described by a context descriptor */ + mac_hdr_len = skb_network_header(skb) - skb->data; + if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6); + + network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); + if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN)) + return features & ~(NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC | + NETIF_F_TSO | + NETIF_F_TSO6); + + /* We can only support IPV4 TSO in tunnels if we can mangle the + * inner IP ID field, so strip TSO if MANGLEID is not supported. + */ + if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) + features &= ~NETIF_F_TSO; + + return features; +} + static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_open = ixgbevf_open, .ndo_stop = ixgbevf_close, @@ -3908,7 +3949,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbevf_netpoll, #endif - .ndo_features_check = passthru_features_check, + .ndo_features_check = ixgbevf_features_check, }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) @@ -4013,26 +4054,37 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_RXCSUM; + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; - netdev->features = netdev->hw_features | - NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; +#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPIP | \ + NETIF_F_GSO_SIT | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM) - netdev->vlan_features |= NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_SG; + netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES; + netdev->hw_features |= NETIF_F_GSO_PARTIAL | + IXGBEVF_GSO_PARTIAL_FEATURES; + + netdev->features = netdev->hw_features; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= netdev->vlan_features; + + /* set this bit last since it cannot be part of vlan_features */ + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + netdev->priv_flags |= IFF_UNICAST_FLT; if (IXGBE_REMOVED(hw->hw_addr)) { diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index dc68fea4894b..61a80da8b6f0 100644 --- 
a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -346,3 +346,14 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops = { .check_for_rst = ixgbevf_check_for_rst_vf, }; +/* Mailbox operations when running on Hyper-V. + * On Hyper-V, PF/VF communication is not through the + * hardware mailbox; this communication is through + * a software-mediated path. + * Most mailbox operations are no-ops while running on + * Hyper-V. + */ +const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = { + .init_params = ixgbevf_init_mbx_params_vf, + .check_for_rst = ixgbevf_check_for_rst_vf, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index 4d613a4f2a7f..e670d3b19c3c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -27,6 +27,12 @@ #include "vf.h" #include "ixgbevf.h" +/* On Hyper-V, to reset, we need to read from this offset + * in the PCI config space. This is the mechanism used on + * Hyper-V to support PF/VF communication. + */ +#define IXGBE_HV_RESET_OFFSET 0x201 + /** * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx * @hw: pointer to hardware structure * @@ -126,6 +132,27 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) } /** + * Hyper-V variant; the VF/PF communication is through the PCI + * config space. + */ +static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw) +{ +#if IS_ENABLED(CONFIG_PCI_MMCONFIG) + struct ixgbevf_adapter *adapter = hw->back; + int i; + + for (i = 0; i < 6; i++) + pci_read_config_byte(adapter->pdev, + (i + IXGBE_HV_RESET_OFFSET), + &hw->mac.perm_addr[i]); + return 0; +#else + pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n"); + return -EOPNOTSUPP; +#endif +} + +/** * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units * @hw: pointer to hardware structure * @@ -258,6 +285,11 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) return ret_val; } +static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) +{ + return -EOPNOTSUPP; +} + /** * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents. * @adapter: pointer to the port handle * @@ -416,6 +448,26 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, return ret_val; } +/** + * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: Unused in this implementation + * + * We don't really allow setting the device MAC address. However, + * if the address being set is the permanent MAC address we will + * permit that. + **/ +static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, + u32 vmdq) +{ + if (ether_addr_equal(addr, hw->mac.perm_addr)) + return 0; + + return -EOPNOTSUPP; +} + static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg, u16 size) { @@ -473,15 +525,22 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, } /** + * Hyper-V variant - just a stub. + */ +static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, + struct net_device *netdev) +{ + return -EOPNOTSUPP; +} + +/** * ixgbevf_update_xcast_mode - Update Multicast mode * @hw: pointer to the HW structure - * @netdev: pointer to net device structure * @xcast_mode: new multicast mode * * Updates the Multicast Mode of VF. 
**/ -static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, - struct net_device *netdev, int xcast_mode) +static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) { struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; @@ -513,6 +572,14 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, } /** + * Hyper-V variant - just a stub. + */ +static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) +{ + return -EOPNOTSUPP; +} + +/** * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address * @hw: pointer to the HW structure * @vlan: 12 bit VLAN ID @@ -551,6 +618,15 @@ mbx_err: } /** + * Hyper-V variant - just a stub. + */ +static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + return -EOPNOTSUPP; +} + +/** * ixgbevf_setup_mac_link_vf - Setup MAC link settings * @hw: pointer to hardware structure * @speed: Unused in this implementation @@ -656,11 +732,72 @@ out: } /** - * ixgbevf_rlpml_set_vf - Set the maximum receive packet length + * Hyper-V variant; there is no mailbox communication. + */ +static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, + bool autoneg_wait_to_complete) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + u32 links_reg; + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + + /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs + * before the link status is correct + */ + if (mac->type == ixgbe_mac_82599_vf) { + int i; + + for (i = 0; i < 5; i++) { + udelay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; + } + } + + switch (links_reg & IXGBE_LINKS_SPEED_82599) { + case IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; + break; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link + */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return 0; +} + +/** + * ixgbevf_set_rlpml_vf - Set the maximum receive packet length * @hw: pointer to the HW structure * @max_size: value to assign to max frame size **/ -void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) +static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) { u32 msgbuf[2]; @@ -670,11 +807,30 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) } /** - * ixgbevf_negotiate_api_version - Negotiate supported API version + * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + * Hyper-V variant. + **/ +static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) +{ + u32 reg; + + /* If we are on Hyper-V, we implement this functionality + * differently. 
+ */ + reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0)); + /* CRC == 4 */ + reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); +} + +/** + * ixgbevf_negotiate_api_version_vf - Negotiate supported API version * @hw: pointer to the HW structure * @api: integer containing requested API version **/ -int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) +static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) { int err; u32 msg[3]; @@ -703,6 +859,21 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) return err; } +/** + * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version + * @hw: pointer to the HW structure + * @api: integer containing requested API version + * Hyper-V version - only ixgbe_mbox_api_10 supported. + **/ +static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) +{ + /* Hyper-V only supports api version ixgbe_mbox_api_10 */ + if (api != ixgbe_mbox_api_10) + return IXGBE_ERR_INVALID_ARGUMENT; + + return 0; +} + int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, unsigned int *default_tc) { @@ -769,11 +940,30 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = { .stop_adapter = ixgbevf_stop_hw_vf, .setup_link = ixgbevf_setup_mac_link_vf, .check_link = ixgbevf_check_mac_link_vf, + .negotiate_api_version = ixgbevf_negotiate_api_version_vf, .set_rar = ixgbevf_set_rar_vf, .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf, .update_xcast_mode = ixgbevf_update_xcast_mode, .set_uc_addr = ixgbevf_set_uc_addr_vf, .set_vfta = ixgbevf_set_vfta_vf, + .set_rlpml = ixgbevf_set_rlpml_vf, +}; + +static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = { + .init_hw = ixgbevf_init_hw_vf, + .reset_hw = ixgbevf_hv_reset_hw_vf, + .start_hw = ixgbevf_start_hw_vf, + .get_mac_addr = ixgbevf_get_mac_addr_vf, + .stop_adapter = ixgbevf_stop_hw_vf, + .setup_link = ixgbevf_setup_mac_link_vf, + .check_link = ixgbevf_hv_check_mac_link_vf, + .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf, + .set_rar = ixgbevf_hv_set_rar_vf, + .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf, + .update_xcast_mode = ixgbevf_hv_update_xcast_mode, + .set_uc_addr = ixgbevf_hv_set_uc_addr_vf, + .set_vfta = ixgbevf_hv_set_vfta_vf, + .set_rlpml = ixgbevf_hv_set_rlpml_vf, }; const struct ixgbevf_info ixgbevf_82599_vf_info = { @@ -781,17 +971,37 @@ const struct ixgbevf_info ixgbevf_82599_vf_info = { .mac_ops = &ixgbevf_mac_ops, }; +const struct ixgbevf_info ixgbevf_82599_vf_hv_info = { + .mac = ixgbe_mac_82599_vf, + .mac_ops = &ixgbevf_hv_mac_ops, +}; + const struct ixgbevf_info ixgbevf_X540_vf_info = { .mac = ixgbe_mac_X540_vf, .mac_ops = &ixgbevf_mac_ops, }; +const struct ixgbevf_info ixgbevf_X540_vf_hv_info = { + .mac = ixgbe_mac_X540_vf, + .mac_ops = &ixgbevf_hv_mac_ops, +}; + const struct ixgbevf_info ixgbevf_X550_vf_info = { .mac = ixgbe_mac_X550_vf, .mac_ops = &ixgbevf_mac_ops, }; +const struct ixgbevf_info ixgbevf_X550_vf_hv_info = { + .mac = ixgbe_mac_X550_vf, + .mac_ops = &ixgbevf_hv_mac_ops, +}; + const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = { .mac = ixgbe_mac_X550EM_x_vf, .mac_ops = &ixgbevf_mac_ops, }; + +const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = { + .mac = ixgbe_mac_X550EM_x_vf, + .mac_ops = &ixgbevf_hv_mac_ops, +}; diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index ef9f7736b4dc..2cac610f32ba 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ 
b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -51,6 +51,7 @@ struct ixgbe_mac_operations { s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); s32 (*stop_adapter)(struct ixgbe_hw *); s32 (*get_bus_info)(struct ixgbe_hw *); + s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api); /* Link */ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); @@ -63,11 +64,12 @@ struct ixgbe_mac_operations { s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); - s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int); + s32 (*update_xcast_mode)(struct ixgbe_hw *, int); s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); + void (*set_rlpml)(struct ixgbe_hw *, u16); }; enum ixgbe_mac_type { @@ -207,8 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) -void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); -int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, unsigned int *default_tc); int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues); diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index 3ddf657bc10b..836ebd8ee768 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -222,7 +222,7 @@ jme_clear_ghc_reset(struct jme_adapter *jme) jwrite32f(jme, JME_GHC, jme->reg_ghc); } -static inline void +static void jme_reset_mac_processor(struct jme_adapter *jme) { static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index d74f5f4e5782..1799fe1415df 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -152,7 +152,7 @@ static inline void korina_abort_dma(struct net_device *dev, writel(0x10, &ch->dmac); while (!(readl(&ch->dmas) & DMA_STAT_HALT)) - dev->trans_start = jiffies; + netif_trans_update(dev); writel(0, &ch->dmas); } @@ -283,7 +283,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev) } dma_cache_wback((u32) td, sizeof(*td)); - dev->trans_start = jiffies; + netif_trans_update(dev); spin_unlock_irqrestore(&lp->lock, flags); return NETDEV_TX_OK; @@ -622,7 +622,7 @@ korina_tx_dma_interrupt(int irq, void *dev_id) &(lp->tx_dma_regs->dmandptr)); lp->tx_chain_status = desc_empty; lp->tx_chain_head = lp->tx_chain_tail; - dev->trans_start = jiffies; + netif_trans_update(dev); } if (dmas & DMA_STAT_ERR) printk(KERN_ERR "%s: DMA error\n", dev->name); @@ -811,7 +811,7 @@ static int korina_init(struct net_device *dev) /* reset ethernet logic */ writel(0, &lp->eth_regs->ethintfc); while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP)) - dev->trans_start = jiffies; + netif_trans_update(dev); /* Enable Ethernet Interface */ writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index b630ef1e9646..dc82b1b19574 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -519,7 +519,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) byte_offset = CPHYSADDR(skb->data) % 16; ch->skb[ch->dma.desc] = skb; - dev->trans_start = jiffies; + netif_trans_update(dev); 
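/* [Editor's aside -- for reference, not part of the patch.] Every
 * dev->trans_start = jiffies -> netif_trans_update(dev) conversion in
 * this series targets the same helper; its 4.7-era definition in
 * include/linux/netdevice.h is, as we read it, the following (the
 * timestamp now lives in the per-queue struct netdev_queue rather than
 * in struct net_device):
 */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* the test avoids dirtying a hot cacheline when nothing changed */
	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}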
spin_lock_irqsave(&priv->lock, flags); desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len, @@ -657,7 +657,7 @@ ltq_etop_tx_timeout(struct net_device *dev) err = ltq_etop_hw_init(dev); if (err) goto err_hw; - dev->trans_start = jiffies; + netif_trans_update(dev); netif_wake_queue(dev); return; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index b5c6d42daa12..2664827ddecd 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -68,7 +68,7 @@ config MVNETA config MVNETA_BM tristate - default y if MVNETA=y && MVNETA_BM_ENABLE + default y if MVNETA=y && MVNETA_BM_ENABLE!=n default MVNETA_BM_ENABLE select HWBM help diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 7fc490225da5..a6d26d351dfc 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3354,8 +3354,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, /* Enable per-CPU interrupts on the CPU that is * brought up. */ - smp_call_function_single(cpu, mvneta_percpu_enable, - pp, true); + mvneta_percpu_enable(pp); /* Enable per-CPU interrupt on the one CPU we care * about. @@ -3387,8 +3386,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, /* Disable per-CPU interrupts on the CPU that is * brought down. */ - smp_call_function_single(cpu, mvneta_percpu_disable, - pp, true); + mvneta_percpu_disable(pp); break; case CPU_DEAD: diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 7ace07dad6a3..54d5154ac0f8 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -286,12 +286,12 @@ static int pxa168_eth_stop(struct net_device *dev); static inline u32 rdl(struct pxa168_eth_private *pep, int offset) { - return readl(pep->base + offset); + return readl_relaxed(pep->base + offset); } static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data) { - writel(data, pep->base + offset); + writel_relaxed(data, pep->base + offset); } static void abort_dma(struct pxa168_eth_private *pep) @@ -342,9 +342,9 @@ static void rxq_refill(struct net_device *dev) pep->rx_skb[used_rx_desc] = skb; /* Return the descriptor to DMA ownership */ - wmb(); + dma_wmb(); p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT; - wmb(); + dma_wmb(); /* Move the used descriptor pointer to the next descriptor */ pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size; @@ -794,7 +794,7 @@ static int rxq_process(struct net_device *dev, int budget) rx_used_desc = pep->rx_used_desc_q; rx_desc = &pep->p_rx_desc_area[rx_curr_desc]; cmd_sts = rx_desc->cmd_sts; - rmb(); + dma_rmb(); if (cmd_sts & (BUF_OWNED_BY_DMA)) break; skb = pep->rx_skb[rx_curr_desc]; @@ -979,8 +979,8 @@ static int pxa168_init_phy(struct net_device *dev) return 0; pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr); - if (!pep->phy) - return -ENODEV; + if (IS_ERR(pep->phy)) + return PTR_ERR(pep->phy); err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link, pep->phy_intf); @@ -1287,7 +1287,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); - wmb(); + dma_wmb(); desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC | TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT; wmb(); @@ -1295,7 +1295,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) stats->tx_bytes += length; stats->tx_packets++; - 
dev->trans_start = jiffies; + netif_trans_update(dev); if (pep->tx_ring_size - pep->tx_desc_count <= 1) { /* We handled the current skb, but now we are out of space.*/ netif_stop_queue(dev); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index ec0a22119e09..467138b423d3 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2418,7 +2418,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) sky2_write32(hw, B0_IMSK, 0); sky2_read32(hw, B0_IMSK); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ napi_disable(&hw->napi); netif_tx_disable(dev); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index e0b68afea56e..c984462fad2a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -536,7 +536,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, struct mtk_eth *eth = mac->hw; struct mtk_tx_dma *itxd, *txd; struct mtk_tx_buf *tx_buf; - unsigned long flags; dma_addr_t mapped_addr; unsigned int nr_frags; int i, n_desc = 1; @@ -568,11 +567,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) return -ENOMEM; - /* normally we can rely on the stack not calling this more than once, - * however we have 2 queues running ont he same ring so we need to lock - * the ring access - */ - spin_lock_irqsave(&eth->page_lock, flags); WRITE_ONCE(itxd->txd1, mapped_addr); tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); @@ -609,8 +603,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, WRITE_ONCE(txd->txd1, mapped_addr); WRITE_ONCE(txd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(frag_map_size) | - last_frag * TX_DMA_LS0) | - mac->id); + last_frag * TX_DMA_LS0)); WRITE_ONCE(txd->txd4, 0); tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; @@ -632,8 +625,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | (!nr_frags * TX_DMA_LS0))); - spin_unlock_irqrestore(&eth->page_lock, flags); - netdev_sent_queue(dev, skb->len); skb_tx_timestamp(skb); @@ -661,8 +652,6 @@ err_dma: itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); } while (itxd != txd); - spin_unlock_irqrestore(&eth->page_lock, flags); - return -ENOMEM; } @@ -681,7 +670,29 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb) nfrags += skb_shinfo(skb)->nr_frags; } - return DIV_ROUND_UP(nfrags, 2); + return nfrags; +} + +static void mtk_wake_queue(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + netif_wake_queue(eth->netdev[i]); + } +} + +static void mtk_stop_queue(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + netif_stop_queue(eth->netdev[i]); + } } static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -690,14 +701,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) struct mtk_eth *eth = mac->hw; struct mtk_tx_ring *ring = &eth->tx_ring; struct net_device_stats *stats = &dev->stats; + unsigned long flags; bool gso = false; int tx_num; + /* normally we can rely on the stack not calling this more than once, + * however we have 2 queues running on the same ring so we need to lock + * the ring access + */ + 
spin_lock_irqsave(&eth->page_lock, flags); + tx_num = mtk_cal_txd_req(skb); if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { - netif_stop_queue(dev); + mtk_stop_queue(eth); netif_err(eth, tx_queued, dev, "Tx Ring full when queue awake!\n"); + spin_unlock_irqrestore(&eth->page_lock, flags); return NETDEV_TX_BUSY; } @@ -720,15 +739,17 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) { - netif_stop_queue(dev); + mtk_stop_queue(eth); if (unlikely(atomic_read(&ring->free_count) > ring->thresh)) - netif_wake_queue(dev); + mtk_wake_queue(eth); } + spin_unlock_irqrestore(&eth->page_lock, flags); return NETDEV_TX_OK; drop: + spin_unlock_irqrestore(&eth->page_lock, flags); stats->tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; @@ -897,13 -918,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) if (!total) return 0; - for (i = 0; i < MTK_MAC_COUNT; i++) { - if (!eth->netdev[i] || - unlikely(!netif_queue_stopped(eth->netdev[i]))) - continue; - if (atomic_read(&ring->free_count) > ring->thresh) - netif_wake_queue(eth->netdev[i]); - } + if (atomic_read(&ring->free_count) > ring->thresh) + mtk_wake_queue(eth); return total; } @@ -1176,7 +1192,7 @@ static void mtk_tx_timeout(struct net_device *dev) eth->netdev[mac->id]->stats.tx_errors++; netif_err(eth, tx_err, dev, "transmit timed out\n"); - schedule_work(&mac->pending_work); + schedule_work(&eth->pending_work); } static irqreturn_t mtk_handle_irq(int irq, void *_eth) @@ -1413,19 +1429,30 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void mtk_pending_work(struct work_struct *work) { - struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work); - struct mtk_eth *eth = mac->hw; - struct net_device *dev = eth->netdev[mac->id]; - int err; + struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); + int err, i; + unsigned long restart = 0; rtnl_lock(); - mtk_stop(dev); - err = mtk_open(dev); - if (err) { - netif_alert(eth, ifup, dev, - "Driver up/down cycle failed, closing device.\n"); - dev_close(dev); + /* stop all devices to make sure that dma is properly shut down */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + mtk_stop(eth->netdev[i]); + __set_bit(i, &restart); + } + + /* restart DMA and enable IRQs */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!test_bit(i, &restart)) + continue; + err = mtk_open(eth->netdev[i]); + if (err) { + netif_alert(eth, ifup, eth->netdev[i], + "Driver up/down cycle failed, closing device.\n"); + dev_close(eth->netdev[i]); + } } rtnl_unlock(); } @@ -1435,15 +1462,13 @@ static int mtk_cleanup(struct mtk_eth *eth) int i; for (i = 0; i < MTK_MAC_COUNT; i++) { - struct mtk_mac *mac = netdev_priv(eth->netdev[i]); - if (!eth->netdev[i]) continue; unregister_netdev(eth->netdev[i]); free_netdev(eth->netdev[i]); - cancel_work_sync(&mac->pending_work); } + cancel_work_sync(&eth->pending_work); return 0; } @@ -1631,7 +1656,6 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->id = id; mac->hw = eth; mac->of_node = np; - INIT_WORK(&mac->pending_work, mtk_pending_work); mac->hw_stats = devm_kzalloc(eth->dev, sizeof(*mac->hw_stats), @@ -1645,6 +1669,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; SET_NETDEV_DEV(eth->netdev[id], eth->dev); + eth->netdev[id]->watchdog_timeo = HZ; eth->netdev[id]->netdev_ops = &mtk_netdev_ops; 
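/* [Editor's aside -- simplified model, hypothetical helper name, not
 * kernel API.] The watchdog_timeo = HZ assignment added just above arms
 * the core TX watchdog: dev_watchdog() in net/sched/sch_generic.c calls
 * the driver's ndo_tx_timeout (mtk_tx_timeout) once a stopped queue has
 * made no progress for that long. Per queue, the check is roughly:
 */
static bool tx_queue_hung(struct netdev_queue *txq, unsigned long timeo)
{
	/* hung if stopped and nothing completed since trans_start + timeo */
	return netif_xmit_stopped(txq) &&
	       time_after(jiffies, txq->trans_start + timeo);
}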
eth->netdev[id]->base_addr = (unsigned long)eth->base; eth->netdev[id]->vlan_features = MTK_HW_FEATURES & @@ -1678,10 +1703,6 @@ static int mtk_probe(struct platform_device *pdev) struct mtk_eth *eth; int err; - err = device_reset(&pdev->dev); - if (err) - return err; - match = of_match_device(of_mtk_match, &pdev->dev); soc = (struct mtk_soc_data *)match->data; @@ -1736,6 +1757,7 @@ static int mtk_probe(struct platform_device *pdev) eth->dev = &pdev->dev; eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); + INIT_WORK(&eth->pending_work, mtk_pending_work); err = mtk_hw_init(eth); if (err) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 48a5292c8ed8..eed626d56ea4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -363,6 +363,7 @@ struct mtk_rx_ring { * @clk_gp1: The gmac1 clock * @clk_gp2: The gmac2 clock * @mii_bus: If there is a bus we need to create an instance for it + * @pending_work: The workqueue used to reset the dma ring */ struct mtk_eth { @@ -389,6 +390,7 @@ struct mtk_eth { struct clk *clk_gp1; struct clk *clk_gp2; struct mii_bus *mii_bus; + struct work_struct pending_work; }; /* struct mtk_mac - the structure that holds the info about the MACs of the * @@ -398,7 +400,6 @@ struct mtk_eth { * @hw: Backpointer to our main datastruture * @hw_stats: Packet statistics counter * @phy_dev: The attached PHY if available - * @pending_work: The workqueue used to reset the dma ring */ struct mtk_mac { int id; @@ -406,7 +407,6 @@ struct mtk_mac { struct mtk_eth *hw; struct mtk_hw_stats *hw_stats; struct phy_device *phy_dev; - struct work_struct pending_work; }; /* the struct describing the SoC. these are declared in the soc_xyz.c files */ diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 0c51c69f802f..249a4584401a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -576,41 +576,48 @@ out: return res; } -/* - * Handling for queue buffers -- we allocate a bunch of memory and - * register it in a memory region at HCA virtual address 0. If the - * requested size is > max_direct, we split the allocation into - * multiple pages, so we don't require too much contiguous memory. - */ -int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, - struct mlx4_buf *buf, gfp_t gfp) +static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size, + struct mlx4_buf *buf, gfp_t gfp) { dma_addr_t t; - if (size <= max_direct) { - buf->nbufs = 1; - buf->npages = 1; - buf->page_shift = get_order(size) + PAGE_SHIFT; - buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev, - size, &t, gfp); - if (!buf->direct.buf) - return -ENOMEM; + buf->nbufs = 1; + buf->npages = 1; + buf->page_shift = get_order(size) + PAGE_SHIFT; + buf->direct.buf = + dma_zalloc_coherent(&dev->persist->pdev->dev, + size, &t, gfp); + if (!buf->direct.buf) + return -ENOMEM; - buf->direct.map = t; + buf->direct.map = t; - while (t & ((1 << buf->page_shift) - 1)) { - --buf->page_shift; - buf->npages *= 2; - } + while (t & ((1 << buf->page_shift) - 1)) { + --buf->page_shift; + buf->npages *= 2; + } - memset(buf->direct.buf, 0, size); + return 0; +} + +/* Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. 
If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. + */ +int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, + struct mlx4_buf *buf, gfp_t gfp) +{ + if (size <= max_direct) { + return mlx4_buf_direct_alloc(dev, size, buf, gfp); } else { + dma_addr_t t; int i; - buf->direct.buf = NULL; - buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; - buf->npages = buf->nbufs; + buf->direct.buf = NULL; + buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->npages = buf->nbufs; buf->page_shift = PAGE_SHIFT; buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), gfp); @@ -619,28 +626,12 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, for (i = 0; i < buf->nbufs; ++i) { buf->page_list[i].buf = - dma_alloc_coherent(&dev->persist->pdev->dev, - PAGE_SIZE, - &t, gfp); + dma_zalloc_coherent(&dev->persist->pdev->dev, + PAGE_SIZE, &t, gfp); if (!buf->page_list[i].buf) goto err_free; buf->page_list[i].map = t; - - memset(buf->page_list[i].buf, 0, PAGE_SIZE); - } - - if (BITS_PER_LONG == 64) { - struct page **pages; - pages = kmalloc(sizeof *pages * buf->nbufs, gfp); - if (!pages) - goto err_free; - for (i = 0; i < buf->nbufs; ++i) - pages[i] = virt_to_page(buf->page_list[i].buf); - buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); - kfree(pages); - if (!buf->direct.buf) - goto err_free; } } @@ -655,15 +646,11 @@ EXPORT_SYMBOL_GPL(mlx4_buf_alloc); void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) { - int i; - - if (buf->nbufs == 1) + if (buf->nbufs == 1) { dma_free_coherent(&dev->persist->pdev->dev, size, - buf->direct.buf, - buf->direct.map); - else { - if (BITS_PER_LONG == 64) - vunmap(buf->direct.buf); + buf->direct.buf, buf->direct.map); + } else { + int i; for (i = 0; i < buf->nbufs; ++i) if (buf->page_list[i].buf) @@ -789,7 +776,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) EXPORT_SYMBOL_GPL(mlx4_db_free); int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, - int size, int max_direct) + int size) { int err; @@ -799,7 +786,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, *wqres->db.db = 0; - err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL); + err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL); if (err) goto err_db; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index af975a2b74c6..132cea655920 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -73,22 +73,16 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, */ set_dev_node(&mdev->dev->persist->pdev->dev, node); err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres, - cq->buf_size, 2 * PAGE_SIZE); + cq->buf_size); set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); if (err) goto err_cq; - err = mlx4_en_map_buffer(&cq->wqres.buf); - if (err) - goto err_res; - cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf; *pcq = cq; return 0; -err_res: - mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); err_cq: kfree(cq); *pcq = NULL; @@ -177,7 +171,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_cq *cq = *pcq; - mlx4_en_unmap_buffer(&cq->wqres.buf); mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) && cq->is_tx 
== RX) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index f69584a9b47f..c761194bb323 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -337,7 +337,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset) case ETH_SS_STATS: return bitmap_iterator_count(&it) + (priv->tx_ring_num * 2) + - (priv->rx_ring_num * 2); + (priv->rx_ring_num * 3); case ETH_SS_TEST: return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2; @@ -404,6 +404,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, for (i = 0; i < priv->rx_ring_num; i++) { data[index++] = priv->rx_ring[i]->packets; data[index++] = priv->rx_ring[i]->bytes; + data[index++] = priv->rx_ring[i]->dropped; } spin_unlock_bh(&priv->stats_lock); @@ -477,6 +478,8 @@ static void mlx4_en_get_strings(struct net_device *dev, "rx%d_packets", i); sprintf(data + (index++) * ETH_GSTRING_LEN, "rx%d_bytes", i); + sprintf(data + (index++) * ETH_GSTRING_LEN, + "rx%d_dropped", i); } break; case ETH_SS_PRIV_FLAGS: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index b4b258c8ca47..92e0624f4cf0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1856,6 +1856,7 @@ static void mlx4_en_restart(struct work_struct *work) en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); + rtnl_lock(); mutex_lock(&mdev->state_lock); if (priv->port_up) { mlx4_en_stop_port(dev, 1); @@ -1863,6 +1864,7 @@ static void mlx4_en_restart(struct work_struct *work) en_err(priv, "Failed restarting port %d\n", priv->port); } mutex_unlock(&mdev->state_lock); + rtnl_unlock(); } static void mlx4_en_clear_stats(struct net_device *dev) @@ -2355,8 +2357,12 @@ out: } /* set offloads */ - priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; + priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; } static void mlx4_en_del_vxlan_offloads(struct work_struct *work) @@ -2365,8 +2371,12 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, vxlan_del_task); /* unset offloads */ - priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); + priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL); ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 0); @@ -2425,7 +2435,18 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb, netdev_features_t features) { features = vlan_features_check(skb, features); - return vxlan_features_check(skb, features); + features = vxlan_features_check(skb, features); + + /* The ConnectX-3 doesn't support outer IPv6 checksums but it does + * support inner IPv6 checksums and segmentation so we need to + * strip that feature if this is an IPv6 encapsulated frame. 
+ */ + if (skb->encapsulation && + (skb->ip_summed == CHECKSUM_PARTIAL) && + (ip_hdr(skb)->version != 4)) + features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + + return features; } #endif @@ -2907,7 +2928,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, /* Allocate page for receive rings */ err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, - MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); + MLX4_EN_PAGE_SIZE); if (err) { en_err(priv, "Failed to allocate page for rx qps\n"); goto out; @@ -2990,8 +3011,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, } if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { - dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; - dev->features |= NETIF_F_GSO_UDP_TUNNEL; + dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + dev->features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; } mdev->pndev[port] = dev; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index 3904b5fc0b7c..20b6c2e678b8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c @@ -158,6 +158,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) u64 in_mod = reset << 8 | port; int err; int i, counter_index; + unsigned long sw_rx_dropped = 0; mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); if (IS_ERR(mailbox)) @@ -180,6 +181,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) for (i = 0; i < priv->rx_ring_num; i++) { stats->rx_packets += priv->rx_ring[i]->packets; stats->rx_bytes += priv->rx_ring[i]->bytes; + sw_rx_dropped += priv->rx_ring[i]->dropped; priv->port_stats.rx_chksum_good += priv->rx_ring[i]->csum_ok; priv->port_stats.rx_chksum_none += priv->rx_ring[i]->csum_none; priv->port_stats.rx_chksum_complete += priv->rx_ring[i]->csum_complete; @@ -236,7 +238,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) &mlx4_en_stats->MCAST_prio_1, NUM_PRIORITIES); stats->collisions = 0; - stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); + stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) + + sw_rx_dropped; stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); stats->rx_over_errors = 0; stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 02e925d6f734..a6b0db0e0383 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -107,37 +107,6 @@ int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp, return ret; } -int mlx4_en_map_buffer(struct mlx4_buf *buf) -{ - struct page **pages; - int i; - - if (BITS_PER_LONG == 64 || buf->nbufs == 1) - return 0; - - pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); - if (!pages) - return -ENOMEM; - - for (i = 0; i < buf->nbufs; ++i) - pages[i] = virt_to_page(buf->page_list[i].buf); - - buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); - kfree(pages); - if (!buf->direct.buf) - return -ENOMEM; - - return 0; -} - -void mlx4_en_unmap_buffer(struct mlx4_buf *buf) -{ - if (BITS_PER_LONG == 64 || buf->nbufs == 1) - return; - - vunmap(buf->direct.buf); -} - void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event) { return; diff --git 
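
The mlx4_en_features_check() hunk above follows the usual .ndo_features_check contract: inspect each outgoing skb and clear the offload bits the hardware cannot honour for it, letting the stack fall back to software checksumming/GSO. Reduced to just that check (driver plumbing omitted, demo_ name illustrative):

#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_features_t demo_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	/* Outer checksums are computed for IPv4 only, so strip checksum
	 * and GSO offloads from IPv6-encapsulated frames. */
	if (skb->encapsulation &&
	    skb->ip_summed == CHECKSUM_PARTIAL &&
	    ip_hdr(skb)->version != 4)
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
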
a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 86bcfe510e4e..c1b3a9c8cf3b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -61,7 +61,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv, gfp_t gfp = _gfp; if (order) - gfp |= __GFP_COMP | __GFP_NOWARN; + gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NOMEMALLOC; page = alloc_pages(gfp, order); if (likely(page)) break; @@ -126,7 +126,9 @@ out: dma_unmap_page(priv->ddev, page_alloc[i].dma, page_alloc[i].page_size, PCI_DMA_FROMDEVICE); page = page_alloc[i].page; - set_page_count(page, 1); + /* Revert changes done by mlx4_alloc_pages */ + page_ref_sub(page, page_alloc[i].page_size / + priv->frag_info[i].frag_stride - 1); put_page(page); } } @@ -176,7 +178,9 @@ out: dma_unmap_page(priv->ddev, page_alloc->dma, page_alloc->page_size, PCI_DMA_FROMDEVICE); page = page_alloc->page; - set_page_count(page, 1); + /* Revert changes done by mlx4_alloc_pages */ + page_ref_sub(page, page_alloc->page_size / + priv->frag_info[i].frag_stride - 1); put_page(page); page_alloc->page = NULL; } @@ -390,17 +394,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, /* Allocate HW buffers on provided NUMA node */ set_dev_node(&mdev->dev->persist->pdev->dev, node); - err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, - ring->buf_size, 2 * PAGE_SIZE); + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); if (err) goto err_info; - err = mlx4_en_map_buffer(&ring->wqres.buf); - if (err) { - en_err(priv, "Failed to map RX buffer\n"); - goto err_hwq; - } ring->buf = ring->wqres.buf.direct.buf; ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter; @@ -408,8 +406,6 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, *pring = ring; return 0; -err_hwq: - mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); err_info: vfree(ring->rx_info); ring->rx_info = NULL; @@ -513,7 +509,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_dev *mdev = priv->mdev; struct mlx4_en_rx_ring *ring = *pring; - mlx4_en_unmap_buffer(&ring->wqres.buf); mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE); vfree(ring->rx_info); ring->rx_info = NULL; @@ -703,7 +698,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS) return -1; - hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8)); + hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr)); csum_pseudo_hdr = csum_partial(&ipv6h->saddr, sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0); @@ -939,7 +934,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* GRO not possible, complete processing here */ skb = mlx4_en_rx_skb(priv, rx_desc, frags, length); if (!skb) { - priv->stats.rx_dropped++; + ring->dropped++; goto next; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index c0d7b7296236..f6e61570cb2c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -41,6 +41,7 @@ #include <linux/vmalloc.h> #include <linux/tcp.h> #include <linux/ip.h> +#include <linux/ipv6.h> #include <linux/moduleparam.h> #include "mlx4_en.h" @@ -93,20 +94,13 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, /* Allocate HW buffers on provided NUMA 
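
Two of the en_rx.c hunks above are easy to misread. The page_ref_sub() calls undo the extra references mlx4_alloc_pages() takes up front (one per frag stride), which is safer than forging the count with set_page_count(). And the get_fixed_ipv6_csum() change is an endianness fix that happens to be a no-op on little-endian hosts:

/* The IPv6 pseudo-header word carrying Next Header is 0x00NN on the
 * wire, and __wsum arithmetic folds 16-bit words as loaded from
 * memory, so htons(nexthdr) is the right constant to add. For
 * nexthdr = 0x06 (TCP):
 *
 *   little-endian: htons(0x06) == 0x0600 == 0x06 << 8   (unchanged)
 *   big-endian:    htons(0x06) == 0x0006; the old 0x06 << 8 was wrong
 */
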
node */ set_dev_node(&mdev->dev->persist->pdev->dev, node); - err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size, - 2 * PAGE_SIZE); + err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node); if (err) { en_err(priv, "Failed allocating hwq resources\n"); goto err_bounce; } - err = mlx4_en_map_buffer(&ring->wqres.buf); - if (err) { - en_err(priv, "Failed to map TX buffer\n"); - goto err_hwq_res; - } - ring->buf = ring->wqres.buf.direct.buf; en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n", @@ -117,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, MLX4_RESERVE_ETH_BF_QP); if (err) { en_err(priv, "failed reserving qp for TX ring\n"); - goto err_map; + goto err_hwq_res; } err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); @@ -154,8 +148,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, err_reserve: mlx4_qp_release_range(mdev->dev, ring->qpn, 1); -err_map: - mlx4_en_unmap_buffer(&ring->wqres.buf); err_hwq_res: mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); err_bounce: @@ -182,7 +174,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, mlx4_qp_remove(mdev->dev, &ring->qp); mlx4_qp_free(mdev->dev, &ring->qp); mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); - mlx4_en_unmap_buffer(&ring->wqres.buf); mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); kfree(ring->bounce_buf); ring->bounce_buf = NULL; @@ -405,7 +396,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, u32 packets = 0; u32 bytes = 0; int factor = priv->cqe_factor; - u64 timestamp = 0; int done = 0; int budget = priv->tx_work_limit; u32 last_nr_txbb; @@ -445,9 +435,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, new_index = be16_to_cpu(cqe->wqe_index) & size_mask; do { + u64 timestamp = 0; + txbbs_skipped += last_nr_txbb; ring_index = (ring_index + last_nr_txbb) & size_mask; - if (ring->tx_info[ring_index].ts_requested) + + if (unlikely(ring->tx_info[ring_index].ts_requested)) timestamp = mlx4_en_get_cqe_ts(cqe); /* free next descriptor */ @@ -918,8 +911,18 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) tx_ind, fragptr); if (skb->encapsulation) { - struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb); - if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP) + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + u8 proto; + + ip.hdr = skb_inner_network_header(skb); + proto = (ip.v4->version == 4) ? 
ip.v4->protocol : + ip.v6->nexthdr; + + if (proto == IPPROTO_TCP || proto == IPPROTO_UDP) op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP); else op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 358f7230da58..12c77a70abdb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3172,6 +3172,34 @@ static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap return 0; } +static int mlx4_pci_enable_device(struct mlx4_dev *dev) +{ + struct pci_dev *pdev = dev->persist->pdev; + int err = 0; + + mutex_lock(&dev->persist->pci_status_mutex); + if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) { + err = pci_enable_device(pdev); + if (!err) + dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED; + } + mutex_unlock(&dev->persist->pci_status_mutex); + + return err; +} + +static void mlx4_pci_disable_device(struct mlx4_dev *dev) +{ + struct pci_dev *pdev = dev->persist->pdev; + + mutex_lock(&dev->persist->pci_status_mutex); + if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) { + pci_disable_device(pdev); + dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED; + } + mutex_unlock(&dev->persist->pci_status_mutex); +} + static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, int total_vfs, int *nvfs, struct mlx4_priv *priv, int reset_flow) @@ -3582,7 +3610,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); - err = pci_enable_device(pdev); + err = mlx4_pci_enable_device(&priv->dev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; @@ -3715,7 +3743,7 @@ err_release_regions: pci_release_regions(pdev); err_disable_pdev: - pci_disable_device(pdev); + mlx4_pci_disable_device(&priv->dev); pci_set_drvdata(pdev, NULL); return err; } @@ -3775,6 +3803,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) priv->pci_dev_data = id->driver_data; mutex_init(&dev->persist->device_state_mutex); mutex_init(&dev->persist->interface_state_mutex); + mutex_init(&dev->persist->pci_status_mutex); ret = devlink_register(devlink, &pdev->dev); if (ret) @@ -3923,7 +3952,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) } pci_release_regions(pdev); - pci_disable_device(pdev); + mlx4_pci_disable_device(dev); devlink_unregister(devlink); kfree(dev->persist); devlink_free(devlink); @@ -4042,7 +4071,7 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev, if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; - pci_disable_device(pdev); + mlx4_pci_disable_device(persist->dev); return PCI_ERS_RESULT_NEED_RESET; } @@ -4050,45 +4079,53 @@ static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev) { struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; - struct mlx4_priv *priv = mlx4_priv(dev); - int ret; - int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; - int total_vfs; + int err; mlx4_err(dev, "mlx4_pci_slot_reset was called\n"); - ret = pci_enable_device(pdev); - if (ret) { - mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret); + err = mlx4_pci_enable_device(dev); + if (err) { + mlx4_err(dev, "Can not re-enable device, err=%d\n", err); return PCI_ERS_RESULT_DISCONNECT; } pci_set_master(pdev); pci_restore_state(pdev); pci_save_state(pdev); + return PCI_ERS_RESULT_RECOVERED; +} +static void mlx4_pci_resume(struct 
pci_dev *pdev) +{ + struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); + struct mlx4_dev *dev = persist->dev; + struct mlx4_priv *priv = mlx4_priv(dev); + int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; + int total_vfs; + int err; + + mlx4_err(dev, "%s was called\n", __func__); total_vfs = dev->persist->num_vfs; memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs)); mutex_lock(&persist->interface_state_mutex); if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) { - ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, + err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs, priv, 1); - if (ret) { - mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n", - __func__, ret); + if (err) { + mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n", + __func__, err); goto end; } - ret = restore_current_port_types(dev, dev->persist-> + err = restore_current_port_types(dev, dev->persist-> curr_port_type, dev->persist-> curr_port_poss_type); - if (ret) - mlx4_err(dev, "could not restore original port types (%d)\n", ret); + if (err) + mlx4_err(dev, "could not restore original port types (%d)\n", err); } end: mutex_unlock(&persist->interface_state_mutex); - return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; } static void mlx4_shutdown(struct pci_dev *pdev) @@ -4105,6 +4142,7 @@ static void mlx4_shutdown(struct pci_dev *pdev) static const struct pci_error_handlers mlx4_err_handler = { .error_detected = mlx4_pci_err_detected, .slot_reset = mlx4_pci_slot_reset, + .resume = mlx4_pci_resume, }; static struct pci_driver mlx4_driver = { diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 6aa73972d478..f2d0920018a5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -1102,7 +1102,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], struct mlx4_cmd_mailbox *mailbox; struct mlx4_mgm *mgm; u32 members_count; - int index, prev; + int index = -1, prev; int link = 0; int i; int err; @@ -1181,7 +1181,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], goto out; out: - if (prot == MLX4_PROT_ETH) { + if (prot == MLX4_PROT_ETH && index != -1) { /* manage the steering entry for promisc mode */ if (new_entry) err = new_steering_entry(dev, port, steer, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index ef9683101ead..c9d7fc5159f2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -586,6 +586,8 @@ struct mlx4_mfunc_master_ctx { struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1]; int init_port_ref[MLX4_MAX_PORTS + 1]; u16 max_mtu[MLX4_MAX_PORTS + 1]; + u8 pptx; + u8 pprx; int disable_mcast_ref[MLX4_MAX_PORTS + 1]; struct mlx4_resource_tracker res_tracker; struct workqueue_struct *comm_wq; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d12ab6a73344..cc84e09f324a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -323,6 +323,7 @@ struct mlx4_en_rx_ring { unsigned long csum_ok; unsigned long csum_none; unsigned long csum_complete; + unsigned long dropped; int hwtstamp_rx_filter; cpumask_var_t affinity_mask; }; @@ -671,8 +672,6 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context 
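
mlx4_pci_enable_device() and mlx4_pci_disable_device() above make the enable/disable transitions idempotent: the AER callbacks and the regular probe/remove paths can no longer double-enable or double-disable the function. The pattern in isolation, with illustrative demo_* names:

#include <linux/mutex.h>
#include <linux/pci.h>

enum demo_pci_status { DEMO_PCI_DISABLED, DEMO_PCI_ENABLED };

struct demo_persist {
	struct pci_dev       *pdev;
	struct mutex          pci_status_mutex; /* mutex_init() at probe */
	enum demo_pci_status  pci_status;
};

static int demo_pci_enable(struct demo_persist *p)
{
	int err = 0;

	mutex_lock(&p->pci_status_mutex);
	if (p->pci_status == DEMO_PCI_DISABLED) {
		err = pci_enable_device(p->pdev);
		if (!err)
			p->pci_status = DEMO_PCI_ENABLED;
	}
	mutex_unlock(&p->pci_status_mutex);
	return err;
}

static void demo_pci_disable(struct demo_persist *p)
{
	mutex_lock(&p->pci_status_mutex);
	if (p->pci_status == DEMO_PCI_ENABLED) {
		pci_disable_device(p->pdev);
		p->pci_status = DEMO_PCI_DISABLED;
	}
	mutex_unlock(&p->pci_status_mutex);
}
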
*context); void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); -int mlx4_en_map_buffer(struct mlx4_buf *buf); -void mlx4_en_unmap_buffer(struct mlx4_buf *buf); int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp, int loopback); void mlx4_en_calc_rx_buf(struct net_device *dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index 211c65087997..087b23b320cb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -1317,6 +1317,19 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, } gen_context->mtu = cpu_to_be16(master->max_mtu[port]); + /* Slave cannot change Global Pause configuration */ + if (slave != mlx4_master_func_num(dev) && + ((gen_context->pptx != master->pptx) || + (gen_context->pprx != master->pprx))) { + gen_context->pptx = master->pptx; + gen_context->pprx = master->pprx; + mlx4_warn(dev, + "denying Global Pause change for slave:%d\n", + slave); + } else { + master->pptx = gen_context->pptx; + master->pprx = gen_context->pprx; + } break; case MLX4_SET_PORT_GID_TABLE: /* change to MULTIPLE entries: number of guest's gids diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 4fc45ee0c5d1..9ea7b583096a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -2,10 +2,10 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ - mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o + mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o en_clock.o vxlan.o en_tc.o + en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index eb926e1ee71c..dcd2df6518de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -294,6 +294,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_DESTROY_FLOW_TABLE: case MLX5_CMD_OP_DESTROY_FLOW_GROUP: case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: + case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -395,6 +396,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_FLOW_GROUP: case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: + case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: + case MLX5_CMD_OP_QUERY_FLOW_COUNTER: *status = MLX5_DRIVER_STATUS_ABORTED; *synd = MLX5_DRIVER_SYND; return -EIO; @@ -406,178 +409,142 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, const char *mlx5_command_str(int command) { - switch (command) { - case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: - return "QUERY_HCA_VPORT_CONTEXT"; - - case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: - return "MODIFY_HCA_VPORT_CONTEXT"; - - case MLX5_CMD_OP_QUERY_HCA_CAP: - return "QUERY_HCA_CAP"; - - case MLX5_CMD_OP_SET_HCA_CAP: - return "SET_HCA_CAP"; - - case MLX5_CMD_OP_QUERY_ADAPTER: - return "QUERY_ADAPTER"; - - case MLX5_CMD_OP_INIT_HCA: - return "INIT_HCA"; - - case MLX5_CMD_OP_TEARDOWN_HCA: - return 
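
The mlx4_common_set_port() hunk above enforces that only the PF may change Global Pause: a VF request whose pptx/pprx bits differ from the cached master values is rewritten (with a warning), while PF requests update the cache. A sketch of just that policy, with hypothetical demo_* types:

#include <linux/types.h>

struct demo_gen_ctx { u8 pptx; u8 pprx; };  /* requested settings */
struct demo_master  { u8 pptx; u8 pprx; };  /* PF-approved cache  */

/* Returns false when a slave's request was denied and rewritten. */
static bool demo_filter_global_pause(struct demo_gen_ctx *gen,
				     struct demo_master *master,
				     int slave, int pf_func)
{
	if (slave != pf_func &&
	    (gen->pptx != master->pptx || gen->pprx != master->pprx)) {
		gen->pptx = master->pptx;
		gen->pprx = master->pprx;
		return false;
	}
	master->pptx = gen->pptx;
	master->pprx = gen->pprx;
	return true;
}
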
"TEARDOWN_HCA"; - - case MLX5_CMD_OP_ENABLE_HCA: - return "MLX5_CMD_OP_ENABLE_HCA"; - - case MLX5_CMD_OP_DISABLE_HCA: - return "MLX5_CMD_OP_DISABLE_HCA"; - - case MLX5_CMD_OP_QUERY_PAGES: - return "QUERY_PAGES"; - - case MLX5_CMD_OP_MANAGE_PAGES: - return "MANAGE_PAGES"; - - case MLX5_CMD_OP_CREATE_MKEY: - return "CREATE_MKEY"; - - case MLX5_CMD_OP_QUERY_MKEY: - return "QUERY_MKEY"; - - case MLX5_CMD_OP_DESTROY_MKEY: - return "DESTROY_MKEY"; - - case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: - return "QUERY_SPECIAL_CONTEXTS"; - - case MLX5_CMD_OP_CREATE_EQ: - return "CREATE_EQ"; - - case MLX5_CMD_OP_DESTROY_EQ: - return "DESTROY_EQ"; - - case MLX5_CMD_OP_QUERY_EQ: - return "QUERY_EQ"; - - case MLX5_CMD_OP_CREATE_CQ: - return "CREATE_CQ"; - - case MLX5_CMD_OP_DESTROY_CQ: - return "DESTROY_CQ"; - - case MLX5_CMD_OP_QUERY_CQ: - return "QUERY_CQ"; - - case MLX5_CMD_OP_MODIFY_CQ: - return "MODIFY_CQ"; - - case MLX5_CMD_OP_CREATE_QP: - return "CREATE_QP"; - - case MLX5_CMD_OP_DESTROY_QP: - return "DESTROY_QP"; - - case MLX5_CMD_OP_RST2INIT_QP: - return "RST2INIT_QP"; - - case MLX5_CMD_OP_INIT2RTR_QP: - return "INIT2RTR_QP"; - - case MLX5_CMD_OP_RTR2RTS_QP: - return "RTR2RTS_QP"; - - case MLX5_CMD_OP_RTS2RTS_QP: - return "RTS2RTS_QP"; - - case MLX5_CMD_OP_SQERR2RTS_QP: - return "SQERR2RTS_QP"; - - case MLX5_CMD_OP_2ERR_QP: - return "2ERR_QP"; - - case MLX5_CMD_OP_2RST_QP: - return "2RST_QP"; - - case MLX5_CMD_OP_QUERY_QP: - return "QUERY_QP"; - - case MLX5_CMD_OP_MAD_IFC: - return "MAD_IFC"; - - case MLX5_CMD_OP_INIT2INIT_QP: - return "INIT2INIT_QP"; - - case MLX5_CMD_OP_CREATE_PSV: - return "CREATE_PSV"; - - case MLX5_CMD_OP_DESTROY_PSV: - return "DESTROY_PSV"; - - case MLX5_CMD_OP_CREATE_SRQ: - return "CREATE_SRQ"; - - case MLX5_CMD_OP_DESTROY_SRQ: - return "DESTROY_SRQ"; - - case MLX5_CMD_OP_QUERY_SRQ: - return "QUERY_SRQ"; - - case MLX5_CMD_OP_ARM_RQ: - return "ARM_RQ"; - - case MLX5_CMD_OP_CREATE_XRC_SRQ: - return "CREATE_XRC_SRQ"; - - case MLX5_CMD_OP_DESTROY_XRC_SRQ: - return "DESTROY_XRC_SRQ"; - - case MLX5_CMD_OP_QUERY_XRC_SRQ: - return "QUERY_XRC_SRQ"; - - case MLX5_CMD_OP_ARM_XRC_SRQ: - return "ARM_XRC_SRQ"; - - case MLX5_CMD_OP_ALLOC_PD: - return "ALLOC_PD"; - - case MLX5_CMD_OP_DEALLOC_PD: - return "DEALLOC_PD"; - - case MLX5_CMD_OP_ALLOC_UAR: - return "ALLOC_UAR"; - - case MLX5_CMD_OP_DEALLOC_UAR: - return "DEALLOC_UAR"; - - case MLX5_CMD_OP_ATTACH_TO_MCG: - return "ATTACH_TO_MCG"; - - case MLX5_CMD_OP_DETTACH_FROM_MCG: - return "DETTACH_FROM_MCG"; - - case MLX5_CMD_OP_ALLOC_XRCD: - return "ALLOC_XRCD"; - - case MLX5_CMD_OP_DEALLOC_XRCD: - return "DEALLOC_XRCD"; - - case MLX5_CMD_OP_ACCESS_REG: - return "MLX5_CMD_OP_ACCESS_REG"; - - case MLX5_CMD_OP_SET_WOL_ROL: - return "SET_WOL_ROL"; - - case MLX5_CMD_OP_QUERY_WOL_ROL: - return "QUERY_WOL_ROL"; - - case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: - return "ADD_VXLAN_UDP_DPORT"; - - case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: - return "DELETE_VXLAN_UDP_DPORT"; +#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd + switch (command) { + MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP); + MLX5_COMMAND_STR_CASE(QUERY_ADAPTER); + MLX5_COMMAND_STR_CASE(INIT_HCA); + MLX5_COMMAND_STR_CASE(TEARDOWN_HCA); + MLX5_COMMAND_STR_CASE(ENABLE_HCA); + MLX5_COMMAND_STR_CASE(DISABLE_HCA); + MLX5_COMMAND_STR_CASE(QUERY_PAGES); + MLX5_COMMAND_STR_CASE(MANAGE_PAGES); + MLX5_COMMAND_STR_CASE(SET_HCA_CAP); + MLX5_COMMAND_STR_CASE(QUERY_ISSI); + MLX5_COMMAND_STR_CASE(SET_ISSI); + MLX5_COMMAND_STR_CASE(CREATE_MKEY); + MLX5_COMMAND_STR_CASE(QUERY_MKEY); + 
MLX5_COMMAND_STR_CASE(DESTROY_MKEY); + MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS); + MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME); + MLX5_COMMAND_STR_CASE(CREATE_EQ); + MLX5_COMMAND_STR_CASE(DESTROY_EQ); + MLX5_COMMAND_STR_CASE(QUERY_EQ); + MLX5_COMMAND_STR_CASE(GEN_EQE); + MLX5_COMMAND_STR_CASE(CREATE_CQ); + MLX5_COMMAND_STR_CASE(DESTROY_CQ); + MLX5_COMMAND_STR_CASE(QUERY_CQ); + MLX5_COMMAND_STR_CASE(MODIFY_CQ); + MLX5_COMMAND_STR_CASE(CREATE_QP); + MLX5_COMMAND_STR_CASE(DESTROY_QP); + MLX5_COMMAND_STR_CASE(RST2INIT_QP); + MLX5_COMMAND_STR_CASE(INIT2RTR_QP); + MLX5_COMMAND_STR_CASE(RTR2RTS_QP); + MLX5_COMMAND_STR_CASE(RTS2RTS_QP); + MLX5_COMMAND_STR_CASE(SQERR2RTS_QP); + MLX5_COMMAND_STR_CASE(2ERR_QP); + MLX5_COMMAND_STR_CASE(2RST_QP); + MLX5_COMMAND_STR_CASE(QUERY_QP); + MLX5_COMMAND_STR_CASE(SQD_RTS_QP); + MLX5_COMMAND_STR_CASE(INIT2INIT_QP); + MLX5_COMMAND_STR_CASE(CREATE_PSV); + MLX5_COMMAND_STR_CASE(DESTROY_PSV); + MLX5_COMMAND_STR_CASE(CREATE_SRQ); + MLX5_COMMAND_STR_CASE(DESTROY_SRQ); + MLX5_COMMAND_STR_CASE(QUERY_SRQ); + MLX5_COMMAND_STR_CASE(ARM_RQ); + MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ); + MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ); + MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ); + MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ); + MLX5_COMMAND_STR_CASE(CREATE_DCT); + MLX5_COMMAND_STR_CASE(DESTROY_DCT); + MLX5_COMMAND_STR_CASE(DRAIN_DCT); + MLX5_COMMAND_STR_CASE(QUERY_DCT); + MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION); + MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE); + MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE); + MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS); + MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS); + MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT); + MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID); + MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY); + MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER); + MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); + MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); + MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); + MLX5_COMMAND_STR_CASE(ALLOC_PD); + MLX5_COMMAND_STR_CASE(DEALLOC_PD); + MLX5_COMMAND_STR_CASE(ALLOC_UAR); + MLX5_COMMAND_STR_CASE(DEALLOC_UAR); + MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION); + MLX5_COMMAND_STR_CASE(ACCESS_REG); + MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG); + MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG); + MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG); + MLX5_COMMAND_STR_CASE(MAD_IFC); + MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX); + MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX); + MLX5_COMMAND_STR_CASE(NOP); + MLX5_COMMAND_STR_CASE(ALLOC_XRCD); + MLX5_COMMAND_STR_CASE(DEALLOC_XRCD); + MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN); + MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN); + MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS); + MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS); + MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS); + MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS); + MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS); + MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT); + MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT); + MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(SET_WOL_ROL); + MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL); + MLX5_COMMAND_STR_CASE(CREATE_TIR); + MLX5_COMMAND_STR_CASE(MODIFY_TIR); + MLX5_COMMAND_STR_CASE(DESTROY_TIR); + MLX5_COMMAND_STR_CASE(QUERY_TIR); 
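
The MLX5_COMMAND_STR_CASE() rewrite above leans on two preprocessor operators: ## pastes the opcode name onto the MLX5_CMD_OP_ prefix to form the case label, and # stringifies the same name for the return value, so each opcode costs one line instead of three. The trick in miniature:

#define DEMO_STR_CASE(op) case DEMO_OP_ ## op: return #op

enum { DEMO_OP_CREATE, DEMO_OP_DESTROY };

static const char *demo_op_str(int op)
{
	switch (op) {
	DEMO_STR_CASE(CREATE);  /* case DEMO_OP_CREATE: return "CREATE"; */
	DEMO_STR_CASE(DESTROY);
	default:
		return "unknown";
	}
}
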
+ MLX5_COMMAND_STR_CASE(CREATE_SQ); + MLX5_COMMAND_STR_CASE(MODIFY_SQ); + MLX5_COMMAND_STR_CASE(DESTROY_SQ); + MLX5_COMMAND_STR_CASE(QUERY_SQ); + MLX5_COMMAND_STR_CASE(CREATE_RQ); + MLX5_COMMAND_STR_CASE(MODIFY_RQ); + MLX5_COMMAND_STR_CASE(DESTROY_RQ); + MLX5_COMMAND_STR_CASE(QUERY_RQ); + MLX5_COMMAND_STR_CASE(CREATE_RMP); + MLX5_COMMAND_STR_CASE(MODIFY_RMP); + MLX5_COMMAND_STR_CASE(DESTROY_RMP); + MLX5_COMMAND_STR_CASE(QUERY_RMP); + MLX5_COMMAND_STR_CASE(CREATE_TIS); + MLX5_COMMAND_STR_CASE(MODIFY_TIS); + MLX5_COMMAND_STR_CASE(DESTROY_TIS); + MLX5_COMMAND_STR_CASE(QUERY_TIS); + MLX5_COMMAND_STR_CASE(CREATE_RQT); + MLX5_COMMAND_STR_CASE(MODIFY_RQT); + MLX5_COMMAND_STR_CASE(DESTROY_RQT); + MLX5_COMMAND_STR_CASE(QUERY_RQT); + MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT); + MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE); + MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE); + MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE); + MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP); + MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP); + MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP); + MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY); + MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER); + MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); + MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); default: return "unknown command opcode"; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 879e6276c473..e8a6c3325b39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -46,6 +46,9 @@ #include <linux/rhashtable.h> #include "wq.h" #include "mlx5_core.h" +#include "en_stats.h" + +#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) #define MLX5E_MAX_NUM_TC 8 @@ -57,12 +60,30 @@ #define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa #define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd +#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1 +#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x4 +#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6 + +#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */ +#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */ +#define MLX5_MPWRQ_LOG_WQE_SZ 17 +#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? 
\ + MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0) +#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER) +#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \ + MLX5_MPWRQ_WQE_PAGE_ORDER) +#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \ + BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW)) +#define MLX5_UMR_ALIGN (2048) +#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128) + #define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 +#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2 #define MLX5E_LOG_INDIR_RQT_SIZE 0x7 #define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) @@ -73,233 +94,70 @@ #define MLX5E_NUM_MAIN_GROUPS 9 +static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) +{ + switch (wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW, + wq_size / 2); + default: + return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES, + wq_size / 2); + } +} + +static inline int mlx5_min_log_rq_size(int wq_type) +{ + switch (wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW; + default: + return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; + } +} + +static inline int mlx5_max_log_rq_size(int wq_type) +{ + switch (wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW; + default: + return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; + } +} + +struct mlx5e_tx_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_eth_seg eth; +}; + +struct mlx5e_rx_wqe { + struct mlx5_wqe_srq_next_seg next; + struct mlx5_wqe_data_seg data; +}; + +struct mlx5e_umr_wqe { + struct mlx5_wqe_ctrl_seg ctrl; + struct mlx5_wqe_umr_ctrl_seg uctrl; + struct mlx5_mkey_seg mkc; + struct mlx5_wqe_data_seg data; +}; + #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ #define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */ #endif -static const char vport_strings[][ETH_GSTRING_LEN] = { - /* vport statistics */ - "rx_packets", - "rx_bytes", - "tx_packets", - "tx_bytes", - "rx_error_packets", - "rx_error_bytes", - "tx_error_packets", - "tx_error_bytes", - "rx_unicast_packets", - "rx_unicast_bytes", - "tx_unicast_packets", - "tx_unicast_bytes", - "rx_multicast_packets", - "rx_multicast_bytes", - "tx_multicast_packets", - "tx_multicast_bytes", - "rx_broadcast_packets", - "rx_broadcast_bytes", - "tx_broadcast_packets", - "tx_broadcast_bytes", - - /* SW counters */ - "tso_packets", - "tso_bytes", - "tso_inner_packets", - "tso_inner_bytes", - "lro_packets", - "lro_bytes", - "rx_csum_good", - "rx_csum_none", - "rx_csum_sw", - "tx_csum_offload", - "tx_csum_inner", - "tx_queue_stopped", - "tx_queue_wake", - "tx_queue_dropped", - "rx_wqe_err", -}; - -struct mlx5e_vport_stats { - /* HW counters */ - u64 rx_packets; - u64 rx_bytes; - u64 tx_packets; - u64 tx_bytes; - u64 rx_error_packets; - u64 rx_error_bytes; - u64 tx_error_packets; - u64 tx_error_bytes; - u64 rx_unicast_packets; - u64 rx_unicast_bytes; - u64 tx_unicast_packets; - u64 tx_unicast_bytes; - u64 rx_multicast_packets; - u64 rx_multicast_bytes; - u64 tx_multicast_packets; - u64 tx_multicast_bytes; - u64 rx_broadcast_packets; - u64 rx_broadcast_bytes; - u64 tx_broadcast_packets; - u64 
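
For the new MPWRQ sizing macros, the arithmetic on a 4 KiB-page system (PAGE_SHIFT == 12) works out as:

/* MLX5_MPWRQ_LOG_WQE_SZ     = 17           -> 128 KiB per multi-packet WQE
 * MLX5_MPWRQ_WQE_PAGE_ORDER = 17 - 12 = 5
 * MLX5_MPWRQ_PAGES_PER_WQE  = BIT(5)  = 32 pages per WQE
 * The ?: guard clamps the order to 0 whenever PAGE_SHIFT >= 17.
 */
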
tx_broadcast_bytes; - - /* SW counters */ - u64 tso_packets; - u64 tso_bytes; - u64 tso_inner_packets; - u64 tso_inner_bytes; - u64 lro_packets; - u64 lro_bytes; - u64 rx_csum_good; - u64 rx_csum_none; - u64 rx_csum_sw; - u64 tx_csum_offload; - u64 tx_csum_inner; - u64 tx_queue_stopped; - u64 tx_queue_wake; - u64 tx_queue_dropped; - u64 rx_wqe_err; - -#define NUM_VPORT_COUNTERS 35 -}; - -static const char pport_strings[][ETH_GSTRING_LEN] = { - /* IEEE802.3 counters */ - "frames_tx", - "frames_rx", - "check_seq_err", - "alignment_err", - "octets_tx", - "octets_received", - "multicast_xmitted", - "broadcast_xmitted", - "multicast_rx", - "broadcast_rx", - "in_range_len_errors", - "out_of_range_len", - "too_long_errors", - "symbol_err", - "mac_control_tx", - "mac_control_rx", - "unsupported_op_rx", - "pause_ctrl_rx", - "pause_ctrl_tx", - - /* RFC2863 counters */ - "in_octets", - "in_ucast_pkts", - "in_discards", - "in_errors", - "in_unknown_protos", - "out_octets", - "out_ucast_pkts", - "out_discards", - "out_errors", - "in_multicast_pkts", - "in_broadcast_pkts", - "out_multicast_pkts", - "out_broadcast_pkts", - - /* RFC2819 counters */ - "drop_events", - "octets", - "pkts", - "broadcast_pkts", - "multicast_pkts", - "crc_align_errors", - "undersize_pkts", - "oversize_pkts", - "fragments", - "jabbers", - "collisions", - "p64octets", - "p65to127octets", - "p128to255octets", - "p256to511octets", - "p512to1023octets", - "p1024to1518octets", - "p1519to2047octets", - "p2048to4095octets", - "p4096to8191octets", - "p8192to10239octets", -}; - -#define NUM_IEEE_802_3_COUNTERS 19 -#define NUM_RFC_2863_COUNTERS 13 -#define NUM_RFC_2819_COUNTERS 21 -#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \ - NUM_RFC_2863_COUNTERS + \ - NUM_RFC_2819_COUNTERS) - -struct mlx5e_pport_stats { - __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS]; - __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS]; - __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS]; -}; - -static const char rq_stats_strings[][ETH_GSTRING_LEN] = { - "packets", - "bytes", - "csum_none", - "csum_sw", - "lro_packets", - "lro_bytes", - "wqe_err" -}; - -struct mlx5e_rq_stats { - u64 packets; - u64 bytes; - u64 csum_none; - u64 csum_sw; - u64 lro_packets; - u64 lro_bytes; - u64 wqe_err; -#define NUM_RQ_STATS 7 -}; - -static const char sq_stats_strings[][ETH_GSTRING_LEN] = { - "packets", - "bytes", - "tso_packets", - "tso_bytes", - "tso_inner_packets", - "tso_inner_bytes", - "csum_offload_inner", - "nop", - "csum_offload_none", - "stopped", - "wake", - "dropped", -}; - -struct mlx5e_sq_stats { - /* commonly accessed in data path */ - u64 packets; - u64 bytes; - u64 tso_packets; - u64 tso_bytes; - u64 tso_inner_packets; - u64 tso_inner_bytes; - u64 csum_offload_inner; - u64 nop; - /* less likely accessed in data path */ - u64 csum_offload_none; - u64 stopped; - u64 wake; - u64 dropped; -#define NUM_SQ_STATS 12 -}; - -struct mlx5e_stats { - struct mlx5e_vport_stats vport; - struct mlx5e_pport_stats pport; -}; - struct mlx5e_params { u8 log_sq_size; + u8 rq_wq_type; + u8 mpwqe_log_stride_sz; + u8 mpwqe_log_num_strides; u8 log_rq_size; u16 num_channels; u8 num_tc; + bool rx_cqe_compress_admin; + bool rx_cqe_compress; u16 rx_cq_moderation_usec; u16 rx_cq_moderation_pkts; u16 tx_cq_moderation_usec; @@ -311,6 +169,7 @@ struct mlx5e_params { u8 rss_hfunc; u8 toeplitz_hash_key[40]; u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; + bool vlan_strip_disable; #ifdef CONFIG_MLX5_CORE_EN_DCB struct ieee_ets ets; #endif @@ -331,6 +190,7 @@ struct mlx5e_tstamp { enum { 
MLX5E_RQ_STATE_POST_WQES_ENABLE, + MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, }; struct mlx5e_cq { @@ -343,32 +203,88 @@ struct mlx5e_cq { struct mlx5e_channel *channel; struct mlx5e_priv *priv; + /* cqe decompression */ + struct mlx5_cqe64 title; + struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE]; + u8 mini_arr_idx; + u16 decmprs_left; + u16 decmprs_wqe_counter; + /* control */ struct mlx5_wq_ctrl wq_ctrl; } ____cacheline_aligned_in_smp; +struct mlx5e_rq; +typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe); +typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, + u16 ix); + +struct mlx5e_dma_info { + struct page *page; + dma_addr_t addr; +}; + struct mlx5e_rq { /* data path */ struct mlx5_wq_ll wq; u32 wqe_sz; struct sk_buff **skb; + struct mlx5e_mpw_info *wqe_info; + __be32 mkey_be; + __be32 umr_mkey_be; struct device *pdev; struct net_device *netdev; struct mlx5e_tstamp *tstamp; struct mlx5e_rq_stats stats; struct mlx5e_cq cq; + mlx5e_fp_handle_rx_cqe handle_rx_cqe; + mlx5e_fp_alloc_wqe alloc_wqe; unsigned long state; int ix; /* control */ struct mlx5_wq_ctrl wq_ctrl; + u8 wq_type; + u32 mpwqe_stride_sz; + u32 mpwqe_num_strides; u32 rqn; struct mlx5e_channel *channel; struct mlx5e_priv *priv; } ____cacheline_aligned_in_smp; +struct mlx5e_umr_dma_info { + __be64 *mtt; + __be64 *mtt_no_align; + dma_addr_t mtt_addr; + struct mlx5e_dma_info *dma_info; +}; + +struct mlx5e_mpw_info { + union { + struct mlx5e_dma_info dma_info; + struct mlx5e_umr_dma_info umr; + }; + u16 consumed_strides; + u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE]; + + void (*dma_pre_sync)(struct device *pdev, + struct mlx5e_mpw_info *wi, + u32 wqe_offset, u32 len); + void (*add_skb_frag)(struct mlx5e_rq *rq, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 frag_offset, u32 len); + void (*copy_skb_header)(struct device *pdev, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 offset, + u32 headlen); + void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); +}; + struct mlx5e_tx_wqe_info { u32 num_bytes; u8 num_wqebbs; @@ -391,6 +307,11 @@ enum { MLX5E_SQ_STATE_BF_ENABLE, }; +struct mlx5e_ico_wqe_info { + u8 opcode; + u8 num_wqebbs; +}; + struct mlx5e_sq { /* data path */ @@ -432,6 +353,7 @@ struct mlx5e_sq { struct mlx5_uar uar; struct mlx5e_channel *channel; int tc; + struct mlx5e_ico_wqe_info *ico_wqe_info; } ____cacheline_aligned_in_smp; static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n) @@ -448,6 +370,7 @@ struct mlx5e_channel { /* data path */ struct mlx5e_rq rq; struct mlx5e_sq sq[MLX5E_MAX_NUM_TC]; + struct mlx5e_sq icosq; /* internal control operations */ struct napi_struct napi; struct device *pdev; struct net_device *netdev; @@ -474,42 +397,42 @@ enum mlx5e_traffic_types { MLX5E_TT_IPV6, MLX5E_TT_ANY, MLX5E_NUM_TT, + MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY, }; -#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY) +enum { + MLX5E_STATE_ASYNC_EVENTS_ENABLE, + MLX5E_STATE_OPENED, + MLX5E_STATE_DESTROYING, +}; -enum mlx5e_rqt_ix { - MLX5E_INDIRECTION_RQT, - MLX5E_SINGLE_RQ_RQT, - MLX5E_NUM_RQT, +struct mlx5e_vxlan_db { + spinlock_t lock; /* protect vxlan table */ + struct radix_tree_root tree; }; -struct mlx5e_eth_addr_info { +struct mlx5e_l2_rule { u8 addr[ETH_ALEN + 2]; - u32 tt_vec; - struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT]; + struct mlx5_flow_rule *rule; }; -#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE) - -struct mlx5e_eth_addr_db { - struct hlist_head 
netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE]; - struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE]; - struct mlx5e_eth_addr_info broadcast; - struct mlx5e_eth_addr_info allmulti; - struct mlx5e_eth_addr_info promisc; - bool broadcast_enabled; - bool allmulti_enabled; - bool promisc_enabled; +struct mlx5e_flow_table { + int num_groups; + struct mlx5_flow_table *t; + struct mlx5_flow_group **g; }; -enum { - MLX5E_STATE_ASYNC_EVENTS_ENABLE, - MLX5E_STATE_OPENED, - MLX5E_STATE_DESTROYING, +#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE) + +struct mlx5e_tc_table { + struct mlx5_flow_table *t; + + struct rhashtable_params ht_params; + struct rhashtable ht; }; -struct mlx5e_vlan_db { +struct mlx5e_vlan_table { + struct mlx5e_flow_table ft; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID]; struct mlx5_flow_rule *untagged_rule; @@ -517,29 +440,74 @@ struct mlx5e_vlan_db { bool filter_disabled; }; -struct mlx5e_vxlan_db { - spinlock_t lock; /* protect vxlan table */ - struct radix_tree_root tree; +struct mlx5e_l2_table { + struct mlx5e_flow_table ft; + struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE]; + struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE]; + struct mlx5e_l2_rule broadcast; + struct mlx5e_l2_rule allmulti; + struct mlx5e_l2_rule promisc; + bool broadcast_enabled; + bool allmulti_enabled; + bool promisc_enabled; }; -struct mlx5e_flow_table { - int num_groups; - struct mlx5_flow_table *t; - struct mlx5_flow_group **g; +/* L3/L4 traffic type classifier */ +struct mlx5e_ttc_table { + struct mlx5e_flow_table ft; + struct mlx5_flow_rule *rules[MLX5E_NUM_TT]; }; -struct mlx5e_tc_flow_table { - struct mlx5_flow_table *t; +#define ARFS_HASH_SHIFT BITS_PER_BYTE +#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE) +struct arfs_table { + struct mlx5e_flow_table ft; + struct mlx5_flow_rule *default_rule; + struct hlist_head rules_hash[ARFS_HASH_SIZE]; +}; - struct rhashtable_params ht_params; - struct rhashtable ht; +enum arfs_type { + ARFS_IPV4_TCP, + ARFS_IPV6_TCP, + ARFS_IPV4_UDP, + ARFS_IPV6_UDP, + ARFS_NUM_TYPES, +}; + +struct mlx5e_arfs_tables { + struct arfs_table arfs_tables[ARFS_NUM_TYPES]; + /* Protect aRFS rules list */ + spinlock_t arfs_lock; + struct list_head rules; + int last_filter_id; + struct workqueue_struct *wq; +}; + +/* NIC prio FTS */ +enum { + MLX5E_VLAN_FT_LEVEL = 0, + MLX5E_L2_FT_LEVEL, + MLX5E_TTC_FT_LEVEL, + MLX5E_ARFS_FT_LEVEL }; -struct mlx5e_flow_tables { - struct mlx5_flow_namespace *ns; - struct mlx5e_tc_flow_table tc; - struct mlx5e_flow_table vlan; - struct mlx5e_flow_table main; +struct mlx5e_flow_steering { + struct mlx5_flow_namespace *ns; + struct mlx5e_tc_table tc; + struct mlx5e_vlan_table vlan; + struct mlx5e_l2_table l2; + struct mlx5e_ttc_table ttc; + struct mlx5e_arfs_tables arfs; +}; + +struct mlx5e_direct_tir { + u32 tirn; + u32 rqtn; +}; + +enum { + MLX5E_TC_PRIO = 0, + MLX5E_NIC_PRIO }; struct mlx5e_priv { @@ -554,19 +522,20 @@ struct mlx5e_priv { u32 pdn; u32 tdn; struct mlx5_core_mkey mkey; + struct mlx5_core_mkey umr_mkey; struct mlx5e_rq drop_rq; struct mlx5e_channel **channel; u32 tisn[MLX5E_MAX_NUM_TC]; - u32 rqtn[MLX5E_NUM_RQT]; - u32 tirn[MLX5E_NUM_TT]; + u32 indir_rqtn; + u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; - struct mlx5e_flow_tables fts; - struct mlx5e_eth_addr_db eth_addr; - struct mlx5e_vlan_db vlan; + struct mlx5e_flow_steering fs; struct mlx5e_vxlan_db vxlan; struct mlx5e_params params; + struct workqueue_struct *wq; 
struct work_struct update_carrier_work; struct work_struct set_rx_mode_work; struct delayed_work update_stats_work; @@ -575,18 +544,7 @@ struct mlx5e_priv { struct net_device *netdev; struct mlx5e_stats stats; struct mlx5e_tstamp tstamp; -}; - -#define MLX5E_NET_IP_ALIGN 2 - -struct mlx5e_tx_wqe { - struct mlx5_wqe_ctrl_seg ctrl; - struct mlx5_wqe_eth_seg eth; -}; - -struct mlx5e_rx_wqe { - struct mlx5_wqe_srq_next_seg next; - struct mlx5_wqe_data_seg data; + u16 q_counter; }; enum mlx5e_link_mode { @@ -609,7 +567,7 @@ enum mlx5e_link_mode { MLX5E_100GBASE_KR4 = 22, MLX5E_100GBASE_LR4 = 23, MLX5E_100BASE_TX = 24, - MLX5E_100BASE_T = 25, + MLX5E_1000BASE_T = 25, MLX5E_10GBASE_T = 26, MLX5E_25GBASE_CR = 27, MLX5E_25GBASE_KR = 28, @@ -631,14 +589,35 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); + +void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); +int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); +int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix); +void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq); +void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe, + u16 byte_cnt, + struct mlx5e_mpw_info *wi, + struct sk_buff *skb); +void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe, + u16 byte_cnt, + struct mlx5e_mpw_info *wi, + struct sk_buff *skb); +void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_mpw_info *wi); +void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_mpw_info *wi); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); void mlx5e_update_stats(struct mlx5e_priv *priv); -int mlx5e_create_flow_tables(struct mlx5e_priv *priv); -void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv); -void mlx5e_init_eth_addr(struct mlx5e_priv *priv); +int mlx5e_create_flow_steering(struct mlx5e_priv *priv); +void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); +void mlx5e_init_l2_addr(struct mlx5e_priv *priv); +void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); void mlx5e_set_rx_mode_work(struct work_struct *work); void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, @@ -647,6 +626,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv); void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv); int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr); int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr); +void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val); int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); @@ -655,16 +635,20 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); -int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix); +int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd); + +int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix); void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device 
*netdev); -void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, +void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, + u32 *indirection_rqt, int len, int num_channels); +int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, - struct mlx5e_tx_wqe *wqe, int bf_sz) + struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz) { u16 ofst = MLX5_BF_OFFSET + sq->bf_offset; @@ -678,9 +662,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, */ wmb(); if (bf_sz) - __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz); + __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz); else - mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL); + mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL); /* flush the write-combining mapped buffer */ wmb(); @@ -701,12 +685,43 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) MLX5E_MAX_NUM_CHANNELS); } +static inline int mlx5e_get_mtt_octw(int npages) +{ + return ALIGN(npages, 8) / 2; +} + extern const struct ethtool_ops mlx5e_ethtool_ops; #ifdef CONFIG_MLX5_CORE_EN_DCB extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets); #endif +#ifndef CONFIG_RFS_ACCEL +static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) +{ + return 0; +} + +static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {} + +static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) +{ + return -ENOTSUPP; +} + +static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) +{ + return -ENOTSUPP; +} +#else +int mlx5e_arfs_create_tables(struct mlx5e_priv *priv); +void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv); +int mlx5e_arfs_enable(struct mlx5e_priv *priv); +int mlx5e_arfs_disable(struct mlx5e_priv *priv); +int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id); +#endif + u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c new file mode 100644 index 000000000000..3515e78ba68f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -0,0 +1,752 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifdef CONFIG_RFS_ACCEL + +#include <linux/hash.h> +#include <linux/mlx5/fs.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include "en.h" + +struct arfs_tuple { + __be16 etype; + u8 ip_proto; + union { + __be32 src_ipv4; + struct in6_addr src_ipv6; + }; + union { + __be32 dst_ipv4; + struct in6_addr dst_ipv6; + }; + __be16 src_port; + __be16 dst_port; +}; + +struct arfs_rule { + struct mlx5e_priv *priv; + struct work_struct arfs_work; + struct mlx5_flow_rule *rule; + struct hlist_node hlist; + int rxq; + /* Flow ID passed to ndo_rx_flow_steer */ + int flow_id; + /* Filter ID returned by ndo_rx_flow_steer */ + int filter_id; + struct arfs_tuple tuple; +}; + +#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \ + for (i = 0; i < ARFS_NUM_TYPES; i++) \ + mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j) + +#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \ + for (j = 0; j < ARFS_HASH_SIZE; j++) \ + hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist) + +static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type) +{ + switch (type) { + case ARFS_IPV4_TCP: + return MLX5E_TT_IPV4_TCP; + case ARFS_IPV4_UDP: + return MLX5E_TT_IPV4_UDP; + case ARFS_IPV6_TCP: + return MLX5E_TT_IPV6_TCP; + case ARFS_IPV6_UDP: + return MLX5E_TT_IPV6_UDP; + default: + return -EINVAL; + } +} + +static int arfs_disable(struct mlx5e_priv *priv) +{ + struct mlx5_flow_destination dest; + u32 *tirn = priv->indir_tirn; + int err = 0; + int tt; + int i; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + for (i = 0; i < ARFS_NUM_TYPES; i++) { + dest.tir_num = tirn[i]; + tt = arfs_get_tt(i); + /* Modify ttc rules destination to bypass the aRFS tables*/ + err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], + &dest); + if (err) { + netdev_err(priv->netdev, + "%s: modify ttc destination failed\n", + __func__); + return err; + } + } + return 0; +} + +static void arfs_del_rules(struct mlx5e_priv *priv); + +int mlx5e_arfs_disable(struct mlx5e_priv *priv) +{ + arfs_del_rules(priv); + + return arfs_disable(priv); +} + +int mlx5e_arfs_enable(struct mlx5e_priv *priv) +{ + struct mlx5_flow_destination dest; + int err = 0; + int tt; + int i; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + for (i = 0; i < ARFS_NUM_TYPES; i++) { + dest.ft = priv->fs.arfs.arfs_tables[i].ft.t; + tt = arfs_get_tt(i); + /* Modify ttc rules destination to point on the aRFS FTs */ + err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], + &dest); + if (err) { + netdev_err(priv->netdev, + "%s: modify ttc destination failed err=%d\n", + __func__, err); + arfs_disable(priv); + return err; + } + } + return 0; +} + +static void arfs_destroy_table(struct arfs_table *arfs_t) +{ + mlx5_del_flow_rule(arfs_t->default_rule); + mlx5e_destroy_flow_table(&arfs_t->ft); +} + +void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) +{ + int i; + + if (!(priv->netdev->hw_features & NETIF_F_NTUPLE)) + return; + + arfs_del_rules(priv); + destroy_workqueue(priv->fs.arfs.wq); + for (i = 0; i < ARFS_NUM_TYPES; i++) { + if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t)) + arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]); + } +} + +static int arfs_add_default_rule(struct mlx5e_priv *priv, + enum arfs_type type) +{ + struct arfs_table *arfs_t 
= &priv->fs.arfs.arfs_tables[type]; + struct mlx5_flow_destination dest; + u8 match_criteria_enable = 0; + u32 *tirn = priv->indir_tirn; + u32 *match_criteria; + u32 *match_value; + int err = 0; + + match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto out; + } + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + switch (type) { + case ARFS_IPV4_TCP: + dest.tir_num = tirn[MLX5E_TT_IPV4_TCP]; + break; + case ARFS_IPV4_UDP: + dest.tir_num = tirn[MLX5E_TT_IPV4_UDP]; + break; + case ARFS_IPV6_TCP: + dest.tir_num = tirn[MLX5E_TT_IPV6_TCP]; + break; + case ARFS_IPV6_UDP: + dest.tir_num = tirn[MLX5E_TT_IPV6_UDP]; + break; + default: + err = -EINVAL; + goto out; + } + + arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable, + match_criteria, match_value, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, + &dest); + if (IS_ERR(arfs_t->default_rule)) { + err = PTR_ERR(arfs_t->default_rule); + arfs_t->default_rule = NULL; + netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n", + __func__, type); + } +out: + kvfree(match_criteria); + kvfree(match_value); + return err; +} + +#define MLX5E_ARFS_NUM_GROUPS 2 +#define MLX5E_ARFS_GROUP1_SIZE BIT(12) +#define MLX5E_ARFS_GROUP2_SIZE BIT(0) +#define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\ + MLX5E_ARFS_GROUP2_SIZE) +static int arfs_create_groups(struct mlx5e_flow_table *ft, + enum arfs_type type) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + void *outer_headers_c; + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS, + sizeof(*ft->g), GFP_KERNEL); + in = mlx5_vzalloc(inlen); + if (!in || !ft->g) { + kvfree(ft->g); + kvfree(in); + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, + outer_headers); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype); + switch (type) { + case ARFS_IPV4_TCP: + case ARFS_IPV6_TCP: + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport); + break; + case ARFS_IPV4_UDP: + case ARFS_IPV6_UDP: + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport); + break; + default: + err = -EINVAL; + goto out; + } + + switch (type) { + case ARFS_IPV4_TCP: + case ARFS_IPV4_UDP: + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4); + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + break; + case ARFS_IPV6_TCP: + case ARFS_IPV6_UDP: + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + 0xff, 16); + memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xff, 16); + break; + default: + err = -EINVAL; + goto out; + } + + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_ARFS_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + 
ix += MLX5E_ARFS_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err; + ft->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; +out: + kvfree(in); + + return err; +} + +static int arfs_create_table(struct mlx5e_priv *priv, + enum arfs_type type) +{ + struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; + struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft; + int err; + + ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, + MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL); + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + + err = arfs_create_groups(ft, type); + if (err) + goto err; + + err = arfs_add_default_rule(priv, type); + if (err) + goto err; + + return 0; +err: + mlx5e_destroy_flow_table(ft); + return err; +} + +int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) +{ + int err = 0; + int i; + + if (!(priv->netdev->hw_features & NETIF_F_NTUPLE)) + return 0; + + spin_lock_init(&priv->fs.arfs.arfs_lock); + INIT_LIST_HEAD(&priv->fs.arfs.rules); + priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs"); + if (!priv->fs.arfs.wq) + return -ENOMEM; + + for (i = 0; i < ARFS_NUM_TYPES; i++) { + err = arfs_create_table(priv, i); + if (err) + goto err; + } + return 0; +err: + mlx5e_arfs_destroy_tables(priv); + return err; +} + +#define MLX5E_ARFS_EXPIRY_QUOTA 60 + +static void arfs_may_expire_flow(struct mlx5e_priv *priv) +{ + struct arfs_rule *arfs_rule; + struct hlist_node *htmp; + int quota = 0; + int i; + int j; + + HLIST_HEAD(del_list); + spin_lock_bh(&priv->fs.arfs.arfs_lock); + mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) { + if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA) + break; + if (!work_pending(&arfs_rule->arfs_work) && + rps_may_expire_flow(priv->netdev, + arfs_rule->rxq, arfs_rule->flow_id, + arfs_rule->filter_id)) { + hlist_del_init(&arfs_rule->hlist); + hlist_add_head(&arfs_rule->hlist, &del_list); + } + } + spin_unlock_bh(&priv->fs.arfs.arfs_lock); + hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) { + if (arfs_rule->rule) + mlx5_del_flow_rule(arfs_rule->rule); + hlist_del(&arfs_rule->hlist); + kfree(arfs_rule); + } +} + +static void arfs_del_rules(struct mlx5e_priv *priv) +{ + struct hlist_node *htmp; + struct arfs_rule *rule; + int i; + int j; + + HLIST_HEAD(del_list); + spin_lock_bh(&priv->fs.arfs.arfs_lock); + mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) { + hlist_del_init(&rule->hlist); + hlist_add_head(&rule->hlist, &del_list); + } + spin_unlock_bh(&priv->fs.arfs.arfs_lock); + + hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) { + cancel_work_sync(&rule->arfs_work); + if (rule->rule) + mlx5_del_flow_rule(rule->rule); + hlist_del(&rule->hlist); + kfree(rule); + } +} + +static struct hlist_head * +arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port, + __be16 dst_port) +{ + unsigned long l; + int bucket_idx; + + l = (__force unsigned long)src_port | + ((__force unsigned long)dst_port << 2); + + bucket_idx = hash_long(l, ARFS_HASH_SHIFT); + + return &arfs_t->rules_hash[bucket_idx]; +} + +static u8 arfs_get_ip_proto(const struct sk_buff *skb) +{ + return (skb->protocol == htons(ETH_P_IP)) ? 
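
arfs_hash_bucket() above folds the two network-order ports into a single key and feeds it to hash_long(). A rough user-space analogue; the multiplicative hash and the shift width below are stand-ins for illustration, since hash_long() and ARFS_HASH_SHIFT come from kernel headers not shown here:

	#include <stdio.h>
	/* demo_hash_long approximates the kernel's 64-bit hash_long(). */
	static unsigned int demo_hash_long(unsigned long val, unsigned int bits)
	{
		return (unsigned int)((val * 0x9E3779B97F4A7C15UL) >> (64 - bits));
	}
	int main(void)
	{
		unsigned short src_port = 0xC350, dst_port = 0x0050; /* example ports */
		unsigned long l = (unsigned long)src_port |
				  ((unsigned long)dst_port << 2);
		/* ARFS_HASH_SHIFT picks the bucket count; 4 is an assumed value */
		printf("bucket_idx = %u\n", demo_hash_long(l, 4));
		return 0;
	}
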
+ ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr; +} + +static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, + u8 ip_proto, __be16 etype) +{ + if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP) + return &arfs->arfs_tables[ARFS_IPV4_TCP]; + if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP) + return &arfs->arfs_tables[ARFS_IPV4_UDP]; + if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP) + return &arfs->arfs_tables[ARFS_IPV6_TCP]; + if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP) + return &arfs->arfs_tables[ARFS_IPV6_UDP]; + + return NULL; +} + +static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, + struct arfs_rule *arfs_rule) +{ + struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; + struct arfs_tuple *tuple = &arfs_rule->tuple; + struct mlx5_flow_rule *rule = NULL; + struct mlx5_flow_destination dest; + struct arfs_table *arfs_table; + u8 match_criteria_enable = 0; + struct mlx5_flow_table *ft; + u32 *match_criteria; + u32 *match_value; + int err = 0; + + match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto out; + } + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.ethertype); + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, + ntohs(tuple->etype)); + arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype); + if (!arfs_table) { + err = -EINVAL; + goto out; + } + + ft = arfs_table->ft.t; + if (tuple->ip_proto == IPPROTO_TCP) { + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.tcp_dport); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.tcp_sport); + MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport, + ntohs(tuple->dst_port)); + MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport, + ntohs(tuple->src_port)); + } else { + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.udp_dport); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.udp_sport); + MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport, + ntohs(tuple->dst_port)); + MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport, + ntohs(tuple->src_port)); + } + if (tuple->etype == htons(ETH_P_IP)) { + memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), + &tuple->src_ipv4, + 4); + memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &tuple->dst_ipv4, + 4); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4); + } else { + memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + &tuple->src_ipv6, + 16); + memcpy(MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &tuple->dst_ipv6, + 16); + memset(MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), + 0xff, + 16); + memset(MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + 0xff, + 16); + } + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest.tir_num = 
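
A note on byte order in arfs_add_rule() above: ethertype and L4 ports are written with MLX5_SET(), which takes host-order values, hence the ntohs() calls, while IPv4/IPv6 addresses are memcpy()ed into the match value and stay in network order. A tiny sketch of the conversion being relied on:

	#include <stdio.h>
	#include <arpa/inet.h>
	int main(void)
	{
		unsigned short wire_port = htons(443);	/* as found in the tuple */
		/* MLX5_SET() wants host order, so the driver converts first */
		printf("wire=0x%04x host=%u\n", wire_port, ntohs(wire_port));
		return 0;
	}
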
priv->direct_tir[arfs_rule->rxq].tirn; + rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, + match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, + &dest); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n", + __func__, arfs_rule->filter_id, arfs_rule->rxq, err); + } + +out: + kvfree(match_criteria); + kvfree(match_value); + return err ? ERR_PTR(err) : rule; +} + +static void arfs_modify_rule_rq(struct mlx5e_priv *priv, + struct mlx5_flow_rule *rule, u16 rxq) +{ + struct mlx5_flow_destination dst; + int err = 0; + + dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dst.tir_num = priv->direct_tir[rxq].tirn; + err = mlx5_modify_rule_destination(rule, &dst); + if (err) + netdev_warn(priv->netdev, + "Failed to modify aRFS rule destination to rq=%d\n", rxq); +} + +static void arfs_handle_work(struct work_struct *work) +{ + struct arfs_rule *arfs_rule = container_of(work, + struct arfs_rule, + arfs_work); + struct mlx5e_priv *priv = arfs_rule->priv; + struct mlx5_flow_rule *rule; + + mutex_lock(&priv->state_lock); + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + spin_lock_bh(&priv->fs.arfs.arfs_lock); + hlist_del(&arfs_rule->hlist); + spin_unlock_bh(&priv->fs.arfs.arfs_lock); + + mutex_unlock(&priv->state_lock); + kfree(arfs_rule); + goto out; + } + mutex_unlock(&priv->state_lock); + + if (!arfs_rule->rule) { + rule = arfs_add_rule(priv, arfs_rule); + if (IS_ERR(rule)) + goto out; + arfs_rule->rule = rule; + } else { + arfs_modify_rule_rq(priv, arfs_rule->rule, + arfs_rule->rxq); + } +out: + arfs_may_expire_flow(priv); +} + +/* return L4 destination port from ip4/6 packets */ +static __be16 arfs_get_dst_port(const struct sk_buff *skb) +{ + char *transport_header; + + transport_header = skb_transport_header(skb); + if (arfs_get_ip_proto(skb) == IPPROTO_TCP) + return ((struct tcphdr *)transport_header)->dest; + return ((struct udphdr *)transport_header)->dest; +} + +/* return L4 source port from ip4/6 packets */ +static __be16 arfs_get_src_port(const struct sk_buff *skb) +{ + char *transport_header; + + transport_header = skb_transport_header(skb); + if (arfs_get_ip_proto(skb) == IPPROTO_TCP) + return ((struct tcphdr *)transport_header)->source; + return ((struct udphdr *)transport_header)->source; +} + +static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, + struct arfs_table *arfs_t, + const struct sk_buff *skb, + u16 rxq, u32 flow_id) +{ + struct arfs_rule *rule; + struct arfs_tuple *tuple; + + rule = kzalloc(sizeof(*rule), GFP_ATOMIC); + if (!rule) + return NULL; + + rule->priv = priv; + rule->rxq = rxq; + INIT_WORK(&rule->arfs_work, arfs_handle_work); + + tuple = &rule->tuple; + tuple->etype = skb->protocol; + if (tuple->etype == htons(ETH_P_IP)) { + tuple->src_ipv4 = ip_hdr(skb)->saddr; + tuple->dst_ipv4 = ip_hdr(skb)->daddr; + } else { + memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr)); + memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)); + } + tuple->ip_proto = arfs_get_ip_proto(skb); + tuple->src_port = arfs_get_src_port(skb); + tuple->dst_port = arfs_get_dst_port(skb); + + rule->flow_id = flow_id; + rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER; + + hlist_add_head(&rule->hlist, + arfs_hash_bucket(arfs_t, tuple->src_port, + tuple->dst_port)); + return rule; +} + +static bool arfs_cmp_ips(struct arfs_tuple *tuple, + const struct sk_buff *skb) +{ + if (tuple->etype == htons(ETH_P_IP) && +
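
arfs_alloc_rule() above assigns filter ids with last_filter_id++ % RPS_NO_FILTER, which keeps every valid id strictly below the RPS_NO_FILTER sentinel (0xffff in linux/netdevice.h, as I read it), so rps_may_expire_flow() can never confuse a live filter with "no filter". A quick check of the wraparound:

	#include <stdio.h>
	#define RPS_NO_FILTER 0xffff	/* sentinel; value assumed from netdevice.h */
	int main(void)
	{
		unsigned int last_filter_id = 0xfffe;
		for (int i = 0; i < 3; i++) {
			unsigned int id = last_filter_id++ % RPS_NO_FILTER;
			printf("filter_id=%#x\n", id);	/* 0xfffe, 0, 0x1: never 0xffff */
		}
		return 0;
	}
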
tuple->src_ipv4 == ip_hdr(skb)->saddr && + tuple->dst_ipv4 == ip_hdr(skb)->daddr) + return true; + if (tuple->etype == htons(ETH_P_IPV6) && + (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr, + sizeof(struct in6_addr))) && + (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr, + sizeof(struct in6_addr)))) + return true; + return false; +} + +static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t, + const struct sk_buff *skb) +{ + struct arfs_rule *arfs_rule; + struct hlist_head *head; + __be16 src_port = arfs_get_src_port(skb); + __be16 dst_port = arfs_get_dst_port(skb); + + head = arfs_hash_bucket(arfs_t, src_port, dst_port); + hlist_for_each_entry(arfs_rule, head, hlist) { + if (arfs_rule->tuple.src_port == src_port && + arfs_rule->tuple.dst_port == dst_port && + arfs_cmp_ips(&arfs_rule->tuple, skb)) { + return arfs_rule; + } + } + + return NULL; +} + +int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq_index, u32 flow_id) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; + struct arfs_table *arfs_t; + struct arfs_rule *arfs_rule; + + if (skb->protocol != htons(ETH_P_IP) && + skb->protocol != htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + + arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); + if (!arfs_t) + return -EPROTONOSUPPORT; + + spin_lock_bh(&arfs->arfs_lock); + arfs_rule = arfs_find_rule(arfs_t, skb); + if (arfs_rule) { + if (arfs_rule->rxq == rxq_index) { + spin_unlock_bh(&arfs->arfs_lock); + return arfs_rule->filter_id; + } + arfs_rule->rxq = rxq_index; + } else { + arfs_rule = arfs_alloc_rule(priv, arfs_t, skb, + rxq_index, flow_id); + if (!arfs_rule) { + spin_unlock_bh(&arfs->arfs_lock); + return -ENOMEM; + } + } + queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work); + spin_unlock_bh(&arfs->arfs_lock); + return arfs_rule->filter_id; +} +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c index 2018eebe1531..847a8f3ac2b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c @@ -93,6 +93,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) /* RX HW timestamp */ switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: + /* Reset CQE compression to Admin default */ + mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_admin); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: @@ -108,6 +110,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* Disable CQE compression */ + mlx5e_modify_rx_cqe_compression(priv, false); config.rx_filter = HWTSTAMP_FILTER_ALL; break; default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 3036f279a8fd..b2db180ae2a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -174,8 +174,14 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + int i; pfc->pfc_cap = mlx5_max_tc(mdev) + 1; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause); + pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, 
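
The en_clock.c hunk above ties RX timestamping to CQE compression: compressed CQEs carry no per-packet timestamp, so enabling any RX filter forces compression off (and the filter widens to HWTSTAMP_FILTER_ALL), while HWTSTAMP_FILTER_NONE restores the admin default. User space reaches this path through the SIOCSHWTSTAMP ioctl; a minimal sketch, the interface name being an assumption:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/net_tstamp.h>
	int main(void)
	{
		struct hwtstamp_config cfg = { .rx_filter = HWTSTAMP_FILTER_ALL };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed ifname */
		ifr.ifr_data = (char *)&cfg;
		if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)	/* lands in mlx5e_hwstamp_set() */
			perror("SIOCSHWTSTAMP");
		close(fd);
		return 0;
	}
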
rx_pause); + } return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 68834b715f6c..fc7dcc03b1de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -138,10 +138,10 @@ static const struct { [MLX5E_100BASE_TX] = { .speed = 100, }, - [MLX5E_100BASE_T] = { - .supported = SUPPORTED_100baseT_Full, - .advertised = ADVERTISED_100baseT_Full, - .speed = 100, + [MLX5E_1000BASE_T] = { + .supported = SUPPORTED_1000baseT_Full, + .advertised = ADVERTISED_1000baseT_Full, + .speed = 1000, }, [MLX5E_10GBASE_T] = { .supported = SUPPORTED_10000baseT_Full, @@ -165,26 +165,112 @@ static const struct { }, }; +static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u8 pfc_en_tx; + u8 pfc_en_rx; + int err; + + err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx); + + return err ? 0 : pfc_en_tx | pfc_en_rx; +} + +#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter)) +#define MLX5E_NUM_RQ_STATS(priv) \ + (NUM_RQ_STATS * priv->params.num_channels * \ + test_bit(MLX5E_STATE_OPENED, &priv->state)) +#define MLX5E_NUM_SQ_STATS(priv) \ + (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \ + test_bit(MLX5E_STATE_OPENED, &priv->state)) +#define MLX5E_NUM_PFC_COUNTERS(priv) hweight8(mlx5e_query_pfc_combined(priv)) + static int mlx5e_get_sset_count(struct net_device *dev, int sset) { struct mlx5e_priv *priv = netdev_priv(dev); switch (sset) { case ETH_SS_STATS: - return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + - priv->params.num_channels * NUM_RQ_STATS + - priv->params.num_channels * priv->params.num_tc * - NUM_SQ_STATS; + return NUM_SW_COUNTERS + + MLX5E_NUM_Q_CNTRS(priv) + + NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + + MLX5E_NUM_RQ_STATS(priv) + + MLX5E_NUM_SQ_STATS(priv) + + MLX5E_NUM_PFC_COUNTERS(priv); /* fallthrough */ default: return -EOPNOTSUPP; } } +static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) +{ + int i, j, tc, prio, idx = 0; + unsigned long pfc_combined; + + /* SW counters */ + for (i = 0; i < NUM_SW_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].name); + + /* Q counters */ + for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].name); + + /* VPORT counters */ + for (i = 0; i < NUM_VPORT_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + vport_stats_desc[i].name); + + /* PPORT counters */ + for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_802_3_stats_desc[i].name); + + for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_2863_stats_desc[i].name); + + for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_2819_stats_desc[i].name); + + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s", + prio, + pport_per_prio_traffic_stats_desc[i].name); + } + + pfc_combined = mlx5e_query_pfc_combined(priv); + for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + sprintf(data + (idx++) * ETH_GSTRING_LEN, "prio%d_%s", + prio, pport_per_prio_pfc_stats_desc[i].name); + } + } + + if (!test_bit(MLX5E_STATE_OPENED, 
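
mlx5e_query_pfc_combined() above ORs the TX and RX PFC masks, so a priority is reported if PFC is enabled in either direction, and MLX5E_NUM_PFC_COUNTERS multiplies the per-prio PFC stat set by the popcount of that combined mask. Illustrative arithmetic (the masks are made-up examples):

	#include <stdio.h>
	int main(void)
	{
		unsigned char pfc_en_tx = 0x0a, pfc_en_rx = 0x03;	/* example masks */
		unsigned long combined = pfc_en_tx | pfc_en_rx;		/* 0x0b */
		/* one PFC counter group per set bit, like hweight8() in the driver */
		printf("PFC counter sets exposed: %d\n", __builtin_popcountl(combined));
		return 0;
	}
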
&priv->state)) + return; + + /* per channel counters */ + for (i = 0; i < priv->params.num_channels; i++) + for (j = 0; j < NUM_RQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i, + rq_stats_desc[j].name); + + for (tc = 0; tc < priv->params.num_tc; tc++) + for (i = 0; i < priv->params.num_channels; i++) + for (j = 0; j < NUM_SQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + "tx%d_%s", + priv->channeltc_to_txq_map[i][tc], + sq_stats_desc[j].name); +} + static void mlx5e_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { - int i, j, tc, idx = 0; struct mlx5e_priv *priv = netdev_priv(dev); switch (stringset) { @@ -195,30 +281,7 @@ static void mlx5e_get_strings(struct net_device *dev, break; case ETH_SS_STATS: - /* VPORT counters */ - for (i = 0; i < NUM_VPORT_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - vport_strings[i]); - - /* PPORT counters */ - for (i = 0; i < NUM_PPORT_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_strings[i]); - - /* per channel counters */ - for (i = 0; i < priv->params.num_channels; i++) - for (j = 0; j < NUM_RQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - "rx%d_%s", i, rq_stats_strings[j]); - - for (tc = 0; tc < priv->params.num_tc; tc++) - for (i = 0; i < priv->params.num_channels; i++) - for (j = 0; j < NUM_SQ_STATS; j++) - sprintf(data + - (idx++) * ETH_GSTRING_LEN, - "tx%d_%s", - priv->channeltc_to_txq_map[i][tc], - sq_stats_strings[j]); + mlx5e_fill_stats_strings(priv, data); break; } } @@ -227,7 +290,8 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct mlx5e_priv *priv = netdev_priv(dev); - int i, j, tc, idx = 0; + int i, j, tc, prio, idx = 0; + unsigned long pfc_combined; if (!data) return; @@ -237,33 +301,68 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, mlx5e_update_stats(priv); mutex_unlock(&priv->state_lock); + for (i = 0; i < NUM_SW_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, + sw_stats_desc, i); + + for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) + data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, + q_stats_desc, i); + for (i = 0; i < NUM_VPORT_COUNTERS; i++) - data[idx++] = ((u64 *)&priv->stats.vport)[i]; + data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out, + vport_stats_desc, i); - for (i = 0; i < NUM_PPORT_COUNTERS; i++) - data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]); + for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters, + pport_802_3_stats_desc, i); + + for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters, + pport_2863_stats_desc, i); + + for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, + pport_2819_stats_desc, i); + + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], + pport_per_prio_traffic_stats_desc, i); + } + + pfc_combined = mlx5e_query_pfc_combined(priv); + for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], + pport_per_prio_pfc_stats_desc, i); + } + } + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return; /* per channel 
counters */ for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_RQ_STATS; j++) - data[idx++] = !test_bit(MLX5E_STATE_OPENED, - &priv->state) ? 0 : - ((u64 *)&priv->channel[i]->rq.stats)[j]; + data[idx++] = + MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats, + rq_stats_desc, j); for (tc = 0; tc < priv->params.num_tc; tc++) for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_SQ_STATS; j++) - data[idx++] = !test_bit(MLX5E_STATE_OPENED, - &priv->state) ? 0 : - ((u64 *)&priv->channel[i]->sq[tc].stats)[j]; + data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats, + sq_stats_desc, j); } static void mlx5e_get_ringparam(struct net_device *dev, struct ethtool_ringparam *param) { struct mlx5e_priv *priv = netdev_priv(dev); + int rq_wq_type = priv->params.rq_wq_type; - param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE; + param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type); param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE; param->rx_pending = 1 << priv->params.log_rq_size; param->tx_pending = 1 << priv->params.log_sq_size; @@ -274,6 +373,7 @@ static int mlx5e_set_ringparam(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); bool was_opened; + int rq_wq_type = priv->params.rq_wq_type; u16 min_rx_wqes; u8 log_rq_size; u8 log_sq_size; @@ -289,16 +389,16 @@ static int mlx5e_set_ringparam(struct net_device *dev, __func__); return -EINVAL; } - if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) { + if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) { netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n", __func__, param->rx_pending, - 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE); + 1 << mlx5_min_log_rq_size(rq_wq_type)); return -EINVAL; } - if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) { + if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) { netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n", __func__, param->rx_pending, - 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE); + 1 << mlx5_max_log_rq_size(rq_wq_type)); return -EINVAL; } if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) { @@ -316,8 +416,7 @@ static int mlx5e_set_ringparam(struct net_device *dev, log_rq_size = order_base_2(param->rx_pending); log_sq_size = order_base_2(param->tx_pending); - min_rx_wqes = min_t(u16, param->rx_pending - 1, - MLX5E_PARAMS_DEFAULT_MIN_RX_WQES); + min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending); if (log_rq_size == priv->params.log_rq_size && log_sq_size == priv->params.log_sq_size && @@ -357,6 +456,7 @@ static int mlx5e_set_channels(struct net_device *dev, struct mlx5e_priv *priv = netdev_priv(dev); int ncv = mlx5e_get_max_num_channels(priv->mdev); unsigned int count = ch->combined_count; + bool arfs_enabled; bool was_opened; int err = 0; @@ -385,13 +485,27 @@ static int mlx5e_set_channels(struct net_device *dev, if (was_opened) mlx5e_close_locked(dev); + arfs_enabled = dev->features & NETIF_F_NTUPLE; + if (arfs_enabled) + mlx5e_arfs_disable(priv); + priv->params.num_channels = count; - mlx5e_build_default_indir_rqt(priv->params.indirection_rqt, + mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt, MLX5E_INDIR_RQT_SIZE, count); if (was_opened) err = mlx5e_open_locked(dev); + if (err) + goto out; + if (arfs_enabled) { + err = mlx5e_arfs_enable(priv); + if (err) + netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n", + __func__, err); + } + +out: mutex_unlock(&priv->state_lock); return err; @@ -499,6 +613,25 @@ static u32 
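
mlx5e_set_ringparam() above validates rx_pending/tx_pending against per-WQ-type bounds and then passes them through order_base_2(), so a request that is not a power of two is rounded up before being stored as a log size. A user-space rendering of that rounding (order_base_2() is the kernel helper; the loop below is an equivalent sketch for v >= 1):

	#include <stdio.h>
	static unsigned int order_base_2_demo(unsigned int v)
	{
		unsigned int order = 0;
		while ((1u << order) < v)	/* ceil(log2(v)) */
			order++;
		return order;
	}
	int main(void)
	{
		printf("%u %u %u\n", order_base_2_demo(512),
		       order_base_2_demo(1000), order_base_2_demo(1024)); /* 9 10 10 */
		return 0;
	}
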
ptys2ethtool_supported_port(u32 eth_proto_cap) return 0; } +int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) +{ + u32 max_speed = 0; + u32 proto_cap; + int err; + int i; + + err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN); + if (err) + return err; + + for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) + if (proto_cap & MLX5E_PROT_MASK(i)) + max_speed = max(max_speed, ptys2ethtool_table[i].speed); + + *speed = max_speed; + return 0; +} + static void get_speed_duplex(struct net_device *netdev, u32 eth_proto_oper, struct ethtool_cmd *cmd) @@ -727,9 +860,8 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) MLX5_SET(modify_tir_in, in, bitmask.hash, 1); mlx5e_build_tir_ctx_hash(tirc, priv); - for (i = 0; i < MLX5E_NUM_TT; i++) - if (IS_HASHING_TT(i)) - mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen); + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) + mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen); } static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, @@ -751,9 +883,11 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, mutex_lock(&priv->state_lock); if (indir) { + u32 rqtn = priv->indir_rqtn; + memcpy(priv->params.indirection_rqt, indir, sizeof(priv->params.indirection_rqt)); - mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); + mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); } if (key) @@ -1036,6 +1170,108 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return mlx5_set_port_wol(mdev, mlx5_wol_mode); } +static int mlx5e_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + u16 beacon_duration; + + if (!MLX5_CAP_GEN(mdev, beacon_led)) + return -EOPNOTSUPP; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + beacon_duration = MLX5_BEACON_DURATION_INF; + break; + case ETHTOOL_ID_INACTIVE: + beacon_duration = MLX5_BEACON_DURATION_OFF; + break; + default: + return -EOPNOTSUPP; + } + + return mlx5_set_port_beacon(mdev, beacon_duration); +} + +static int mlx5e_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *dev = priv->mdev; + int size_read = 0; + u8 data[4]; + + size_read = mlx5_query_module_eeprom(dev, 0, 2, data); + if (size_read < 2) + return -EIO; + + /* data[0] = identifier byte */ + switch (data[0]) { + case MLX5_MODULE_ID_QSFP: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + case MLX5_MODULE_ID_QSFP_PLUS: + case MLX5_MODULE_ID_QSFP28: + /* data[1] = revision id */ + if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) { + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + } + break; + case MLX5_MODULE_ID_SFP: + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + break; + default: + netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", + __func__, data[0]); + return -EINVAL; + } + + return 0; +} + +static int mlx5e_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + int offset = ee->offset; + int size_read; + int i = 0; + + if (!ee->len) + return -EINVAL; + + memset(data, 
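
mlx5e_get_module_info() above keys off the SFF-8024 identifier byte at EEPROM offset 0: SFP maps to the SFF-8472 layout, plain QSFP to SFF-8436, and QSFP+/QSFP28 to SFF-8636 when the revision byte allows. The numeric values below mirror the MLX5_MODULE_ID_* constants as I read them and should be treated as assumptions:

	#include <stdio.h>
	enum { ID_SFP = 0x03, ID_QSFP = 0x0C, ID_QSFP_PLUS = 0x0D, ID_QSFP28 = 0x11 };
	static const char *modinfo_type(unsigned char id, unsigned char rev)
	{
		switch (id) {
		case ID_QSFP:
			return "SFF-8436";
		case ID_QSFP_PLUS:
		case ID_QSFP28:
			/* QSFP28, or QSFP+ with rev >= 0x3, uses the 8636 map */
			return (id == ID_QSFP28 || rev >= 0x3) ? "SFF-8636" : "SFF-8436";
		case ID_SFP:
			return "SFF-8472";
		default:
			return "unknown";
		}
	}
	int main(void)
	{
		printf("%s %s\n", modinfo_type(0x03, 0), modinfo_type(0x0D, 0x2));
		return 0;
	}
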
0, ee->len); + + while (i < ee->len) { + size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i, + data + i); + + if (!size_read) + /* Done reading */ + return 0; + + if (size_read < 0) { + netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n", + __func__, size_read); + return 0; + } + + i += size_read; + offset += size_read; + } + + return 0; +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1060,6 +1296,9 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_pauseparam = mlx5e_get_pauseparam, .set_pauseparam = mlx5e_set_pauseparam, .get_ts_info = mlx5e_get_ts_info, + .set_phys_id = mlx5e_set_phys_id, .get_wol = mlx5e_get_wol, .set_wol = mlx5e_set_wol, + .get_module_info = mlx5e_get_module_info, + .get_module_eeprom = mlx5e_get_module_eeprom, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index d00a24203410..b32740092854 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -37,7 +37,10 @@ #include <linux/mlx5/fs.h> #include "en.h" -#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) +static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, + struct mlx5e_l2_rule *ai, int type); +static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, + struct mlx5e_l2_rule *ai); enum { MLX5E_FULLMATCH = 0, @@ -58,21 +61,21 @@ enum { MLX5E_ACTION_DEL = 2, }; -struct mlx5e_eth_addr_hash_node { +struct mlx5e_l2_hash_node { struct hlist_node hlist; u8 action; - struct mlx5e_eth_addr_info ai; + struct mlx5e_l2_rule ai; }; -static inline int mlx5e_hash_eth_addr(u8 *addr) +static inline int mlx5e_hash_l2(u8 *addr) { return addr[5]; } -static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) +static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr) { - struct mlx5e_eth_addr_hash_node *hn; - int ix = mlx5e_hash_eth_addr(addr); + struct mlx5e_l2_hash_node *hn; + int ix = mlx5e_hash_l2(addr); int found = 0; hlist_for_each_entry(hn, &hash[ix], hlist) @@ -96,371 +99,12 @@ static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr) hlist_add_head(&hn->hlist, &hash[ix]); } -static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn) +static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn) { hlist_del(&hn->hlist); kfree(hn); } -static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai) -{ - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV6)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]); - - if (ai->tt_vec & BIT(MLX5E_TT_IPV4)) - 
mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]); - - if (ai->tt_vec & BIT(MLX5E_TT_ANY)) - mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]); -} - -static int mlx5e_get_eth_addr_type(u8 *addr) -{ - if (is_unicast_ether_addr(addr)) - return MLX5E_UC; - - if ((addr[0] == 0x01) && - (addr[1] == 0x00) && - (addr[2] == 0x5e) && - !(addr[3] & 0x80)) - return MLX5E_MC_IPV4; - - if ((addr[0] == 0x33) && - (addr[1] == 0x33)) - return MLX5E_MC_IPV6; - - return MLX5E_MC_OTHER; -} - -static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type) -{ - int eth_addr_type; - u32 ret; - - switch (type) { - case MLX5E_FULLMATCH: - eth_addr_type = mlx5e_get_eth_addr_type(ai->addr); - switch (eth_addr_type) { - case MLX5E_UC: - ret = - BIT(MLX5E_TT_IPV4_TCP) | - BIT(MLX5E_TT_IPV6_TCP) | - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4_IPSEC_AH) | - BIT(MLX5E_TT_IPV6_IPSEC_AH) | - BIT(MLX5E_TT_IPV4_IPSEC_ESP) | - BIT(MLX5E_TT_IPV6_IPSEC_ESP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - - case MLX5E_MC_IPV4: - ret = - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV4) | - 0; - break; - - case MLX5E_MC_IPV6: - ret = - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV6) | - 0; - break; - - case MLX5E_MC_OTHER: - ret = - BIT(MLX5E_TT_ANY) | - 0; - break; - } - - break; - - case MLX5E_ALLMULTI: - ret = - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - - default: /* MLX5E_PROMISC */ - ret = - BIT(MLX5E_TT_IPV4_TCP) | - BIT(MLX5E_TT_IPV6_TCP) | - BIT(MLX5E_TT_IPV4_UDP) | - BIT(MLX5E_TT_IPV6_UDP) | - BIT(MLX5E_TT_IPV4_IPSEC_AH) | - BIT(MLX5E_TT_IPV6_IPSEC_AH) | - BIT(MLX5E_TT_IPV4_IPSEC_ESP) | - BIT(MLX5E_TT_IPV6_IPSEC_ESP) | - BIT(MLX5E_TT_IPV4) | - BIT(MLX5E_TT_IPV6) | - BIT(MLX5E_TT_ANY) | - 0; - break; - } - - return ret; -} - -static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai, - int type, u32 *mc, u32 *mv) -{ - struct mlx5_flow_destination dest; - u8 match_criteria_enable = 0; - struct mlx5_flow_rule **rule_p; - struct mlx5_flow_table *ft = priv->fts.main.t; - u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, - outer_headers.dmac_47_16); - u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv, - outer_headers.dmac_47_16); - u32 *tirn = priv->tirn; - u32 tt_vec; - int err = 0; - - dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - - switch (type) { - case MLX5E_FULLMATCH: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - eth_broadcast_addr(mc_dmac); - ether_addr_copy(mv_dmac, ai->addr); - break; - - case MLX5E_ALLMULTI: - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - mc_dmac[0] = 0x01; - mv_dmac[0] = 0x01; - break; - - case MLX5E_PROMISC: - break; - } - - tt_vec = mlx5e_get_tt_vec(ai, type); - - if (tt_vec & BIT(MLX5E_TT_ANY)) { - rule_p = &ai->ft_rule[MLX5E_TT_ANY]; - dest.tir_num = tirn[MLX5E_TT_ANY]; - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_ANY); - } - - match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - - if (tt_vec & BIT(MLX5E_TT_IPV4)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV4]; - dest.tir_num = tirn[MLX5E_TT_IPV4]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IP); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - 
MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV4); - } - - if (tt_vec & BIT(MLX5E_TT_IPV6)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV6]; - dest.tir_num = tirn[MLX5E_TT_IPV6]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IPV6); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV6); - } - - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); - MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP); - - if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP]; - dest.tir_num = tirn[MLX5E_TT_IPV4_UDP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IP); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP); - } - - if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP]; - dest.tir_num = tirn[MLX5E_TT_IPV6_UDP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IPV6); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP); - } - - MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP); - - if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP]; - dest.tir_num = tirn[MLX5E_TT_IPV4_TCP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IP); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP); - } - - if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP]; - dest.tir_num = tirn[MLX5E_TT_IPV6_TCP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IPV6); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - - ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP); - } - - MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH); - - if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]; - dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IP); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH); - } - - if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]; - dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IPV6); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH); - } - - MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP); - - 
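
The deletions here are the point of the refactor: the old main table expanded every MAC address into up to eleven per-traffic-type rules, while the new split installs one L2 rule per MAC that forwards to a shared TTC table holding the eleven classifier rules exactly once. A back-of-envelope comparison, with counts chosen only for illustration:

	#include <stdio.h>
	int main(void)
	{
		/* 11 = TCP/UDP/AH/ESP x {v4,v6} + plain v4 + plain v6 + ANY */
		int macs = 64, traffic_types = 11;
		printf("old main table: %d rules\n", macs * traffic_types);
		printf("new l2 + ttc:   %d rules\n", macs + traffic_types);
		return 0;
	}
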
if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]; - dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IP); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP); - } - - if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) { - rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]; - dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP]; - MLX5_SET(fte_match_param, mv, outer_headers.ethertype, - ETH_P_IPV6); - *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv, - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, - MLX5_FS_DEFAULT_FLOW_TAG, &dest); - if (IS_ERR_OR_NULL(*rule_p)) - goto err_del_ai; - ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP); - } - - return 0; - -err_del_ai: - err = PTR_ERR(*rule_p); - *rule_p = NULL; - mlx5e_del_eth_addr_from_flow_table(priv, ai); - - return err; -} - -static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_info *ai, int type) -{ - u32 *match_criteria; - u32 *match_value; - int err = 0; - - match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); - if (!match_value || !match_criteria) { - netdev_err(priv->netdev, "%s: alloc failed\n", __func__); - err = -ENOMEM; - goto add_eth_addr_rule_out; - } - - err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria, - match_value); - -add_eth_addr_rule_out: - kvfree(match_criteria); - kvfree(match_value); - - return err; -} - static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) { struct net_device *ndev = priv->netdev; @@ -472,7 +116,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) int i; list_size = 0; - for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) + for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) list_size++; max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); @@ -489,7 +133,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) return -ENOMEM; i = 0; - for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) { + for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) { if (i >= list_size) break; vlans[i++] = vlan; @@ -514,28 +158,28 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, enum mlx5e_vlan_rule_type rule_type, u16 vid, u32 *mc, u32 *mv) { - struct mlx5_flow_table *ft = priv->fts.vlan.t; + struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; struct mlx5_flow_destination dest; u8 match_criteria_enable = 0; struct mlx5_flow_rule **rule_p; int err = 0; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fts.main.t; + dest.ft = priv->fs.l2.ft.t; match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag); switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: - rule_p = &priv->vlan.untagged_rule; + rule_p = &priv->fs.vlan.untagged_rule; break; case MLX5E_VLAN_RULE_TYPE_ANY_VID: - rule_p = &priv->vlan.any_vlan_rule; + rule_p = &priv->fs.vlan.any_vlan_rule; MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ - rule_p = &priv->vlan.active_vlans_rule[vid]; + rule_p = &priv->fs.vlan.active_vlans_rule[vid]; MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1); MLX5_SET_TO_ONES(fte_match_param, mc, 
outer_headers.first_vid); MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid); @@ -589,22 +233,22 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, { switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: - if (priv->vlan.untagged_rule) { - mlx5_del_flow_rule(priv->vlan.untagged_rule); - priv->vlan.untagged_rule = NULL; + if (priv->fs.vlan.untagged_rule) { + mlx5_del_flow_rule(priv->fs.vlan.untagged_rule); + priv->fs.vlan.untagged_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_ANY_VID: - if (priv->vlan.any_vlan_rule) { - mlx5_del_flow_rule(priv->vlan.any_vlan_rule); - priv->vlan.any_vlan_rule = NULL; + if (priv->fs.vlan.any_vlan_rule) { + mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule); + priv->fs.vlan.any_vlan_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_MATCH_VID: mlx5e_vport_context_update_vlans(priv); - if (priv->vlan.active_vlans_rule[vid]) { - mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]); - priv->vlan.active_vlans_rule[vid] = NULL; + if (priv->fs.vlan.active_vlans_rule[vid]) { + mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]); + priv->fs.vlan.active_vlans_rule[vid] = NULL; } mlx5e_vport_context_update_vlans(priv); break; @@ -613,10 +257,10 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) { - if (!priv->vlan.filter_disabled) + if (!priv->fs.vlan.filter_disabled) return; - priv->vlan.filter_disabled = false; + priv->fs.vlan.filter_disabled = false; if (priv->netdev->flags & IFF_PROMISC) return; mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); @@ -624,10 +268,10 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) { - if (priv->vlan.filter_disabled) + if (priv->fs.vlan.filter_disabled) return; - priv->vlan.filter_disabled = true; + priv->fs.vlan.filter_disabled = true; if (priv->netdev->flags & IFF_PROMISC) return; mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); @@ -638,7 +282,7 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, { struct mlx5e_priv *priv = netdev_priv(dev); - set_bit(vid, priv->vlan.active_vlans); + set_bit(vid, priv->fs.vlan.active_vlans); return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); } @@ -648,7 +292,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, { struct mlx5e_priv *priv = netdev_priv(dev); - clear_bit(vid, priv->vlan.active_vlans); + clear_bit(vid, priv->fs.vlan.active_vlans); mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); @@ -656,21 +300,21 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, } #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ - for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \ + for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \ hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) -static void mlx5e_execute_action(struct mlx5e_priv *priv, - struct mlx5e_eth_addr_hash_node *hn) +static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, + struct mlx5e_l2_hash_node *hn) { switch (hn->action) { case MLX5E_ACTION_ADD: - mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH); + mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); hn->action = MLX5E_ACTION_NONE; break; case MLX5E_ACTION_DEL: - mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai); - mlx5e_del_eth_addr_from_hash(hn); + mlx5e_del_l2_flow_rule(priv, &hn->ai); + mlx5e_del_l2_from_hash(hn); break; } } @@ -682,14 +326,14 @@ static void 
mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) netif_addr_lock_bh(netdev); - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, - priv->netdev->dev_addr); + mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, + priv->netdev->dev_addr); netdev_for_each_uc_addr(ha, netdev) - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr); + mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr); netdev_for_each_mc_addr(ha, netdev) - mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr); + mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr); netif_addr_unlock_bh(netdev); } @@ -699,17 +343,17 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, { bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); struct net_device *ndev = priv->netdev; - struct mlx5e_eth_addr_hash_node *hn; + struct mlx5e_l2_hash_node *hn; struct hlist_head *addr_list; struct hlist_node *tmp; int i = 0; int hi; - addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc; + addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc; if (is_uc) /* Make sure our own address is pushed first */ ether_addr_copy(addr_array[i++], ndev->dev_addr); - else if (priv->eth_addr.broadcast_enabled) + else if (priv->fs.l2.broadcast_enabled) ether_addr_copy(addr_array[i++], ndev->broadcast); mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { @@ -725,7 +369,7 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, int list_type) { bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); - struct mlx5e_eth_addr_hash_node *hn; + struct mlx5e_l2_hash_node *hn; u8 (*addr_array)[ETH_ALEN] = NULL; struct hlist_head *addr_list; struct hlist_node *tmp; @@ -734,12 +378,12 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, int err; int hi; - size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0); + size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0); max_size = is_uc ? 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) : 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list); - addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc; + addr_list = is_uc ? 
priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc; mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) size++; @@ -770,7 +414,7 @@ out: static void mlx5e_vport_context_update(struct mlx5e_priv *priv) { - struct mlx5e_eth_addr_db *ea = &priv->eth_addr; + struct mlx5e_l2_table *ea = &priv->fs.l2; mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC); mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC); @@ -781,26 +425,26 @@ static void mlx5e_vport_context_update(struct mlx5e_priv *priv) static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) { - struct mlx5e_eth_addr_hash_node *hn; + struct mlx5e_l2_hash_node *hn; struct hlist_node *tmp; int i; - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) - mlx5e_execute_action(priv, hn); + mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i) + mlx5e_execute_l2_action(priv, hn); - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) - mlx5e_execute_action(priv, hn); + mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i) + mlx5e_execute_l2_action(priv, hn); } static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) { - struct mlx5e_eth_addr_hash_node *hn; + struct mlx5e_l2_hash_node *hn; struct hlist_node *tmp; int i; - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i) + mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i) hn->action = MLX5E_ACTION_DEL; - mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i) + mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i) hn->action = MLX5E_ACTION_DEL; if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) @@ -814,7 +458,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, set_rx_mode_work); - struct mlx5e_eth_addr_db *ea = &priv->eth_addr; + struct mlx5e_l2_table *ea = &priv->fs.l2; struct net_device *ndev = priv->netdev; bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); @@ -830,27 +474,27 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; if (enable_promisc) { - mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC); - if (!priv->vlan.filter_disabled) + mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); + if (!priv->fs.vlan.filter_disabled) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); } if (enable_allmulti) - mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); + mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); if (enable_broadcast) - mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); + mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); mlx5e_handle_netdev_addr(priv); if (disable_broadcast) - mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast); + mlx5e_del_l2_flow_rule(priv, &ea->broadcast); if (disable_allmulti) - mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti); + mlx5e_del_l2_flow_rule(priv, &ea->allmulti); if (disable_promisc) { - if (!priv->vlan.filter_disabled) + if (!priv->fs.vlan.filter_disabled) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0); - mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc); + mlx5e_del_l2_flow_rule(priv, &ea->promisc); } ea->promisc_enabled = promisc_enabled; @@ -872,224 +516,454 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft) ft->num_groups = 0; } -void mlx5e_init_eth_addr(struct mlx5e_priv *priv) +void mlx5e_init_l2_addr(struct mlx5e_priv *priv) { - ether_addr_copy(priv->eth_addr.broadcast.addr, 
priv->netdev->broadcast); + ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast); } -#define MLX5E_MAIN_GROUP0_SIZE BIT(3) -#define MLX5E_MAIN_GROUP1_SIZE BIT(1) -#define MLX5E_MAIN_GROUP2_SIZE BIT(0) -#define MLX5E_MAIN_GROUP3_SIZE BIT(14) -#define MLX5E_MAIN_GROUP4_SIZE BIT(13) -#define MLX5E_MAIN_GROUP5_SIZE BIT(11) -#define MLX5E_MAIN_GROUP6_SIZE BIT(2) -#define MLX5E_MAIN_GROUP7_SIZE BIT(1) -#define MLX5E_MAIN_GROUP8_SIZE BIT(0) -#define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\ - MLX5E_MAIN_GROUP1_SIZE +\ - MLX5E_MAIN_GROUP2_SIZE +\ - MLX5E_MAIN_GROUP3_SIZE +\ - MLX5E_MAIN_GROUP4_SIZE +\ - MLX5E_MAIN_GROUP5_SIZE +\ - MLX5E_MAIN_GROUP6_SIZE +\ - MLX5E_MAIN_GROUP7_SIZE +\ - MLX5E_MAIN_GROUP8_SIZE) - -static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in, - int inlen) +void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) { - u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in, - match_criteria.outer_headers.dmac_47_16); + mlx5e_destroy_groups(ft); + kfree(ft->g); + mlx5_destroy_flow_table(ft->t); + ft->t = NULL; +} + +static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc) +{ + int i; + + for (i = 0; i < MLX5E_NUM_TT; i++) { + if (!IS_ERR_OR_NULL(ttc->rules[i])) { + mlx5_del_flow_rule(ttc->rules[i]); + ttc->rules[i] = NULL; + } + } +} + +static struct { + u16 etype; + u8 proto; +} ttc_rules[] = { + [MLX5E_TT_IPV4_TCP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_TCP, + }, + [MLX5E_TT_IPV6_TCP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_TCP, + }, + [MLX5E_TT_IPV4_UDP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_UDP, + }, + [MLX5E_TT_IPV6_UDP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_UDP, + }, + [MLX5E_TT_IPV4_IPSEC_AH] = { + .etype = ETH_P_IP, + .proto = IPPROTO_AH, + }, + [MLX5E_TT_IPV6_IPSEC_AH] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_AH, + }, + [MLX5E_TT_IPV4_IPSEC_ESP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_ESP, + }, + [MLX5E_TT_IPV6_IPSEC_ESP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_ESP, + }, + [MLX5E_TT_IPV4] = { + .etype = ETH_P_IP, + .proto = 0, + }, + [MLX5E_TT_IPV6] = { + .etype = ETH_P_IPV6, + .proto = 0, + }, + [MLX5E_TT_ANY] = { + .etype = 0, + .proto = 0, + }, +}; + +static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, + struct mlx5_flow_table *ft, + struct mlx5_flow_destination *dest, + u16 etype, + u8 proto) +{ + struct mlx5_flow_rule *rule; + u8 match_criteria_enable = 0; + u32 *match_criteria; + u32 *match_value; + int err = 0; + + match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto out; + } + + if (proto) { + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto); + } + if (etype) { + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype); + } + + rule = mlx5_add_flow_rule(ft, match_criteria_enable, + match_criteria, match_value, + MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, + dest); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + netdev_err(priv->netdev, "%s: add rule failed\n", __func__); 
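
mlx5e_generate_ttc_rule() above builds each classifier entry from the ttc_rules[] pair: an entry with both fields set matches in the L4 group, ethertype alone matches in the L3 group, and the all-zero MLX5E_TT_ANY entry sets no match criteria at all, making it the catch-all. A compact sketch of that dispatch:

	#include <stdio.h>
	struct ttc { unsigned short etype; unsigned char proto; };
	static const char *group_of(struct ttc r)
	{
		if (r.proto)
			return "L4 group (etype + ip_protocol)";
		if (r.etype)
			return "L3 group (etype only)";
		return "any group (no match criteria)";
	}
	int main(void)
	{
		struct ttc v4_tcp = { 0x0800, 6 }, v6 = { 0x86DD, 0 }, any = { 0, 0 };
		printf("%s\n%s\n%s\n", group_of(v4_tcp), group_of(v6), group_of(any));
		return 0;
	}
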
+ } +out: + kvfree(match_criteria); + kvfree(match_value); + return err ? ERR_PTR(err) : rule; +} + +static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv) +{ + struct mlx5_flow_destination dest; + struct mlx5e_ttc_table *ttc; + struct mlx5_flow_rule **rules; + struct mlx5_flow_table *ft; + int tt; int err; + + ttc = &priv->fs.ttc; + ft = ttc->ft.t; + rules = ttc->rules; + + dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; + for (tt = 0; tt < MLX5E_NUM_TT; tt++) { + if (tt == MLX5E_TT_ANY) + dest.tir_num = priv->direct_tir[0].tirn; + else + dest.tir_num = priv->indir_tirn[tt]; + rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, + ttc_rules[tt].etype, + ttc_rules[tt].proto); + if (IS_ERR(rules[tt])) + goto del_rules; + } + + return 0; + +del_rules: + err = PTR_ERR(rules[tt]); + rules[tt] = NULL; + mlx5e_cleanup_ttc_rules(ttc); + return err; +} + +#define MLX5E_TTC_NUM_GROUPS 3 +#define MLX5E_TTC_GROUP1_SIZE BIT(3) +#define MLX5E_TTC_GROUP2_SIZE BIT(1) +#define MLX5E_TTC_GROUP3_SIZE BIT(0) +#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ + MLX5E_TTC_GROUP2_SIZE +\ + MLX5E_TTC_GROUP3_SIZE) +static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5e_flow_table *ft = &ttc->ft; int ix = 0; + u32 *in; + int err; + u8 *mc; - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP0_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; + ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS, + sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) + return -ENOMEM; + in = mlx5_vzalloc(inlen); + if (!in) { + kfree(ft->g); + return -ENOMEM; + } - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + /* L4 Group */ + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP1_SIZE; + ix += MLX5E_TTC_GROUP1_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; + goto err; ft->num_groups++; - memset(in, 0, inlen); + /* L3 Group */ + MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP2_SIZE; + ix += MLX5E_TTC_GROUP2_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; + goto err; ft->num_groups++; + /* Any Group */ memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); - eth_broadcast_addr(dmac); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP3_SIZE; + ix += MLX5E_TTC_GROUP3_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); if 
(IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; + goto err; ft->num_groups++; - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - eth_broadcast_addr(dmac); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP4_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; + kvfree(in); + return 0; - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - eth_broadcast_addr(dmac); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP5_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err_destroy_groups; - ft->num_groups++; +err: + err = PTR_ERR(ft->g[ft->num_groups]); + ft->g[ft->num_groups] = NULL; + kvfree(in); - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); - dmac[0] = 0x01; + return err; +} + +static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) +{ + struct mlx5e_ttc_table *ttc = &priv->fs.ttc; + + mlx5e_cleanup_ttc_rules(ttc); + mlx5e_destroy_flow_table(&ttc->ft); +} + +static int mlx5e_create_ttc_table(struct mlx5e_priv *priv) +{ + struct mlx5e_ttc_table *ttc = &priv->fs.ttc; + struct mlx5e_flow_table *ft = &ttc->ft; + int err; + + ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, + MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL); + if (IS_ERR(ft->t)) { + err = PTR_ERR(ft->t); + ft->t = NULL; + return err; + } + + err = mlx5e_create_ttc_table_groups(ttc); + if (err) + goto err; + + err = mlx5e_generate_ttc_table_rules(priv); + if (err) + goto err; + + return 0; +err: + mlx5e_destroy_flow_table(ft); + return err; +} + +static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, + struct mlx5e_l2_rule *ai) +{ + if (!IS_ERR_OR_NULL(ai->rule)) { + mlx5_del_flow_rule(ai->rule); + ai->rule = NULL; + } +} + +static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, + struct mlx5e_l2_rule *ai, int type) +{ + struct mlx5_flow_table *ft = priv->fs.l2.ft.t; + struct mlx5_flow_destination dest; + u8 match_criteria_enable = 0; + u32 *match_criteria; + u32 *match_value; + int err = 0; + u8 *mc_dmac; + u8 *mv_dmac; + + match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param)); + if (!match_value || !match_criteria) { + netdev_err(priv->netdev, "%s: alloc failed\n", __func__); + err = -ENOMEM; + goto add_l2_rule_out; + } + + mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, + outer_headers.dmac_47_16); + mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value, + outer_headers.dmac_47_16); + + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = priv->fs.ttc.ft.t; + + switch (type) { + case MLX5E_FULLMATCH: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + eth_broadcast_addr(mc_dmac); + ether_addr_copy(mv_dmac, ai->addr); + break; + + case MLX5E_ALLMULTI: + match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + mc_dmac[0] = 0x01; + mv_dmac[0] = 0x01; + break; + + case MLX5E_PROMISC: + break; + } + + ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria, + match_value, + 
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, + MLX5_FS_DEFAULT_FLOW_TAG, &dest); + if (IS_ERR(ai->rule)) { + netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n", + __func__, mv_dmac); + err = PTR_ERR(ai->rule); + ai->rule = NULL; + } + +add_l2_rule_out: + kvfree(match_criteria); + kvfree(match_value); + + return err; +} + +#define MLX5E_NUM_L2_GROUPS 3 +#define MLX5E_L2_GROUP1_SIZE BIT(0) +#define MLX5E_L2_GROUP2_SIZE BIT(15) +#define MLX5E_L2_GROUP3_SIZE BIT(0) +#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\ + MLX5E_L2_GROUP2_SIZE +\ + MLX5E_L2_GROUP3_SIZE) +static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5e_flow_table *ft = &l2_table->ft; + int ix = 0; + u8 *mc_dmac; + u32 *in; + int err; + u8 *mc; + + ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL); + if (!ft->g) + return -ENOMEM; + in = mlx5_vzalloc(inlen); + if (!in) { + kfree(ft->g); + return -ENOMEM; + } + + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + mc_dmac = MLX5_ADDR_OF(fte_match_param, mc, + outer_headers.dmac_47_16); + /* Flow Group for promiscuous */ MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP6_SIZE; + ix += MLX5E_L2_GROUP1_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); if (IS_ERR(ft->g[ft->num_groups])) goto err_destroy_groups; ft->num_groups++; - memset(in, 0, inlen); + /* Flow Group for full match */ + eth_broadcast_addr(mc_dmac); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - dmac[0] = 0x01; MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP7_SIZE; + ix += MLX5E_L2_GROUP2_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); if (IS_ERR(ft->g[ft->num_groups])) goto err_destroy_groups; ft->num_groups++; - memset(in, 0, inlen); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - dmac[0] = 0x01; + /* Flow Group for allmulti */ + eth_zero_addr(mc_dmac); + mc_dmac[0] = 0x01; MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_MAIN_GROUP8_SIZE; + ix += MLX5E_L2_GROUP3_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); if (IS_ERR(ft->g[ft->num_groups])) goto err_destroy_groups; ft->num_groups++; + kvfree(in); return 0; err_destroy_groups: err = PTR_ERR(ft->g[ft->num_groups]); ft->g[ft->num_groups] = NULL; mlx5e_destroy_groups(ft); + kvfree(in); return err; } -static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft) +static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv) { - u32 *in; - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - int err; - - in = mlx5_vzalloc(inlen); - if (!in) - return -ENOMEM; - - err = __mlx5e_create_main_groups(ft, in, inlen); - - kvfree(in); - return err; + mlx5e_destroy_flow_table(&priv->fs.l2.ft); } -static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) +static int mlx5e_create_l2_table(struct mlx5e_priv *priv) { - struct mlx5e_flow_table *ft = &priv->fts.main; + struct mlx5e_l2_table *l2_table = &priv->fs.l2; + struct mlx5e_flow_table *ft = &l2_table->ft; int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE); + ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, + MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; return err; } - 
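/* Editor's note (sketch, not driver code): in mlx5e_add_l2_flow_rule() above,
 * the three rule types differ only in the DMAC match. MLX5E_FULLMATCH sets an
 * all-ones mask plus an exact address, MLX5E_ALLMULTI matches only the
 * multicast group bit (bit 0 of the first address byte, the 0x01 written to
 * mc_dmac[0]/mv_dmac[0]), and MLX5E_PROMISC matches nothing. A minimal
 * restatement of the ALLMULTI criterion:
 */
#include <stdbool.h>
#include <stdint.h>
static bool dmac_matches_allmulti(const uint8_t dmac[6])
{
        return (dmac[0] & 0x01) != 0;   /* IEEE 802.3 group (multicast) bit */
}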
ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL); - if (!ft->g) { - err = -ENOMEM; - goto err_destroy_main_flow_table; - } - err = mlx5e_create_main_groups(ft); + err = mlx5e_create_l2_table_groups(l2_table); if (err) - goto err_free_g; - return 0; + goto err_destroy_flow_table; -err_free_g: - kfree(ft->g); + return 0; -err_destroy_main_flow_table: +err_destroy_flow_table: mlx5_destroy_flow_table(ft->t); ft->t = NULL; return err; } -static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) -{ - mlx5e_destroy_groups(ft); - kfree(ft->g); - mlx5_destroy_flow_table(ft->t); - ft->t = NULL; -} - -static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv) -{ - mlx5e_destroy_flow_table(&priv->fts.main); -} - #define MLX5E_NUM_VLAN_GROUPS 2 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) #define MLX5E_VLAN_GROUP1_SIZE BIT(1) #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ MLX5E_VLAN_GROUP1_SIZE) -static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in, - int inlen) +static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in, + int inlen) { int err; int ix = 0; @@ -1128,7 +1002,7 @@ err_destroy_groups: return err; } -static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft) +static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft) { u32 *in; int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); @@ -1138,19 +1012,20 @@ static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft) if (!in) return -ENOMEM; - err = __mlx5e_create_vlan_groups(ft, in, inlen); + err = __mlx5e_create_vlan_table_groups(ft, in, inlen); kvfree(in); return err; } -static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) +static int mlx5e_create_vlan_table(struct mlx5e_priv *priv) { - struct mlx5e_flow_table *ft = &priv->fts.vlan; + struct mlx5e_flow_table *ft = &priv->fs.vlan.ft; int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE); + ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO, + MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); @@ -1160,65 +1035,90 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL); if (!ft->g) { err = -ENOMEM; - goto err_destroy_vlan_flow_table; + goto err_destroy_vlan_table; } - err = mlx5e_create_vlan_groups(ft); + err = mlx5e_create_vlan_table_groups(ft); if (err) goto err_free_g; + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + if (err) + goto err_destroy_vlan_flow_groups; + return 0; +err_destroy_vlan_flow_groups: + mlx5e_destroy_groups(ft); err_free_g: kfree(ft->g); - -err_destroy_vlan_flow_table: +err_destroy_vlan_table: mlx5_destroy_flow_table(ft->t); ft->t = NULL; return err; } -static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv) +static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv) { - mlx5e_destroy_flow_table(&priv->fts.vlan); + mlx5e_destroy_flow_table(&priv->fs.vlan.ft); } -int mlx5e_create_flow_tables(struct mlx5e_priv *priv) +int mlx5e_create_flow_steering(struct mlx5e_priv *priv) { int err; - priv->fts.ns = mlx5_get_flow_namespace(priv->mdev, + priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); - if (!priv->fts.ns) + if (!priv->fs.ns) return -EINVAL; - err = mlx5e_create_vlan_flow_table(priv); - if (err) - return err; + err = mlx5e_arfs_create_tables(priv); + if (err) { + netdev_err(priv->netdev, "Failed to create arfs tables, 
err=%d\n", + err); + priv->netdev->hw_features &= ~NETIF_F_NTUPLE; + } - err = mlx5e_create_main_flow_table(priv); - if (err) - goto err_destroy_vlan_flow_table; + err = mlx5e_create_ttc_table(priv); + if (err) { + netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", + err); + goto err_destroy_arfs_tables; + } - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - if (err) - goto err_destroy_main_flow_table; + err = mlx5e_create_l2_table(priv); + if (err) { + netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n", + err); + goto err_destroy_ttc_table; + } + + err = mlx5e_create_vlan_table(priv); + if (err) { + netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n", + err); + goto err_destroy_l2_table; + } return 0; -err_destroy_main_flow_table: - mlx5e_destroy_main_flow_table(priv); -err_destroy_vlan_flow_table: - mlx5e_destroy_vlan_flow_table(priv); +err_destroy_l2_table: + mlx5e_destroy_l2_table(priv); +err_destroy_ttc_table: + mlx5e_destroy_ttc_table(priv); +err_destroy_arfs_tables: + mlx5e_arfs_destroy_tables(priv); return err; } -void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv) +void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) { mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - mlx5e_destroy_main_flow_table(priv); - mlx5e_destroy_vlan_flow_table(priv); + mlx5e_destroy_vlan_table(priv); + mlx5e_destroy_l2_table(priv); + mlx5e_destroy_ttc_table(priv); + mlx5e_arfs_destroy_tables(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e0adb604f461..fd4392999eee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -48,6 +48,7 @@ struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; u16 max_inline; + bool icosq; }; struct mlx5e_cq_param { @@ -59,8 +60,10 @@ struct mlx5e_cq_param { struct mlx5e_channel_param { struct mlx5e_rq_param rq; struct mlx5e_sq_param sq; + struct mlx5e_sq_param icosq; struct mlx5e_cq_param rx_cq; struct mlx5e_cq_param tx_cq; + struct mlx5e_cq_param icosq_cq; }; static void mlx5e_update_carrier(struct mlx5e_priv *priv) @@ -88,82 +91,15 @@ static void mlx5e_update_carrier_work(struct work_struct *work) mutex_unlock(&priv->state_lock); } -static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) -{ - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_pport_stats *s = &priv->stats.pport; - u32 *in; - u32 *out; - int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); - - in = mlx5_vzalloc(sz); - out = mlx5_vzalloc(sz); - if (!in || !out) - goto free_out; - - MLX5_SET(ppcnt_reg, in, local_port, 1); - - MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, - sz, MLX5_REG_PPCNT, 0, 0); - memcpy(s->IEEE_802_3_counters, - MLX5_ADDR_OF(ppcnt_reg, out, counter_set), - sizeof(s->IEEE_802_3_counters)); - - MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, - sz, MLX5_REG_PPCNT, 0, 0); - memcpy(s->RFC_2863_counters, - MLX5_ADDR_OF(ppcnt_reg, out, counter_set), - sizeof(s->RFC_2863_counters)); - - MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); - mlx5_core_access_reg(mdev, in, sz, out, - sz, MLX5_REG_PPCNT, 0, 0); - memcpy(s->RFC_2819_counters, - MLX5_ADDR_OF(ppcnt_reg, out, counter_set), - sizeof(s->RFC_2819_counters)); - -free_out: - kvfree(in); - kvfree(out); -} - -void mlx5e_update_stats(struct mlx5e_priv *priv) +static void 
mlx5e_update_sw_counters(struct mlx5e_priv *priv) { - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5e_vport_stats *s = &priv->stats.vport; + struct mlx5e_sw_stats *s = &priv->stats.sw; struct mlx5e_rq_stats *rq_stats; struct mlx5e_sq_stats *sq_stats; - u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; - u32 *out; - int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); - u64 tx_offload_none; + u64 tx_offload_none = 0; int i, j; - out = mlx5_vzalloc(outlen); - if (!out) - return; - - /* Collect firts the SW counters and then HW for consistency */ - s->rx_packets = 0; - s->rx_bytes = 0; - s->tx_packets = 0; - s->tx_bytes = 0; - s->tso_packets = 0; - s->tso_bytes = 0; - s->tso_inner_packets = 0; - s->tso_inner_bytes = 0; - s->tx_queue_stopped = 0; - s->tx_queue_wake = 0; - s->tx_queue_dropped = 0; - s->tx_csum_inner = 0; - tx_offload_none = 0; - s->lro_packets = 0; - s->lro_bytes = 0; - s->rx_csum_none = 0; - s->rx_csum_sw = 0; - s->rx_wqe_err = 0; + memset(s, 0, sizeof(*s)); for (i = 0; i < priv->params.num_channels; i++) { rq_stats = &priv->channel[i]->rq.stats; @@ -173,7 +109,13 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->lro_bytes += rq_stats->lro_bytes; s->rx_csum_none += rq_stats->csum_none; s->rx_csum_sw += rq_stats->csum_sw; + s->rx_csum_inner += rq_stats->csum_inner; s->rx_wqe_err += rq_stats->wqe_err; + s->rx_mpwqe_filler += rq_stats->mpwqe_filler; + s->rx_mpwqe_frag += rq_stats->mpwqe_frag; + s->rx_buff_alloc_err += rq_stats->buff_alloc_err; + s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; + s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; for (j = 0; j < priv->params.num_tc; j++) { sq_stats = &priv->channel[i]->sq[j].stats; @@ -192,7 +134,23 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) } } - /* HW counters */ + /* Update calculated offload counters */ + s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner; + s->rx_csum_good = s->rx_packets - s->rx_csum_none - + s->rx_csum_sw; + + s->link_down_events = MLX5_GET(ppcnt_reg, + priv->stats.pport.phy_counters, + counter_set.phys_layer_cntrs.link_down_events); +} + +static void mlx5e_update_vport_counters(struct mlx5e_priv *priv) +{ + int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out); + u32 *out = (u32 *)priv->stats.vport.query_vport_out; + u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)]; + struct mlx5_core_dev *mdev = priv->mdev; + memset(in, 0, sizeof(in)); MLX5_SET(query_vport_counter_in, in, opcode, @@ -202,56 +160,69 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) memset(out, 0, outlen); - if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen)) + mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen); +} + +static void mlx5e_update_pport_counters(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct mlx5_core_dev *mdev = priv->mdev; + int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); + int prio; + void *out; + u32 *in; + + in = mlx5_vzalloc(sz); + if (!in) goto free_out; -#define MLX5_GET_CTR(p, x) \ - MLX5_GET64(query_vport_counter_out, p, x) - - s->rx_error_packets = - MLX5_GET_CTR(out, received_errors.packets); - s->rx_error_bytes = - MLX5_GET_CTR(out, received_errors.octets); - s->tx_error_packets = - MLX5_GET_CTR(out, transmit_errors.packets); - s->tx_error_bytes = - MLX5_GET_CTR(out, transmit_errors.octets); - - s->rx_unicast_packets = - MLX5_GET_CTR(out, received_eth_unicast.packets); - s->rx_unicast_bytes = - MLX5_GET_CTR(out, received_eth_unicast.octets); - s->tx_unicast_packets = - MLX5_GET_CTR(out, transmitted_eth_unicast.packets); - 
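/* Editor's note (sketch under the formula visible above): the "calculated
 * offload counters" in mlx5e_update_sw_counters() are derived rather than
 * read from hardware -- any received packet not explicitly counted as
 * csum_none or csum_sw must have been checksum-verified by the NIC:
 */
#include <stdint.h>
static uint64_t rx_csum_good(uint64_t rx_packets, uint64_t csum_none,
                             uint64_t csum_sw)
{
        return rx_packets - csum_none - csum_sw;
}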
s->tx_unicast_bytes = - MLX5_GET_CTR(out, transmitted_eth_unicast.octets); - - s->rx_multicast_packets = - MLX5_GET_CTR(out, received_eth_multicast.packets); - s->rx_multicast_bytes = - MLX5_GET_CTR(out, received_eth_multicast.octets); - s->tx_multicast_packets = - MLX5_GET_CTR(out, transmitted_eth_multicast.packets); - s->tx_multicast_bytes = - MLX5_GET_CTR(out, transmitted_eth_multicast.octets); - - s->rx_broadcast_packets = - MLX5_GET_CTR(out, received_eth_broadcast.packets); - s->rx_broadcast_bytes = - MLX5_GET_CTR(out, received_eth_broadcast.octets); - s->tx_broadcast_packets = - MLX5_GET_CTR(out, transmitted_eth_broadcast.packets); - s->tx_broadcast_bytes = - MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); + MLX5_SET(ppcnt_reg, in, local_port, 1); - /* Update calculated offload counters */ - s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner; - s->rx_csum_good = s->rx_packets - s->rx_csum_none - - s->rx_csum_sw; + out = pstats->IEEE_802_3_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + + out = pstats->RFC_2863_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + + out = pstats->RFC_2819_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + + out = pstats->phy_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + out = pstats->per_prio_counters[prio]; + MLX5_SET(ppcnt_reg, in, prio_tc, prio); + mlx5_core_access_reg(mdev, in, sz, out, sz, + MLX5_REG_PPCNT, 0, 0); + } - mlx5e_update_pport_counters(priv); free_out: - kvfree(out); + kvfree(in); +} + +static void mlx5e_update_q_counter(struct mlx5e_priv *priv) +{ + struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt; + + if (!priv->q_counter) + return; + + mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter, + &qcnt->rx_out_of_buffer); +} + +void mlx5e_update_stats(struct mlx5e_priv *priv) +{ + mlx5e_update_q_counter(priv); + mlx5e_update_vport_counters(priv); + mlx5e_update_pport_counters(priv); + mlx5e_update_sw_counters(priv); } static void mlx5e_update_stats_work(struct work_struct *work) @@ -262,9 +233,8 @@ static void mlx5e_update_stats_work(struct work_struct *work) mutex_lock(&priv->state_lock); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { mlx5e_update_stats(priv); - schedule_delayed_work(dwork, - msecs_to_jiffies( - MLX5E_UPDATE_STATS_INTERVAL)); + queue_delayed_work(priv->wq, dwork, + msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL)); } mutex_unlock(&priv->state_lock); } @@ -280,7 +250,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, switch (event) { case MLX5_DEV_EVENT_PORT_UP: case MLX5_DEV_EVENT_PORT_DOWN: - schedule_work(&priv->update_carrier_work); + queue_work(priv->wq, &priv->update_carrier_work); break; default: @@ -310,6 +280,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, struct mlx5_core_dev *mdev = priv->mdev; void *rqc = param->rqc; void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); + u32 byte_count; int wq_sz; int err; int i; @@ -324,32 +295,56 @@ static int mlx5e_create_rq(struct mlx5e_channel *c, rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; wq_sz = mlx5_wq_ll_get_size(&rq->wq); 
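/* Editor's note (standalone sketch, not from the patch): in the rq_wq_type
 * switch just below, the striding-RQ (MPWQE) buffer size comes from two log2
 * parameters: one receive WQE spans num_strides strides of stride_sz bytes
 * each, and incoming packets are packed into consecutive strides:
 */
#include <stdint.h>
static uint32_t mpwqe_wqe_sz(uint8_t log_stride_sz, uint8_t log_num_strides)
{
        uint32_t stride_sz   = UINT32_C(1) << log_stride_sz;
        uint32_t num_strides = UINT32_C(1) << log_num_strides;

        return stride_sz * num_strides; /* corresponds to rq->wqe_sz below */
}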
- rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL, - cpu_to_node(c->cpu)); - if (!rq->skb) { - err = -ENOMEM; - goto err_rq_wq_destroy; - } - rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz : - MLX5E_SW2HW_MTU(priv->netdev->mtu); - rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN); + switch (priv->params.rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info), + GFP_KERNEL, cpu_to_node(c->cpu)); + if (!rq->wqe_info) { + err = -ENOMEM; + goto err_rq_wq_destroy; + } + rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq; + rq->alloc_wqe = mlx5e_alloc_rx_mpwqe; + + rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz); + rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides); + rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides; + byte_count = rq->wqe_sz; + break; + default: /* MLX5_WQ_TYPE_LINKED_LIST */ + rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL, + cpu_to_node(c->cpu)); + if (!rq->skb) { + err = -ENOMEM; + goto err_rq_wq_destroy; + } + rq->handle_rx_cqe = mlx5e_handle_rx_cqe; + rq->alloc_wqe = mlx5e_alloc_rx_wqe; + + rq->wqe_sz = (priv->params.lro_en) ? + priv->params.lro_wqe_sz : + MLX5E_SW2HW_MTU(priv->netdev->mtu); + rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz); + byte_count = rq->wqe_sz; + byte_count |= MLX5_HW_START_PADDING; + } for (i = 0; i < wq_sz; i++) { struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); - u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN; - wqe->data.lkey = c->mkey_be; - wqe->data.byte_count = - cpu_to_be32(byte_count | MLX5_HW_START_PADDING); + wqe->data.byte_count = cpu_to_be32(byte_count); } + rq->wq_type = priv->params.rq_wq_type; rq->pdev = c->pdev; rq->netdev = c->netdev; rq->tstamp = &priv->tstamp; rq->channel = c; rq->ix = c->ix; rq->priv = c->priv; + rq->mkey_be = c->mkey_be; + rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key); return 0; @@ -361,7 +356,14 @@ err_rq_wq_destroy: static void mlx5e_destroy_rq(struct mlx5e_rq *rq) { - kfree(rq->skb); + switch (rq->wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + kfree(rq->wqe_info); + break; + default: /* MLX5_WQ_TYPE_LINKED_LIST */ + kfree(rq->skb); + } + mlx5_wq_destroy(&rq->wq_ctrl); } @@ -390,6 +392,7 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); MLX5_SET(rqc, rqc, flush_in_error_en, 1); + MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable); MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); @@ -404,7 +407,8 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) return err; } -static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) +static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, + int next_state) { struct mlx5e_channel *c = rq->channel; struct mlx5e_priv *priv = c->priv; @@ -432,6 +436,36 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state) return err; } +static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) +{ + struct mlx5e_channel *c = rq->channel; + struct mlx5e_priv *priv = c->priv; + struct mlx5_core_dev *mdev = priv->mdev; + + void *in; + void *rqc; + int inlen; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_rq_in); + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); + + 
MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY); + MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD); + MLX5_SET(rqc, rqc, vsd, vsd); + MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY); + + err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen); + + kvfree(in); + + return err; +} + static void mlx5e_disable_rq(struct mlx5e_rq *rq) { mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn); @@ -458,6 +492,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_rq_param *param, struct mlx5e_rq *rq) { + struct mlx5e_sq *sq = &c->icosq; + u16 pi = sq->pc & sq->wq.sz_m1; int err; err = mlx5e_create_rq(c, param, rq); @@ -468,12 +504,15 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, if (err) goto err_destroy_rq; - err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); + err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); if (err) goto err_disable_rq; set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); - mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */ + + sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; + sq->ico_wqe_info[pi].num_wqebbs = 1; + mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */ return 0; @@ -490,7 +529,7 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state); napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ - mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); + mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR); while (!mlx5_wq_ll_is_empty(&rq->wq)) msleep(20); @@ -539,7 +578,6 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, void *sqc = param->sqc; void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq); - int txq_ix; int err; err = mlx5_alloc_map_uar(mdev, &sq->uar, true); @@ -567,8 +605,24 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, if (err) goto err_sq_wq_destroy; - txq_ix = c->ix + tc * priv->params.num_channels; - sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix); + if (param->icosq) { + u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + + sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) * + wq_sz, + GFP_KERNEL, + cpu_to_node(c->cpu)); + if (!sq->ico_wqe_info) { + err = -ENOMEM; + goto err_free_sq_db; + } + } else { + int txq_ix; + + txq_ix = c->ix + tc * priv->params.num_channels; + sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix); + priv->txq_to_sq_map[txq_ix] = sq; + } sq->pdev = c->pdev; sq->tstamp = &priv->tstamp; @@ -577,10 +631,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, sq->tc = tc; sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS; sq->bf_budget = MLX5E_SQ_BF_BUDGET; - priv->txq_to_sq_map[txq_ix] = sq; return 0; +err_free_sq_db: + mlx5e_free_sq_db(sq); + err_sq_wq_destroy: mlx5_wq_destroy(&sq->wq_ctrl); @@ -595,6 +651,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq) struct mlx5e_channel *c = sq->channel; struct mlx5e_priv *priv = c->priv; + kfree(sq->ico_wqe_info); mlx5e_free_sq_db(sq); mlx5_wq_destroy(&sq->wq_ctrl); mlx5_unmap_free_uar(priv->mdev, &sq->uar); @@ -623,10 +680,10 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param) memcpy(sqc, param->sqc, sizeof(param->sqc)); - MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]); - MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn); + MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]); + MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); - MLX5_SET(sqc, sqc, tis_lst_sz, 1); + MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 
0 : 1); MLX5_SET(sqc, sqc, flush_in_error_en, 1); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); @@ -701,9 +758,11 @@ static int mlx5e_open_sq(struct mlx5e_channel *c, if (err) goto err_disable_sq; - set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); - netdev_tx_reset_queue(sq->txq); - netif_tx_start_queue(sq->txq); + if (sq->txq) { + set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); + netdev_tx_reset_queue(sq->txq); + netif_tx_start_queue(sq->txq); + } return 0; @@ -724,15 +783,19 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq) static void mlx5e_close_sq(struct mlx5e_sq *sq) { - clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); - napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */ - netif_tx_disable_queue(sq->txq); + if (sq->txq) { + clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state); + /* prevent netif_tx_wake_queue */ + napi_synchronize(&sq->channel->napi); + netif_tx_disable_queue(sq->txq); - /* ensure hw is notified of all pending wqes */ - if (mlx5e_sq_has_room_for(sq, 1)) - mlx5e_send_nop(sq, true); + /* ensure hw is notified of all pending wqes */ + if (mlx5e_sq_has_room_for(sq, 1)) + mlx5e_send_nop(sq, true); + + mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); + } - mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR); while (sq->cc != sq->pc) /* wait till sq is empty */ msleep(20); @@ -986,10 +1049,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); - err = mlx5e_open_tx_cqs(c, cparam); + err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0); if (err) goto err_napi_del; + err = mlx5e_open_tx_cqs(c, cparam); + if (err) + goto err_close_icosq_cq; + err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq, priv->params.rx_cq_moderation_usec, priv->params.rx_cq_moderation_pkts); @@ -998,10 +1065,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, napi_enable(&c->napi); - err = mlx5e_open_sqs(c, cparam); + err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); if (err) goto err_disable_napi; + err = mlx5e_open_sqs(c, cparam); + if (err) + goto err_close_icosq; + err = mlx5e_open_rq(c, &cparam->rq, &c->rq); if (err) goto err_close_sqs; @@ -1014,6 +1085,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, err_close_sqs: mlx5e_close_sqs(c); +err_close_icosq: + mlx5e_close_sq(&c->icosq); + err_disable_napi: napi_disable(&c->napi); mlx5e_close_cq(&c->rq.cq); @@ -1021,6 +1095,9 @@ err_disable_napi: err_close_tx_cqs: mlx5e_close_tx_cqs(c); +err_close_icosq_cq: + mlx5e_close_cq(&c->icosq.cq); + err_napi_del: netif_napi_del(&c->napi); napi_hash_del(&c->napi); @@ -1033,9 +1110,11 @@ static void mlx5e_close_channel(struct mlx5e_channel *c) { mlx5e_close_rq(&c->rq); mlx5e_close_sqs(c); + mlx5e_close_sq(&c->icosq); napi_disable(&c->napi); mlx5e_close_cq(&c->rq.cq); mlx5e_close_tx_cqs(c); + mlx5e_close_cq(&c->icosq.cq); netif_napi_del(&c->napi); napi_hash_del(&c->napi); @@ -1050,11 +1129,23 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, void *rqc = param->rqc; void *wq = MLX5_ADDR_OF(rqc, rqc, wq); - MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + switch (priv->params.rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + MLX5_SET(wq, wq, log_wqe_num_of_strides, + priv->params.mpwqe_log_num_strides - 9); + MLX5_SET(wq, wq, log_wqe_stride_size, + priv->params.mpwqe_log_stride_sz - 6); + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ); + break; + default: /* 
MLX5_WQ_TYPE_LINKED_LIST */ + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + } + MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size); MLX5_SET(wq, wq, pd, priv->pdn); + MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter); param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); param->wq.linear = 1; @@ -1069,17 +1160,27 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); } -static void mlx5e_build_sq_param(struct mlx5e_priv *priv, - struct mlx5e_sq_param *param) +static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv, + struct mlx5e_sq_param *param) { void *sqc = param->sqc; void *wq = MLX5_ADDR_OF(sqc, sqc, wq); - MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); MLX5_SET(wq, wq, pd, priv->pdn); param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev); +} + +static void mlx5e_build_sq_param(struct mlx5e_priv *priv, + struct mlx5e_sq_param *param) +{ + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + mlx5e_build_sq_param_common(priv, param); + MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size); + param->max_inline = priv->params.tx_max_inline; } @@ -1095,8 +1196,22 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, struct mlx5e_cq_param *param) { void *cqc = param->cqc; + u8 log_cq_size; - MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size); + switch (priv->params.rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + log_cq_size = priv->params.log_rq_size + + priv->params.mpwqe_log_num_strides; + break; + default: /* MLX5_WQ_TYPE_LINKED_LIST */ + log_cq_size = priv->params.log_rq_size; + } + + MLX5_SET(cqc, cqc, log_cq_size, log_cq_size); + if (priv->params.rx_cqe_compress) { + MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM); + MLX5_SET(cqc, cqc, cqe_comp_en, 1); + } mlx5e_build_common_cq_param(priv, param); } @@ -1106,25 +1221,52 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, { void *cqc = param->cqc; - MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); + MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size); + + mlx5e_build_common_cq_param(priv, param); +} + +static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, + struct mlx5e_cq_param *param, + u8 log_wq_size) +{ + void *cqc = param->cqc; + + MLX5_SET(cqc, cqc, log_cq_size, log_wq_size); mlx5e_build_common_cq_param(priv, param); } -static void mlx5e_build_channel_param(struct mlx5e_priv *priv, - struct mlx5e_channel_param *cparam) +static void mlx5e_build_icosq_param(struct mlx5e_priv *priv, + struct mlx5e_sq_param *param, + u8 log_wq_size) { - memset(cparam, 0, sizeof(*cparam)); + void *sqc = param->sqc; + void *wq = MLX5_ADDR_OF(sqc, sqc, wq); + + mlx5e_build_sq_param_common(priv, param); + + MLX5_SET(wq, wq, log_wq_sz, log_wq_size); + MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq)); + + param->icosq = true; +} + +static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam) +{ + u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; mlx5e_build_rq_param(priv, &cparam->rq); mlx5e_build_sq_param(priv, &cparam->sq); + mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz); mlx5e_build_rx_cq_param(priv, &cparam->rx_cq); mlx5e_build_tx_cq_param(priv, &cparam->tx_cq); + 
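/* Editor's note (sketch of the arithmetic above): mlx5e_build_rx_cq_param()
 * sizes the RX CQ for the worst case. With a striding RQ, each WQE can
 * complete up to one CQE per stride (one packet per stride), so in log2
 * terms the CQ size is the RQ size plus the strides-per-WQE:
 */
#include <stdint.h>
static uint8_t rx_log_cq_size(uint8_t log_rq_size, uint8_t log_num_strides,
                              int striding_rq)
{
        return striding_rq ? log_rq_size + log_num_strides : log_rq_size;
}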
mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz); } static int mlx5e_open_channels(struct mlx5e_priv *priv) { - struct mlx5e_channel_param cparam; + struct mlx5e_channel_param *cparam; int nch = priv->params.num_channels; int err = -ENOMEM; int i; @@ -1136,12 +1278,15 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv) priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc, sizeof(struct mlx5e_sq *), GFP_KERNEL); - if (!priv->channel || !priv->txq_to_sq_map) + cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); + + if (!priv->channel || !priv->txq_to_sq_map || !cparam) goto err_free_txq_to_sq_map; - mlx5e_build_channel_param(priv, &cparam); + mlx5e_build_channel_param(priv, cparam); + for (i = 0; i < nch; i++) { - err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]); + err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]); if (err) goto err_close_channels; } @@ -1152,6 +1297,7 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv) goto err_close_channels; } + kfree(cparam); return 0; err_close_channels: @@ -1161,6 +1307,7 @@ err_close_channels: err_free_txq_to_sq_map: kfree(priv->txq_to_sq_map); kfree(priv->channel); + kfree(cparam); return err; } @@ -1200,48 +1347,36 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc) for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) { int ix = i; + u32 rqn; if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR) ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE); ix = priv->params.indirection_rqt[ix]; - MLX5_SET(rqtc, rqtc, rq_num[i], - test_bit(MLX5E_STATE_OPENED, &priv->state) ? - priv->channel[ix]->rq.rqn : - priv->drop_rq.rqn); + rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ? + priv->channel[ix]->rq.rqn : + priv->drop_rq.rqn; + MLX5_SET(rqtc, rqtc, rq_num[i], rqn); } } -static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc, - enum mlx5e_rqt_ix rqt_ix) +static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc, + int ix) { + u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ? + priv->channel[ix]->rq.rqn : + priv->drop_rq.rqn; - switch (rqt_ix) { - case MLX5E_INDIRECTION_RQT: - mlx5e_fill_indir_rqt_rqns(priv, rqtc); - - break; - - default: /* MLX5E_SINGLE_RQ_RQT */ - MLX5_SET(rqtc, rqtc, rq_num[0], - test_bit(MLX5E_STATE_OPENED, &priv->state) ? - priv->channel[0]->rq.rqn : - priv->drop_rq.rqn); - - break; - } + MLX5_SET(rqtc, rqtc, rq_num[0], rqn); } -static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) +static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn) { struct mlx5_core_dev *mdev = priv->mdev; - u32 *in; void *rqtc; int inlen; - int sz; int err; - - sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 
1 : MLX5E_INDIR_RQT_SIZE; + u32 *in; inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); @@ -1253,26 +1388,73 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); + if (sz > 1) /* RSS */ + mlx5e_fill_indir_rqt_rqns(priv, rqtc); + else + mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix); - err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]); + err = mlx5_core_create_rqt(mdev, in, inlen, rqtn); kvfree(in); + return err; +} + +static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn) +{ + mlx5_core_destroy_rqt(priv->mdev, rqtn); +} + +static int mlx5e_create_rqts(struct mlx5e_priv *priv) +{ + int nch = mlx5e_get_max_num_channels(priv->mdev); + u32 *rqtn; + int err; + int ix; + + /* Indirect RQT */ + rqtn = &priv->indir_rqtn; + err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn); + if (err) + return err; + + /* Direct RQTs */ + for (ix = 0; ix < nch; ix++) { + rqtn = &priv->direct_tir[ix].rqtn; + err = mlx5e_create_rqt(priv, 1 /*size */, ix, rqtn); + if (err) + goto err_destroy_rqts; + } + + return 0; + +err_destroy_rqts: + for (ix--; ix >= 0; ix--) + mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn); + + mlx5e_destroy_rqt(priv, priv->indir_rqtn); return err; } -int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) +static void mlx5e_destroy_rqts(struct mlx5e_priv *priv) +{ + int nch = mlx5e_get_max_num_channels(priv->mdev); + int i; + + for (i = 0; i < nch; i++) + mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn); + + mlx5e_destroy_rqt(priv, priv->indir_rqtn); +} + +int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix) { struct mlx5_core_dev *mdev = priv->mdev; - u32 *in; void *rqtc; int inlen; - int sz; + u32 *in; int err; - sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 
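/* Editor's note: mlx5e_create_rqts() above follows the usual partial-failure
 * idiom -- on error, destroy only the entries already created, in reverse
 * order. Generic shape of the pattern (create_one/destroy_one are
 * placeholders, not driver functions):
 */
static int create_all(int n, int (*create_one)(int), void (*destroy_one)(int))
{
        int i;
        int err = 0;

        for (i = 0; i < n; i++) {
                err = create_one(i);
                if (err)
                        goto err_unwind;
        }
        return 0;

err_unwind:
        for (i--; i >= 0; i--)
                destroy_one(i);
        return err;
}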
1 : MLX5E_INDIR_RQT_SIZE; - inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; in = mlx5_vzalloc(inlen); if (!in) @@ -1281,27 +1463,31 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - - mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix); + if (sz > 1) /* RSS */ + mlx5e_fill_indir_rqt_rqns(priv, rqtc); + else + mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix); MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); - err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen); + err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen); kvfree(in); return err; } -static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix) -{ - mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]); -} - static void mlx5e_redirect_rqts(struct mlx5e_priv *priv) { - mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); - mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT); + u32 rqtn; + int ix; + + rqtn = priv->indir_rqtn; + mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0); + for (ix = 0; ix < priv->params.num_channels; ix++) { + rqtn = priv->direct_tir[ix].rqtn; + mlx5e_redirect_rqt(priv, rqtn, 1, ix); + } } static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv) @@ -1346,6 +1532,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) int inlen; int err; int tt; + int ix; inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = mlx5_vzalloc(inlen); @@ -1357,23 +1544,32 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) mlx5e_build_tir_ctx_lro(tirc, priv); - for (tt = 0; tt < MLX5E_NUM_TT; tt++) { - err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in, + inlen); if (err) - break; + goto free_in; } + for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) { + err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, + in, inlen); + if (err) + goto free_in; + } + +free_in: kvfree(in); return err; } -static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev, - u32 tirn) +static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) { void *in; int inlen; int err; + int i; inlen = MLX5_ST_SZ_BYTES(modify_tir_in); in = mlx5_vzalloc(inlen); @@ -1382,46 +1578,70 @@ static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev, MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); - err = mlx5_core_modify_tir(mdev, tirn, in, inlen); + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { + err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in, + inlen); + if (err) + return err; + } + + for (i = 0; i < priv->params.num_channels; i++) { + err = mlx5_core_modify_tir(priv->mdev, + priv->direct_tir[i].tirn, in, + inlen); + if (err) + return err; + } kvfree(in); - return err; + return 0; } -static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv) +static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu) { + struct mlx5_core_dev *mdev = priv->mdev; + u16 hw_mtu = MLX5E_SW2HW_MTU(mtu); int err; - int i; - for (i = 0; i < MLX5E_NUM_TT; i++) { - err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev, - priv->tirn[i]); - if (err) - return err; - } + err = mlx5_set_port_mtu(mdev, hw_mtu, 1); + if (err) + return err; + /* Update vport context MTU */ + mlx5_modify_nic_vport_mtu(mdev, hw_mtu); return 0; } +static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu) +{ + struct mlx5_core_dev *mdev = 
priv->mdev; + u16 hw_mtu = 0; + int err; + + err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu); + if (err || !hw_mtu) /* fallback to port oper mtu */ + mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); + + *mtu = MLX5E_HW2SW_MTU(hw_mtu); +} + static int mlx5e_set_dev_port_mtu(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5_core_dev *mdev = priv->mdev; - int hw_mtu; + u16 mtu; int err; - err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1); + err = mlx5e_set_mtu(priv, netdev->mtu); if (err) return err; - mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); + mlx5e_query_mtu(priv, &mtu); + if (mtu != netdev->mtu) + netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n", + __func__, mtu, netdev->mtu); - if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu) - netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n", - __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu); - - netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu); + netdev->mtu = mtu; return 0; } @@ -1478,8 +1698,11 @@ int mlx5e_open_locked(struct net_device *netdev) mlx5e_redirect_rqts(priv); mlx5e_update_carrier(priv); mlx5e_timestamp_init(priv); +#ifdef CONFIG_RFS_ACCEL + priv->netdev->rx_cpu_rmap = priv->mdev->rmap; +#endif - schedule_delayed_work(&priv->update_stats_work, 0); + queue_delayed_work(priv->wq, &priv->update_stats_work, 0); return 0; @@ -1685,7 +1908,8 @@ static void mlx5e_destroy_tises(struct mlx5e_priv *priv) mlx5e_destroy_tis(priv, tc); } -static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) +static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, + enum mlx5e_traffic_types tt) { void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); @@ -1706,19 +1930,8 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) mlx5e_build_tir_ctx_lro(tirc, priv); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - - switch (tt) { - case MLX5E_TT_ANY: - MLX5_SET(tirc, tirc, indirect_table, - priv->rqtn[MLX5E_SINGLE_RQ_RQT]); - MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); - break; - default: - MLX5_SET(tirc, tirc, indirect_table, - priv->rqtn[MLX5E_INDIRECTION_RQT]); - mlx5e_build_tir_ctx_hash(tirc, priv); - break; - } + MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn); + mlx5e_build_tir_ctx_hash(tirc, priv); switch (tt) { case MLX5E_TT_IPV4_TCP: @@ -1798,64 +2011,107 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt) MLX5_SET(rx_hash_field_select, hfso, selected_fields, MLX5_HASH_IP); break; + default: + WARN_ONCE(true, + "mlx5e_build_indir_tir_ctx: bad traffic type!\n"); } } -static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt) +static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, + u32 rqtn) { - struct mlx5_core_dev *mdev = priv->mdev; - u32 *in; + MLX5_SET(tirc, tirc, transport_domain, priv->tdn); + + mlx5e_build_tir_ctx_lro(tirc, priv); + + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); + MLX5_SET(tirc, tirc, indirect_table, rqtn); + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); +} + +static int mlx5e_create_tirs(struct mlx5e_priv *priv) +{ + int nch = mlx5e_get_max_num_channels(priv->mdev); void *tirc; int inlen; + u32 *tirn; int err; + u32 *in; + int ix; + int tt; inlen = MLX5_ST_SZ_BYTES(create_tir_in); in = mlx5_vzalloc(inlen); if (!in) return -ENOMEM; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + /* indirect tirs */ + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + 
memset(in, 0, inlen); + tirn = &priv->indir_tirn[tt]; + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + mlx5e_build_indir_tir_ctx(priv, tirc, tt); + err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + if (err) + goto err_destroy_tirs; + } + + /* direct tirs */ + for (ix = 0; ix < nch; ix++) { + memset(in, 0, inlen); + tirn = &priv->direct_tir[ix].tirn; + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + mlx5e_build_direct_tir_ctx(priv, tirc, + priv->direct_tir[ix].rqtn); + err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn); + if (err) + goto err_destroy_ch_tirs; + } + + kvfree(in); + + return 0; - mlx5e_build_tir_ctx(priv, tirc, tt); +err_destroy_ch_tirs: + for (ix--; ix >= 0; ix--) + mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn); - err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]); +err_destroy_tirs: + for (tt--; tt >= 0; tt--) + mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]); kvfree(in); return err; } -static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt) +static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) { - mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]); + int nch = mlx5e_get_max_num_channels(priv->mdev); + int i; + + for (i = 0; i < nch; i++) + mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn); + + for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) + mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]); } -static int mlx5e_create_tirs(struct mlx5e_priv *priv) +int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd) { - int err; + int err = 0; int i; - for (i = 0; i < MLX5E_NUM_TT; i++) { - err = mlx5e_create_tir(priv, i); + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return 0; + + for (i = 0; i < priv->params.num_channels; i++) { + err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd); if (err) - goto err_destroy_tirs; + return err; } return 0; - -err_destroy_tirs: - for (i--; i >= 0; i--) - mlx5e_destroy_tir(priv, i); - - return err; -} - -static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) -{ - int i; - - for (i = 0; i < MLX5E_NUM_TT; i++) - mlx5e_destroy_tir(priv, i); } static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) @@ -1898,6 +2154,8 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, return mlx5e_configure_flower(priv, proto, tc->cls_flower); case TC_CLSFLOWER_DESTROY: return mlx5e_delete_flower(priv, tc->cls_flower); + case TC_CLSFLOWER_STATS: + return mlx5e_stats_flower(priv, tc->cls_flower); } default: return -EOPNOTSUPP; @@ -1914,19 +2172,37 @@ static struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_sw_stats *sstats = &priv->stats.sw; struct mlx5e_vport_stats *vstats = &priv->stats.vport; - - stats->rx_packets = vstats->rx_packets; - stats->rx_bytes = vstats->rx_bytes; - stats->tx_packets = vstats->tx_packets; - stats->tx_bytes = vstats->tx_bytes; - stats->multicast = vstats->rx_multicast_packets + - vstats->tx_multicast_packets; - stats->tx_errors = vstats->tx_error_packets; - stats->rx_errors = vstats->rx_error_packets; - stats->tx_dropped = vstats->tx_queue_dropped; - stats->rx_crc_errors = 0; - stats->rx_length_errors = 0; + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + + stats->rx_packets = sstats->rx_packets; + stats->rx_bytes = sstats->rx_bytes; + stats->tx_packets = sstats->tx_packets; + stats->tx_bytes = sstats->tx_bytes; + + stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer; + stats->tx_dropped = sstats->tx_queue_dropped; + + 
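/* Editor's note (sketch, not driver code): the ndo_get_stats64 mapping just
 * below composes the rtnl error counters from IEEE 802.3 port counters;
 * rx_errors is the sum of its parts, not a distinct hardware counter:
 */
#include <stdint.h>
struct rx_error_parts {
        uint64_t length_errors; /* in-range + out-of-range + frame-too-long */
        uint64_t crc_errors;    /* frame check sequence errors */
        uint64_t frame_errors;  /* alignment errors */
};
static uint64_t total_rx_errors(const struct rx_error_parts *p)
{
        return p->length_errors + p->crc_errors + p->frame_errors;
}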
stats->rx_length_errors = + PPORT_802_3_GET(pstats, a_in_range_length_errors) + + PPORT_802_3_GET(pstats, a_out_of_range_length_field) + + PPORT_802_3_GET(pstats, a_frame_too_long_errors); + stats->rx_crc_errors = + PPORT_802_3_GET(pstats, a_frame_check_sequence_errors); + stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors); + stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards); + stats->tx_carrier_errors = + PPORT_802_3_GET(pstats, a_symbol_error_during_carrier); + stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + + stats->rx_frame_errors; + stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; + + /* vport multicast also counts packets that are dropped due to steering + * or rx out of buffer + */ + stats->multicast = + VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); return stats; } @@ -1935,7 +2211,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - schedule_work(&priv->set_rx_mode_work); + queue_work(priv->wq, &priv->set_rx_mode_work); } static int mlx5e_set_mac(struct net_device *netdev, void *addr) @@ -1950,71 +2226,180 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr) ether_addr_copy(netdev->dev_addr, saddr->sa_data); netif_addr_unlock_bh(netdev); - schedule_work(&priv->set_rx_mode_work); + queue_work(priv->wq, &priv->set_rx_mode_work); return 0; } -static int mlx5e_set_features(struct net_device *netdev, - netdev_features_t features) +#define MLX5E_SET_FEATURE(netdev, feature, enable) \ + do { \ + if (enable) \ + netdev->features |= feature; \ + else \ + netdev->features &= ~feature; \ + } while (0) + +typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable); + +static int set_feature_lro(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); - int err = 0; - netdev_features_t changes = features ^ netdev->features; + bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + int err; mutex_lock(&priv->state_lock); - if (changes & NETIF_F_LRO) { - bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - - if (was_opened) - mlx5e_close_locked(priv->netdev); - - priv->params.lro_en = !!(features & NETIF_F_LRO); - err = mlx5e_modify_tirs_lro(priv); - if (err) - mlx5_core_warn(priv->mdev, "lro modify failed, %d\n", - err); + if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)) + mlx5e_close_locked(priv->netdev); - if (was_opened) - err = mlx5e_open_locked(priv->netdev); + priv->params.lro_en = enable; + err = mlx5e_modify_tirs_lro(priv); + if (err) { + netdev_err(netdev, "lro modify failed, %d\n", err); + priv->params.lro_en = !enable; } + if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST)) + mlx5e_open_locked(priv->netdev); + mutex_unlock(&priv->state_lock); - if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) { - if (features & NETIF_F_HW_VLAN_CTAG_FILTER) - mlx5e_enable_vlan_filter(priv); - else - mlx5e_disable_vlan_filter(priv); - } + return err; +} + +static int set_feature_vlan_filter(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (enable) + mlx5e_enable_vlan_filter(priv); + else + mlx5e_disable_vlan_filter(priv); - if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) && - mlx5e_tc_num_filters(priv)) { + return 0; +} + +static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (!enable && 
mlx5e_tc_num_filters(priv)) { netdev_err(netdev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); return -EINVAL; } + return 0; +} + +static int set_feature_rx_all(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_set_port_fcs(mdev, !enable); +} + +static int set_feature_rx_vlan(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + mutex_lock(&priv->state_lock); + + priv->params.vlan_strip_disable = !enable; + err = mlx5e_modify_rqs_vsd(priv, !enable); + if (err) + priv->params.vlan_strip_disable = enable; + + mutex_unlock(&priv->state_lock); + + return err; +} + +#ifdef CONFIG_RFS_ACCEL +static int set_feature_arfs(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + if (enable) + err = mlx5e_arfs_enable(priv); + else + err = mlx5e_arfs_disable(priv); + return err; } +#endif + +static int mlx5e_handle_feature(struct net_device *netdev, + netdev_features_t wanted_features, + netdev_features_t feature, + mlx5e_feature_handler feature_handler) +{ + netdev_features_t changes = wanted_features ^ netdev->features; + bool enable = !!(wanted_features & feature); + int err; + + if (!(changes & feature)) + return 0; + + err = feature_handler(netdev, enable); + if (err) { + netdev_err(netdev, "%s feature 0x%llx failed err %d\n", + enable ? "Enable" : "Disable", feature, err); + return err; + } + + MLX5E_SET_FEATURE(netdev, feature, enable); + return 0; +} + +static int mlx5e_set_features(struct net_device *netdev, + netdev_features_t features) +{ + int err; + + err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO, + set_feature_lro); + err |= mlx5e_handle_feature(netdev, features, + NETIF_F_HW_VLAN_CTAG_FILTER, + set_feature_vlan_filter); + err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC, + set_feature_tc_num_filters); + err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL, + set_feature_rx_all); + err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX, + set_feature_rx_vlan); +#ifdef CONFIG_RFS_ACCEL + err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE, + set_feature_arfs); +#endif + + return err ? 
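/* Editor's note (standalone restatement, simplified from the code above):
 * mlx5e_handle_feature() keys off the XOR of the wanted and current feature
 * bitmaps, so each handler runs only when its bit actually toggles, and the
 * netdev feature bit is committed only after the handler succeeds:
 */
#include <stdbool.h>
#include <stdint.h>
typedef int (*feature_handler)(bool enable);
static int handle_feature(uint64_t *features, uint64_t wanted,
                          uint64_t feature, feature_handler handler)
{
        uint64_t changes = wanted ^ *features;
        bool enable = (wanted & feature) != 0;
        int err;

        if (!(changes & feature))
                return 0;       /* bit unchanged: nothing to do */

        err = handler(enable);
        if (err)
                return err;

        if (enable)
                *features |= feature;
        else
                *features &= ~feature;
        return 0;
}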
-EINVAL : 0; +} + +#define MXL5_HW_MIN_MTU 64 +#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN) static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; bool was_opened; - int max_mtu; + u16 max_mtu; + u16 min_mtu; int err = 0; mlx5_query_port_max_mtu(mdev, &max_mtu, 1); max_mtu = MLX5E_HW2SW_MTU(max_mtu); + min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU); - if (new_mtu > max_mtu) { + if (new_mtu > max_mtu || new_mtu < min_mtu) { netdev_err(netdev, - "%s: Bad MTU (%d) > (%d) Max\n", - __func__, new_mtu, max_mtu); + "%s: Bad MTU (%d), valid range is: [%d..%d]\n", + __func__, new_mtu, min_mtu, max_mtu); return -EINVAL; } @@ -2063,6 +2448,21 @@ static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos) vlan, qos); } +static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting); +} + +static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting); +} static int mlx5_vport_link2ifla(u8 esw_link) { switch (esw_link) { @@ -2127,7 +2527,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev, if (!mlx5e_vxlan_allowed(priv->mdev)) return; - mlx5e_vxlan_add_port(priv, be16_to_cpu(port)); + mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1); } static void mlx5e_del_vxlan_port(struct net_device *netdev, @@ -2138,7 +2538,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev, if (!mlx5e_vxlan_allowed(priv->mdev)) return; - mlx5e_vxlan_del_port(priv, be16_to_cpu(port)); + mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0); } static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, @@ -2205,6 +2605,9 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = mlx5e_rx_flow_steer, +#endif }; static const struct net_device_ops mlx5e_netdev_ops_sriov = { @@ -2224,8 +2627,13 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_add_vxlan_port = mlx5e_add_vxlan_port, .ndo_del_vxlan_port = mlx5e_del_vxlan_port, .ndo_features_check = mlx5e_features_check, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = mlx5e_rx_flow_steer, +#endif .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_vlan = mlx5e_set_vf_vlan, + .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk, + .ndo_set_vf_trust = mlx5e_set_vf_trust, .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, @@ -2283,25 +2691,121 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv) } #endif -void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, +void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev, + u32 *indirection_rqt, int len, int num_channels) { + int node = mdev->priv.numa_node; + int node_num_of_cores; int i; + if (node == -1) + node = first_online_node; + + node_num_of_cores = cpumask_weight(cpumask_of_node(node)); + + if (node_num_of_cores) + num_channels = min_t(int, num_channels, node_num_of_cores); + for (i = 0; i < len; i++) indirection_rqt[i] = i % 
num_channels; } +static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) +{ + return MLX5_CAP_GEN(mdev, striding_rq) && + MLX5_CAP_GEN(mdev, umr_ptr_rlky) && + MLX5_CAP_ETH(mdev, reg_umr_sq); +} + +static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw) +{ + enum pcie_link_width width; + enum pci_bus_speed speed; + int err = 0; + + err = pcie_get_minimum_link(mdev->pdev, &speed, &width); + if (err) + return err; + + if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) + return -EINVAL; + + switch (speed) { + case PCIE_SPEED_2_5GT: + *pci_bw = 2500 * width; + break; + case PCIE_SPEED_5_0GT: + *pci_bw = 5000 * width; + break; + case PCIE_SPEED_8_0GT: + *pci_bw = 8000 * width; + break; + default: + return -EINVAL; + } + + return 0; +} + +static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw) +{ + return (link_speed && pci_bw && + (pci_bw < 40000) && (pci_bw < link_speed)); +} + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, int num_channels) { struct mlx5e_priv *priv = netdev_priv(netdev); + u32 link_speed = 0; + u32 pci_bw = 0; priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; - priv->params.log_rq_size = - MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ? + MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ : + MLX5_WQ_TYPE_LINKED_LIST; + + /* set CQE compression */ + priv->params.rx_cqe_compress_admin = false; + if (MLX5_CAP_GEN(mdev, cqe_compression) && + MLX5_CAP_GEN(mdev, vport_group_manager)) { + mlx5e_get_max_linkspeed(mdev, &link_speed); + mlx5e_get_pci_bw(mdev, &pci_bw); + mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n", + link_speed, pci_bw); + priv->params.rx_cqe_compress_admin = + cqe_compress_heuristic(link_speed, pci_bw); + } + + priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin; + + switch (priv->params.rq_wq_type) { + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: + priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW; + priv->params.mpwqe_log_stride_sz = + priv->params.rx_cqe_compress ? 
+ MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS : + MLX5_MPWRQ_LOG_STRIDE_SIZE; + priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - + priv->params.mpwqe_log_stride_sz; + priv->params.lro_en = true; + break; + default: /* MLX5_WQ_TYPE_LINKED_LIST */ + priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE; + } + + mlx5_core_info(mdev, + "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n", + priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ, + BIT(priv->params.log_rq_size), + BIT(priv->params.mpwqe_log_stride_sz), + priv->params.rx_cqe_compress_admin); + + priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type, + BIT(priv->params.log_rq_size)); priv->params.rx_cq_moderation_usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; priv->params.rx_cq_moderation_pkts = @@ -2311,15 +2815,13 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->params.tx_cq_moderation_pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev); - priv->params.min_rx_wqes = - MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; priv->params.num_tc = 1; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; netdev_rss_key_fill(priv->params.toeplitz_hash_key, sizeof(priv->params.toeplitz_hash_key)); - mlx5e_build_default_indir_rqt(priv->params.indirection_rqt, + mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt, MLX5E_INDIR_RQT_SIZE, num_channels); priv->params.lro_wqe_sz = @@ -2356,6 +2858,8 @@ static void mlx5e_build_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; + bool fcs_supported; + bool fcs_enabled; SET_NETDEV_DEV(netdev, &mdev->pdev->dev); @@ -2390,25 +2894,41 @@ static void mlx5e_build_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; if (mlx5e_vxlan_allowed(mdev)) { - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; netdev->hw_enc_features |= NETIF_F_IP_CSUM; - netdev->hw_enc_features |= NETIF_F_RXCSUM; + netdev->hw_enc_features |= NETIF_F_IPV6_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; netdev->hw_enc_features |= NETIF_F_TSO6; - netdev->hw_enc_features |= NETIF_F_RXHASH; netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_GSO_PARTIAL; + netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM; } + mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled); + + if (fcs_supported) + netdev->hw_features |= NETIF_F_RXALL; + netdev->features = netdev->hw_features; if (!priv->params.lro_en) netdev->features &= ~NETIF_F_LRO; + if (fcs_enabled) + netdev->features &= ~NETIF_F_RXALL; + #define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f) if (FT_CAP(flow_modify_en) && FT_CAP(modify_root) && FT_CAP(identified_miss_table_mode) && - FT_CAP(flow_table_modify)) - priv->netdev->hw_features |= NETIF_F_HW_TC; + FT_CAP(flow_table_modify)) { + netdev->hw_features |= NETIF_F_HW_TC; +#ifdef CONFIG_RFS_ACCEL + netdev->hw_features |= NETIF_F_NTUPLE; +#endif + } netdev->features |= NETIF_F_HIGHDMA; @@ -2442,6 +2962,61 @@ static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn, return err; } +static void mlx5e_create_q_counter(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int err; + + err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter); + if (err) { + mlx5_core_warn(mdev, "alloc queue counter 
failed, %d\n", err); + priv->q_counter = 0; + } +} + +static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv) +{ + if (!priv->q_counter) + return; + + mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); +} + +static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_create_mkey_mbox_in *in; + struct mlx5_mkey_seg *mkc; + int inlen = sizeof(*in); + u64 npages = + mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + mkc = &in->seg; + mkc->status = MLX5_MKEY_STATUS_FREE; + mkc->flags = MLX5_PERM_UMR_EN | + MLX5_PERM_LOCAL_READ | + MLX5_PERM_LOCAL_WRITE | + MLX5_ACCESS_MODE_MTT; + + mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); + mkc->flags_pd = cpu_to_be32(priv->pdn); + mkc->len = cpu_to_be64(npages << PAGE_SHIFT); + mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages)); + mkc->log2_page_size = PAGE_SHIFT; + + err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL, + NULL, NULL); + + kvfree(in); + + return err; +} + static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) { struct net_device *netdev; @@ -2467,10 +3042,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) priv = netdev_priv(netdev); + priv->wq = create_singlethread_workqueue("mlx5e"); + if (!priv->wq) + goto err_free_netdev; + err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); if (err) { mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); - goto err_free_netdev; + goto err_destroy_wq; } err = mlx5_core_alloc_pd(mdev, &priv->pdn); @@ -2491,10 +3070,16 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_dealloc_transport_domain; } + err = mlx5e_create_umr_mkey(priv); + if (err) { + mlx5_core_err(mdev, "create umr mkey failed, %d\n", err); + goto err_destroy_mkey; + } + err = mlx5e_create_tises(priv); if (err) { mlx5_core_warn(mdev, "create tises failed, %d\n", err); - goto err_destroy_mkey; + goto err_destroy_umr_mkey; } err = mlx5e_open_drop_rq(priv); @@ -2503,37 +3088,33 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_destroy_tises; } - err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT); + err = mlx5e_create_rqts(priv); if (err) { - mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err); + mlx5_core_warn(mdev, "create rqts failed, %d\n", err); goto err_close_drop_rq; } - err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT); - if (err) { - mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err); - goto err_destroy_rqt_indir; - } - err = mlx5e_create_tirs(priv); if (err) { mlx5_core_warn(mdev, "create tirs failed, %d\n", err); - goto err_destroy_rqt_single; + goto err_destroy_rqts; } - err = mlx5e_create_flow_tables(priv); + err = mlx5e_create_flow_steering(priv); if (err) { - mlx5_core_warn(mdev, "create flow tables failed, %d\n", err); + mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); goto err_destroy_tirs; } - mlx5e_init_eth_addr(priv); + mlx5e_create_q_counter(priv); + + mlx5e_init_l2_addr(priv); mlx5e_vxlan_init(priv); err = mlx5e_tc_init(priv); if (err) - goto err_destroy_flow_tables; + goto err_dealloc_q_counters; #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); @@ -2545,28 +3126,29 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_tc_cleanup; } - if (mlx5e_vxlan_allowed(mdev)) + if (mlx5e_vxlan_allowed(mdev)) { + rtnl_lock(); vxlan_get_rx_port(netdev); + rtnl_unlock(); + } 
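/* vxlan_get_rx_port() replays ndo_add_vxlan_port() for every VXLAN UDP
 * port already bound, so a freshly probed device learns about tunnels
 * created before it loaded; it expects to run under RTNL, which is why
 * the rtnl_lock()/rtnl_unlock() pair is added around the call above. */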
mlx5e_enable_async_events(priv); - schedule_work(&priv->set_rx_mode_work); + queue_work(priv->wq, &priv->set_rx_mode_work); return priv; err_tc_cleanup: mlx5e_tc_cleanup(priv); -err_destroy_flow_tables: - mlx5e_destroy_flow_tables(priv); +err_dealloc_q_counters: + mlx5e_destroy_q_counter(priv); + mlx5e_destroy_flow_steering(priv); err_destroy_tirs: mlx5e_destroy_tirs(priv); -err_destroy_rqt_single: - mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); - -err_destroy_rqt_indir: - mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT); +err_destroy_rqts: + mlx5e_destroy_rqts(priv); err_close_drop_rq: mlx5e_close_drop_rq(priv); @@ -2574,6 +3156,9 @@ err_close_drop_rq: err_destroy_tises: mlx5e_destroy_tises(priv); +err_destroy_umr_mkey: + mlx5_core_destroy_mkey(mdev, &priv->umr_mkey); + err_destroy_mkey: mlx5_core_destroy_mkey(mdev, &priv->mkey); @@ -2586,6 +3171,9 @@ err_dealloc_pd: err_unmap_free_uar: mlx5_unmap_free_uar(mdev, &priv->cq_uar); +err_destroy_wq: + destroy_workqueue(priv->wq); + err_free_netdev: free_netdev(netdev); @@ -2599,23 +3187,37 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) set_bit(MLX5E_STATE_DESTROYING, &priv->state); - schedule_work(&priv->set_rx_mode_work); + queue_work(priv->wq, &priv->set_rx_mode_work); mlx5e_disable_async_events(priv); - flush_scheduled_work(); - unregister_netdev(netdev); + flush_workqueue(priv->wq); + if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { + netif_device_detach(netdev); + mutex_lock(&priv->state_lock); + if (test_bit(MLX5E_STATE_OPENED, &priv->state)) + mlx5e_close_locked(netdev); + mutex_unlock(&priv->state_lock); + } else { + unregister_netdev(netdev); + } + mlx5e_tc_cleanup(priv); mlx5e_vxlan_cleanup(priv); - mlx5e_destroy_flow_tables(priv); + mlx5e_destroy_q_counter(priv); + mlx5e_destroy_flow_steering(priv); mlx5e_destroy_tirs(priv); - mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); - mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT); + mlx5e_destroy_rqts(priv); mlx5e_close_drop_rq(priv); mlx5e_destroy_tises(priv); + mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey); mlx5_core_destroy_mkey(priv->mdev, &priv->mkey); mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn); mlx5_core_dealloc_pd(priv->mdev, priv->pdn); mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar); - free_netdev(netdev); + cancel_delayed_work_sync(&priv->update_stats_work); + destroy_workqueue(priv->wq); + + if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) + free_netdev(netdev); } static void *mlx5e_get_netdev(void *vpriv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 58d4e2f962c3..bd947704b59c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -42,13 +42,149 @@ static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp) return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; } -static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, - struct mlx5e_rx_wqe *wqe, u16 ix) +static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc, + void *data) +{ + u32 ci = cqcc & cq->wq.sz_m1; + + memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64)); +} + +static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, + struct mlx5e_cq *cq, u32 cqcc) +{ + mlx5e_read_cqe_slot(cq, cqcc, &cq->title); + cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt); + cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter); + rq->stats.cqe_compress_blks++; +} + 
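The helpers above and the decompression routines that follow implement mlx5's receive CQE compression: the first CQE of a session (the "title") carries the fields common to every packet in the session, and each following 64-byte slot packs an array of mini CQEs holding only the per-packet byte count and checksum, which get overlaid onto a copy of the title before the result is handed to the normal CQE handler. Below is a minimal, self-contained model of that expansion; the struct layouts, the 8-entry mini array, and the one-WQE-per-packet stepping are simplifications for illustration, not the hardware format.

#include <stdint.h>
#include <stdio.h>

#define MINI_ARR_SZ 8	/* cf. MLX5_MINI_CQE_ARRAY_SIZE */

struct mini_cqe {	/* per-packet fields kept by a compressed entry */
	uint32_t byte_cnt;
	uint16_t checksum;
};

struct full_cqe {	/* the decompressed result handed to the handler */
	uint32_t byte_cnt;
	uint16_t checksum;
	uint16_t wqe_counter;
};

/* Expand an n_pkts session: copy the shared title fields, overlay each
 * mini CQE's private fields, and step the WQE counter by one per packet
 * (the linked-list RQ case; a striding RQ steps by consumed strides). */
static void decompress_session(const struct full_cqe *title,
			       const struct mini_cqe *minis, int n_pkts)
{
	for (int i = 0; i < n_pkts; i++) {
		struct full_cqe cqe = *title;

		cqe.byte_cnt    = minis[i % MINI_ARR_SZ].byte_cnt;
		cqe.checksum    = minis[i % MINI_ARR_SZ].checksum;
		cqe.wqe_counter = (uint16_t)(title->wqe_counter + i);
		printf("pkt %d: wqe %u, %u bytes\n",
		       i, cqe.wqe_counter, cqe.byte_cnt);
	}
}

int main(void)
{
	/* In the real CQE the title's byte_cnt holds the session length. */
	struct full_cqe title = { .byte_cnt = 3, .wqe_counter = 40 };
	struct mini_cqe minis[MINI_ARR_SZ] = {
		{ 1500, 0xaaaa }, { 64, 0xbbbb }, { 9000, 0xcccc },
	};

	decompress_session(&title, minis, (int)title.byte_cnt);
	return 0;
}

This also explains why mlx5e_read_title_slot() stores the title's byte_cnt into cq->decmprs_left before any mini slot is read: in a compressed session that field doubles as the count of compressed CQEs remaining.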
+static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) +{ + mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr); + cq->mini_arr_idx = 0; +} + +static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) +{ + u8 op_own = (cqcc >> cq->wq.log_sz) & 1; + u32 wq_sz = 1 << cq->wq.log_sz; + u32 ci = cqcc & cq->wq.sz_m1; + u32 ci_top = min_t(u32, wq_sz, ci + n); + + for (; ci < ci_top; ci++, n--) { + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci); + + cqe->op_own = op_own; + } + + if (unlikely(ci == wq_sz)) { + op_own = !op_own; + for (ci = 0; ci < n; ci++) { + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci); + + cqe->op_own = op_own; + } + } +} + +static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, + struct mlx5e_cq *cq, u32 cqcc) +{ + u16 wqe_cnt_step; + + cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt; + cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum; + cq->title.op_own &= 0xf0; + cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz); + cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter); + + wqe_cnt_step = + rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ? + mpwrq_get_cqe_consumed_strides(&cq->title) : 1; + cq->decmprs_wqe_counter = + (cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1; +} + +static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, + struct mlx5e_cq *cq, u32 cqcc) +{ + mlx5e_decompress_cqe(rq, cq, cqcc); + cq->title.rss_hash_type = 0; + cq->title.rss_hash_result = 0; +} + +static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, + struct mlx5e_cq *cq, + int update_owner_only, + int budget_rem) +{ + u32 cqcc = cq->wq.cc + update_owner_only; + u32 cqe_count; + u32 i; + + cqe_count = min_t(u32, cq->decmprs_left, budget_rem); + + for (i = update_owner_only; i < cqe_count; + i++, cq->mini_arr_idx++, cqcc++) { + if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE) + mlx5e_read_mini_arr_slot(cq, cqcc); + + mlx5e_decompress_cqe_no_hash(rq, cq, cqcc); + rq->handle_rx_cqe(rq, &cq->title); + } + mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc); + cq->wq.cc = cqcc; + cq->decmprs_left -= cqe_count; + rq->stats.cqe_compress_pkts += cqe_count; + + return cqe_count; +} + +static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, + struct mlx5e_cq *cq, + int budget_rem) +{ + mlx5e_read_title_slot(rq, cq, cq->wq.cc); + mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1); + mlx5e_decompress_cqe(rq, cq, cq->wq.cc); + rq->handle_rx_cqe(rq, &cq->title); + cq->mini_arr_idx++; + + return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; +} + +void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val) +{ + bool was_opened; + + if (!MLX5_CAP_GEN(priv->mdev, cqe_compression)) + return; + + mutex_lock(&priv->state_lock); + + if (priv->params.rx_cqe_compress == val) + goto unlock; + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(priv->netdev); + + priv->params.rx_cqe_compress = val; + + if (was_opened) + mlx5e_open_locked(priv->netdev); + +unlock: + mutex_unlock(&priv->state_lock); +} + +int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) { struct sk_buff *skb; dma_addr_t dma_addr; - skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz); + skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz); if (unlikely(!skb)) return -ENOMEM; @@ -62,10 +198,9 @@ static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, if (unlikely(dma_mapping_error(rq->pdev, dma_addr))) goto err_free_skb; - skb_reserve(skb, 
MLX5E_NET_IP_ALIGN); - *((dma_addr_t *)skb->cb) = dma_addr; - wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN); + wqe->data.addr = cpu_to_be64(dma_addr); + wqe->data.lkey = rq->mkey_be; rq->skb[ix] = skb; @@ -77,18 +212,389 @@ err_free_skb: return -ENOMEM; } +static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq) +{ + return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER; +} + +static inline void +mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev, + struct mlx5e_mpw_info *wi, + u32 wqe_offset, u32 len) +{ + dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset, + len, DMA_FROM_DEVICE); +} + +static inline void +mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev, + struct mlx5e_mpw_info *wi, + u32 wqe_offset, u32 len) +{ + /* No dma pre sync for fragmented MPWQE */ +} + +static inline void +mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 frag_offset, + u32 len) +{ + unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz); + + wi->skbs_frags[page_idx]++; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + &wi->dma_info.page[page_idx], frag_offset, + len, truesize); +} + +static inline void +mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 frag_offset, + u32 len) +{ + unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz); + + dma_sync_single_for_cpu(rq->pdev, + wi->umr.dma_info[page_idx].addr + frag_offset, + len, DMA_FROM_DEVICE); + wi->skbs_frags[page_idx]++; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + wi->umr.dma_info[page_idx].page, frag_offset, + len, truesize); +} + +static inline void +mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 offset, + u32 headlen) +{ + struct page *page = &wi->dma_info.page[page_idx]; + + skb_copy_to_linear_data(skb, page_address(page) + offset, + ALIGN(headlen, sizeof(long))); +} + +static inline void +mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev, + struct sk_buff *skb, + struct mlx5e_mpw_info *wi, + u32 page_idx, u32 offset, + u32 headlen) +{ + u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset); + struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx]; + unsigned int len; + + /* Aligning len to sizeof(long) optimizes memcpy performance */ + len = ALIGN(headlen_pg, sizeof(long)); + dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data_offset(skb, 0, + page_address(dma_info->page) + offset, + len); + if (unlikely(offset + headlen > PAGE_SIZE)) { + dma_info++; + headlen_pg = len; + len = ALIGN(headlen - headlen_pg, sizeof(long)); + dma_sync_single_for_cpu(pdev, dma_info->addr, len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data_offset(skb, headlen_pg, + page_address(dma_info->page), + len); + } +} + +static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix) +{ + return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS + + wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8); +} + +static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, + struct mlx5e_sq *sq, + struct mlx5e_umr_wqe *wqe, + u16 ix) +{ + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; + struct mlx5_wqe_data_seg *dseg = &wqe->data; + struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; + u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS); + u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix); + + 
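/* The WQE is zeroed, then filled as three segments: the ctrl segment
 * carries the UMR opcode and (in cseg->imm) the mkey being updated, the
 * UMR ctrl segment gives the number of MTT octwords to write and the
 * translation offset of this WQE's slice of the table, and the data
 * segment points at the DMA-mapped MTT array to copy them from. */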
memset(wqe, 0, sizeof(*wqe)); + cseg->opmod_idx_opcode = + cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | + MLX5_OPCODE_UMR); + cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | + ds_cnt); + cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; + cseg->imm = rq->umr_mkey_be; + + ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN; + ucseg->klm_octowords = + cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE)); + ucseg->bsf_octowords = + cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset)); + ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); + + dseg->lkey = sq->mkey_be; + dseg->addr = cpu_to_be64(wi->umr.mtt_addr); +} + +static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix) +{ + struct mlx5e_sq *sq = &rq->channel->icosq; + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5e_umr_wqe *wqe; + u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB); + u16 pi; + + /* fill sq edge with nops to avoid wqe wrap around */ + while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) { + sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP; + sq->ico_wqe_info[pi].num_wqebbs = 1; + mlx5e_send_nop(sq, true); + } + + wqe = mlx5_wq_cyc_get_wqe(wq, pi); + mlx5e_build_umr_wqe(rq, sq, wqe, ix); + sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR; + sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs; + sq->pc += num_wqebbs; + mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); +} + +static inline int mlx5e_get_wqe_mtt_sz(void) +{ + /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes. + * To avoid copying garbage after the mtt array, we allocate + * a little more. + */ + return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64), + MLX5_UMR_MTT_ALIGNMENT); +} + +static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq, + struct mlx5e_mpw_info *wi, + int i) +{ + struct page *page; + + page = dev_alloc_page(); + if (unlikely(!page)) + return -ENOMEM; + + wi->umr.dma_info[i].page = page; + wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) { + put_page(page); + return -ENOMEM; + } + wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR); + + return 0; +} + +static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_rx_wqe *wqe, + u16 ix) +{ + struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; + int mtt_sz = mlx5e_get_wqe_mtt_sz(); + u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT; + int i; + + wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) * + MLX5_MPWRQ_PAGES_PER_WQE, + GFP_ATOMIC); + if (unlikely(!wi->umr.dma_info)) + goto err_out; + + /* We allocate more than mtt_sz as we will align the pointer */ + wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1, + GFP_ATOMIC); + if (unlikely(!wi->umr.mtt_no_align)) + goto err_free_umr; + + wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN); + wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz, + PCI_DMA_TODEVICE); + if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr))) + goto err_free_mtt; + + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { + if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i))) + goto err_unmap; + page_ref_add(wi->umr.dma_info[i].page, + mlx5e_mpwqe_strides_per_page(rq)); + wi->skbs_frags[i] = 0; + } + + wi->consumed_strides = 0; + wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe; + wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe; + wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe; + wi->free_wqe = mlx5e_free_rx_fragmented_mpwqe; + 
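/* Hand the WQE to hardware in UMR address space: addr is the byte
 * offset of this WQE's pages within the umr_mkey mapping (the MTT
 * offset shifted by PAGE_SHIFT), so lkey below must be the UMR mkey
 * rather than the regular DMA mkey used by the linear path. */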
wqe->data.lkey = rq->umr_mkey_be; + wqe->data.addr = cpu_to_be64(dma_offset); + + return 0; + +err_unmap: + while (--i >= 0) { + dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + page_ref_sub(wi->umr.dma_info[i].page, + mlx5e_mpwqe_strides_per_page(rq)); + put_page(wi->umr.dma_info[i].page); + } + dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE); + +err_free_mtt: + kfree(wi->umr.mtt_no_align); + +err_free_umr: + kfree(wi->umr.dma_info); + +err_out: + return -ENOMEM; +} + +void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_mpw_info *wi) +{ + int mtt_sz = mlx5e_get_wqe_mtt_sz(); + int i; + + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { + dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + page_ref_sub(wi->umr.dma_info[i].page, + mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]); + put_page(wi->umr.dma_info[i].page); + } + dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE); + kfree(wi->umr.mtt_no_align); + kfree(wi->umr.dma_info); +} + +void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq) +{ + struct mlx5_wq_ll *wq = &rq->wq; + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + + clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state); + mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); + rq->stats.mpwqe_frag++; + + /* ensure wqes are visible to device before updating doorbell record */ + dma_wmb(); + + mlx5_wq_ll_update_db_record(wq); +} + +static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_rx_wqe *wqe, + u16 ix) +{ + struct mlx5e_mpw_info *wi = &rq->wqe_info[ix]; + gfp_t gfp_mask; + int i; + + gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC; + wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, + MLX5_MPWRQ_WQE_PAGE_ORDER); + if (unlikely(!wi->dma_info.page)) + return -ENOMEM; + + wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0, + rq->wqe_sz, PCI_DMA_FROMDEVICE); + if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) { + put_page(wi->dma_info.page); + return -ENOMEM; + } + + /* We split the high-order page into order-0 ones and manage their + * reference counter to minimize the memory held by small skb fragments + */ + split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER); + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { + page_ref_add(&wi->dma_info.page[i], + mlx5e_mpwqe_strides_per_page(rq)); + wi->skbs_frags[i] = 0; + } + + wi->consumed_strides = 0; + wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe; + wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe; + wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe; + wi->free_wqe = mlx5e_free_rx_linear_mpwqe; + wqe->data.lkey = rq->mkey_be; + wqe->data.addr = cpu_to_be64(wi->dma_info.addr); + + return 0; +} + +void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq, + struct mlx5e_mpw_info *wi) +{ + int i; + + dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz, + PCI_DMA_FROMDEVICE); + for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) { + page_ref_sub(&wi->dma_info.page[i], + mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]); + put_page(&wi->dma_info.page[i]); + } +} + +int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix) +{ + int err; + + err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix); + if (unlikely(err)) { + err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix); + if (unlikely(err)) + return err; + set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state); + 
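/* Fragmented fallback path: the RX WQE is unusable until the UMR WQE
 * posted below completes on the channel's ICO SQ, so mark the RQ busy
 * and return -EBUSY to stop further posting; the ICO CQ handler then
 * calls mlx5e_post_rx_fragmented_mpwqe() to clear the flag and push
 * the WQE once the memory registration is done. */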
mlx5e_post_umr_wqe(rq, ix); + return -EBUSY; + } + + return 0; +} + +#define RQ_CANNOT_POST(rq) \ + (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \ + test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state)) + bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) { struct mlx5_wq_ll *wq = &rq->wq; - if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state))) + if (unlikely(RQ_CANNOT_POST(rq))) return false; while (!mlx5_wq_ll_is_full(wq)) { struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); + int err; - if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head))) + err = rq->alloc_wqe(rq, wqe, wq->head); + if (unlikely(err)) { + if (err != -EBUSY) + rq->stats.buff_alloc_err++; break; + } mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); } @@ -101,7 +607,8 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) return !mlx5_wq_ll_is_full(wq); } -static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe) +static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, + u32 cqe_bcnt) { struct ethhdr *eth = (struct ethhdr *)(skb->data); struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN); @@ -112,7 +619,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe) int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) || (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type)); - u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN; + u16 tot_len = cqe_bcnt - ETH_HLEN; if (eth->h_proto == htons(ETH_P_IP)) { tcp = (struct tcphdr *)(skb->data + ETH_HLEN + @@ -176,35 +683,43 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, if (lro) { skb->ip_summed = CHECKSUM_UNNECESSARY; - } else if (likely(is_first_ethertype_ip(skb))) { + return; + } + + if (is_first_ethertype_ip(skb)) { skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); rq->stats.csum_sw++; - } else { - goto csum_none; + return; } - return; - + if (likely((cqe->hds_ip_ext & CQE_L3_OK) && + (cqe->hds_ip_ext & CQE_L4_OK))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (cqe_is_tunneled(cqe)) { + skb->csum_level = 1; + skb->encapsulation = 1; + rq->stats.csum_inner++; + } + return; + } csum_none: skb->ip_summed = CHECKSUM_NONE; rq->stats.csum_none++; } static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, + u32 cqe_bcnt, struct mlx5e_rq *rq, struct sk_buff *skb) { struct net_device *netdev = rq->netdev; - u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt); struct mlx5e_tstamp *tstamp = rq->tstamp; int lro_num_seg; - skb_put(skb, cqe_bcnt); - lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; if (lro_num_seg > 1) { - mlx5e_lro_update_hdr(skb, cqe); + mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); rq->stats.lro_packets++; rq->stats.lro_bytes += cqe_bcnt; @@ -213,10 +728,6 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (unlikely(mlx5e_rx_hw_stamp(tstamp))) mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb)); - mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); - - skb->protocol = eth_type_trans(skb, netdev); - skb_record_rx_queue(skb, rq->ix); if (likely(netdev->features & NETIF_F_RXHASH)) @@ -227,52 +738,165 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, be16_to_cpu(cqe->vlan_info)); skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; + + mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); + skb->protocol = eth_type_trans(skb, netdev); +} + +static inline void 
mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe, + u32 cqe_bcnt, + struct sk_buff *skb) +{ + rq->stats.packets++; + rq->stats.bytes += cqe_bcnt; + mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); + napi_gro_receive(rq->cq.napi, skb); +} + +void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + struct mlx5e_rx_wqe *wqe; + struct sk_buff *skb; + __be16 wqe_counter_be; + u16 wqe_counter; + u32 cqe_bcnt; + + wqe_counter_be = cqe->wqe_counter; + wqe_counter = be16_to_cpu(wqe_counter_be); + wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); + skb = rq->skb[wqe_counter]; + prefetch(skb->data); + rq->skb[wqe_counter] = NULL; + + dma_unmap_single(rq->pdev, + *((dma_addr_t *)skb->cb), + rq->wqe_sz, + DMA_FROM_DEVICE); + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + rq->stats.wqe_err++; + dev_kfree_skb(skb); + goto wq_ll_pop; + } + + cqe_bcnt = be32_to_cpu(cqe->byte_cnt); + skb_put(skb, cqe_bcnt); + + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + +wq_ll_pop: + mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, + &wqe->next.next_wqe_index); +} + +static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, + struct mlx5_cqe64 *cqe, + struct mlx5e_mpw_info *wi, + u32 cqe_bcnt, + struct sk_buff *skb) +{ + u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz); + u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); + u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz; + u32 head_offset = wqe_offset & (PAGE_SIZE - 1); + u32 page_idx = wqe_offset >> PAGE_SHIFT; + u32 head_page_idx = page_idx; + u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt); + u32 frag_offset = head_offset + headlen; + u16 byte_cnt = cqe_bcnt - headlen; + + if (unlikely(frag_offset >= PAGE_SIZE)) { + page_idx++; + frag_offset -= PAGE_SIZE; + } + wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes); + + while (byte_cnt) { + u32 pg_consumed_bytes = + min_t(u32, PAGE_SIZE - frag_offset, byte_cnt); + + wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset, + pg_consumed_bytes); + byte_cnt -= pg_consumed_bytes; + frag_offset = 0; + page_idx++; + } + /* copy header */ + wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset, + headlen); + /* skb linear part was allocated with headlen and aligned to long */ + skb->tail += headlen; + skb->len += headlen; +} + +void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); + u16 wqe_id = be16_to_cpu(cqe->wqe_id); + struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id]; + struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id); + struct sk_buff *skb; + u16 cqe_bcnt; + + wi->consumed_strides += cstrides; + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + rq->stats.wqe_err++; + goto mpwrq_cqe_out; + } + + if (unlikely(mpwrq_is_filler_cqe(cqe))) { + rq->stats.mpwqe_filler++; + goto mpwrq_cqe_out; + } + + skb = napi_alloc_skb(rq->cq.napi, + ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, + sizeof(long))); + if (unlikely(!skb)) { + rq->stats.buff_alloc_err++; + goto mpwrq_cqe_out; + } + + prefetch(skb->data); + cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); + + mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb); + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + +mpwrq_cqe_out: + if (likely(wi->consumed_strides < rq->mpwqe_num_strides)) + return; + + wi->free_wqe(rq, wi); + mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index); } int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); - 
int work_done; + int work_done = 0; + + if (cq->decmprs_left) + work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); - for (work_done = 0; work_done < budget; work_done++) { - struct mlx5e_rx_wqe *wqe; - struct mlx5_cqe64 *cqe; - struct sk_buff *skb; - __be16 wqe_counter_be; - u16 wqe_counter; + for (; work_done < budget; work_done++) { + struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq); - cqe = mlx5e_get_cqe(cq); if (!cqe) break; - mlx5_cqwq_pop(&cq->wq); - - wqe_counter_be = cqe->wqe_counter; - wqe_counter = be16_to_cpu(wqe_counter_be); - wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); - skb = rq->skb[wqe_counter]; - prefetch(skb->data); - rq->skb[wqe_counter] = NULL; - - dma_unmap_single(rq->pdev, - *((dma_addr_t *)skb->cb), - rq->wqe_sz, - DMA_FROM_DEVICE); - - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { - rq->stats.wqe_err++; - dev_kfree_skb(skb); - goto wq_ll_pop; + if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) { + work_done += + mlx5e_decompress_cqes_start(rq, cq, + budget - work_done); + continue; } - mlx5e_build_rx_skb(cqe, rq, skb); - rq->stats.packets++; - rq->stats.bytes += be32_to_cpu(cqe->byte_cnt); - napi_gro_receive(cq->napi, skb); + mlx5_cqwq_pop(&cq->wq); -wq_ll_pop: - mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, - &wqe->next.next_wqe_index); + rq->handle_rx_cqe(rq, cqe); } mlx5_cqwq_update_db_record(&cq->wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h new file mode 100644 index 000000000000..83bc32b25849 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ +#ifndef __MLX5_EN_STATS_H__ +#define __MLX5_EN_STATS_H__ + +#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \ + (*(u64 *)((char *)ptr + dsc[i].offset)) +#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \ + be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset)) +#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \ + (*(u32 *)((char *)ptr + dsc[i].offset)) +#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \ + be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) + +#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) + +struct counter_desc { + char name[ETH_GSTRING_LEN]; + int offset; /* Byte offset */ +}; + +struct mlx5e_sw_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 tso_packets; + u64 tso_bytes; + u64 tso_inner_packets; + u64 tso_inner_bytes; + u64 lro_packets; + u64 lro_bytes; + u64 rx_csum_good; + u64 rx_csum_none; + u64 rx_csum_sw; + u64 rx_csum_inner; + u64 tx_csum_offload; + u64 tx_csum_inner; + u64 tx_queue_stopped; + u64 tx_queue_wake; + u64 tx_queue_dropped; + u64 rx_wqe_err; + u64 rx_mpwqe_filler; + u64 rx_mpwqe_frag; + u64 rx_buff_alloc_err; + u64 rx_cqe_compress_blks; + u64 rx_cqe_compress_pkts; + + /* Special handling counters */ + u64 link_down_events; +}; + +static const struct counter_desc sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tso_inner_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_good) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_sw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_inner) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_offload) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_inner) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events) }, +}; + +struct mlx5e_qcounter_stats { + u32 rx_out_of_buffer; +}; + +static const struct counter_desc q_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) }, +}; + +#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c) +#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \ + vstats->query_vport_out, c) + +struct mlx5e_vport_stats { + __be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)]; +}; + +static const struct counter_desc vport_stats_desc[] = { + { "rx_vport_error_packets", + VPORT_COUNTER_OFF(received_errors.packets) }, + { 
"rx_vport_error_bytes", VPORT_COUNTER_OFF(received_errors.octets) }, + { "tx_vport_error_packets", + VPORT_COUNTER_OFF(transmit_errors.packets) }, + { "tx_vport_error_bytes", VPORT_COUNTER_OFF(transmit_errors.octets) }, + { "rx_vport_unicast_packets", + VPORT_COUNTER_OFF(received_eth_unicast.packets) }, + { "rx_vport_unicast_bytes", + VPORT_COUNTER_OFF(received_eth_unicast.octets) }, + { "tx_vport_unicast_packets", + VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) }, + { "tx_vport_unicast_bytes", + VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) }, + { "rx_vport_multicast_packets", + VPORT_COUNTER_OFF(received_eth_multicast.packets) }, + { "rx_vport_multicast_bytes", + VPORT_COUNTER_OFF(received_eth_multicast.octets) }, + { "tx_vport_multicast_packets", + VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) }, + { "tx_vport_multicast_bytes", + VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) }, + { "rx_vport_broadcast_packets", + VPORT_COUNTER_OFF(received_eth_broadcast.packets) }, + { "rx_vport_broadcast_bytes", + VPORT_COUNTER_OFF(received_eth_broadcast.octets) }, + { "tx_vport_broadcast_packets", + VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) }, + { "tx_vport_broadcast_bytes", + VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) }, +}; + +#define PPORT_802_3_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) +#define PPORT_802_3_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \ + counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) +#define PPORT_2863_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_2863_cntrs_grp_data_layout.c##_high) +#define PPORT_2863_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \ + counter_set.eth_2863_cntrs_grp_data_layout.c##_high) +#define PPORT_2819_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_2819_cntrs_grp_data_layout.c##_high) +#define PPORT_2819_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \ + counter_set.eth_2819_cntrs_grp_data_layout.c##_high) +#define PPORT_PER_PRIO_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_per_prio_grp_data_layout.c##_high) +#define PPORT_PER_PRIO_GET(pstats, prio, c) \ + MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \ + counter_set.eth_per_prio_grp_data_layout.c##_high) +#define NUM_PPORT_PRIO 8 + +struct mlx5e_pport_stats { + __be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; +}; + +static const struct counter_desc pport_802_3_stats_desc[] = { + { "frames_tx", PPORT_802_3_OFF(a_frames_transmitted_ok) }, + { "frames_rx", PPORT_802_3_OFF(a_frames_received_ok) }, + { "check_seq_err", PPORT_802_3_OFF(a_frame_check_sequence_errors) }, + { "alignment_err", PPORT_802_3_OFF(a_alignment_errors) }, + { "octets_tx", PPORT_802_3_OFF(a_octets_transmitted_ok) }, + { "octets_received", PPORT_802_3_OFF(a_octets_received_ok) }, + { "multicast_xmitted", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) }, + { "broadcast_xmitted", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) }, + { "multicast_rx", PPORT_802_3_OFF(a_multicast_frames_received_ok) }, + { "broadcast_rx", PPORT_802_3_OFF(a_broadcast_frames_received_ok) }, + { "in_range_len_errors", PPORT_802_3_OFF(a_in_range_length_errors) }, + { "out_of_range_len", PPORT_802_3_OFF(a_out_of_range_length_field) }, + { 
"too_long_errors", PPORT_802_3_OFF(a_frame_too_long_errors) }, + { "symbol_err", PPORT_802_3_OFF(a_symbol_error_during_carrier) }, + { "mac_control_tx", PPORT_802_3_OFF(a_mac_control_frames_transmitted) }, + { "mac_control_rx", PPORT_802_3_OFF(a_mac_control_frames_received) }, + { "unsupported_op_rx", + PPORT_802_3_OFF(a_unsupported_opcodes_received) }, + { "pause_ctrl_rx", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) }, + { "pause_ctrl_tx", + PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) }, +}; + +static const struct counter_desc pport_2863_stats_desc[] = { + { "in_octets", PPORT_2863_OFF(if_in_octets) }, + { "in_ucast_pkts", PPORT_2863_OFF(if_in_ucast_pkts) }, + { "in_discards", PPORT_2863_OFF(if_in_discards) }, + { "in_errors", PPORT_2863_OFF(if_in_errors) }, + { "in_unknown_protos", PPORT_2863_OFF(if_in_unknown_protos) }, + { "out_octets", PPORT_2863_OFF(if_out_octets) }, + { "out_ucast_pkts", PPORT_2863_OFF(if_out_ucast_pkts) }, + { "out_discards", PPORT_2863_OFF(if_out_discards) }, + { "out_errors", PPORT_2863_OFF(if_out_errors) }, + { "in_multicast_pkts", PPORT_2863_OFF(if_in_multicast_pkts) }, + { "in_broadcast_pkts", PPORT_2863_OFF(if_in_broadcast_pkts) }, + { "out_multicast_pkts", PPORT_2863_OFF(if_out_multicast_pkts) }, + { "out_broadcast_pkts", PPORT_2863_OFF(if_out_broadcast_pkts) }, +}; + +static const struct counter_desc pport_2819_stats_desc[] = { + { "drop_events", PPORT_2819_OFF(ether_stats_drop_events) }, + { "octets", PPORT_2819_OFF(ether_stats_octets) }, + { "pkts", PPORT_2819_OFF(ether_stats_pkts) }, + { "broadcast_pkts", PPORT_2819_OFF(ether_stats_broadcast_pkts) }, + { "multicast_pkts", PPORT_2819_OFF(ether_stats_multicast_pkts) }, + { "crc_align_errors", PPORT_2819_OFF(ether_stats_crc_align_errors) }, + { "undersize_pkts", PPORT_2819_OFF(ether_stats_undersize_pkts) }, + { "oversize_pkts", PPORT_2819_OFF(ether_stats_oversize_pkts) }, + { "fragments", PPORT_2819_OFF(ether_stats_fragments) }, + { "jabbers", PPORT_2819_OFF(ether_stats_jabbers) }, + { "collisions", PPORT_2819_OFF(ether_stats_collisions) }, + { "p64octets", PPORT_2819_OFF(ether_stats_pkts64octets) }, + { "p65to127octets", PPORT_2819_OFF(ether_stats_pkts65to127octets) }, + { "p128to255octets", PPORT_2819_OFF(ether_stats_pkts128to255octets) }, + { "p256to511octets", PPORT_2819_OFF(ether_stats_pkts256to511octets) }, + { "p512to1023octets", PPORT_2819_OFF(ether_stats_pkts512to1023octets) }, + { "p1024to1518octets", + PPORT_2819_OFF(ether_stats_pkts1024to1518octets) }, + { "p1519to2047octets", + PPORT_2819_OFF(ether_stats_pkts1519to2047octets) }, + { "p2048to4095octets", + PPORT_2819_OFF(ether_stats_pkts2048to4095octets) }, + { "p4096to8191octets", + PPORT_2819_OFF(ether_stats_pkts4096to8191octets) }, + { "p8192to10239octets", + PPORT_2819_OFF(ether_stats_pkts8192to10239octets) }, +}; + +static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { + { "rx_octets", PPORT_PER_PRIO_OFF(rx_octets) }, + { "rx_frames", PPORT_PER_PRIO_OFF(rx_frames) }, + { "tx_octets", PPORT_PER_PRIO_OFF(tx_octets) }, + { "tx_frames", PPORT_PER_PRIO_OFF(tx_frames) }, +}; + +static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { + { "rx_pause", PPORT_PER_PRIO_OFF(rx_pause) }, + { "rx_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) }, + { "tx_pause", PPORT_PER_PRIO_OFF(tx_pause) }, + { "tx_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) }, + { "rx_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, +}; + +struct mlx5e_rq_stats { + u64 packets; + u64 bytes; + u64 csum_sw; + 
u64 csum_inner; + u64 csum_none; + u64 lro_packets; + u64 lro_bytes; + u64 wqe_err; + u64 mpwqe_filler; + u64 mpwqe_frag; + u64 buff_alloc_err; + u64 cqe_compress_blks; + u64 cqe_compress_pkts; +}; + +static const struct counter_desc rq_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_sw) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_inner) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, csum_none) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, wqe_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, mpwqe_frag) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, + { MLX5E_DECLARE_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, +}; + +struct mlx5e_sq_stats { + /* commonly accessed in data path */ + u64 packets; + u64 bytes; + u64 tso_packets; + u64 tso_bytes; + u64 tso_inner_packets; + u64 tso_inner_bytes; + u64 csum_offload_inner; + u64 nop; + /* less likely accessed in data path */ + u64 csum_offload_none; + u64 stopped; + u64 wake; + u64 dropped; +}; + +static const struct counter_desc sq_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_inner) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, nop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, csum_offload_none) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, stopped) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, wake) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sq_stats, dropped) }, +}; + +#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) +#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc) +#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc) +#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) +#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) +#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) +#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ + ARRAY_SIZE(pport_per_prio_traffic_stats_desc) +#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ + ARRAY_SIZE(pport_per_prio_pfc_stats_desc) +#define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \ + NUM_PPORT_2863_COUNTERS + \ + NUM_PPORT_2819_COUNTERS + \ + NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ + NUM_PPORT_PRIO) +#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) +#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) + +struct mlx5e_stats { + struct mlx5e_sw_stats sw; + struct mlx5e_qcounter_stats qcnt; + struct mlx5e_vport_stats vport; + struct mlx5e_pport_stats pport; +}; + +#endif /* __MLX5_EN_STATS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index b3de09f13425..704c3d30493e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -46,43 +46,65 @@ struct mlx5e_tc_flow { struct mlx5_flow_rule *rule; }; -#define 
MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024 -#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4 +#define MLX5E_TC_TABLE_NUM_ENTRIES 1024 +#define MLX5E_TC_TABLE_NUM_GROUPS 4 static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, u32 *match_c, u32 *match_v, u32 action, u32 flow_tag) { - struct mlx5_flow_destination dest = { - .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE, - {.ft = priv->fts.vlan.t}, - }; + struct mlx5_core_dev *dev = priv->mdev; + struct mlx5_flow_destination dest = { 0 }; + struct mlx5_fc *counter = NULL; struct mlx5_flow_rule *rule; bool table_created = false; - if (IS_ERR_OR_NULL(priv->fts.tc.t)) { - priv->fts.tc.t = - mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0, - MLX5E_TC_FLOW_TABLE_NUM_ENTRIES, - MLX5E_TC_FLOW_TABLE_NUM_GROUPS); - if (IS_ERR(priv->fts.tc.t)) { + if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = priv->fs.vlan.ft.t; + } else { + counter = mlx5_fc_create(dev, true); + if (IS_ERR(counter)) + return ERR_CAST(counter); + + dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest.counter = counter; + } + + if (IS_ERR_OR_NULL(priv->fs.tc.t)) { + priv->fs.tc.t = + mlx5_create_auto_grouped_flow_table(priv->fs.ns, + MLX5E_TC_PRIO, + MLX5E_TC_TABLE_NUM_ENTRIES, + MLX5E_TC_TABLE_NUM_GROUPS, + 0); + if (IS_ERR(priv->fs.tc.t)) { netdev_err(priv->netdev, "Failed to create tc offload table\n"); - return ERR_CAST(priv->fts.tc.t); + rule = ERR_CAST(priv->fs.tc.t); + goto err_create_ft; } table_created = true; } - rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS, + rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS, match_c, match_v, action, flow_tag, - action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL); + &dest); + + if (IS_ERR(rule)) + goto err_add_rule; + + return rule; - if (IS_ERR(rule) && table_created) { - mlx5_destroy_flow_table(priv->fts.tc.t); - priv->fts.tc.t = NULL; +err_add_rule: + if (table_created) { + mlx5_destroy_flow_table(priv->fs.tc.t); + priv->fs.tc.t = NULL; } +err_create_ft: + mlx5_fc_destroy(dev, counter); return rule; } @@ -90,11 +112,17 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, struct mlx5_flow_rule *rule) { + struct mlx5_fc *counter = NULL; + + counter = mlx5_flow_rule_counter(rule); + mlx5_del_flow_rule(rule); + mlx5_fc_destroy(priv->mdev, counter); + if (!mlx5e_tc_num_filters(priv)) { - mlx5_destroy_flow_table(priv->fts.tc.t); - priv->fts.tc.t = NULL; + mlx5_destroy_flow_table(priv->fs.tc.t); + priv->fs.tc.t = NULL; } } @@ -284,6 +312,9 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (is_tcf_gact_shot(a)) { *action |= MLX5_FLOW_CONTEXT_ACTION_DROP; + if (MLX5_CAP_FLOWTABLE(priv->mdev, + flow_table_properties_nic_receive.flow_counter)) + *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; continue; } @@ -310,7 +341,7 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, struct tc_cls_flower_offload *f) { - struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + struct mlx5e_tc_table *tc = &priv->fs.tc; u32 *match_c; u32 *match_v; int err = 0; @@ -376,7 +407,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f) { struct mlx5e_tc_flow *flow; - struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + struct mlx5e_tc_table *tc = &priv->fs.tc; flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, tc->ht_params); @@ 
-392,6 +423,34 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, return 0; } +int mlx5e_stats_flower(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f) +{ + struct mlx5e_tc_table *tc = &priv->fs.tc; + struct mlx5e_tc_flow *flow; + struct tc_action *a; + struct mlx5_fc *counter; + u64 bytes; + u64 packets; + u64 lastuse; + + flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, + tc->ht_params); + if (!flow) + return -EINVAL; + + counter = mlx5_flow_rule_counter(flow->rule); + if (!counter) + return 0; + + mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); + + tc_for_each_action(a, f->exts) + tcf_action_stats_update(a, bytes, packets, lastuse); + + return 0; +} + static const struct rhashtable_params mlx5e_tc_flow_ht_params = { .head_offset = offsetof(struct mlx5e_tc_flow, node), .key_offset = offsetof(struct mlx5e_tc_flow, cookie), @@ -401,7 +460,7 @@ static const struct rhashtable_params mlx5e_tc_flow_ht_params = { int mlx5e_tc_init(struct mlx5e_priv *priv) { - struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + struct mlx5e_tc_table *tc = &priv->fs.tc; tc->ht_params = mlx5e_tc_flow_ht_params; return rhashtable_init(&tc->ht, &tc->ht_params); @@ -418,12 +477,12 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg) void mlx5e_tc_cleanup(struct mlx5e_priv *priv) { - struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + struct mlx5e_tc_table *tc = &priv->fs.tc; rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv); - if (!IS_ERR_OR_NULL(priv->fts.tc.t)) { - mlx5_destroy_flow_table(priv->fts.tc.t); - priv->fts.tc.t = NULL; + if (!IS_ERR_OR_NULL(tc->t)) { + mlx5_destroy_flow_table(tc->t); + tc->t = NULL; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index d677428dc10f..34bf903fc886 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -43,9 +43,12 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, int mlx5e_delete_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f); +int mlx5e_stats_flower(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f); + static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { - return atomic_read(&priv->fts.tc.ht.nelems); + return atomic_read(&priv->fs.tc.ht.nelems); } #endif /* __MLX5_EN_TC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 1ffc7cb6f78c..229ab16fb8d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -54,10 +54,11 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw) sq->skb[pi] = NULL; sq->pc++; + sq->stats.nop++; if (notify_hw) { cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; - mlx5e_tx_notify_hw(sq, wqe, 0); + mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); } } @@ -309,7 +310,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) bf_sz = wi->num_wqebbs << 3; cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; - mlx5e_tx_notify_hw(sq, wqe, bf_sz); + mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz); } /* fill sq edge with nops to avoid wqe wrap around */ @@ -387,7 +388,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) wi = &sq->wqe_info[ci]; if (unlikely(!skb)) { /* nop */ - sq->stats.nop++; sqcc++; continue; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 9bb4395aceeb..c38781fa567d 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -49,6 +49,60 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq) return cqe; } +static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq) +{ + struct mlx5_wq_cyc *wq; + struct mlx5_cqe64 *cqe; + struct mlx5e_sq *sq; + u16 sqcc; + + cqe = mlx5e_get_cqe(cq); + if (likely(!cqe)) + return; + + sq = container_of(cq, struct mlx5e_sq, cq); + wq = &sq->wq; + + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur + */ + sqcc = sq->cc; + + do { + u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1; + struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci]; + + mlx5_cqwq_pop(&cq->wq); + sqcc += icowi->num_wqebbs; + + if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) { + WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n", + cqe->op_own); + break; + } + + switch (icowi->opcode) { + case MLX5_OPCODE_NOP: + break; + case MLX5_OPCODE_UMR: + mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq); + break; + default: + WARN_ONCE(true, + "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n", + icowi->opcode); + } + + } while ((cqe = mlx5e_get_cqe(cq))); + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->cc = sqcc; +} + int mlx5e_napi_poll(struct napi_struct *napi, int budget) { struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, @@ -64,6 +118,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget); busy |= work_done == budget; + + mlx5e_poll_ico_cq(&c->icosq.cq); + busy |= mlx5e_post_rx_wqes(&c->rq); if (busy) @@ -80,6 +137,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < c->num_tc; i++) mlx5e_cq_arm(&c->sq[i].cq); mlx5e_cq_arm(&c->rq.cq); + mlx5e_cq_arm(&c->icosq.cq); return work_done; } @@ -89,7 +147,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq) struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); - barrier(); napi_schedule(cq->napi); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index bc3d9f8a75c1..b84a6918a700 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -77,16 +77,20 @@ struct vport_addr { u8 action; u32 vport; struct mlx5_flow_rule *flow_rule; /* SRIOV only */ + /* A flag indicating that mac was added due to mc promiscuous vport */ + bool mc_promisc; }; enum { UC_ADDR_CHANGE = BIT(0), MC_ADDR_CHANGE = BIT(1), + PROMISC_CHANGE = BIT(3), }; /* Vport context events */ #define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \ - MC_ADDR_CHANGE) + MC_ADDR_CHANGE | \ + PROMISC_CHANGE) static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, u32 events_mask) @@ -116,6 +120,9 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport, if (events_mask & MC_ADDR_CHANGE) MLX5_SET(nic_vport_context, nic_vport_ctx, event_on_mc_address_change, 1); + if (events_mask & PROMISC_CHANGE) + MLX5_SET(nic_vport_context, nic_vport_ctx, + event_on_promisc_change, 1); err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); if (err) @@ -323,30 +330,45 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index) /* E-Switch FDB */ static struct mlx5_flow_rule * -esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) 
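The en_tc.c hunks earlier in this diff attach a hardware flow counter to offloaded drop rules and read it back through a software cache in mlx5e_stats_flower(). The following is a condensed, illustrative sketch of that counter lifecycle, not part of the patch: the helper names are hypothetical, it assumes priv->fs.tc.t was already created (the patch creates it lazily), and it uses only calls that appear elsewhere in this diff.

/* Illustrative sketch: mlx5_fc_create(dev, true) requests an "aging"
 * counter whose packet/byte values the core refreshes into a software
 * cache, so the later query issues no firmware command.
 */
static struct mlx5_flow_rule *
sketch_add_counted_drop(struct mlx5e_priv *priv, u32 *match_c, u32 *match_v,
			u32 flow_tag)
{
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_rule *rule;
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(priv->mdev, true);
	if (IS_ERR(counter))
		return ERR_CAST(counter);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = counter;

	/* DROP|COUNT is the only counted action combination the new
	 * counter_is_valid() check in fs_core.c accepts.
	 */
	rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
				  match_c, match_v,
				  MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT,
				  flow_tag, &dest);
	if (IS_ERR(rule))
		mlx5_fc_destroy(priv->mdev, counter);
	return rule;
}

/* Stats are later served from the cache, as mlx5e_stats_flower() does: */
static void sketch_read_counted_drop(struct mlx5_flow_rule *rule)
{
	struct mlx5_fc *counter = mlx5_flow_rule_counter(rule);
	u64 bytes, packets, lastuse;

	if (counter)
		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
}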
+__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, + u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN]) { - int match_header = MLX5_MATCH_OUTER_HEADERS; - struct mlx5_flow_destination dest; + int match_header = (is_zero_ether_addr(mac_c) ? 0 : + MLX5_MATCH_OUTER_HEADERS); struct mlx5_flow_rule *flow_rule = NULL; + struct mlx5_flow_destination dest; + void *mv_misc = NULL; + void *mc_misc = NULL; + u8 *dmac_v = NULL; + u8 *dmac_c = NULL; u32 *match_v; u32 *match_c; - u8 *dmac_v; - u8 *dmac_c; + if (rx_rule) + match_header |= MLX5_MATCH_MISC_PARAMETERS; match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); if (!match_v || !match_c) { pr_warn("FDB: Failed to alloc match parameters\n"); goto out; } + dmac_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers.dmac_47_16); dmac_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers.dmac_47_16); - ether_addr_copy(dmac_v, mac); - /* Match criteria mask */ - memset(dmac_c, 0xff, 6); + if (match_header & MLX5_MATCH_OUTER_HEADERS) { + ether_addr_copy(dmac_v, mac_v); + ether_addr_copy(dmac_c, mac_c); + } + + if (match_header & MLX5_MATCH_MISC_PARAMETERS) { + mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters); + mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); + MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT); + MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port); + } dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport_num = vport; @@ -373,6 +395,39 @@ out: return flow_rule; } +static struct mlx5_flow_rule * +esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) +{ + u8 mac_c[ETH_ALEN]; + + eth_broadcast_addr(mac_c); + return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac); +} + +static struct mlx5_flow_rule * +esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport) +{ + u8 mac_c[ETH_ALEN]; + u8 mac_v[ETH_ALEN]; + + eth_zero_addr(mac_c); + eth_zero_addr(mac_v); + mac_c[0] = 0x01; + mac_v[0] = 0x01; + return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v); +} + +static struct mlx5_flow_rule * +esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport) +{ + u8 mac_c[ETH_ALEN]; + u8 mac_v[ETH_ALEN]; + + eth_zero_addr(mac_c); + eth_zero_addr(mac_v); + return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v); +} + static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); @@ -401,34 +456,80 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) memset(flow_group_in, 0, inlen); table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); - fdb = mlx5_create_flow_table(root_ns, 0, table_size); + fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); if (IS_ERR_OR_NULL(fdb)) { err = PTR_ERR(fdb); esw_warn(dev, "Failed to create FDB Table err %d\n", err); goto out; } + esw->fdb_table.fdb = fdb; + /* Addresses group : Full match unicast/multicast addresses */ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); + /* Preserve 2 entries for allmulti and promisc rules*/ + 
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); eth_broadcast_addr(dmac); - g = mlx5_create_flow_group(fdb, flow_group_in); if (IS_ERR_OR_NULL(g)) { err = PTR_ERR(g); esw_warn(dev, "Failed to create flow group err(%d)\n", err); goto out; } - esw->fdb_table.addr_grp = g; - esw->fdb_table.fdb = fdb; + + /* Allmulti group : One rule that forwards any mcast traffic */ + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_OUTER_HEADERS); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2); + eth_zero_addr(dmac); + dmac[0] = 0x01; + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); + goto out; + } + esw->fdb_table.allmulti_grp = g; + + /* Promiscuous group : + * One rule that forward all unmatched traffic from previous groups + */ + eth_zero_addr(dmac); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); + goto out; + } + esw->fdb_table.promisc_grp = g; + out: + if (err) { + if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) { + mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp); + esw->fdb_table.allmulti_grp = NULL; + } + if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) { + mlx5_destroy_flow_group(esw->fdb_table.addr_grp); + esw->fdb_table.addr_grp = NULL; + } + if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) { + mlx5_destroy_flow_table(esw->fdb_table.fdb); + esw->fdb_table.fdb = NULL; + } + } + kfree(flow_group_in); - if (err && !IS_ERR_OR_NULL(fdb)) - mlx5_destroy_flow_table(fdb); return err; } @@ -438,10 +539,14 @@ static void esw_destroy_fdb_table(struct mlx5_eswitch *esw) return; esw_debug(esw->dev, "Destroy FDB Table\n"); + mlx5_destroy_flow_group(esw->fdb_table.promisc_grp); + mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp); mlx5_destroy_flow_group(esw->fdb_table.addr_grp); mlx5_destroy_flow_table(esw->fdb_table.fdb); esw->fdb_table.fdb = NULL; esw->fdb_table.addr_grp = NULL; + esw->fdb_table.allmulti_grp = NULL; + esw->fdb_table.promisc_grp = NULL; } /* E-Switch vport UC/MC lists management */ @@ -511,6 +616,52 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) return 0; } +static void update_allmulti_vports(struct mlx5_eswitch *esw, + struct vport_addr *vaddr, + struct esw_mc_addr *esw_mc) +{ + u8 *mac = vaddr->node.addr; + u32 vport_idx = 0; + + for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) { + struct mlx5_vport *vport = &esw->vports[vport_idx]; + struct hlist_head *vport_hash = vport->mc_list; + struct vport_addr *iter_vaddr = + l2addr_hash_find(vport_hash, + mac, + struct vport_addr); + if (IS_ERR_OR_NULL(vport->allmulti_rule) || + vaddr->vport == vport_idx) + continue; + switch (vaddr->action) { + case MLX5_ACTION_ADD: + if (iter_vaddr) + continue; + iter_vaddr = l2addr_hash_add(vport_hash, mac, + struct vport_addr, + GFP_KERNEL); + if (!iter_vaddr) { + esw_warn(esw->dev, + "ALL-MULTI: Failed to add 
MAC(%pM) to vport[%d] DB\n", + mac, vport_idx); + continue; + } + iter_vaddr->vport = vport_idx; + iter_vaddr->flow_rule = + esw_fdb_set_vport_rule(esw, + mac, + vport_idx); + break; + case MLX5_ACTION_DEL: + if (!iter_vaddr) + continue; + mlx5_del_flow_rule(iter_vaddr->flow_rule); + l2addr_hash_del(iter_vaddr); + break; + } + } +} + static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { struct hlist_head *hash = esw->mc_table; @@ -531,8 +682,17 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) esw_mc->uplink_rule = /* Forward MC MAC to Uplink */ esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT); + + /* Add this multicast mac to all the mc promiscuous vports */ + update_allmulti_vports(esw, vaddr, esw_mc); + add: - esw_mc->refcnt++; + /* If the multicast mac is added as a result of mc promiscuous vport, + * don't increment the multicast ref count + */ + if (!vaddr->mc_promisc) + esw_mc->refcnt++; + /* Forward MC MAC to vport */ vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); esw_debug(esw->dev, @@ -568,9 +728,15 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) mlx5_del_flow_rule(vaddr->flow_rule); vaddr->flow_rule = NULL; - if (--esw_mc->refcnt) + /* If the multicast mac is added as a result of mc promiscuous vport, + * don't decrement the multicast ref count. + */ + if (vaddr->mc_promisc || (--esw_mc->refcnt > 0)) return 0; + /* Remove this multicast mac from all the mc promiscuous vports */ + update_allmulti_vports(esw, vaddr, esw_mc); + if (esw_mc->uplink_rule) mlx5_del_flow_rule(esw_mc->uplink_rule); @@ -643,10 +809,13 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, addr->action = MLX5_ACTION_DEL; } + if (!vport->enabled) + goto out; + err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type, mac_list, &size); if (err) - return; + goto out; esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n", vport_num, is_uc ? "UC" : "MC", size); @@ -660,6 +829,24 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr); if (addr) { addr->action = MLX5_ACTION_NONE; + /* If this mac was previously added because of allmulti + * promiscuous rx mode, it's now converted to be original + * vport mac. 
+ */ + if (addr->mc_promisc) { + struct esw_mc_addr *esw_mc = + l2addr_hash_find(esw->mc_table, + mac_list[i], + struct esw_mc_addr); + if (!esw_mc) { + esw_warn(esw->dev, + "Failed to find MAC(%pM) in mcast DB\n", + mac_list[i]); + continue; + } + esw_mc->refcnt++; + addr->mc_promisc = false; + } continue; } @@ -674,13 +861,121 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw, addr->vport = vport_num; addr->action = MLX5_ACTION_ADD; } +out: kfree(mac_list); } -static void esw_vport_change_handler(struct work_struct *work) +/* Sync vport UC/MC list from vport context + * Must be called after esw_update_vport_addr_list + */ +static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + struct l2addr_node *node; + struct vport_addr *addr; + struct hlist_head *hash; + struct hlist_node *tmp; + int hi; + + hash = vport->mc_list; + + for_each_l2hash_node(node, tmp, esw->mc_table, hi) { + u8 *mac = node->addr; + + addr = l2addr_hash_find(hash, mac, struct vport_addr); + if (addr) { + if (addr->action == MLX5_ACTION_DEL) + addr->action = MLX5_ACTION_NONE; + continue; + } + addr = l2addr_hash_add(hash, mac, struct vport_addr, + GFP_KERNEL); + if (!addr) { + esw_warn(esw->dev, + "Failed to add allmulti MAC(%pM) to vport[%d] DB\n", + mac, vport_num); + continue; + } + addr->vport = vport_num; + addr->action = MLX5_ACTION_ADD; + addr->mc_promisc = true; + } +} + +/* Apply vport rx mode to HW FDB table */ +static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num, + bool promisc, bool mc_promisc) +{ + struct esw_mc_addr *allmulti_addr = esw->mc_promisc; + struct mlx5_vport *vport = &esw->vports[vport_num]; + + if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc) + goto promisc; + + if (mc_promisc) { + vport->allmulti_rule = + esw_fdb_set_vport_allmulti_rule(esw, vport_num); + if (!allmulti_addr->uplink_rule) + allmulti_addr->uplink_rule = + esw_fdb_set_vport_allmulti_rule(esw, + UPLINK_VPORT); + allmulti_addr->refcnt++; + } else if (vport->allmulti_rule) { + mlx5_del_flow_rule(vport->allmulti_rule); + vport->allmulti_rule = NULL; + + if (--allmulti_addr->refcnt > 0) + goto promisc; + + if (allmulti_addr->uplink_rule) + mlx5_del_flow_rule(allmulti_addr->uplink_rule); + allmulti_addr->uplink_rule = NULL; + } + +promisc: + if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc) + return; + + if (promisc) { + vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw, + vport_num); + } else if (vport->promisc_rule) { + mlx5_del_flow_rule(vport->promisc_rule); + vport->promisc_rule = NULL; + } +} + +/* Sync vport rx mode from vport context */ +static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num) +{ + struct mlx5_vport *vport = &esw->vports[vport_num]; + int promisc_all = 0; + int promisc_uc = 0; + int promisc_mc = 0; + int err; + + err = mlx5_query_nic_vport_promisc(esw->dev, + vport_num, + &promisc_uc, + &promisc_mc, + &promisc_all); + if (err) + return; + esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n", + vport_num, promisc_all, promisc_mc); + + if (!vport->trusted || !vport->enabled) { + promisc_uc = 0; + promisc_mc = 0; + promisc_all = 0; + } + + esw_apply_vport_rx_mode(esw, vport_num, promisc_all, + (promisc_all || promisc_mc)); +} + +static void esw_vport_change_handle_locked(struct mlx5_vport *vport) { - struct mlx5_vport *vport = - container_of(work, struct mlx5_vport, vport_change_handler); struct mlx5_core_dev *dev = vport->dev; 
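Two details of esw_apply_vport_rx_mode() above are easy to miss: the IS_ERR_OR_NULL(rule) != desired tests make the function idempotent, acting only when the installed FDB state disagrees with the requested mode, and the uplink all-multicast rule is shared by every allmulti vport through the refcnt kept in esw->mc_promisc. The goto-free sketch below restates just the allmulti arm using the same calls; the helper name is hypothetical and not part of the patch.

/* Hypothetical restatement of the allmulti arm above: the per-vport
 * rule is private, while the shared uplink rule is created on first
 * use and deleted only when the last allmulti vport releases it.
 */
static void sketch_vport_set_allmulti(struct mlx5_eswitch *esw,
				      u32 vport_num, bool enable)
{
	struct esw_mc_addr *shared = esw->mc_promisc;
	struct mlx5_vport *vport = &esw->vports[vport_num];

	if (enable) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport_num);
		if (!shared->uplink_rule)
			shared->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								UPLINK_VPORT);
		shared->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rule(vport->allmulti_rule);
		vport->allmulti_rule = NULL;
		if (--shared->refcnt > 0)
			return;
		if (shared->uplink_rule)
			mlx5_del_flow_rule(shared->uplink_rule);
		shared->uplink_rule = NULL;
	}
}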
struct mlx5_eswitch *esw = dev->priv.eswitch; u8 mac[ETH_ALEN]; @@ -699,6 +994,15 @@ static void esw_vport_change_handler(struct work_struct *work) if (vport->enabled_events & MC_ADDR_CHANGE) { esw_update_vport_addr_list(esw, vport->vport, MLX5_NVPRT_LIST_TYPE_MC); + } + + if (vport->enabled_events & PROMISC_CHANGE) { + esw_update_vport_rx_mode(esw, vport->vport); + if (!IS_ERR_OR_NULL(vport->allmulti_rule)) + esw_update_vport_mc_promisc(esw, vport->vport); + } + + if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) { esw_apply_vport_addr_list(esw, vport->vport, MLX5_NVPRT_LIST_TYPE_MC); } @@ -709,15 +1013,477 @@ static void esw_vport_change_handler(struct work_struct *work) vport->enabled_events); } +static void esw_vport_change_handler(struct work_struct *work) +{ + struct mlx5_vport *vport = + container_of(work, struct mlx5_vport, vport_change_handler); + struct mlx5_eswitch *esw = vport->dev->priv.eswitch; + + mutex_lock(&esw->state_lock); + esw_vport_change_handle_locked(vport); + mutex_unlock(&esw->state_lock); +} + +static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *vlan_grp = NULL; + struct mlx5_flow_group *drop_grp = NULL; + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *acl; + void *match_criteria; + u32 *flow_group_in; + /* The egress acl table contains 2 rules: + * 1)Allow traffic with vlan_tag=vst_vlan_id + * 2)Drop all other traffic. + */ + int table_size = 2; + int err = 0; + + if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) || + !IS_ERR_OR_NULL(vport->egress.acl)) + return; + + esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n", + vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size)); + + root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS); + if (!root_ns) { + esw_warn(dev, "Failed to get E-Switch egress flow namespace\n"); + return; + } + + flow_group_in = mlx5_vzalloc(inlen); + if (!flow_group_in) + return; + + acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); + if (IS_ERR_OR_NULL(acl)) { + err = PTR_ERR(acl); + esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", + vport->vport, err); + goto out; + } + + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); + + vlan_grp = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(vlan_grp)) { + err = PTR_ERR(vlan_grp); + esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", + vport->vport, err); + goto out; + } + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); + drop_grp = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(drop_grp)) { + err = PTR_ERR(drop_grp); + esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", + vport->vport, err); + goto out; + } + + vport->egress.acl = acl; + vport->egress.drop_grp = 
drop_grp; + vport->egress.allowed_vlans_grp = vlan_grp; +out: + kfree(flow_group_in); + if (err && !IS_ERR_OR_NULL(vlan_grp)) + mlx5_destroy_flow_group(vlan_grp); + if (err && !IS_ERR_OR_NULL(acl)) + mlx5_destroy_flow_table(acl); +} + +static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) + mlx5_del_flow_rule(vport->egress.allowed_vlan); + + if (!IS_ERR_OR_NULL(vport->egress.drop_rule)) + mlx5_del_flow_rule(vport->egress.drop_rule); + + vport->egress.allowed_vlan = NULL; + vport->egress.drop_rule = NULL; +} + +static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (IS_ERR_OR_NULL(vport->egress.acl)) + return; + + esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport); + + esw_vport_cleanup_egress_rules(esw, vport); + mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp); + mlx5_destroy_flow_group(vport->egress.drop_grp); + mlx5_destroy_flow_table(vport->egress.acl); + vport->egress.allowed_vlans_grp = NULL; + vport->egress.drop_grp = NULL; + vport->egress.acl = NULL; +} + +static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_core_dev *dev = esw->dev; + struct mlx5_flow_namespace *root_ns; + struct mlx5_flow_table *acl; + struct mlx5_flow_group *g; + void *match_criteria; + u32 *flow_group_in; + /* The ingress acl table contains 4 groups + * (2 active rules at the same time - + * 1 allow rule from one of the first 3 groups. + * 1 drop rule from the last group): + * 1)Allow untagged traffic with smac=original mac. + * 2)Allow untagged traffic. + * 3)Allow traffic with smac=original mac. + * 4)Drop all other traffic. 
+ */ + int table_size = 4; + int err = 0; + + if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) || + !IS_ERR_OR_NULL(vport->ingress.acl)) + return; + + esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n", + vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size)); + + root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS); + if (!root_ns) { + esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n"); + return; + } + + flow_group_in = mlx5_vzalloc(inlen); + if (!flow_group_in) + return; + + acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); + if (IS_ERR_OR_NULL(acl)) { + err = PTR_ERR(acl); + esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", + vport->vport, err); + goto out; + } + vport->ingress.acl = acl; + + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); + + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); + + g = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", + vport->vport, err); + goto out; + } + vport->ingress.allow_untagged_spoofchk_grp = g; + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); + + g = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", + vport->vport, err); + goto out; + } + vport->ingress.allow_untagged_only_grp = g; + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); + + g = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", + vport->vport, err); + goto out; + } + vport->ingress.allow_spoofchk_only_grp = g; + + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); + + g = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR_OR_NULL(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", + vport->vport, err); + goto out; + } + vport->ingress.drop_grp = g; + +out: + if (err) { + if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp)) + mlx5_destroy_flow_group( + 
vport->ingress.allow_spoofchk_only_grp); + if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp)) + mlx5_destroy_flow_group( + vport->ingress.allow_untagged_only_grp); + if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp)) + mlx5_destroy_flow_group( + vport->ingress.allow_untagged_spoofchk_grp); + if (!IS_ERR_OR_NULL(vport->ingress.acl)) + mlx5_destroy_flow_table(vport->ingress.acl); + } + + kfree(flow_group_in); +} + +static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (!IS_ERR_OR_NULL(vport->ingress.drop_rule)) + mlx5_del_flow_rule(vport->ingress.drop_rule); + + if (!IS_ERR_OR_NULL(vport->ingress.allow_rule)) + mlx5_del_flow_rule(vport->ingress.allow_rule); + + vport->ingress.drop_rule = NULL; + vport->ingress.allow_rule = NULL; +} + +static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + if (IS_ERR_OR_NULL(vport->ingress.acl)) + return; + + esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport); + + esw_vport_cleanup_ingress_rules(esw, vport); + mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp); + mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp); + mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp); + mlx5_destroy_flow_group(vport->ingress.drop_grp); + mlx5_destroy_flow_table(vport->ingress.acl); + vport->ingress.acl = NULL; + vport->ingress.drop_grp = NULL; + vport->ingress.allow_spoofchk_only_grp = NULL; + vport->ingress.allow_untagged_only_grp = NULL; + vport->ingress.allow_untagged_spoofchk_grp = NULL; +} + +static int esw_vport_ingress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + u8 smac[ETH_ALEN]; + u32 *match_v; + u32 *match_c; + int err = 0; + u8 *smac_v; + + if (vport->spoofchk) { + err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac); + if (err) { + esw_warn(esw->dev, + "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n", + vport->vport, err); + return err; + } + + if (!is_valid_ether_addr(smac)) { + mlx5_core_warn(esw->dev, + "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n", + vport->vport); + return -EPERM; + } + } + + esw_vport_cleanup_ingress_rules(esw, vport); + + if (!vport->vlan && !vport->qos && !vport->spoofchk) { + esw_vport_disable_ingress_acl(esw, vport); + return 0; + } + + esw_vport_enable_ingress_acl(esw, vport); + + esw_debug(esw->dev, + "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n", + vport->vport, vport->vlan, vport->qos); + + match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + if (!match_v || !match_c) { + err = -ENOMEM; + esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n", + vport->vport, err); + goto out; + } + + if (vport->vlan || vport->qos) + MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); + + if (vport->spoofchk) { + MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16); + MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0); + smac_v = MLX5_ADDR_OF(fte_match_param, + match_v, + outer_headers.smac_47_16); + ether_addr_copy(smac_v, smac); + } + + vport->ingress.allow_rule = + mlx5_add_flow_rule(vport->ingress.acl, + MLX5_MATCH_OUTER_HEADERS, + match_c, + match_v, + MLX5_FLOW_CONTEXT_ACTION_ALLOW, + 0, NULL); + if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) { + err = PTR_ERR(vport->ingress.allow_rule); + pr_warn("vport[%d] 
configure ingress allow rule, err(%d)\n", + vport->vport, err); + vport->ingress.allow_rule = NULL; + goto out; + } + + memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + vport->ingress.drop_rule = + mlx5_add_flow_rule(vport->ingress.acl, + 0, + match_c, + match_v, + MLX5_FLOW_CONTEXT_ACTION_DROP, + 0, NULL); + if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) { + err = PTR_ERR(vport->ingress.drop_rule); + pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", + vport->vport, err); + vport->ingress.drop_rule = NULL; + goto out; + } + +out: + if (err) + esw_vport_cleanup_ingress_rules(esw, vport); + + kfree(match_v); + kfree(match_c); + return err; +} + +static int esw_vport_egress_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport) +{ + u32 *match_v; + u32 *match_c; + int err = 0; + + esw_vport_cleanup_egress_rules(esw, vport); + + if (!vport->vlan && !vport->qos) { + esw_vport_disable_egress_acl(esw, vport); + return 0; + } + + esw_vport_enable_egress_acl(esw, vport); + + esw_debug(esw->dev, + "vport[%d] configure egress rules, vlan(%d) qos(%d)\n", + vport->vport, vport->vlan, vport->qos); + + match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + if (!match_v || !match_c) { + err = -ENOMEM; + esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n", + vport->vport, err); + goto out; + } + + /* Allowed vlan rule */ + MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag); + MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid); + MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan); + + vport->egress.allowed_vlan = + mlx5_add_flow_rule(vport->egress.acl, + MLX5_MATCH_OUTER_HEADERS, + match_c, + match_v, + MLX5_FLOW_CONTEXT_ACTION_ALLOW, + 0, NULL); + if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { + err = PTR_ERR(vport->egress.allowed_vlan); + pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", + vport->vport, err); + vport->egress.allowed_vlan = NULL; + goto out; + } + + /* Drop others rule (star rule) */ + memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param)); + vport->egress.drop_rule = + mlx5_add_flow_rule(vport->egress.acl, + 0, + match_c, + match_v, + MLX5_FLOW_CONTEXT_ACTION_DROP, + 0, NULL); + if (IS_ERR_OR_NULL(vport->egress.drop_rule)) { + err = PTR_ERR(vport->egress.drop_rule); + pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", + vport->vport, err); + vport->egress.drop_rule = NULL; + } +out: + kfree(match_v); + kfree(match_c); + return err; +} + static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, int enable_events) { struct mlx5_vport *vport = &esw->vports[vport_num]; - unsigned long flags; + mutex_lock(&esw->state_lock); WARN_ON(vport->enabled); esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num); + + if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */ + esw_vport_ingress_config(esw, vport); + esw_vport_egress_config(esw, vport); + } + mlx5_modify_vport_admin_state(esw->dev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, vport_num, @@ -725,53 +1491,32 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, /* Sync with current vport context */ vport->enabled_events = enable_events; - esw_vport_change_handler(&vport->vport_change_handler); + 
esw_vport_change_handle_locked(vport); - spin_lock_irqsave(&vport->lock, flags); vport->enabled = true; - spin_unlock_irqrestore(&vport->lock, flags); + + /* only PF is trusted by default */ + vport->trusted = (vport_num) ? false : true; arm_vport_context_events_cmd(esw->dev, vport_num, enable_events); esw->enabled_vports++; esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); -} - -static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num) -{ - struct mlx5_vport *vport = &esw->vports[vport_num]; - struct l2addr_node *node; - struct vport_addr *addr; - struct hlist_node *tmp; - int hi; - - for_each_l2hash_node(node, tmp, vport->uc_list, hi) { - addr = container_of(node, struct vport_addr, node); - addr->action = MLX5_ACTION_DEL; - } - esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_UC); - - for_each_l2hash_node(node, tmp, vport->mc_list, hi) { - addr = container_of(node, struct vport_addr, node); - addr->action = MLX5_ACTION_DEL; - } - esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_MC); + mutex_unlock(&esw->state_lock); } static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) { struct mlx5_vport *vport = &esw->vports[vport_num]; - unsigned long flags; if (!vport->enabled) return; esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num); /* Mark this vport as disabled to discard new events */ - spin_lock_irqsave(&vport->lock, flags); vport->enabled = false; - vport->enabled_events = 0; - spin_unlock_irqrestore(&vport->lock, flags); + + synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC)); mlx5_modify_vport_admin_state(esw->dev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT, @@ -781,9 +1526,19 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) flush_workqueue(esw->work_queue); /* Disable events from this vport */ arm_vport_context_events_cmd(esw->dev, vport->vport, 0); - /* We don't assume VFs will cleanup after themselves */ - esw_cleanup_vport(esw, vport_num); + mutex_lock(&esw->state_lock); + /* We don't assume VFs will cleanup after themselves. + * Calling vport change handler while vport is disabled will cleanup + * the vport resources. 
+ */ + esw_vport_change_handle_locked(vport); + vport->enabled_events = 0; + if (vport_num) { + esw_vport_disable_egress_acl(esw, vport); + esw_vport_disable_ingress_acl(esw, vport); + } esw->enabled_vports--; + mutex_unlock(&esw->state_lock); } /* Public E-Switch API */ @@ -802,6 +1557,12 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs) return -ENOTSUPP; } + if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support)) + esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n"); + + if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support)) + esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n"); + esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs); esw_disable_vport(esw, 0); @@ -824,6 +1585,7 @@ abort: void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) { + struct esw_mc_addr *mc_promisc; int i; if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || @@ -833,9 +1595,14 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) esw_info(esw->dev, "disable SRIOV: active vports(%d)\n", esw->enabled_vports); + mc_promisc = esw->mc_promisc; + for (i = 0; i < esw->total_vports; i++) esw_disable_vport(esw, i); + if (mc_promisc && mc_promisc->uplink_rule) + mlx5_del_flow_rule(mc_promisc->uplink_rule); + esw_destroy_fdb_table(esw); /* VPORT 0 (PF) must be enabled back with non-sriov configuration */ @@ -845,7 +1612,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) int mlx5_eswitch_init(struct mlx5_core_dev *dev) { int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); - int total_vports = 1 + pci_sriov_get_totalvfs(dev->pdev); + int total_vports = MLX5_TOTAL_VPORTS(dev); + struct esw_mc_addr *mc_promisc; struct mlx5_eswitch *esw; int vport_num; int err; @@ -874,6 +1642,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) } esw->l2_table.size = l2_table_size; + mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL); + if (!mc_promisc) { + err = -ENOMEM; + goto abort; + } + esw->mc_promisc = mc_promisc; + esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); if (!esw->work_queue) { err = -ENOMEM; @@ -887,6 +1662,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) goto abort; } + mutex_init(&esw->state_lock); + for (vport_num = 0; vport_num < total_vports; vport_num++) { struct mlx5_vport *vport = &esw->vports[vport_num]; @@ -894,7 +1671,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) vport->dev = dev; INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler); - spin_lock_init(&vport->lock); } esw->total_vports = total_vports; @@ -925,6 +1701,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); kfree(esw->l2_table.bitmap); + kfree(esw->mc_promisc); kfree(esw->vports); kfree(esw); } @@ -942,10 +1719,8 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) } vport = &esw->vports[vport_num]; - spin_lock(&vport->lock); if (vport->enabled) queue_work(esw->work_queue, &vport->vport_change_handler); - spin_unlock(&vport->lock); } /* Vport Administration */ @@ -957,12 +1732,22 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, int vport, u8 mac[ETH_ALEN]) { int err = 0; + struct mlx5_vport *evport; if (!ESW_ALLOWED(esw)) return -EPERM; if (!LEGAL_VPORT(esw, vport)) return -EINVAL; + evport = &esw->vports[vport]; + + if (evport->spoofchk && !is_valid_ether_addr(mac)) { + mlx5_core_warn(esw->dev, + "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n", + vport); + return -EPERM; + } + err = 
mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); if (err) { mlx5_core_warn(esw->dev, @@ -971,6 +1756,11 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, return err; } + mutex_lock(&esw->state_lock); + if (evport->enabled) + err = esw_vport_ingress_config(esw, evport); + mutex_unlock(&esw->state_lock); + return err; } @@ -990,6 +1780,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi) { + struct mlx5_vport *evport; u16 vlan; u8 qos; @@ -998,6 +1789,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, if (!LEGAL_VPORT(esw, vport)) return -EINVAL; + evport = &esw->vports[vport]; + memset(ivi, 0, sizeof(*ivi)); ivi->vf = vport - 1; @@ -1008,7 +1801,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos); ivi->vlan = vlan; ivi->qos = qos; - ivi->spoofchk = 0; + ivi->spoofchk = evport->spoofchk; return 0; } @@ -1016,6 +1809,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int vport, u16 vlan, u8 qos) { + struct mlx5_vport *evport; + int err = 0; int set = 0; if (!ESW_ALLOWED(esw)) @@ -1026,7 +1821,72 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, if (vlan || qos) set = 1; - return modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set); + evport = &esw->vports[vport]; + + err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set); + if (err) + return err; + + mutex_lock(&esw->state_lock); + evport->vlan = vlan; + evport->qos = qos; + if (evport->enabled) { + err = esw_vport_ingress_config(esw, evport); + if (err) + goto out; + err = esw_vport_egress_config(esw, evport); + } + +out: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, + int vport, bool spoofchk) +{ + struct mlx5_vport *evport; + bool pschk; + int err = 0; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + evport = &esw->vports[vport]; + + mutex_lock(&esw->state_lock); + pschk = evport->spoofchk; + evport->spoofchk = spoofchk; + if (evport->enabled) + err = esw_vport_ingress_config(esw, evport); + if (err) + evport->spoofchk = pschk; + mutex_unlock(&esw->state_lock); + + return err; +} + +int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, + int vport, bool setting) +{ + struct mlx5_vport *evport; + + if (!ESW_ALLOWED(esw)) + return -EPERM; + if (!LEGAL_VPORT(esw, vport)) + return -EINVAL; + + evport = &esw->vports[vport]; + + mutex_lock(&esw->state_lock); + evport->trusted = setting; + if (evport->enabled) + esw_vport_change_handle_locked(evport); + mutex_unlock(&esw->state_lock); + + return 0; } int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 3416a428f70f..fd6800256d4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -88,18 +88,40 @@ struct l2addr_node { kfree(ptr); \ }) +struct vport_ingress { + struct mlx5_flow_table *acl; + struct mlx5_flow_group *allow_untagged_spoofchk_grp; + struct mlx5_flow_group *allow_spoofchk_only_grp; + struct mlx5_flow_group *allow_untagged_only_grp; + struct mlx5_flow_group *drop_grp; + struct mlx5_flow_rule *allow_rule; + struct mlx5_flow_rule *drop_rule; +}; + +struct vport_egress { + struct 
mlx5_flow_table *acl; + struct mlx5_flow_group *allowed_vlans_grp; + struct mlx5_flow_group *drop_grp; + struct mlx5_flow_rule *allowed_vlan; + struct mlx5_flow_rule *drop_rule; +}; + struct mlx5_vport { struct mlx5_core_dev *dev; int vport; struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE]; + struct mlx5_flow_rule *promisc_rule; + struct mlx5_flow_rule *allmulti_rule; struct work_struct vport_change_handler; - /* This spinlock protects access to vport data, between - * "esw_vport_disable" and ongoing interrupt "mlx5_eswitch_vport_event" - * once vport marked as disabled new interrupts are discarded. - */ - spinlock_t lock; /* vport events sync */ + struct vport_ingress ingress; + struct vport_egress egress; + + u16 vlan; + u8 qos; + bool spoofchk; + bool trusted; bool enabled; u16 enabled_events; }; @@ -113,6 +135,8 @@ struct mlx5_l2_table { struct mlx5_eswitch_fdb { void *fdb; struct mlx5_flow_group *addr_grp; + struct mlx5_flow_group *allmulti_grp; + struct mlx5_flow_group *promisc_grp; }; struct mlx5_eswitch { @@ -124,6 +148,11 @@ struct mlx5_eswitch { struct mlx5_vport *vports; int total_vports; int enabled_vports; + /* Synchronize between vport change events + * and async SRIOV admin state changes + */ + struct mutex state_lock; + struct esw_mc_addr *mc_promisc; }; /* E-Switch API */ @@ -138,6 +167,10 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, int vport, int link_state); int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, int vport, u16 vlan, u8 qos); +int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, + int vport, bool spoofchk); +int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, + int vport_num, bool setting); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, int vport, struct ifla_vf_info *ivi); int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index f46f1db0fc00..a5bb6b695242 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -50,6 +50,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); + if (ft->vport) { + MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); + MLX5_SET(set_flow_table_root_in, in, other_vport, 1); + } memset(out, 0, sizeof(out)); return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, @@ -57,6 +61,7 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, } int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, + u16 vport, enum fs_flow_table_type type, unsigned int level, unsigned int log_size, struct mlx5_flow_table *next_ft, unsigned int *table_id) @@ -77,6 +82,10 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, MLX5_SET(create_flow_table_in, in, table_type, type); MLX5_SET(create_flow_table_in, in, level, level); MLX5_SET(create_flow_table_in, in, log_size, log_size); + if (vport) { + MLX5_SET(create_flow_table_in, in, vport_number, vport); + MLX5_SET(create_flow_table_in, in, other_vport, 1); + } memset(out, 0, sizeof(out)); err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, @@ -101,6 +110,10 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev, MLX5_CMD_OP_DESTROY_FLOW_TABLE); MLX5_SET(destroy_flow_table_in, in, table_type, ft->type); 
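Every flow-table command in the fs_cmd.c hunks here gains the same pair of fields: when ft->vport is non-zero, the E-Switch manager is issuing the command against another function's table, so the command must carry vport_number together with the other_vport bit. The patch writes the pair inline in each command; the macro below is a hypothetical factoring, shown only to name the pattern.

/* Hypothetical helper, not part of the patch. Each command layout
 * (create_flow_table_in, destroy_flow_table_in, set_fte_in, ...)
 * exposes identically named vport_number/other_vport fields, so the
 * layout name can be forwarded to MLX5_SET().
 */
#define SET_FT_VPORT(typ, in, ft)					\
	do {								\
		if ((ft)->vport) {					\
			MLX5_SET(typ, in, vport_number, (ft)->vport);	\
			MLX5_SET(typ, in, other_vport, 1);		\
		}							\
	} while (0)

/* e.g. mlx5_cmd_destroy_flow_table() would then contain just:
 *	SET_FT_VPORT(destroy_flow_table_in, in, ft);
 */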
MLX5_SET(destroy_flow_table_in, in, table_id, ft->id); + if (ft->vport) { + MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport); + MLX5_SET(destroy_flow_table_in, in, other_vport, 1); + } return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); @@ -120,6 +133,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev, MLX5_CMD_OP_MODIFY_FLOW_TABLE); MLX5_SET(modify_flow_table_in, in, table_type, ft->type); MLX5_SET(modify_flow_table_in, in, table_id, ft->id); + if (ft->vport) { + MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport); + MLX5_SET(modify_flow_table_in, in, other_vport, 1); + } MLX5_SET(modify_flow_table_in, in, modify_field_select, MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID); if (next_ft) { @@ -148,6 +165,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev, MLX5_CMD_OP_CREATE_FLOW_GROUP); MLX5_SET(create_flow_group_in, in, table_type, ft->type); MLX5_SET(create_flow_group_in, in, table_id, ft->id); + if (ft->vport) { + MLX5_SET(create_flow_group_in, in, vport_number, ft->vport); + MLX5_SET(create_flow_group_in, in, other_vport, 1); + } err = mlx5_cmd_exec_check_status(dev, in, inlen, out, @@ -174,6 +195,10 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, MLX5_SET(destroy_flow_group_in, in, table_type, ft->type); MLX5_SET(destroy_flow_group_in, in, table_id, ft->id); MLX5_SET(destroy_flow_group_in, in, group_id, group_id); + if (ft->vport) { + MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport); + MLX5_SET(destroy_flow_group_in, in, other_vport, 1); + } return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); @@ -207,22 +232,29 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(set_fte_in, in, table_type, ft->type); MLX5_SET(set_fte_in, in, table_id, ft->id); MLX5_SET(set_fte_in, in, flow_index, fte->index); + if (ft->vport) { + MLX5_SET(set_fte_in, in, vport_number, ft->vport); + MLX5_SET(set_fte_in, in, other_vport, 1); + } in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context); MLX5_SET(flow_context, in_flow_context, group_id, group_id); MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag); MLX5_SET(flow_context, in_flow_context, action, fte->action); - MLX5_SET(flow_context, in_flow_context, destination_list_size, - fte->dests_size); in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, match_value); memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param)); + in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { - in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); + int list_size = 0; + list_for_each_entry(dst, &fte->node.children, node.list) { unsigned int id; + if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + MLX5_SET(dest_format_struct, in_dests, destination_type, dst->dest_attr.type); if (dst->dest_attr.type == @@ -233,8 +265,31 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, } MLX5_SET(dest_format_struct, in_dests, destination_id, id); in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); + list_size++; } + + MLX5_SET(flow_context, in_flow_context, destination_list_size, + list_size); } + + if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + int list_size = 0; + + list_for_each_entry(dst, &fte->node.children, node.list) { + if (dst->dest_attr.type != + MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + + MLX5_SET(flow_counter_list, in_dests, flow_counter_id, + dst->dest_attr.counter->id); + in_dests += 
MLX5_ST_SZ_BYTES(dest_format_struct); + list_size++; + } + + MLX5_SET(flow_context, in_flow_context, flow_counter_list_size, + list_size); + } + memset(out, 0, sizeof(out)); err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out)); @@ -254,18 +309,16 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev, int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, unsigned group_id, + int modify_mask, struct fs_fte *fte) { int opmod; - int modify_mask; int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev, flow_table_properties_nic_receive. flow_modify_en); if (!atomic_mod_cap) return -ENOTSUPP; opmod = 1; - modify_mask = 1 << - MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST; return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte); } @@ -285,8 +338,78 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, MLX5_SET(delete_fte_in, in, table_type, ft->type); MLX5_SET(delete_fte_in, in, table_id, ft->id); MLX5_SET(delete_fte_in, in, flow_index, index); + if (ft->vport) { + MLX5_SET(delete_fte_in, in, vport_number, ft->vport); + MLX5_SET(delete_fte_in, in, other_vport, 1); + } err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); return err; } + +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id) +{ + u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]; + u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)]; + int err; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(alloc_flow_counter_in, in, opcode, + MLX5_CMD_OP_ALLOC_FLOW_COUNTER); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); + if (err) + return err; + + *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id); + + return 0; +} + +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]; + u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(dealloc_flow_counter_in, in, opcode, + MLX5_CMD_OP_DEALLOC_FLOW_COUNTER); + MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id); + + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, + sizeof(out)); +} + +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, + u64 *packets, u64 *bytes) +{ + u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) + + MLX5_ST_SZ_BYTES(traffic_counter)]; + u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)]; + void *stats; + int err = 0; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(query_flow_counter_in, in, opcode, + MLX5_CMD_OP_QUERY_FLOW_COUNTER); + MLX5_SET(query_flow_counter_in, in, op_mod, 0); + MLX5_SET(query_flow_counter_in, in, flow_counter_id, id); + + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics); + *packets = MLX5_GET64(traffic_counter, stats, packets); + *bytes = MLX5_GET64(traffic_counter, stats, octets); + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 9814d4784803..fc4f7b83fe0a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -34,6 +34,7 @@ #define _MLX5_FS_CMD_ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev, + u16 vport, enum fs_flow_table_type type, unsigned int level, unsigned int log_size, struct mlx5_flow_table *next_ft, unsigned int *table_id); @@ -61,6 +62,7 @@ int mlx5_cmd_create_fte(struct 
mlx5_core_dev *dev, int mlx5_cmd_update_fte(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, unsigned group_id, + int modify_mask, struct fs_fte *fte); int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, @@ -69,4 +71,9 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft); + +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, + u64 *packets, u64 *bytes); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 5121be4675d1..8b5f0b2c0d5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -40,18 +40,18 @@ #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\ sizeof(struct init_tree_node)) -#define ADD_PRIO(num_prios_val, min_level_val, max_ft_val, caps_val,\ +#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\ ...) {.type = FS_TYPE_PRIO,\ .min_ft_level = min_level_val,\ - .max_ft = max_ft_val,\ + .num_levels = num_levels_val,\ .num_leaf_prios = num_prios_val,\ .caps = caps_val,\ .children = (struct init_tree_node[]) {__VA_ARGS__},\ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \ } -#define ADD_MULTIPLE_PRIO(num_prios_val, max_ft_val, ...)\ - ADD_PRIO(num_prios_val, 0, max_ft_val, {},\ +#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\ + ADD_PRIO(num_prios_val, 0, num_levels_val, {},\ __VA_ARGS__)\ #define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\ @@ -67,17 +67,20 @@ #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \ .caps = (long[]) {__VA_ARGS__} } -#define LEFTOVERS_MAX_FT 1 +#define LEFTOVERS_NUM_LEVELS 1 #define LEFTOVERS_NUM_PRIOS 1 -#define BY_PASS_PRIO_MAX_FT 1 -#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ - LEFTOVERS_MAX_FT) -#define KERNEL_MAX_FT 3 -#define KERNEL_NUM_PRIOS 2 -#define KENREL_MIN_LEVEL 2 +#define BY_PASS_PRIO_NUM_LEVELS 1 +#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ + LEFTOVERS_NUM_PRIOS) -#define ANCHOR_MAX_FT 1 +/* Vlan, mac, ttc, aRFS */ +#define KERNEL_NIC_PRIO_NUM_LEVELS 4 +#define KERNEL_NIC_NUM_PRIOS 1 +/* One more level for tc */ +#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) + +#define ANCHOR_NUM_LEVELS 1 #define ANCHOR_NUM_PRIOS 1 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1) struct node_caps { @@ -92,7 +95,7 @@ static struct init_tree_node { int min_ft_level; int num_leaf_prios; int prio; - int max_ft; + int num_levels; } root_fs = { .type = FS_TYPE_NAMESPACE, .ar_size = 4, @@ -102,17 +105,20 @@ static struct init_tree_node { FS_CAP(flow_table_properties_nic_receive.modify_root), FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), - ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_MAX_FT))), - ADD_PRIO(0, KENREL_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, KERNEL_MAX_FT))), + ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, + BY_PASS_PRIO_NUM_LEVELS))), + ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {}, + ADD_NS(ADD_MULTIPLE_PRIO(1, 1), + ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, + KERNEL_NIC_PRIO_NUM_LEVELS))), ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), 
FS_CAP(flow_table_properties_nic_receive.modify_root), FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), FS_CAP(flow_table_properties_nic_receive.flow_table_modify)), - ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))), + ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))), ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {}, - ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_MAX_FT))), + ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))), } }; @@ -222,19 +228,6 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns, return NULL; } -static unsigned int find_next_free_level(struct fs_prio *prio) -{ - if (!list_empty(&prio->node.children)) { - struct mlx5_flow_table *ft; - - ft = list_last_entry(&prio->node.children, - struct mlx5_flow_table, - node.list); - return ft->level + 1; - } - return prio->start_level; -} - static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size) { unsigned int i; @@ -351,6 +344,7 @@ static void del_rule(struct fs_node *node) struct mlx5_flow_group *fg; struct fs_fte *fte; u32 *match_value; + int modify_mask; struct mlx5_core_dev *dev = get_dev(node); int match_len = MLX5_ST_SZ_BYTES(fte_match_param); int err; @@ -374,8 +368,11 @@ static void del_rule(struct fs_node *node) } if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && --fte->dests_size) { + modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST), err = mlx5_cmd_update_fte(dev, ft, - fg->id, fte); + fg->id, + modify_mask, + fte); if (err) pr_warn("%s can't del rule fg id=%d fte_index=%d\n", __func__, fg->id, fte->index); @@ -464,7 +461,7 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in) return fg; } -static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte, +static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte, enum fs_flow_table_type table_type) { struct mlx5_flow_table *ft; @@ -476,6 +473,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte, ft->level = level; ft->node.type = FS_TYPE_FLOW_TABLE; ft->type = table_type; + ft->vport = vport; ft->max_fte = max_fte; INIT_LIST_HEAD(&ft->fwd_rules); mutex_init(&ft->lock); @@ -615,12 +613,13 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio return err; } -static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, - struct mlx5_flow_destination *dest) +int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, + struct mlx5_flow_destination *dest) { struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; struct fs_fte *fte; + int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); int err = 0; fs_get_obj(fte, rule->node.parent); @@ -632,7 +631,9 @@ static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, memcpy(&rule->dest_attr, dest, sizeof(*dest)); err = mlx5_cmd_update_fte(get_dev(&ft->node), - ft, fg->id, fte); + ft, fg->id, + modify_mask, + fte); unlock_ref_node(&fte->node); return err; @@ -693,9 +694,23 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table return err; } -struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, - int prio, - int max_fte) +static void list_add_flow_table(struct mlx5_flow_table *ft, + struct fs_prio *prio) +{ + struct list_head *prev = &prio->node.children; + struct mlx5_flow_table *iter; + + fs_for_each_ft(iter, prio) { + if (iter->level > ft->level) + break; + prev = &iter->node.list; + } + list_add(&ft->node.list, prev); +} + 
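The fs_core.c changes above drop find_next_free_level() in favor of caller-chosen levels: a priority now spans num_levels slots, __mlx5_create_flow_table() offsets the requested level by the priority's start_level, and list_add_flow_table() keeps a priority's tables sorted by level so find_next_chained_ft() still walks them in order. A hedged usage sketch follows, mirroring the en_tc.c call site earlier in this diff; the helper name is illustrative and the constants are the values used there.

/* Sketch of the new caller-side contract: the level is relative to
 * the priority and must stay below that priority's num_levels, or the
 * core returns -ENOSPC.
 */
static struct mlx5_flow_table *sketch_create_tc_table(struct mlx5e_priv *priv)
{
	return mlx5_create_auto_grouped_flow_table(priv->fs.ns,
						   MLX5E_TC_PRIO,
						   MLX5E_TC_TABLE_NUM_ENTRIES,
						   MLX5E_TC_TABLE_NUM_GROUPS,
						   0 /* level within the prio */);
}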
+static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + u16 vport, int prio, + int max_fte, u32 level) { struct mlx5_flow_table *next_ft = NULL; struct mlx5_flow_table *ft; @@ -716,12 +731,16 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, err = -EINVAL; goto unlock_root; } - if (fs_prio->num_ft == fs_prio->max_ft) { + if (level >= fs_prio->num_levels) { err = -ENOSPC; goto unlock_root; } - - ft = alloc_flow_table(find_next_free_level(fs_prio), + /* The level is related to the + * priority level range. + */ + level += fs_prio->start_level; + ft = alloc_flow_table(level, + vport, roundup_pow_of_two(max_fte), root->table_type); if (!ft) { @@ -732,7 +751,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, tree_init_node(&ft->node, 1, del_flow_table); log_table_sz = ilog2(ft->max_fte); next_ft = find_next_chained_ft(fs_prio); - err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level, + err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level, log_table_sz, next_ft, &ft->id); if (err) goto free_ft; @@ -742,7 +761,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, goto destroy_ft; lock_ref_node(&fs_prio->node); tree_add_node(&ft->node, &fs_prio->node); - list_add_tail(&ft->node.list, &fs_prio->node.children); + list_add_flow_table(ft, fs_prio); fs_prio->num_ft++; unlock_ref_node(&fs_prio->node); mutex_unlock(&root->chain_lock); @@ -756,17 +775,32 @@ unlock_root: return ERR_PTR(err); } +struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns, + int prio, int max_fte, + u32 level) +{ + return __mlx5_create_flow_table(ns, 0, prio, max_fte, level); +} + +struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns, + int prio, int max_fte, + u32 level, u16 vport) +{ + return __mlx5_create_flow_table(ns, vport, prio, max_fte, level); +} + struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, int prio, int num_flow_table_entries, - int max_num_groups) + int max_num_groups, + u32 level) { struct mlx5_flow_table *ft; if (max_num_groups > num_flow_table_entries) return ERR_PTR(-EINVAL); - ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries); + ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level); if (IS_ERR(ft)) return ft; @@ -850,6 +884,7 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte, { struct mlx5_flow_table *ft; struct mlx5_flow_rule *rule; + int modify_mask = 0; int err; rule = alloc_rule(dest); @@ -865,14 +900,20 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte, list_add(&rule->node.list, &fte->node.children); else list_add_tail(&rule->node.list, &fte->node.children); - if (dest) + if (dest) { fte->dests_size++; + + modify_mask |= dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ? 
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS) : + BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); + } + if (fte->dests_size == 1 || !dest) err = mlx5_cmd_create_fte(get_dev(&ft->node), ft, fg->id, fte); else err = mlx5_cmd_update_fte(get_dev(&ft->node), - ft, fg->id, fte); + ft, fg->id, modify_mask, fte); if (err) goto free_rule; @@ -1065,31 +1106,48 @@ unlock_fg: return rule; } -static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft, - u8 match_criteria_enable, - u32 *match_criteria, - u32 *match_value, - u8 action, - u32 flow_tag, - struct mlx5_flow_destination *dest) +struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule) { - struct mlx5_flow_rule *rule; - struct mlx5_flow_group *g; + struct mlx5_flow_rule *dst; + struct fs_fte *fte; - g = create_autogroup(ft, match_criteria_enable, match_criteria); - if (IS_ERR(g)) - return (void *)g; + fs_get_obj(fte, rule->node.parent); - rule = add_rule_fg(g, match_value, - action, flow_tag, dest); - if (IS_ERR(rule)) { - /* Remove assumes refcount > 0 and autogroup creates a group - * with a refcount = 0. - */ - tree_get_node(&g->node); - tree_remove_node(&g->node); + fs_for_each_dst(dst, fte) { + if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + return dst->dest_attr.counter; } - return rule; + + return NULL; +} + +static bool counter_is_valid(struct mlx5_fc *counter, u32 action) +{ + if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT)) + return !counter; + + if (!counter) + return false; + + /* Hardware support counter for a drop action only */ + return action == (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT); +} + +static bool dest_is_valid(struct mlx5_flow_destination *dest, + u32 action, + struct mlx5_flow_table *ft) +{ + if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)) + return counter_is_valid(dest->counter, action); + + if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + return true; + + if (!dest || ((dest->type == + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) && + (dest->ft->level <= ft->level))) + return false; + return true; } static struct mlx5_flow_rule * @@ -1104,7 +1162,7 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft, struct mlx5_flow_group *g; struct mlx5_flow_rule *rule; - if ((action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !dest) + if (!dest_is_valid(dest, action, ft)) return ERR_PTR(-EINVAL); nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); @@ -1119,8 +1177,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft, goto unlock; } - rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria, - match_value, action, flow_tag, dest); + g = create_autogroup(ft, match_criteria_enable, match_criteria); + if (IS_ERR(g)) { + rule = (void *)g; + goto unlock; + } + + rule = add_rule_fg(g, match_value, + action, flow_tag, dest); + if (IS_ERR(rule)) { + /* Remove assumes refcount > 0 and autogroup creates a group + * with a refcount = 0. 
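+ * Hence the tree_get_node() that follows: it raises the group's
+ * refcount to 1 so that tree_remove_node() can drop it and free
+ * the group without underflowing.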
+ */ + unlock_ref_node(&ft->node); + tree_get_node(&g->node); + tree_remove_node(&g->node); + return rule; + } unlock: unlock_ref_node(&ft->node); return rule; @@ -1288,7 +1361,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, { struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns; int prio; - static struct fs_prio *fs_prio; + struct fs_prio *fs_prio; struct mlx5_flow_namespace *ns; if (!root_ns) @@ -1306,6 +1379,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, return &dev->priv.fdb_root_ns->ns; else return NULL; + case MLX5_FLOW_NAMESPACE_ESW_EGRESS: + if (dev->priv.esw_egress_root_ns) + return &dev->priv.esw_egress_root_ns->ns; + else + return NULL; + case MLX5_FLOW_NAMESPACE_ESW_INGRESS: + if (dev->priv.esw_ingress_root_ns) + return &dev->priv.esw_ingress_root_ns->ns; + else + return NULL; default: return NULL; } @@ -1323,7 +1406,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, EXPORT_SYMBOL(mlx5_get_flow_namespace); static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, - unsigned prio, int max_ft) + unsigned int prio, int num_levels) { struct fs_prio *fs_prio; @@ -1334,7 +1417,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, fs_prio->node.type = FS_TYPE_PRIO; tree_init_node(&fs_prio->node, 1, NULL); tree_add_node(&fs_prio->node, &ns->node); - fs_prio->max_ft = max_ft; + fs_prio->num_levels = num_levels; fs_prio->prio = prio; list_add_tail(&fs_prio->node.list, &ns->node.children); @@ -1365,14 +1448,14 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) return ns; } -static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_node - *prio_metadata) +static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio, + struct init_tree_node *prio_metadata) { struct fs_prio *fs_prio; int i; for (i = 0; i < prio_metadata->num_leaf_prios; i++) { - fs_prio = fs_create_prio(ns, i, prio_metadata->max_ft); + fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels); if (IS_ERR(fs_prio)) return PTR_ERR(fs_prio); } @@ -1399,7 +1482,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, struct init_tree_node *init_node, struct fs_node *fs_parent_node, struct init_tree_node *init_parent_node, - int index) + int prio) { int max_ft_level = MLX5_CAP_FLOWTABLE(dev, flow_table_properties_nic_receive. 
@@ -1417,8 +1500,8 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, fs_get_obj(fs_ns, fs_parent_node); if (init_node->num_leaf_prios) - return create_leaf_prios(fs_ns, init_node); - fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft); + return create_leaf_prios(fs_ns, prio, init_node); + fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels); if (IS_ERR(fs_prio)) return PTR_ERR(fs_prio); base = &fs_prio->node; @@ -1431,11 +1514,16 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev, } else { return -EINVAL; } + prio = 0; for (i = 0; i < init_node->ar_size; i++) { err = init_root_tree_recursive(dev, &init_node->children[i], - base, init_node, i); + base, init_node, prio); if (err) return err; + if (init_node->children[i].type == FS_TYPE_PRIO && + init_node->children[i].num_leaf_prios) { + prio += init_node->children[i].num_leaf_prios; + } } return 0; @@ -1491,9 +1579,9 @@ static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level) struct fs_prio *prio; fs_for_each_prio(prio, ns) { - /* This updates prio start_level and max_ft */ + /* This updates prio start_level and num_levels */ set_prio_attrs_in_prio(prio, acc_level); - acc_level += prio->max_ft; + acc_level += prio->num_levels; } return acc_level; } @@ -1505,11 +1593,11 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level) prio->start_level = acc_level; fs_for_each_ns(ns, prio) - /* This updates start_level and max_ft of ns's priority descendants */ + /* This updates start_level and num_levels of ns's priority descendants */ acc_level_ns = set_prio_attrs_in_ns(ns, acc_level); - if (!prio->max_ft) - prio->max_ft = acc_level_ns - prio->start_level; - WARN_ON(prio->max_ft < acc_level_ns - prio->start_level); + if (!prio->num_levels) + prio->num_levels = acc_level_ns - prio->start_level; + WARN_ON(prio->num_levels < acc_level_ns - prio->start_level); } static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) @@ -1520,12 +1608,13 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns) fs_for_each_prio(prio, ns) { set_prio_attrs_in_prio(prio, start_level); - start_level += prio->max_ft; + start_level += prio->num_levels; } } #define ANCHOR_PRIO 0 #define ANCHOR_SIZE 1 +#define ANCHOR_LEVEL 0 static int create_anchor_flow_table(struct mlx5_core_dev *dev) { @@ -1535,7 +1624,7 @@ static int create_anchor_flow_table(struct mlx5_core_dev ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR); if (!ns) return -EINVAL; - ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE); + ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL); if (IS_ERR(ft)) { mlx5_core_err(dev, "Failed to create last anchor flow table"); return PTR_ERR(ft); @@ -1680,6 +1769,9 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) { cleanup_root_ns(dev); cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); + cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); + cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns); + mlx5_cleanup_fc_stats(dev); } static int init_fdb_root_ns(struct mlx5_core_dev *dev) @@ -1700,20 +1792,69 @@ static int init_fdb_root_ns(struct mlx5_core_dev *dev) } } +static int init_egress_acl_root_ns(struct mlx5_core_dev *dev) +{ + struct fs_prio *prio; + + dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL); + if (!dev->priv.esw_egress_root_ns) + return -ENOMEM; + + /* create 1 prio*/ + prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); + if 
(IS_ERR(prio)) + return PTR_ERR(prio); + else + return 0; +} + +static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev) +{ + struct fs_prio *prio; + + dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL); + if (!dev->priv.esw_ingress_root_ns) + return -ENOMEM; + + /* create 1 prio*/ + prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev)); + if (IS_ERR(prio)) + return PTR_ERR(prio); + else + return 0; +} + int mlx5_init_fs(struct mlx5_core_dev *dev) { int err = 0; + err = mlx5_init_fc_stats(dev); + if (err) + return err; + if (MLX5_CAP_GEN(dev, nic_flow_table)) { err = init_root_ns(dev); if (err) - return err; + goto err; } if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { err = init_fdb_root_ns(dev); if (err) - cleanup_root_ns(dev); + goto err; + } + if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { + err = init_egress_acl_root_ns(dev); + if (err) + goto err; + } + if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { + err = init_ingress_acl_root_ns(dev); + if (err) + goto err; } + return 0; +err: + mlx5_cleanup_fs(dev); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index f37a6248a27b..aa41a7314691 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -45,8 +45,10 @@ enum fs_node_type { }; enum fs_flow_table_type { - FS_FT_NIC_RX = 0x0, - FS_FT_FDB = 0X4, + FS_FT_NIC_RX = 0x0, + FS_FT_ESW_EGRESS_ACL = 0x2, + FS_FT_ESW_INGRESS_ACL = 0x3, + FS_FT_FDB = 0X4, }; enum fs_fte_status { @@ -79,6 +81,7 @@ struct mlx5_flow_rule { struct mlx5_flow_table { struct fs_node node; u32 id; + u16 vport; unsigned int max_fte; unsigned int level; enum fs_flow_table_type type; @@ -93,6 +96,28 @@ struct mlx5_flow_table { struct list_head fwd_rules; }; +struct mlx5_fc_cache { + u64 packets; + u64 bytes; + u64 lastuse; +}; + +struct mlx5_fc { + struct list_head list; + + /* last{packets,bytes} members are used when calculating the delta since + * last reading + */ + u64 lastpackets; + u64 lastbytes; + + u16 id; + bool deleted; + bool aging; + + struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; +}; + /* Type of children is mlx5_flow_rule */ struct fs_fte { struct fs_node node; @@ -102,12 +127,13 @@ struct fs_fte { u32 index; u32 action; enum fs_fte_status status; + struct mlx5_fc *counter; }; /* Type of children is mlx5_flow_table/namespace */ struct fs_prio { struct fs_node node; - unsigned int max_ft; + unsigned int num_levels; unsigned int start_level; unsigned int prio; unsigned int num_ft; @@ -143,6 +169,9 @@ struct mlx5_flow_root_namespace { struct mutex chain_lock; }; +int mlx5_init_fc_stats(struct mlx5_core_dev *dev); +void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev); + int mlx5_init_fs(struct mlx5_core_dev *dev); void mlx5_cleanup_fs(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c new file mode 100644 index 000000000000..164dc37fda72 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include <linux/mlx5/driver.h> +#include <linux/mlx5/fs.h> +#include "mlx5_core.h" +#include "fs_core.h" +#include "fs_cmd.h" + +#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) + +/* locking scheme: + * + * It is the responsibility of the user to prevent concurrent calls or bad + * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference + * to struct mlx5_fc. + * e.g en_tc.c is protected by RTNL lock of its caller, and will never call a + * dump (access to struct mlx5_fc) after a counter is destroyed. + * + * access to counter list: + * - create (user context) + * - mlx5_fc_create() only adds to an addlist to be used by + * mlx5_fc_stats_query_work(). addlist is protected by a spinlock. + * - spawn thread to do the actual destroy + * + * - destroy (user context) + * - mark a counter as deleted + * - spawn thread to do the actual del + * + * - dump (user context) + * user should not call dump after destroy + * + * - query (single thread workqueue context) + * destroy/dump - no conflict (see destroy) + * query/dump - packets and bytes might be inconsistent (since update is not + * atomic) + * query/create - no conflict (see create) + * since every create/destroy spawn the work, only after necessary time has + * elapsed, the thread will actually query the hardware. 
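+ * Note that only this work item ever unlinks and frees a counter:
+ * mlx5_fc_destroy() merely marks an aging counter as deleted and
+ * reschedules the work, so the walk over the counter list can never
+ * race with a free.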
+ */ + +static void mlx5_fc_stats_work(struct work_struct *work) +{ + struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, + priv.fc_stats.work.work); + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + unsigned long now = jiffies; + struct mlx5_fc *counter; + struct mlx5_fc *tmp; + int err = 0; + + spin_lock(&fc_stats->addlist_lock); + + list_splice_tail_init(&fc_stats->addlist, &fc_stats->list); + + if (!list_empty(&fc_stats->list)) + queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD); + + spin_unlock(&fc_stats->addlist_lock); + + list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) { + struct mlx5_fc_cache *c = &counter->cache; + u64 packets; + u64 bytes; + + if (counter->deleted) { + list_del(&counter->list); + + mlx5_cmd_fc_free(dev, counter->id); + + kfree(counter); + continue; + } + + if (time_before(now, fc_stats->next_query)) + continue; + + err = mlx5_cmd_fc_query(dev, counter->id, &packets, &bytes); + if (err) { + pr_err("Error querying stats for counter id %d\n", + counter->id); + continue; + } + + if (packets == c->packets) + continue; + + c->lastuse = jiffies; + c->packets = packets; + c->bytes = bytes; + } + + if (time_after_eq(now, fc_stats->next_query)) + fc_stats->next_query = now + MLX5_FC_STATS_PERIOD; +} + +struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc *counter; + int err; + + counter = kzalloc(sizeof(*counter), GFP_KERNEL); + if (!counter) + return ERR_PTR(-ENOMEM); + + err = mlx5_cmd_fc_alloc(dev, &counter->id); + if (err) + goto err_out; + + if (aging) { + counter->aging = true; + + spin_lock(&fc_stats->addlist_lock); + list_add(&counter->list, &fc_stats->addlist); + spin_unlock(&fc_stats->addlist_lock); + + mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); + } + + return counter; + +err_out: + kfree(counter); + + return ERR_PTR(err); +} + +void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + if (!counter) + return; + + if (counter->aging) { + counter->deleted = true; + mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); + return; + } + + mlx5_cmd_fc_free(dev, counter->id); + kfree(counter); +} + +int mlx5_init_fc_stats(struct mlx5_core_dev *dev) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + + INIT_LIST_HEAD(&fc_stats->list); + INIT_LIST_HEAD(&fc_stats->addlist); + spin_lock_init(&fc_stats->addlist_lock); + + fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); + if (!fc_stats->wq) + return -ENOMEM; + + INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work); + + return 0; +} + +void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) +{ + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc *counter; + struct mlx5_fc *tmp; + + cancel_delayed_work_sync(&dev->priv.fc_stats.work); + destroy_workqueue(dev->priv.fc_stats.wq); + dev->priv.fc_stats.wq = NULL; + + list_splice_tail_init(&fc_stats->addlist, &fc_stats->list); + + list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) { + list_del(&counter->list); + + mlx5_cmd_fc_free(dev, counter->id); + + kfree(counter); + } +} + +void mlx5_fc_query_cached(struct mlx5_fc *counter, + u64 *bytes, u64 *packets, u64 *lastuse) +{ + struct mlx5_fc_cache c; + + c = counter->cache; + + *bytes = c.bytes - counter->lastbytes; + *packets = c.packets - counter->lastpackets; + *lastuse = c.lastuse; + + counter->lastbytes = c.bytes; + counter->lastpackets = 
c.packets; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index f5deb642d0d6..42d16b9458e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -176,11 +176,11 @@ static const char *hsynd_str(u8 synd) case MLX5_HEALTH_SYNDR_EQ_ERR: return "EQ error"; case MLX5_HEALTH_SYNDR_EQ_INV: - return "Invalid EQ refrenced"; + return "Invalid EQ referenced"; case MLX5_HEALTH_SYNDR_FFSER_ERR: return "FFSER error"; case MLX5_HEALTH_SYNDR_HIGH_TEMP: - return "High temprature"; + return "High temperature"; default: return "unrecognized error"; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 3f3b2fae4991..6feef7fb9d6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -48,6 +48,9 @@ #include <linux/kmod.h> #include <linux/delay.h> #include <linux/mlx5/mlx5_ifc.h> +#ifdef CONFIG_RFS_ACCEL +#include <linux/cpu_rmap.h> +#endif #include "mlx5_core.h" #include "fs_core.h" #ifdef CONFIG_MLX5_CORE_EN @@ -665,6 +668,12 @@ static void free_comp_eqs(struct mlx5_core_dev *dev) struct mlx5_eq_table *table = &dev->priv.eq_table; struct mlx5_eq *eq, *n; +#ifdef CONFIG_RFS_ACCEL + if (dev->rmap) { + free_irq_cpu_rmap(dev->rmap); + dev->rmap = NULL; + } +#endif spin_lock(&table->lock); list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { list_del(&eq->list); @@ -691,6 +700,11 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) INIT_LIST_HEAD(&table->comp_eqs_list); ncomp_vec = table->num_comp_vectors; nent = MLX5_COMP_EQ_SIZE; +#ifdef CONFIG_RFS_ACCEL + dev->rmap = alloc_irq_cpu_rmap(ncomp_vec); + if (!dev->rmap) + return -ENOMEM; +#endif for (i = 0; i < ncomp_vec; i++) { eq = kzalloc(sizeof(*eq), GFP_KERNEL); if (!eq) { @@ -698,6 +712,10 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev) goto clean; } +#ifdef CONFIG_RFS_ACCEL + irq_cpu_rmap_add(dev->rmap, + dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector); +#endif snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE, nent, 0, @@ -966,7 +984,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) int err; mutex_lock(&dev->intf_state_mutex); - if (dev->interface_state == MLX5_INTERFACE_STATE_UP) { + if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n", __func__); goto out; @@ -1133,7 +1151,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) if (err) pr_info("failed request module on %s\n", MLX5_IB_MOD); - dev->interface_state = MLX5_INTERFACE_STATE_UP; + clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state); + set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); out: mutex_unlock(&dev->intf_state_mutex); @@ -1207,7 +1226,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) } mutex_lock(&dev->intf_state_mutex); - if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) { + if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) { dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n", __func__); goto out; @@ -1241,7 +1260,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_cmd_cleanup(dev); out: - dev->interface_state = MLX5_INTERFACE_STATE_DOWN; + clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); + set_bit(MLX5_INTERFACE_STATE_DOWN, 
&dev->intf_state); mutex_unlock(&dev->intf_state_mutex); return err; } @@ -1452,6 +1472,18 @@ static const struct pci_error_handlers mlx5_err_handler = { .resume = mlx5_pci_resume }; +static void shutdown(struct pci_dev *pdev) +{ + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); + struct mlx5_priv *priv = &dev->priv; + + dev_info(&pdev->dev, "Shutdown was called\n"); + /* Notify mlx5 clients that the kernel is being shut down */ + set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state); + mlx5_unload_one(dev, priv); + mlx5_pci_disable_device(dev); +} + static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ @@ -1459,6 +1491,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */ { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */ + { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */ + { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */ { 0, } }; @@ -1469,6 +1503,7 @@ static struct pci_driver mlx5_core_driver = { .id_table = mlx5_core_pci_table, .probe = init_one, .remove = remove_one, + .shutdown = shutdown, .err_handler = &mlx5_err_handler, .sriov_configure = mlx5_core_sriov_configure, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 0b0b226c789e..482604bd051c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -42,6 +42,8 @@ #define DRIVER_VERSION "3.0-1" #define DRIVER_RELDATE "January 2015" +#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev)) + extern int mlx5_core_debug_mask; #define mlx5_core_dbg(__dev, format, ...) 
\ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index ae378c575deb..3e35611b19c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -115,6 +115,19 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, } EXPORT_SYMBOL_GPL(mlx5_query_port_ptys); +int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration) +{ + u32 out[MLX5_ST_SZ_DW(mlcr_reg)]; + u32 in[MLX5_ST_SZ_DW(mlcr_reg)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(mlcr_reg, in, local_port, 1); + MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MLCR, 0, 1); +} + int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, u32 *proto_cap, int proto_mask) { @@ -247,8 +260,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, } EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status); -static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, - int *max_mtu, int *oper_mtu, u8 port) +static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu, + u16 *max_mtu, u16 *oper_mtu, u8 port) { u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; @@ -268,7 +281,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu, *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu); } -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port) { u32 in[MLX5_ST_SZ_DW(pmtu_reg)]; u32 out[MLX5_ST_SZ_DW(pmtu_reg)]; @@ -283,20 +296,96 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port) } EXPORT_SYMBOL_GPL(mlx5_set_port_mtu); -void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port) { mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port); } EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu); -void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu, u8 port) { mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port); } EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu); +static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num) +{ + u32 out[MLX5_ST_SZ_DW(pmlp_reg)]; + u32 in[MLX5_ST_SZ_DW(pmlp_reg)]; + int module_mapping; + int err; + + memset(in, 0, sizeof(in)); + + MLX5_SET(pmlp_reg, in, local_port, 1); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), + MLX5_REG_PMLP, 0, 0); + if (err) + return err; + + module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping); + *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK; + + return 0; +} + +int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, + u16 offset, u16 size, u8 *data) +{ + u32 out[MLX5_ST_SZ_DW(mcia_reg)]; + u32 in[MLX5_ST_SZ_DW(mcia_reg)]; + int module_num; + u16 i2c_addr; + int status; + int err; + void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0); + + err = mlx5_query_module_num(dev, &module_num); + if (err) + return err; + + memset(in, 0, sizeof(in)); + size = min_t(int, size, MLX5_EEPROM_MAX_BYTES); + + if (offset < MLX5_EEPROM_PAGE_LENGTH && + offset + size > MLX5_EEPROM_PAGE_LENGTH) + /* Cross pages read, read until offset 256 in low page */ + size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; + + i2c_addr = MLX5_I2C_ADDR_LOW; + if (offset >= MLX5_EEPROM_PAGE_LENGTH) { + i2c_addr = MLX5_I2C_ADDR_HIGH; + 
offset -= MLX5_EEPROM_PAGE_LENGTH; + } + + MLX5_SET(mcia_reg, in, l, 0); + MLX5_SET(mcia_reg, in, module, module_num); + MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr); + MLX5_SET(mcia_reg, in, page_number, 0); + MLX5_SET(mcia_reg, in, device_address, offset); + MLX5_SET(mcia_reg, in, size, size); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_MCIA, 0, 0); + if (err) + return err; + + status = MLX5_GET(mcia_reg, out, status); + if (status) { + mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n", + status); + return -EIO; + } + + memcpy(data, ptr, size); + + return size; +} +EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom); + static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc, int pvlc_size, u8 local_port) { @@ -607,3 +696,52 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode) return err; } EXPORT_SYMBOL_GPL(mlx5_query_port_wol); + +static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, + int outlen) +{ + u32 in[MLX5_ST_SZ_DW(pcmr_reg)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(pcmr_reg, in, local_port, 1); + + return mlx5_core_access_reg(mdev, in, sizeof(in), out, + outlen, MLX5_REG_PCMR, 0, 0); +} + +static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen) +{ + u32 out[MLX5_ST_SZ_DW(pcmr_reg)]; + + return mlx5_core_access_reg(mdev, in, inlen, out, + sizeof(out), MLX5_REG_PCMR, 0, 1); +} + +int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable) +{ + u32 in[MLX5_ST_SZ_DW(pcmr_reg)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(pcmr_reg, in, local_port, 1); + MLX5_SET(pcmr_reg, in, fcs_chk, enable); + + return mlx5_set_ports_check(mdev, in, sizeof(in)); +} + +void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, + bool *enabled) +{ + u32 out[MLX5_ST_SZ_DW(pcmr_reg)]; + /* Default values for FW which do not support MLX5_REG_PCMR */ + *supported = false; + *enabled = true; + + if (!MLX5_CAP_GEN(mdev, ports_check)) + return; + + if (mlx5_query_ports_check(mdev, out, sizeof(out))) + return; + + *supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap)); + *enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk)); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index def289375ecb..b720a274220d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -538,3 +538,71 @@ void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev, mlx5_core_destroy_sq(dev, sq->qpn); } EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked); + +int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id) +{ + u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]; + u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)]; + int err; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER); + err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out)); + if (!err) + *counter_id = MLX5_GET(alloc_q_counter_out, out, + counter_set_id); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter); + +int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]; + u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(dealloc_q_counter_in, in, opcode, + MLX5_CMD_OP_DEALLOC_Q_COUNTER); + MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id); + return mlx5_cmd_exec_check_status(dev, in, 
sizeof(in), out, + sizeof(out)); +} +EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter); + +int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, + int reset, void *out, int out_size) +{ + u32 in[MLX5_ST_SZ_DW(query_q_counter_in)]; + + memset(in, 0, sizeof(in)); + + MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER); + MLX5_SET(query_q_counter_in, in, clear, reset); + MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id); + return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size); +} +EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter); + +int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id, + u32 *out_of_buffer) +{ + int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); + void *out; + int err; + + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen); + if (!err) + *out_of_buffer = MLX5_GET(query_q_counter_out, out, + out_of_buffer); + + kfree(out); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 7b24386794f9..d6a3f412ba9f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -140,7 +140,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) struct mlx5_core_sriov *sriov = &dev->priv.sriov; int err; - mlx5_core_dbg(dev, "requsted num_vfs %d\n", num_vfs); + mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs); if (!mlx5_core_is_pf(dev)) return -EPERM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 8ba080e441a1..5ff8af472bf5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -269,8 +269,10 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) { - iounmap(uar->map); - iounmap(uar->bf_map); + if (uar->map) + iounmap(uar->map); + else + iounmap(uar->bf_map); mlx5_cmd_free_uar(mdev, uar->index); } EXPORT_SYMBOL(mlx5_unmap_free_uar); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index bd518405859e..b69dadcfb897 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev, } EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address); +int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu) +{ + int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); + u32 *out; + int err; + + out = mlx5_vzalloc(outlen); + if (!out) + return -ENOMEM; + + err = mlx5_query_nic_vport_context(mdev, 0, out, outlen); + if (!err) + *mtu = MLX5_GET(query_nic_vport_context_out, out, + nic_vport_context.mtu); + + kvfree(out); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu); + +int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu) +{ + int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); + void *in; + int err; + + in = mlx5_vzalloc(inlen); + if (!in) + return -ENOMEM; + + MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1); + MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu); + + err = mlx5_modify_nic_vport_context(mdev, in, inlen); + + kvfree(in); + return err; +} +EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu); + int mlx5_query_nic_vport_mac_list(struct 
mlx5_core_dev *dev, u32 vport, enum mlx5_list_type list_type, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c index 9f10df25f3cd..f2fd1ef16da7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) return vxlan; } -int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) +static void mlx5e_vxlan_add_port(struct work_struct *work) { + struct mlx5e_vxlan_work *vxlan_work = + container_of(work, struct mlx5e_vxlan_work, work); + struct mlx5e_priv *priv = vxlan_work->priv; struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + u16 port = vxlan_work->port; struct mlx5e_vxlan *vxlan; int err; - err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port); - if (err) - return err; + if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port)) + goto free_work; vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); - if (!vxlan) { - err = -ENOMEM; + if (!vxlan) goto err_delete_port; - } vxlan->udp_port = port; @@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) if (err) goto err_free; - return 0; + goto free_work; err_free: kfree(vxlan); err_delete_port: mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); - return err; +free_work: + kfree(vxlan_work); } static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) @@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) kfree(vxlan); } -void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) +static void mlx5e_vxlan_del_port(struct work_struct *work) { - if (!mlx5e_vxlan_lookup_port(priv, port)) - return; + struct mlx5e_vxlan_work *vxlan_work = + container_of(work, struct mlx5e_vxlan_work, work); + struct mlx5e_priv *priv = vxlan_work->priv; + u16 port = vxlan_work->port; __mlx5e_vxlan_core_del_port(priv, port); + + kfree(vxlan_work); +} + +void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family, + u16 port, int add) +{ + struct mlx5e_vxlan_work *vxlan_work; + + vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC); + if (!vxlan_work) + return; + + if (add) + INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port); + else + INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port); + + vxlan_work->priv = priv; + vxlan_work->port = port; + vxlan_work->sa_family = sa_family; + queue_work(priv->wq, &vxlan_work->work); } void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h index a01685056ab1..5def12c048e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -39,6 +39,13 @@ struct mlx5e_vxlan { u16 udp_port; }; +struct mlx5e_vxlan_work { + struct work_struct work; + struct mlx5e_priv *priv; + sa_family_t sa_family; + u16 port; +}; + static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) { return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && @@ -46,9 +53,10 @@ static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) } void mlx5e_vxlan_init(struct mlx5e_priv *priv); -int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); -void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port); -struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); +void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, 
sa_family_t sa_family, + u16 port, int add); +struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); + #endif /* __MLX5_VXLAN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 2ad7f67854d5..5989f7cb5462 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -50,3 +50,11 @@ config MLXSW_SPECTRUM To compile this driver as a module, choose M here: the module will be called mlxsw_spectrum. + +config MLXSW_SPECTRUM_DCB + bool "Data Center Bridging (DCB) support" + depends on MLXSW_SPECTRUM && DCB + default y + ---help--- + Say Y here if you want to use Data Center Bridging (DCB) in the + driver. diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 584cac444852..9b5ebf84c051 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -8,3 +8,4 @@ mlxsw_switchx2-objs := switchx2.o obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_switchdev.o +mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f69f6280519f..b0a0b01bb4ef 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -44,7 +44,7 @@ #include <linux/seq_file.h> #include <linux/u64_stats_sync.h> #include <linux/netdevice.h> -#include <linux/wait.h> +#include <linux/completion.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/types.h> @@ -55,6 +55,7 @@ #include <linux/mutex.h> #include <linux/rcupdate.h> #include <linux/slab.h> +#include <linux/workqueue.h> #include <asm/byteorder.h> #include <net/devlink.h> @@ -73,6 +74,8 @@ static const char mlxsw_core_driver_name[] = "mlxsw_core"; static struct dentry *mlxsw_core_dbg_root; +static struct workqueue_struct *mlxsw_wq; + struct mlxsw_core_pcpu_stats { u64 trap_rx_packets[MLXSW_TRAP_ID_MAX]; u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX]; @@ -93,11 +96,9 @@ struct mlxsw_core { struct list_head rx_listener_list; struct list_head event_listener_list; struct { - struct sk_buff *resp_skb; - u64 tid; - wait_queue_head_t wait; - bool trans_active; - struct mutex lock; /* One EMAD transaction at a time. 
*/ + atomic64_t tid; + struct list_head trans_list; + spinlock_t trans_list_lock; /* protects trans_list writes */ bool use_emad; } emad; struct mlxsw_core_pcpu_stats __percpu *pcpu_stats; @@ -114,6 +115,12 @@ struct mlxsw_core { /* driver_priv has to be always the last item */ }; +void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core) +{ + return mlxsw_core->driver_priv; +} +EXPORT_SYMBOL(mlxsw_core_driver_priv); + struct mlxsw_rx_listener_item { struct list_head list; struct mlxsw_rx_listener rxl; @@ -284,7 +291,7 @@ static void mlxsw_emad_pack_reg_tlv(char *reg_tlv, static void mlxsw_emad_pack_op_tlv(char *op_tlv, const struct mlxsw_reg_info *reg, enum mlxsw_core_reg_access_type type, - struct mlxsw_core *mlxsw_core) + u64 tid) { mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP); mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN); @@ -300,7 +307,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv, MLXSW_EMAD_OP_TLV_METHOD_WRITE); mlxsw_emad_op_tlv_class_set(op_tlv, MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS); - mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid); + mlxsw_emad_op_tlv_tid_set(op_tlv, tid); } static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb) @@ -322,7 +329,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb, const struct mlxsw_reg_info *reg, char *payload, enum mlxsw_core_reg_access_type type, - struct mlxsw_core *mlxsw_core) + u64 tid) { char *buf; @@ -333,7 +340,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb, mlxsw_emad_pack_reg_tlv(buf, reg, payload); buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)); - mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core); + mlxsw_emad_pack_op_tlv(buf, reg, type, tid); mlxsw_emad_construct_eth_hdr(skb); } @@ -370,58 +377,16 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb) return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE); } -#define MLXSW_EMAD_TIMEOUT_MS 200 - -static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) -{ - int err; - int ret; - - mlxsw_core->emad.trans_active = true; - - err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info); - if (err) { - dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n", - mlxsw_core->emad.tid); - dev_kfree_skb(skb); - goto trans_inactive_out; - } - - ret = wait_event_timeout(mlxsw_core->emad.wait, - !(mlxsw_core->emad.trans_active), - msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS)); - if (!ret) { - dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n", - mlxsw_core->emad.tid); - err = -EIO; - goto trans_inactive_out; - } - - return 0; - -trans_inactive_out: - mlxsw_core->emad.trans_active = false; - return err; -} - -static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, - char *op_tlv) +static int mlxsw_emad_process_status(char *op_tlv, + enum mlxsw_emad_op_tlv_status *p_status) { - enum mlxsw_emad_op_tlv_status status; - u64 tid; + *p_status = mlxsw_emad_op_tlv_status_get(op_tlv); - status = mlxsw_emad_op_tlv_status_get(op_tlv); - tid = mlxsw_emad_op_tlv_tid_get(op_tlv); - - switch (status) { + switch (*p_status) { case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS: return 0; case MLXSW_EMAD_OP_TLV_STATUS_BUSY: case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK: - dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n", - tid, status, mlxsw_emad_op_tlv_status_str(status)); return -EAGAIN; case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED: case 
MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV: @@ -432,70 +397,150 @@ static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core, case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE: case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR: default: - dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n", - tid, status, mlxsw_emad_op_tlv_status_str(status)); return -EIO; } } -static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb) +static int +mlxsw_emad_process_status_skb(struct sk_buff *skb, + enum mlxsw_emad_op_tlv_status *p_status) +{ + return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status); +} + +struct mlxsw_reg_trans { + struct list_head list; + struct list_head bulk_list; + struct mlxsw_core *core; + struct sk_buff *tx_skb; + struct mlxsw_tx_info tx_info; + struct delayed_work timeout_dw; + unsigned int retries; + u64 tid; + struct completion completion; + atomic_t active; + mlxsw_reg_trans_cb_t *cb; + unsigned long cb_priv; + const struct mlxsw_reg_info *reg; + enum mlxsw_core_reg_access_type type; + int err; + enum mlxsw_emad_op_tlv_status emad_status; + struct rcu_head rcu; +}; + +#define MLXSW_EMAD_TIMEOUT_MS 200 + +static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) { - return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb)); + unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); + + mlxsw_core_schedule_dw(&trans->timeout_dw, timeout); } static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, - struct sk_buff *skb, - const struct mlxsw_tx_info *tx_info) + struct mlxsw_reg_trans *trans) { - struct sk_buff *trans_skb; - int n_retry; + struct sk_buff *skb; int err; - n_retry = 0; -retry: - /* We copy the EMAD to a new skb, since we might need - * to retransmit it in case of failure. 
- */ - trans_skb = skb_copy(skb, GFP_KERNEL); - if (!trans_skb) { - err = -ENOMEM; - goto out; + skb = skb_copy(trans->tx_skb, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + atomic_set(&trans->active, 1); + err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info); + if (err) { + dev_kfree_skb(skb); + return err; } + mlxsw_emad_trans_timeout_schedule(trans); + return 0; +} - err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info); - if (!err) { - struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb; +static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err) +{ + struct mlxsw_core *mlxsw_core = trans->core; + + dev_kfree_skb(trans->tx_skb); + spin_lock_bh(&mlxsw_core->emad.trans_list_lock); + list_del_rcu(&trans->list); + spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); + trans->err = err; + complete(&trans->completion); +} - err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb); - if (err) - dev_kfree_skb(resp_skb); - if (!err || err != -EAGAIN) - goto out; +static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core, + struct mlxsw_reg_trans *trans) +{ + int err; + + if (trans->retries < MLXSW_EMAD_MAX_RETRY) { + trans->retries++; + err = mlxsw_emad_transmit(trans->core, trans); + if (err == 0) + return; + } else { + err = -EIO; } - if (n_retry++ < MLXSW_EMAD_MAX_RETRY) - goto retry; + mlxsw_emad_trans_finish(trans, err); +} -out: - dev_kfree_skb(skb); - mlxsw_core->emad.tid++; - return err; +static void mlxsw_emad_trans_timeout_work(struct work_struct *work) +{ + struct mlxsw_reg_trans *trans = container_of(work, + struct mlxsw_reg_trans, + timeout_dw.work); + + if (!atomic_dec_and_test(&trans->active)) + return; + + mlxsw_emad_transmit_retry(trans->core, trans); +} + +static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core, + struct mlxsw_reg_trans *trans, + struct sk_buff *skb) +{ + int err; + + if (!atomic_dec_and_test(&trans->active)) + return; + + err = mlxsw_emad_process_status_skb(skb, &trans->emad_status); + if (err == -EAGAIN) { + mlxsw_emad_transmit_retry(mlxsw_core, trans); + } else { + if (err == 0) { + char *op_tlv = mlxsw_emad_op_tlv(skb); + + if (trans->cb) + trans->cb(mlxsw_core, + mlxsw_emad_reg_payload(op_tlv), + trans->reg->len, trans->cb_priv); + } + mlxsw_emad_trans_finish(trans, err); + } } +/* called with rcu read lock held */ static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port, void *priv) { struct mlxsw_core *mlxsw_core = priv; + struct mlxsw_reg_trans *trans; - if (mlxsw_emad_is_resp(skb) && - mlxsw_core->emad.trans_active && - mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) { - mlxsw_core->emad.resp_skb = skb; - mlxsw_core->emad.trans_active = false; - wake_up(&mlxsw_core->emad.wait); - } else { - dev_kfree_skb(skb); + if (!mlxsw_emad_is_resp(skb)) + goto free_skb; + + list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) { + if (mlxsw_emad_get_tid(skb) == trans->tid) { + mlxsw_emad_process_response(mlxsw_core, trans, skb); + break; + } } + +free_skb: + dev_kfree_skb(skb); } static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = { @@ -522,18 +567,19 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core) static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) { + u64 tid; int err; /* Set the upper 32 bits of the transaction ID field to a random * number. This allows us to discard EMADs addressed to other * devices. 
*/
*/ - get_random_bytes(&mlxsw_core->emad.tid, 4); - mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32; + get_random_bytes(&tid, 4); + tid <<= 32; + atomic64_set(&mlxsw_core->emad.tid, tid); - init_waitqueue_head(&mlxsw_core->emad.wait); - mlxsw_core->emad.trans_active = false; - mutex_init(&mlxsw_core->emad.lock); + INIT_LIST_HEAD(&mlxsw_core->emad.trans_list); + spin_lock_init(&mlxsw_core->emad.trans_list_lock); err = mlxsw_core_rx_listener_register(mlxsw_core, &mlxsw_emad_rx_listener, @@ -591,6 +637,59 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, return skb; } +static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, + char *payload, + enum mlxsw_core_reg_access_type type, + struct mlxsw_reg_trans *trans, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, + unsigned long cb_priv, u64 tid) +{ + struct sk_buff *skb; + int err; + + dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n", + trans->tid, reg->id, mlxsw_reg_id_str(reg->id), + mlxsw_core_reg_access_type_str(type)); + + skb = mlxsw_emad_alloc(mlxsw_core, reg->len); + if (!skb) + return -ENOMEM; + + list_add_tail(&trans->bulk_list, bulk_list); + trans->core = mlxsw_core; + trans->tx_skb = skb; + trans->tx_info.local_port = MLXSW_PORT_CPU_PORT; + trans->tx_info.is_emad = true; + INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work); + trans->tid = tid; + init_completion(&trans->completion); + trans->cb = cb; + trans->cb_priv = cb_priv; + trans->reg = reg; + trans->type = type; + + mlxsw_emad_construct(skb, reg, payload, type, trans->tid); + mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info); + + spin_lock_bh(&mlxsw_core->emad.trans_list_lock); + list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list); + spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); + err = mlxsw_emad_transmit(mlxsw_core, trans); + if (err) + goto err_out; + return 0; + +err_out: + spin_lock_bh(&mlxsw_core->emad.trans_list_lock); + list_del_rcu(&trans->list); + spin_unlock_bh(&mlxsw_core->emad.trans_list_lock); + list_del(&trans->bulk_list); + dev_kfree_skb(trans->tx_skb); + return err; +} + /***************** * Core functions *****************/ @@ -680,24 +779,6 @@ static const struct file_operations mlxsw_core_rx_stats_dbg_ops = { .llseek = seq_lseek }; -static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, - const char *buf, size_t size) -{ - __be32 *m = (__be32 *) buf; - int i; - int count = size / sizeof(__be32); - - for (i = count - 1; i >= 0; i--) - if (m[i]) - break; - i++; - count = i ? 
i : 1; - for (i = 0; i < count; i += 4) - dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n", - i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]), - be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3])); -} - int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver) { spin_lock(&mlxsw_core_driver_list_lock); @@ -795,8 +876,7 @@ static int mlxsw_devlink_port_split(struct devlink *devlink, return -EINVAL; if (!mlxsw_core->driver->port_split) return -EOPNOTSUPP; - return mlxsw_core->driver->port_split(mlxsw_core->driver_priv, - port_index, count); + return mlxsw_core->driver->port_split(mlxsw_core, port_index, count); } static int mlxsw_devlink_port_unsplit(struct devlink *devlink, @@ -808,13 +888,171 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink, return -EINVAL; if (!mlxsw_core->driver->port_unsplit) return -EOPNOTSUPP; - return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv, - port_index); + return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index); +} + +static int +mlxsw_devlink_sb_pool_get(struct devlink *devlink, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + + if (!mlxsw_driver->sb_pool_get) + return -EOPNOTSUPP; + return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index, + pool_index, pool_info); +} + +static int +mlxsw_devlink_sb_pool_set(struct devlink *devlink, + unsigned int sb_index, u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + + if (!mlxsw_driver->sb_pool_set) + return -EOPNOTSUPP; + return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index, + pool_index, size, threshold_type); +} + +static void *__dl_port(struct devlink_port *devlink_port) +{ + return container_of(devlink_port, struct mlxsw_core_port, devlink_port); +} + +static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->sb_port_pool_get) + return -EOPNOTSUPP; + return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index, + pool_index, p_threshold); +} + +static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port, + unsigned int sb_index, u16 pool_index, + u32 threshold) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->sb_port_pool_set) + return -EOPNOTSUPP; + return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index, + pool_index, threshold); +} + +static int +mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->sb_tc_pool_bind_get) + return -EOPNOTSUPP; + return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index, + 
tc_index, pool_type, + p_pool_index, p_threshold); +} + +static int +mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->sb_tc_pool_bind_set) + return -EOPNOTSUPP; + return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index, + tc_index, pool_type, + pool_index, threshold); +} + +static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink, + unsigned int sb_index) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + + if (!mlxsw_driver->sb_occ_snapshot) + return -EOPNOTSUPP; + return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index); +} + +static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink, + unsigned int sb_index) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + + if (!mlxsw_driver->sb_occ_max_clear) + return -EOPNOTSUPP; + return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index); +} + +static int +mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->sb_occ_port_pool_get) + return -EOPNOTSUPP; + return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index, + pool_index, p_cur, p_max); +} + +static int +mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink); + struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver; + struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port); + + if (!mlxsw_driver->sb_occ_tc_port_bind_get) + return -EOPNOTSUPP; + return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port, + sb_index, tc_index, + pool_type, p_cur, p_max); } static const struct devlink_ops mlxsw_devlink_ops = { - .port_split = mlxsw_devlink_port_split, - .port_unsplit = mlxsw_devlink_port_unsplit, + .port_split = mlxsw_devlink_port_split, + .port_unsplit = mlxsw_devlink_port_unsplit, + .sb_pool_get = mlxsw_devlink_sb_pool_get, + .sb_pool_set = mlxsw_devlink_sb_pool_set, + .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get, + .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set, + .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set, + .sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot, + .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear, + .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get, }; int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, @@ -880,8 +1118,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_devlink_register; - err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core, - mlxsw_bus_info); + err = mlxsw_driver->init(mlxsw_core, 
mlxsw_bus_info); if (err) goto err_driver_init; @@ -892,7 +1129,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, return 0; err_debugfs_init: - mlxsw_core->driver->fini(mlxsw_core->driver_priv); + mlxsw_core->driver->fini(mlxsw_core); err_driver_init: devlink_unregister(devlink); err_devlink_register: @@ -918,7 +1155,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) struct devlink *devlink = priv_to_devlink(mlxsw_core); mlxsw_core_debugfs_fini(mlxsw_core); - mlxsw_core->driver->fini(mlxsw_core->driver_priv); + mlxsw_core->driver->fini(mlxsw_core); devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); mlxsw_core->bus->fini(mlxsw_core->bus_priv); @@ -929,26 +1166,17 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) } EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); -static struct mlxsw_core *__mlxsw_core_get(void *driver_priv) -{ - return container_of(driver_priv, struct mlxsw_core, driver_priv); -} - -bool mlxsw_core_skb_transmit_busy(void *driver_priv, +bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, const struct mlxsw_tx_info *tx_info) { - struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv); - return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv, tx_info); } EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy); -int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb, +int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, const struct mlxsw_tx_info *tx_info) { - struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv); - return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb, tx_info); } @@ -1108,56 +1336,112 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_event_listener_unregister); +static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core) +{ + return atomic64_inc_return(&mlxsw_core->emad.tid); +} + static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core, const struct mlxsw_reg_info *reg, char *payload, - enum mlxsw_core_reg_access_type type) + enum mlxsw_core_reg_access_type type, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, + unsigned long cb_priv) { + u64 tid = mlxsw_core_tid_get(mlxsw_core); + struct mlxsw_reg_trans *trans; int err; - char *op_tlv; - struct sk_buff *skb; - struct mlxsw_tx_info tx_info = { - .local_port = MLXSW_PORT_CPU_PORT, - .is_emad = true, - }; - skb = mlxsw_emad_alloc(mlxsw_core, reg->len); - if (!skb) + trans = kzalloc(sizeof(*trans), GFP_KERNEL); + if (!trans) return -ENOMEM; - mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core); - mlxsw_core->driver->txhdr_construct(skb, &tx_info); + err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans, + bulk_list, cb, cb_priv, tid); + if (err) { + kfree(trans); + return err; + } + return 0; +} - dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n", - mlxsw_core->emad.tid); - mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len); +int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv) +{ + return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, + MLXSW_CORE_REG_ACCESS_TYPE_QUERY, + bulk_list, cb, cb_priv); +} +EXPORT_SYMBOL(mlxsw_reg_trans_query); - err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info); - if (!err) { - op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb); - memcpy(payload, 
mlxsw_emad_reg_payload(op_tlv), - reg->len); +int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv) +{ + return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload, + MLXSW_CORE_REG_ACCESS_TYPE_WRITE, + bulk_list, cb, cb_priv); +} +EXPORT_SYMBOL(mlxsw_reg_trans_write); - dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n", - mlxsw_core->emad.tid - 1); - mlxsw_core_buf_dump_dbg(mlxsw_core, - mlxsw_core->emad.resp_skb->data, - mlxsw_core->emad.resp_skb->len); +static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans) +{ + struct mlxsw_core *mlxsw_core = trans->core; + int err; - dev_kfree_skb(mlxsw_core->emad.resp_skb); - } + wait_for_completion(&trans->completion); + cancel_delayed_work_sync(&trans->timeout_dw); + err = trans->err; + if (trans->retries) + dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n", + trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid); + if (err) + dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n", + trans->tid, trans->reg->id, + mlxsw_reg_id_str(trans->reg->id), + mlxsw_core_reg_access_type_str(trans->type), + trans->emad_status, + mlxsw_emad_op_tlv_status_str(trans->emad_status)); + + list_del(&trans->bulk_list); + kfree_rcu(trans, rcu); return err; } +int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list) +{ + struct mlxsw_reg_trans *trans; + struct mlxsw_reg_trans *tmp; + int sum_err = 0; + int err; + + list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) { + err = mlxsw_reg_trans_wait(trans); + if (err && sum_err == 0) + sum_err = err; /* first error to be returned */ + } + return sum_err; +} +EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait); + static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, const struct mlxsw_reg_info *reg, char *payload, enum mlxsw_core_reg_access_type type) { + enum mlxsw_emad_op_tlv_status status; int err, n_retry; char *in_mbox, *out_mbox, *tmp; + dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n", + reg->id, mlxsw_reg_id_str(reg->id), + mlxsw_core_reg_access_type_str(type)); + in_mbox = mlxsw_cmd_mbox_alloc(); if (!in_mbox) return -ENOMEM; @@ -1168,7 +1452,8 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, goto free_in_mbox; } - mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core); + mlxsw_emad_pack_op_tlv(in_mbox, reg, type, + mlxsw_core_tid_get(mlxsw_core)); tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32); mlxsw_emad_pack_reg_tlv(tmp, reg, payload); @@ -1176,60 +1461,61 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core, retry: err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox); if (!err) { - err = mlxsw_emad_process_status(mlxsw_core, out_mbox); - if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY) - goto retry; + err = mlxsw_emad_process_status(out_mbox, &status); + if (err) { + if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY) + goto retry; + dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n", + status, mlxsw_emad_op_tlv_status_str(status)); + } } if (!err) memcpy(payload, mlxsw_emad_reg_payload(out_mbox), reg->len); - mlxsw_core->emad.tid++; mlxsw_cmd_mbox_free(out_mbox); free_in_mbox: mlxsw_cmd_mbox_free(in_mbox); + if (err) + dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n", + reg->id, mlxsw_reg_id_str(reg->id), + 
mlxsw_core_reg_access_type_str(type)); return err; } +static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core, + char *payload, size_t payload_len, + unsigned long cb_priv) +{ + char *orig_payload = (char *) cb_priv; + + memcpy(orig_payload, payload, payload_len); +} + static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core, const struct mlxsw_reg_info *reg, char *payload, enum mlxsw_core_reg_access_type type) { - u64 cur_tid; + LIST_HEAD(bulk_list); int err; - if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) { - dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n", - reg->id, mlxsw_reg_id_str(reg->id), - mlxsw_core_reg_access_type_str(type)); - return -EINTR; - } - - cur_tid = mlxsw_core->emad.tid; - dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n", - cur_tid, reg->id, mlxsw_reg_id_str(reg->id), - mlxsw_core_reg_access_type_str(type)); - /* During initialization EMAD interface is not available to us, * so we default to command interface. We switch to EMAD interface * after setting the appropriate traps. */ if (!mlxsw_core->emad.use_emad) - err = mlxsw_core_reg_access_cmd(mlxsw_core, reg, - payload, type); - else - err = mlxsw_core_reg_access_emad(mlxsw_core, reg, + return mlxsw_core_reg_access_cmd(mlxsw_core, reg, payload, type); + err = mlxsw_core_reg_access_emad(mlxsw_core, reg, + payload, type, &bulk_list, + mlxsw_core_reg_access_cb, + (unsigned long) payload); if (err) - dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n", - cur_tid, reg->id, mlxsw_reg_id_str(reg->id), - mlxsw_core_reg_access_type_str(type)); - - mutex_unlock(&mlxsw_core->emad.lock); - return err; + return err; + return mlxsw_reg_trans_bulk_wait(&bulk_list); } int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, @@ -1358,6 +1644,46 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear); +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, + struct mlxsw_core_port *mlxsw_core_port, u8 local_port, + struct net_device *dev, bool split, u32 split_group) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + + if (split) + devlink_port_split_set(devlink_port, split_group); + devlink_port_type_eth_set(devlink_port, dev); + return devlink_port_register(devlink, devlink_port, local_port); +} +EXPORT_SYMBOL(mlxsw_core_port_init); + +void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port) +{ + struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port; + + devlink_port_unregister(devlink_port); +} +EXPORT_SYMBOL(mlxsw_core_port_fini); + +static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core, + const char *buf, size_t size) +{ + __be32 *m = (__be32 *) buf; + int i; + int count = size / sizeof(__be32); + + for (i = count - 1; i >= 0; i--) + if (m[i]) + break; + i++; + count = i ? 
i : 1; + for (i = 0; i < count; i += 4) + dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n", + i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]), + be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3])); +} + int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, u32 in_mod, bool out_mbox_direct, char *in_mbox, size_t in_mbox_size, @@ -1400,17 +1726,35 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod, } EXPORT_SYMBOL(mlxsw_cmd_exec); +int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay) +{ + return queue_delayed_work(mlxsw_wq, dwork, delay); +} +EXPORT_SYMBOL(mlxsw_core_schedule_dw); + static int __init mlxsw_core_module_init(void) { - mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); - if (!mlxsw_core_dbg_root) + int err; + + mlxsw_wq = create_workqueue(mlxsw_core_driver_name); + if (!mlxsw_wq) return -ENOMEM; + mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL); + if (!mlxsw_core_dbg_root) { + err = -ENOMEM; + goto err_debugfs_create_dir; + } return 0; + +err_debugfs_create_dir: + destroy_workqueue(mlxsw_wq); + return err; } static void __exit mlxsw_core_module_exit(void) { debugfs_remove_recursive(mlxsw_core_dbg_root); + destroy_workqueue(mlxsw_wq); } module_init(mlxsw_core_module_init); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index c73d1c0792a6..436bc49df6ab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -43,6 +43,8 @@ #include <linux/gfp.h> #include <linux/types.h> #include <linux/skbuff.h> +#include <linux/workqueue.h> +#include <net/devlink.h> #include "trap.h" #include "reg.h" @@ -61,6 +63,8 @@ struct mlxsw_driver; struct mlxsw_bus; struct mlxsw_bus_info; +void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core); + int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver); void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver); @@ -74,10 +78,9 @@ struct mlxsw_tx_info { bool is_emad; }; -bool mlxsw_core_skb_transmit_busy(void *driver_priv, +bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core, const struct mlxsw_tx_info *tx_info); - -int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb, +int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); struct mlxsw_rx_listener { @@ -106,6 +109,19 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core, const struct mlxsw_event_listener *el, void *priv); +typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload, + size_t payload_len, unsigned long cb_priv); + +int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv); +int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core, + const struct mlxsw_reg_info *reg, char *payload, + struct list_head *bulk_list, + mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv); +int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list); + int mlxsw_reg_query(struct mlxsw_core *mlxsw_core, const struct mlxsw_reg_info *reg, char *payload); int mlxsw_reg_write(struct mlxsw_core *mlxsw_core, @@ -131,6 +147,26 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core, void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core, u16 lag_id, u8 local_port); +struct mlxsw_core_port { + struct 
devlink_port devlink_port; +}; + +static inline void * +mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port) +{ + /* mlxsw_core_port is ensured to always be the first field in driver + * port structure. + */ + return mlxsw_core_port; +} + +int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, + struct mlxsw_core_port *mlxsw_core_port, u8 local_port, + struct net_device *dev, bool split, u32 split_group); +void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port); + +int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay); + #define MLXSW_CONFIG_PROFILE_SWID_COUNT 8 struct mlxsw_swid_config { @@ -183,11 +219,43 @@ struct mlxsw_driver { const char *kind; struct module *owner; size_t priv_size; - int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core, + int (*init)(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info); - void (*fini)(void *driver_priv); - int (*port_split)(void *driver_priv, u8 local_port, unsigned int count); - int (*port_unsplit)(void *driver_priv, u8 local_port); + void (*fini)(struct mlxsw_core *mlxsw_core); + int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port, + unsigned int count); + int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port); + int (*sb_pool_get)(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info); + int (*sb_pool_set)(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type); + int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold); + int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 threshold); + int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold); + int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold); + int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core, + unsigned int sb_index); + int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core, + unsigned int sb_index); + int (*sb_occ_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max); + int (*sb_occ_tc_port_bind_get)(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max); void (*txhdr_construct)(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); u8 txhdr_len; diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index ffe4c0305733..1977e7a5c530 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1805,6 +1805,184 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, } } +/* QTCT - QoS Switch Traffic Class Table + * ------------------------------------- + * Configures the mapping between the packet switch priority and the + * traffic class on the transmit port. + */ +#define MLXSW_REG_QTCT_ID 0x400A +#define MLXSW_REG_QTCT_LEN 0x08 + +static const struct mlxsw_reg_info mlxsw_reg_qtct = { + .id = MLXSW_REG_QTCT_ID, + .len = MLXSW_REG_QTCT_LEN, +}; + +/* reg_qtct_local_port + * Local port number. 
+ * Access: Index + * + * Note: CPU port is not supported. + */ +MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8); + +/* reg_qtct_sub_port + * Virtual port within the physical port. + * Should be set to 0 when virtual ports are not enabled on the port. + * Access: Index + */ +MLXSW_ITEM32(reg, qtct, sub_port, 0x00, 8, 8); + +/* reg_qtct_switch_prio + * Switch priority. + * Access: Index + */ +MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4); + +/* reg_qtct_tclass + * Traffic class. + * Default values: + * switch_prio 0 : tclass 1 + * switch_prio 1 : tclass 0 + * switch_prio i : tclass i, for i > 1 + * Access: RW + */ +MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4); + +static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port, + u8 switch_prio, u8 tclass) +{ + MLXSW_REG_ZERO(qtct, payload); + mlxsw_reg_qtct_local_port_set(payload, local_port); + mlxsw_reg_qtct_switch_prio_set(payload, switch_prio); + mlxsw_reg_qtct_tclass_set(payload, tclass); +} + +/* QEEC - QoS ETS Element Configuration Register + * --------------------------------------------- + * Configures the ETS elements. + */ +#define MLXSW_REG_QEEC_ID 0x400D +#define MLXSW_REG_QEEC_LEN 0x1C + +static const struct mlxsw_reg_info mlxsw_reg_qeec = { + .id = MLXSW_REG_QEEC_ID, + .len = MLXSW_REG_QEEC_LEN, +}; + +/* reg_qeec_local_port + * Local port number. + * Access: Index + * + * Note: CPU port is supported. + */ +MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8); + +enum mlxsw_reg_qeec_hr { + MLXSW_REG_QEEC_HIERARCY_PORT, + MLXSW_REG_QEEC_HIERARCY_GROUP, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, + MLXSW_REG_QEEC_HIERARCY_TC, +}; + +/* reg_qeec_element_hierarchy + * 0 - Port + * 1 - Group + * 2 - Subgroup + * 3 - Traffic Class + * Access: Index + */ +MLXSW_ITEM32(reg, qeec, element_hierarchy, 0x04, 16, 4); + +/* reg_qeec_element_index + * The index of the element in the hierarchy. + * Access: Index + */ +MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8); + +/* reg_qeec_next_element_index + * The index of the next (lower) element in the hierarchy. + * Access: RW + * + * Note: Reserved for element_hierarchy 0. + */ +MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8); + +enum { + MLXSW_REG_QEEC_BYTES_MODE, + MLXSW_REG_QEEC_PACKETS_MODE, +}; + +/* reg_qeec_pb + * Packets or bytes mode. + * 0 - Bytes mode + * 1 - Packets mode + * Access: RW + * + * Note: Used for max shaper configuration. For Spectrum, packets mode + * is supported only for traffic classes of CPU port. + */ +MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1); + +/* reg_qeec_mase + * Max shaper configuration enable. Enables configuration of the max + * shaper on this ETS element. + * 0 - Disable + * 1 - Enable + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1); + +/* A large max rate will disable the max shaper. */ +#define MLXSW_REG_QEEC_MAS_DIS 200000000 /* Kbps */ + +/* reg_qeec_max_shaper_rate + * Max shaper information rate. + * For CPU port, can only be configured for port hierarchy. + * When in bytes mode, value is specified in units of 1000bps. + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28); + +/* reg_qeec_de + * DWRR configuration enable. Enables configuration of the dwrr and + * dwrr_weight. + * 0 - Disable + * 1 - Enable + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, de, 0x18, 31, 1); + +/* reg_qeec_dwrr + * Transmission selection algorithm to use on the link going down from + * the ETS element. 
+ * 0 - Strict priority + * 1 - DWRR + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1); + +/* reg_qeec_dwrr_weight + * DWRR weight on the link going down from the ETS element. The + * percentage of bandwidth guaranteed to an ETS element within + * its hierarchy. The sum of all weights across all ETS elements + * within one hierarchy should be equal to 100. Reserved when + * transmission selection algorithm is strict priority. + * Access: RW + */ +MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8); + +static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port, + enum mlxsw_reg_qeec_hr hr, u8 index, + u8 next_index) +{ + MLXSW_REG_ZERO(qeec, payload); + mlxsw_reg_qeec_local_port_set(payload, local_port); + mlxsw_reg_qeec_element_hierarchy_set(payload, hr); + mlxsw_reg_qeec_element_index_set(payload, index); + mlxsw_reg_qeec_next_element_index_set(payload, next_index); +} + /* PMLP - Ports Module to Local Port Register * ------------------------------------------ * Configures the assignment of modules to local ports. @@ -2141,6 +2319,145 @@ static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port, mlxsw_reg_paos_e_set(payload, 1); } +/* PFCC - Ports Flow Control Configuration Register + * ------------------------------------------------ + * Configures and retrieves the per port flow control configuration. + */ +#define MLXSW_REG_PFCC_ID 0x5007 +#define MLXSW_REG_PFCC_LEN 0x20 + +static const struct mlxsw_reg_info mlxsw_reg_pfcc = { + .id = MLXSW_REG_PFCC_ID, + .len = MLXSW_REG_PFCC_LEN, +}; + +/* reg_pfcc_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8); + +/* reg_pfcc_pnat + * Port number access type. Determines the way local_port is interpreted: + * 0 - Local port number. + * 1 - IB / label port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pfcc, pnat, 0x00, 14, 2); + +/* reg_pfcc_shl_cap + * Send to higher layers capabilities: + * 0 - No capability of sending Pause and PFC frames to higher layers. + * 1 - Device has capability of sending Pause and PFC frames to higher + * layers. + * Access: RO + */ +MLXSW_ITEM32(reg, pfcc, shl_cap, 0x00, 1, 1); + +/* reg_pfcc_shl_opr + * Send to higher layers operation: + * 0 - Pause and PFC frames are handled by the port (default). + * 1 - Pause and PFC frames are handled by the port and also sent to + * higher layers. Only valid if shl_cap = 1. + * Access: RW + */ +MLXSW_ITEM32(reg, pfcc, shl_opr, 0x00, 0, 1); + +/* reg_pfcc_ppan + * Pause policy auto negotiation. + * 0 - Disabled. Generate / ignore Pause frames based on pptx / pprtx. + * 1 - Enabled. When auto-negotiation is performed, set the Pause policy + * based on the auto-negotiation resolution. + * Access: RW + * + * Note: The auto-negotiation advertisement is set according to pptx and + * pprtx. When PFC is set on Tx / Rx, ppan must be set to 0. + */ +MLXSW_ITEM32(reg, pfcc, ppan, 0x04, 28, 4); + +/* reg_pfcc_prio_mask_tx + * Bit per priority indicating if Tx flow control policy should be + * updated based on bit pfctx. + * Access: WO + */ +MLXSW_ITEM32(reg, pfcc, prio_mask_tx, 0x04, 16, 8); + +/* reg_pfcc_prio_mask_rx + * Bit per priority indicating if Rx flow control policy should be + * updated based on bit pfcrx. + * Access: WO + */ +MLXSW_ITEM32(reg, pfcc, prio_mask_rx, 0x04, 0, 8); + +/* reg_pfcc_pptx + * Admin Pause policy on Tx. + * 0 - Never generate Pause frames (default). + * 1 - Generate Pause frames according to Rx buffer threshold. 
+ * Access: RW + */ +MLXSW_ITEM32(reg, pfcc, pptx, 0x08, 31, 1); + +/* reg_pfcc_aptx + * Active (operational) Pause policy on Tx. + * 0 - Never generate Pause frames. + * 1 - Generate Pause frames according to Rx buffer threshold. + * Access: RO + */ +MLXSW_ITEM32(reg, pfcc, aptx, 0x08, 30, 1); + +/* reg_pfcc_pfctx + * Priority based flow control policy on Tx[7:0]. Per-priority bit mask: + * 0 - Never generate priority Pause frames on the specified priority + * (default). + * 1 - Generate priority Pause frames according to Rx buffer threshold on + * the specified priority. + * Access: RW + * + * Note: pfctx and pptx must be mutually exclusive. + */ +MLXSW_ITEM32(reg, pfcc, pfctx, 0x08, 16, 8); + +/* reg_pfcc_pprx + * Admin Pause policy on Rx. + * 0 - Ignore received Pause frames (default). + * 1 - Respect received Pause frames. + * Access: RW + */ +MLXSW_ITEM32(reg, pfcc, pprx, 0x0C, 31, 1); + +/* reg_pfcc_aprx + * Active (operational) Pause policy on Rx. + * 0 - Ignore received Pause frames. + * 1 - Respect received Pause frames. + * Access: RO + */ +MLXSW_ITEM32(reg, pfcc, aprx, 0x0C, 30, 1); + +/* reg_pfcc_pfcrx + * Priority based flow control policy on Rx[7:0]. Per-priority bit mask: + * 0 - Ignore incoming priority Pause frames on the specified priority + * (default). + * 1 - Respect incoming priority Pause frames on the specified priority. + * Access: RW + */ +MLXSW_ITEM32(reg, pfcc, pfcrx, 0x0C, 16, 8); + +#define MLXSW_REG_PFCC_ALL_PRIO 0xFF + +static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en) +{ + mlxsw_reg_pfcc_prio_mask_tx_set(payload, MLXSW_REG_PFCC_ALL_PRIO); + mlxsw_reg_pfcc_prio_mask_rx_set(payload, MLXSW_REG_PFCC_ALL_PRIO); + mlxsw_reg_pfcc_pfctx_set(payload, pfc_en); + mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en); +} + +static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(pfcc, payload); + mlxsw_reg_pfcc_local_port_set(payload, local_port); +} + /* PPCNT - Ports Performance Counters Register * ------------------------------------------- * The PPCNT register retrieves per port performance counters. @@ -2180,6 +2497,11 @@ MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2); +enum mlxsw_reg_ppcnt_grp { + MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, + MLXSW_REG_PPCNT_PRIO_CNT = 0x10, +}; + /* reg_ppcnt_grp * Performance counter group. * Group 63 indicates all groups. 
Only valid on Set() operation with @@ -2215,6 +2537,8 @@ MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1); */ MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5); +/* Ethernet IEEE 802.3 Counter Group */ + /* reg_ppcnt_a_frames_transmitted_ok * Access: RO */ @@ -2329,15 +2653,145 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received, MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, 0x08 + 0x90, 0, 64); -static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port) +/* Ethernet Per Priority Group Counters */ + +/* reg_ppcnt_rx_octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64); + +/* reg_ppcnt_rx_frames + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64); + +/* reg_ppcnt_tx_octets + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64); + +/* reg_ppcnt_tx_frames + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64); + +/* reg_ppcnt_rx_pause + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64); + +/* reg_ppcnt_rx_pause_duration + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64); + +/* reg_ppcnt_tx_pause + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64); + +/* reg_ppcnt_tx_pause_duration + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64); + +/* reg_ppcnt_tx_pause_transition + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64); + +static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, + enum mlxsw_reg_ppcnt_grp grp, + u8 prio_tc) { MLXSW_REG_ZERO(ppcnt, payload); mlxsw_reg_ppcnt_swid_set(payload, 0); mlxsw_reg_ppcnt_local_port_set(payload, local_port); mlxsw_reg_ppcnt_pnat_set(payload, 0); - mlxsw_reg_ppcnt_grp_set(payload, 0); + mlxsw_reg_ppcnt_grp_set(payload, grp); mlxsw_reg_ppcnt_clr_set(payload, 0); - mlxsw_reg_ppcnt_prio_tc_set(payload, 0); + mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc); +} + +/* PPTB - Port Prio To Buffer Register + * ----------------------------------- + * Configures the switch priority to buffer table. + */ +#define MLXSW_REG_PPTB_ID 0x500B +#define MLXSW_REG_PPTB_LEN 0x0C + +static const struct mlxsw_reg_info mlxsw_reg_pptb = { + .id = MLXSW_REG_PPTB_ID, + .len = MLXSW_REG_PPTB_LEN, +}; + +enum { + MLXSW_REG_PPTB_MM_UM, + MLXSW_REG_PPTB_MM_UNICAST, + MLXSW_REG_PPTB_MM_MULTICAST, +}; + +/* reg_pptb_mm + * Mapping mode. + * 0 - Map both unicast and multicast packets to the same buffer. + * 1 - Map only unicast packets. + * 2 - Map only multicast packets. + * Access: Index + * + * Note: SwitchX-2 only supports the first option. + */ +MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2); + +/* reg_pptb_local_port + * Local port number. + * Access: Index + */ +MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8); + +/* reg_pptb_um + * Enables the update of the untagged_buff field. + * Access: RW + */ +MLXSW_ITEM32(reg, pptb, um, 0x00, 8, 1); + +/* reg_pptb_pm + * Enables the update of the prio_to_buff field. + * Bit <i> is a flag for updating the mapping for switch priority <i>. + * Access: RW + */ +MLXSW_ITEM32(reg, pptb, pm, 0x00, 0, 8); + +/* reg_pptb_prio_to_buff + * Mapping of switch priority <i> to one of the allocated receive port + * buffers. + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff, 0x04, 0x04, 4); + +/* reg_pptb_pm_msb + * Enables the update of the prio_to_buff field. + * Bit <i> is a flag for updating the mapping for switch priority <i+8>.
+ * Access: RW + */ +MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8); + +/* reg_pptb_untagged_buff + * Mapping of untagged frames to one of the allocated receive port buffers. + * Access: RW + * + * Note: In SwitchX-2 this field must be mapped to buffer 8. Reserved for + * Spectrum, as it maps untagged packets based on the default switch priority. + */ +MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4); + +#define MLXSW_REG_PPTB_ALL_PRIO 0xFF + +static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port) +{ + MLXSW_REG_ZERO(pptb, payload); + mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM); + mlxsw_reg_pptb_local_port_set(payload, local_port); + mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO); } /* PBMC - Port Buffer Management Control Register @@ -2346,7 +2800,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port) * allocation for different Prios, and the Pause threshold management. */ #define MLXSW_REG_PBMC_ID 0x500C -#define MLXSW_REG_PBMC_LEN 0x68 +#define MLXSW_REG_PBMC_LEN 0x6C static const struct mlxsw_reg_info mlxsw_reg_pbmc = { .id = MLXSW_REG_PBMC_ID, @@ -2374,6 +2828,8 @@ MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16); */ MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16); +#define MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX 11 + /* reg_pbmc_buf_lossy * The field indicates if the buffer is lossy. * 0 - Lossless @@ -2398,6 +2854,30 @@ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false); */ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false); +/* reg_pbmc_buf_xoff_threshold + * Once the amount of data in the buffer goes above this value, device + * starts sending PFC frames for all priorities associated with the + * buffer. Units are represented in cells. Reserved in case of lossy + * buffer. + * Access: RW + * + * Note: In Spectrum, reserved for buffer[9]. + */ +MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16, + 0x08, 0x04, false); + +/* reg_pbmc_buf_xon_threshold + * When the amount of data in the buffer goes below this value, device + * stops sending PFC frames for the priorities associated with the + * buffer. Units are represented in cells. Reserved in case of lossy + * buffer. + * Access: RW + * + * Note: In Spectrum, reserved for buffer[9]. 
+ */ +MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16, + 0x08, 0x04, false); + static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port, u16 xoff_timer_value, u16 xoff_refresh) { @@ -2416,6 +2896,17 @@ static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload, mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size); } +static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload, + int buf_index, u16 size, + u16 threshold) +{ + mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 0); + mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0); + mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size); + mlxsw_reg_pbmc_buf_xoff_threshold_set(payload, buf_index, threshold); + mlxsw_reg_pbmc_buf_xon_threshold_set(payload, buf_index, threshold); +} + /* PSPA - Port Switch Partition Allocation * --------------------------------------- * Controls the association of a port with a switch partition and enables @@ -2985,9 +3476,10 @@ static const struct mlxsw_reg_info mlxsw_reg_sbpr = { .len = MLXSW_REG_SBPR_LEN, }; -enum mlxsw_reg_sbpr_dir { - MLXSW_REG_SBPR_DIR_INGRESS, - MLXSW_REG_SBPR_DIR_EGRESS, +/* shared direction enum for SBPR, SBCM, SBPM */ +enum mlxsw_reg_sbxx_dir { + MLXSW_REG_SBXX_DIR_INGRESS, + MLXSW_REG_SBXX_DIR_EGRESS, }; /* reg_sbpr_dir @@ -3020,7 +3512,7 @@ enum mlxsw_reg_sbpr_mode { MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4); static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool, - enum mlxsw_reg_sbpr_dir dir, + enum mlxsw_reg_sbxx_dir dir, enum mlxsw_reg_sbpr_mode mode, u32 size) { MLXSW_REG_ZERO(sbpr, payload); @@ -3062,11 +3554,6 @@ MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6); -enum mlxsw_reg_sbcm_dir { - MLXSW_REG_SBCM_DIR_INGRESS, - MLXSW_REG_SBCM_DIR_EGRESS, -}; - /* reg_sbcm_dir * Direction. * Access: Index @@ -3079,6 +3566,10 @@ MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2); */ MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24); +/* shared max_buff limits for dynamic threshold for SBCM, SBPM */ +#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1 +#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14 + /* reg_sbcm_max_buff * When the pool associated to the port-pg/tclass is configured to * static, Maximum buffer size for the limiter configured in cells. @@ -3099,7 +3590,7 @@ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24); MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4); static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, - enum mlxsw_reg_sbcm_dir dir, + enum mlxsw_reg_sbxx_dir dir, u32 min_buff, u32 max_buff, u8 pool) { MLXSW_REG_ZERO(sbcm, payload); @@ -3111,8 +3602,8 @@ static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff, mlxsw_reg_sbcm_pool_set(payload, pool); } -/* SBPM - Shared Buffer Class Management Register - * ---------------------------------------------- +/* SBPM - Shared Buffer Port Management Register + * --------------------------------------------- * The SBPM register configures and retrieves the shared buffer allocation * and configuration according to Port-Pool, including the definition * of the associated quota. @@ -3139,17 +3630,33 @@ MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8); */ MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4); -enum mlxsw_reg_sbpm_dir { - MLXSW_REG_SBPM_DIR_INGRESS, - MLXSW_REG_SBPM_DIR_EGRESS, -}; - /* reg_sbpm_dir * Direction. * Access: Index */ MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2); +/* reg_sbpm_buff_occupancy + * Current buffer occupancy in cells.
+ * Access: RO + */ +MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24); + +/* reg_sbpm_clr + * Clear Max Buffer Occupancy + * When this bit is set, max_buff_occupancy field is cleared (and a + * new max value is tracked from the time the clear was performed). + * Access: OP + */ +MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1); + +/* reg_sbpm_max_buff_occupancy + * Maximum value of buffer occupancy in cells monitored. Cleared by + * writing to the clr field. + * Access: RO + */ +MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24); + /* reg_sbpm_min_buff * Minimum buffer size for the limiter, in cells. * Access: RW @@ -3170,17 +3677,25 @@ MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24); static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool, - enum mlxsw_reg_sbpm_dir dir, + enum mlxsw_reg_sbxx_dir dir, bool clr, u32 min_buff, u32 max_buff) { MLXSW_REG_ZERO(sbpm, payload); mlxsw_reg_sbpm_local_port_set(payload, local_port); mlxsw_reg_sbpm_pool_set(payload, pool); mlxsw_reg_sbpm_dir_set(payload, dir); + mlxsw_reg_sbpm_clr_set(payload, clr); mlxsw_reg_sbpm_min_buff_set(payload, min_buff); mlxsw_reg_sbpm_max_buff_set(payload, max_buff); } +static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy, + u32 *p_max_buff_occupancy) +{ + *p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload); + *p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload); +} + /* SBMM - Shared Buffer Multicast Management Register * -------------------------------------------------- * The SBMM register configures and retrieves the shared buffer allocation @@ -3236,6 +3751,104 @@ static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff, mlxsw_reg_sbmm_pool_set(payload, pool); } +/* SBSR - Shared Buffer Status Register + * ------------------------------------ + * The SBSR register retrieves the shared buffer occupancy according to + * Port-Pool. Note that this register enables reading a large amount of data. + * It is the user's responsibility to limit the amount of data to ensure the + * response can match the maximum transfer unit. In case the response exceeds + * the maximum transfer unit, it will be truncated with no special notice. + */ +#define MLXSW_REG_SBSR_ID 0xB005 +#define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */ +#define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */ +#define MLXSW_REG_SBSR_REC_MAX_COUNT 120 +#define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN + \ + MLXSW_REG_SBSR_REC_LEN * \ + MLXSW_REG_SBSR_REC_MAX_COUNT) + +static const struct mlxsw_reg_info mlxsw_reg_sbsr = { + .id = MLXSW_REG_SBSR_ID, + .len = MLXSW_REG_SBSR_LEN, +}; + +/* reg_sbsr_clr + * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy + * field is cleared (and a new max value is tracked from the time the clear + * was performed). + * Access: OP + */ +MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1); + +/* reg_sbsr_ingress_port_mask + * Bit vector for all ingress network ports. + * Indicates which of the ports (for which the relevant bit is set) + * are affected by the set operation. Configuration of any other port + * does not change. + * Access: Index + */ +MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1); + +/* reg_sbsr_pg_buff_mask + * Bit vector for all switch priority groups. + * Indicates which of the priorities (for which the relevant bit is set) + * are affected by the set operation. Configuration of any other priority + * does not change.
+ * Range is 0..cap_max_pg_buffers - 1 + * Access: Index + */ +MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1); + +/* reg_sbsr_egress_port_mask + * Bit vector for all egress network ports. + * Indicates which of the ports (for which the relevant bit is set) + * are affected by the set operation. Configuration of any other port + * does not change. + * Access: Index + */ +MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1); + +/* reg_sbsr_tclass_mask + * Bit vector for all traffic classes. + * Indicates which of the traffic classes (for which the relevant bit is + * set) are affected by the set operation. Configuration of any other + * traffic class does not change. + * Range is 0..cap_max_tclass - 1 + * Access: Index + */ +MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1); + +static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr) +{ + MLXSW_REG_ZERO(sbsr, payload); + mlxsw_reg_sbsr_clr_set(payload, clr); +} + +/* reg_sbsr_rec_buff_occupancy + * Current buffer occupancy in cells. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN, + 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false); + +/* reg_sbsr_rec_max_buff_occupancy + * Maximum value of buffer occupancy in cells monitored. Cleared by + * writing to the clr field. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN, + 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false); + +static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index, + u32 *p_buff_occupancy, + u32 *p_max_buff_occupancy) +{ + *p_buff_occupancy = + mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index); + *p_max_buff_occupancy = + mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index); +} + static inline const char *mlxsw_reg_id_str(u16 reg_id) { switch (reg_id) { @@ -3283,6 +3896,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SFMR"; case MLXSW_REG_SPVMLR_ID: return "SPVMLR"; + case MLXSW_REG_QTCT_ID: + return "QTCT"; + case MLXSW_REG_QEEC_ID: + return "QEEC"; case MLXSW_REG_PMLP_ID: return "PMLP"; case MLXSW_REG_PMTU_ID: @@ -3293,8 +3910,12 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "PPAD"; case MLXSW_REG_PAOS_ID: return "PAOS"; + case MLXSW_REG_PFCC_ID: + return "PFCC"; case MLXSW_REG_PPCNT_ID: return "PPCNT"; + case MLXSW_REG_PPTB_ID: + return "PPTB"; case MLXSW_REG_PBMC_ID: return "PBMC"; case MLXSW_REG_PSPA_ID: @@ -3323,6 +3944,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SBPM"; case MLXSW_REG_SBMM_ID: return "SBMM"; + case MLXSW_REG_SBSR_ID: + return "SBSR"; default: return "*UNKNOWN*"; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 4afbc3e9e381..4a7273771028 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -49,7 +49,7 @@ #include <linux/jiffies.h> #include <linux/bitops.h> #include <linux/list.h> -#include <net/devlink.h> +#include <linux/dcbnl.h> #include <net/switchdev.h> #include <generated/utsrelease.h> @@ -305,9 +305,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); } -static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, - u8 local_port, u8 *p_module, - u8 *p_width) +static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 *p_module, + u8 *p_width, u8 *p_lane) { char 
pmlp_pl[MLXSW_REG_PMLP_LEN]; int err; @@ -318,9 +318,20 @@ static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, return err; *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); + *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0); return 0; } +static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 *p_module, + u8 *p_width) +{ + u8 lane; + + return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module, + p_width, &lane); +} + static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, u8 module, u8 width, u8 lane) { @@ -379,7 +390,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, u64 len; int err; - if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info)) + if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info)) return NETDEV_TX_BUSY; if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { @@ -403,7 +414,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb, /* Due to a race we might fail here because of a full queue. In that * unlikely case we simply drop the packet. */ - err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info); + err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info); if (!err) { pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats); @@ -438,16 +449,89 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p) return 0; } +static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu, + bool pause_en, bool pfc_en, u16 delay) +{ + u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu); + + delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) : + MLXSW_SP_PAUSE_DELAY; + + if (pause_en || pfc_en) + mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index, + pg_size + delay, pg_size); + else + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size); +} + +int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, + u8 *prio_tc, bool pause_en, + struct ieee_pfc *my_pfc) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0; + u16 delay = !!my_pfc ? my_pfc->delay : 0; + char pbmc_pl[MLXSW_REG_PBMC_LEN]; + int i, j, err; + + mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); + if (err) + return err; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + bool configure = false; + bool pfc = false; + + for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { + if (prio_tc[j] == i) { + pfc = pfc_en & BIT(j); + configure = true; + break; + } + } + + if (!configure) + continue; + mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay); + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); +} + +static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, + int mtu, bool pause_en) +{ + u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0}; + bool dcb_en = !!mlxsw_sp_port->dcb.ets; + struct ieee_pfc *my_pfc; + u8 *prio_tc; + + prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc; + my_pfc = dcb_en ? 
mlxsw_sp_port->dcb.pfc : NULL; + + return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc, + pause_en, my_pfc); +} + static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); int err; - err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); + err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en); if (err) return err; + err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); + if (err) + goto err_port_mtu_set; dev->mtu = mtu; return 0; + +err_port_mtu_set: + mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); + return err; } static struct rtnl_link_stats64 * @@ -861,6 +945,33 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, return 0; } +static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, + size_t len) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + u8 module, width, lane; + int err; + + err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + &module, &width, &lane); + if (err) { + netdev_err(dev, "Failed to retrieve module information\n"); + return err; + } + + if (!mlxsw_sp_port->split) + err = snprintf(name, len, "p%d", module + 1); + else + err = snprintf(name, len, "p%ds%d", module + 1, + lane / width); + + if (err >= len) + return -EINVAL; + + return 0; +} + static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_open = mlxsw_sp_port_open, .ndo_stop = mlxsw_sp_port_stop, @@ -877,6 +988,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_bridge_setlink = switchdev_port_bridge_setlink, .ndo_bridge_getlink = switchdev_port_bridge_getlink, .ndo_bridge_dellink = switchdev_port_bridge_dellink, + .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name, }; static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, @@ -897,6 +1009,68 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev, sizeof(drvinfo->bus_info)); } +static void mlxsw_sp_port_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + pause->rx_pause = mlxsw_sp_port->link.rx_pause; + pause->tx_pause = mlxsw_sp_port->link.tx_pause; +} + +static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct ethtool_pauseparam *pause) +{ + char pfcc_pl[MLXSW_REG_PFCC_LEN]; + + mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause); + mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause); + + return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), + pfcc_pl); +} + +static int mlxsw_sp_port_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + bool pause_en = pause->tx_pause || pause->rx_pause; + int err; + + if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) { + netdev_err(dev, "PFC already enabled on port\n"); + return -EINVAL; + } + + if (pause->autoneg) { + netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n"); + return -EINVAL; + } + + err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); + if (err) { + netdev_err(dev, "Failed to configure port's headroom\n"); + return err; + } + + err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause); + if (err) { + netdev_err(dev, "Failed to set PAUSE parameters\n"); + goto err_port_pause_configure; + } + + mlxsw_sp_port->link.rx_pause = 
pause->rx_pause; + mlxsw_sp_port->link.tx_pause = pause->tx_pause; + + return 0; + +err_port_pause_configure: + pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); + mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en); + return err; +} + struct mlxsw_sp_port_hw_stats { char str[ETH_GSTRING_LEN]; u64 (*getter)(char *payload); @@ -1032,7 +1206,8 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev, int i; int err; - mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port); + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, + MLXSW_REG_PPCNT_IEEE_8023_CNT, 0); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0; @@ -1380,6 +1555,8 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev, static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .get_drvinfo = mlxsw_sp_port_get_drvinfo, .get_link = ethtool_op_get_link, + .get_pauseparam = mlxsw_sp_port_get_pauseparam, + .set_pauseparam = mlxsw_sp_port_set_pauseparam, .get_strings = mlxsw_sp_port_get_strings, .set_phys_id = mlxsw_sp_port_set_phys_id, .get_ethtool_stats = mlxsw_sp_port_get_stats, @@ -1402,12 +1579,112 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); } +int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, + bool dwrr, u8 dwrr_weight) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qeec_pl[MLXSW_REG_QEEC_LEN]; + + mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, + next_index); + mlxsw_reg_qeec_de_set(qeec_pl, true); + mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr); + mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); +} + +int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, + u8 next_index, u32 maxrate) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qeec_pl[MLXSW_REG_QEEC_LEN]; + + mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index, + next_index); + mlxsw_reg_qeec_mase_set(qeec_pl, true); + mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl); +} + +int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, + u8 switch_prio, u8 tclass) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char qtct_pl[MLXSW_REG_QTCT_LEN]; + + mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio, + tclass); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl); +} + +static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err, i; + + /* Set up the elements hierarchy, so that each TC is linked to + * one subgroup, which are all members of the same group. + */ + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false, + 0); + if (err) + return err; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, + 0, false, 0); + if (err) + return err; + } + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, i, i, + false, 0); + if (err) + return err; + } + + /* Make sure the max shaper is disabled in all hierarchies that + * support it.
+ */ + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, + i, 0, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; + } + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_TC, + i, i, + MLXSW_REG_QEEC_MAS_DIS); + if (err) + return err; + } + + /* Map all priorities to traffic class 0. */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0); + if (err) + return err; + } + + return 0; +} + static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, bool split, u8 module, u8 width) { - struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); struct mlxsw_sp_port *mlxsw_sp_port; - struct devlink_port *devlink_port; struct net_device *dev; size_t bytes; int err; @@ -1460,16 +1737,6 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, */ dev->hard_header_len += MLXSW_TXHDR_LEN; - devlink_port = &mlxsw_sp_port->devlink_port; - if (mlxsw_sp_port->split) - devlink_port_split_set(devlink_port, module); - err = devlink_port_register(devlink, devlink_port, local_port); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n", - mlxsw_sp_port->local_port); - goto err_devlink_port_register; - } - err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", @@ -1509,6 +1776,21 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_buffers_init; } + err = mlxsw_sp_port_ets_init(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n", + mlxsw_sp_port->local_port); + goto err_port_ets_init; + } + + /* ETS and buffers must be initialized before DCB. 
*/ + err = mlxsw_sp_port_dcb_init(mlxsw_sp_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n", + mlxsw_sp_port->local_port); + goto err_port_dcb_init; + } + mlxsw_sp_port_switchdev_init(mlxsw_sp_port); err = register_netdev(dev); if (err) { @@ -1517,7 +1799,14 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_register_netdev; } - devlink_port_type_eth_set(devlink_port, dev); + err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port, + mlxsw_sp_port->local_port, dev, + mlxsw_sp_port->split, module); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n", + mlxsw_sp_port->local_port); + goto err_core_port_init; + } err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); if (err) @@ -1527,16 +1816,18 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, return 0; err_port_vlan_init: + mlxsw_core_port_fini(&mlxsw_sp_port->core_port); +err_core_port_init: unregister_netdev(dev); err_register_netdev: +err_port_dcb_init: +err_port_ets_init: err_port_buffers_init: err_port_admin_status_set: err_port_mtu_set: err_port_speed_by_width_set: err_port_swid_set: err_port_system_port_mapping_set: - devlink_port_unregister(&mlxsw_sp_port->devlink_port); -err_devlink_port_register: err_dev_addr_init: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: @@ -1590,15 +1881,13 @@ static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; - struct devlink_port *devlink_port; if (!mlxsw_sp_port) return; mlxsw_sp->ports[local_port] = NULL; - devlink_port = &mlxsw_sp_port->devlink_port; - devlink_port_type_clear(devlink_port); + mlxsw_core_port_fini(&mlxsw_sp_port->core_port); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ - devlink_port_unregister(devlink_port); + mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_vports_fini(mlxsw_sp_port); mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); @@ -1659,9 +1948,10 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) return local_port - offset; } -static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count) +static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, + unsigned int count) { - struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; u8 module, cur_width, base_port; @@ -1733,9 +2023,9 @@ err_port_create: return err; } -static int mlxsw_sp_port_unsplit(void *priv, u8 local_port) +static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) { - struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); struct mlxsw_sp_port *mlxsw_sp_port; u8 module, cur_width, base_port; unsigned int count; @@ -2080,10 +2370,10 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl); } -static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, +static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { - struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); int err; mlxsw_sp->core = mlxsw_core; @@ -2144,6 
+2434,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core, err_switchdev_init: err_lag_init: + mlxsw_sp_buffers_fini(mlxsw_sp); err_buffers_init: err_flood_init: mlxsw_sp_traps_fini(mlxsw_sp); @@ -2154,11 +2445,12 @@ err_event_register: return err; } -static void mlxsw_sp_fini(void *priv) +static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) { - struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sp_switchdev_fini(mlxsw_sp); + mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE); mlxsw_sp_ports_remove(mlxsw_sp); @@ -2201,16 +2493,26 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = { }; static struct mlxsw_driver mlxsw_sp_driver = { - .kind = MLXSW_DEVICE_KIND_SPECTRUM, - .owner = THIS_MODULE, - .priv_size = sizeof(struct mlxsw_sp), - .init = mlxsw_sp_init, - .fini = mlxsw_sp_fini, - .port_split = mlxsw_sp_port_split, - .port_unsplit = mlxsw_sp_port_unsplit, - .txhdr_construct = mlxsw_sp_txhdr_construct, - .txhdr_len = MLXSW_TXHDR_LEN, - .profile = &mlxsw_sp_config_profile, + .kind = MLXSW_DEVICE_KIND_SPECTRUM, + .owner = THIS_MODULE, + .priv_size = sizeof(struct mlxsw_sp), + .init = mlxsw_sp_init, + .fini = mlxsw_sp_fini, + .port_split = mlxsw_sp_port_split, + .port_unsplit = mlxsw_sp_port_unsplit, + .sb_pool_get = mlxsw_sp_sb_pool_get, + .sb_pool_set = mlxsw_sp_sb_pool_set, + .sb_port_pool_get = mlxsw_sp_sb_port_pool_get, + .sb_port_pool_set = mlxsw_sp_sb_port_pool_set, + .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get, + .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set, + .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot, + .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear, + .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get, + .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, + .txhdr_construct = mlxsw_sp_txhdr_construct, + .txhdr_len = MLXSW_TXHDR_LEN, + .profile = &mlxsw_sp_config_profile, }; static int @@ -2541,11 +2843,11 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, lag->ref_count++; return 0; +err_col_port_enable: + mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); err_col_port_add: if (!lag->ref_count) mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); -err_col_port_enable: - mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 4b8abaf06321..e2c022d3e2f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -42,15 +42,15 @@ #include <linux/bitops.h> #include <linux/if_vlan.h> #include <linux/list.h> +#include <linux/dcbnl.h> #include <net/switchdev.h> -#include <net/devlink.h> #include "port.h" #include "core.h" #define MLXSW_SP_VFID_BASE VLAN_N_VID #define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */ -#define MLXSW_SP_VFID_BR_MAX 8192 /* Bridged VLAN interfaces */ +#define MLXSW_SP_VFID_BR_MAX 6144 /* Bridged VLAN interfaces */ #define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX) #define MLXSW_SP_LAG_MAX 64 @@ -62,6 +62,24 @@ #define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ +#define MLXSW_SP_BYTES_PER_CELL 96 + +#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL) +#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL) + +/* Maximum delay buffer needed in case of PAUSE frames, in cells. + * Assumes 100m cable and maximum MTU. 
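+ * + * As a worked example for mlxsw_sp_pfc_delay_get() below (the input + * numbers are chosen purely for illustration, not taken from this + * patch): a PFC delay allowance of 32768 bit times with a 1500 byte + * MTU gives 2 * DIV_ROUND_UP(32768 / 8, 96) + DIV_ROUND_UP(1500, 96) = + * 2 * 43 + 16 = 102 cells.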
+ */ +#define MLXSW_SP_PAUSE_DELAY 612 + +#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */ + +static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay) +{ + delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE)); + return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu); +} + struct mlxsw_sp_port; struct mlxsw_sp_upper { @@ -100,6 +118,40 @@ static inline bool mlxsw_sp_fid_is_vfid(u16 fid) return fid >= MLXSW_SP_VFID_BASE; } +struct mlxsw_sp_sb_pr { + enum mlxsw_reg_sbpr_mode mode; + u32 size; +}; + +struct mlxsw_cp_sb_occ { + u32 cur; + u32 max; +}; + +struct mlxsw_sp_sb_cm { + u32 min_buff; + u32 max_buff; + u8 pool; + struct mlxsw_cp_sb_occ occ; +}; + +struct mlxsw_sp_sb_pm { + u32 min_buff; + u32 max_buff; + struct mlxsw_cp_sb_occ occ; +}; + +#define MLXSW_SP_SB_POOL_COUNT 4 +#define MLXSW_SP_SB_TC_COUNT 8 + +struct mlxsw_sp_sb { + struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT]; + struct { + struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT]; + struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT]; + } ports[MLXSW_PORT_MAX_PORTS]; +}; + struct mlxsw_sp { struct { struct list_head list; @@ -130,6 +182,7 @@ struct mlxsw_sp { struct mlxsw_sp_upper master_bridge; struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; u8 port_to_module[MLXSW_PORT_MAX_PORTS]; + struct mlxsw_sp_sb sb; }; static inline struct mlxsw_sp_upper * @@ -148,6 +201,7 @@ struct mlxsw_sp_port_pcpu_stats { }; struct mlxsw_sp_port { + struct mlxsw_core_port core_port; /* must be first */ struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; struct mlxsw_sp *mlxsw_sp; @@ -166,14 +220,28 @@ struct mlxsw_sp_port { struct mlxsw_sp_vfid *vfid; u16 vid; } vport; + struct { + u8 tx_pause:1, + rx_pause:1; + } link; + struct { + struct ieee_ets *ets; + struct ieee_maxrate *maxrate; + struct ieee_pfc *pfc; + } dcb; /* 802.1Q bridge VLANs */ unsigned long *active_vlans; unsigned long *untagged_vlans; /* VLAN interfaces */ struct list_head vports_list; - struct devlink_port devlink_port; }; +static inline bool +mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause; +} + static inline struct mlxsw_sp_port * mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index) { @@ -245,7 +313,39 @@ enum mlxsw_sp_flood_table { }; int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port); +int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info); +int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type); +int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold); +int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 threshold); +int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold); +int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold); +int mlxsw_sp_sb_occ_snapshot(struct 
mlxsw_core *mlxsw_core, + unsigned int sb_index); +int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, + unsigned int sb_index); +int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max); +int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max); int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp); @@ -265,5 +365,33 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, bool set, bool only_uc); void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); +int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index, + bool dwrr, u8 dwrr_weight); +int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port, + u8 switch_prio, u8 tclass); +int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, + u8 *prio_tc, bool pause_en, + struct ieee_pfc *my_pfc); +int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port, + enum mlxsw_reg_qeec_hr hr, u8 index, + u8 next_index, u32 maxrate); + +#ifdef CONFIG_MLXSW_SPECTRUM_DCB + +int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port); +void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port); + +#else + +static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + return 0; +} + +static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{} + +#endif #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index d59195e3f7fb..a3720a0fad7d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -34,36 +34,140 @@ #include <linux/kernel.h> #include <linux/types.h> +#include <linux/dcbnl.h> +#include <linux/if_ether.h> +#include <linux/list.h> #include "spectrum.h" #include "core.h" #include "port.h" #include "reg.h" -struct mlxsw_sp_pb { - u8 index; - u16 size; -}; +static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp, + u8 pool, + enum mlxsw_reg_sbxx_dir dir) +{ + return &mlxsw_sp->sb.prs[dir][pool]; +} -#define MLXSW_SP_PB(_index, _size) \ - { \ - .index = _index, \ - .size = _size, \ +static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 pg_buff, + enum mlxsw_reg_sbxx_dir dir) +{ + return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff]; +} + +static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 pool, + enum mlxsw_reg_sbxx_dir dir) +{ + return &mlxsw_sp->sb.ports[local_port].pms[dir][pool]; +} + +static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool, + enum mlxsw_reg_sbxx_dir dir, + enum mlxsw_reg_sbpr_mode mode, u32 size) +{ + char sbpr_pl[MLXSW_REG_SBPR_LEN]; + struct mlxsw_sp_sb_pr *pr; + int err; + + mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); + if (err) + return err; + + pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + pr->mode = mode; + pr->size = size; + return 0; +} + +static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 pg_buff, enum 
mlxsw_reg_sbxx_dir dir, + u32 min_buff, u32 max_buff, u8 pool) +{ + char sbcm_pl[MLXSW_REG_SBCM_LEN]; + int err; + + mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir, + min_buff, max_buff, pool); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl); + if (err) + return err; + if (pg_buff < MLXSW_SP_SB_TC_COUNT) { + struct mlxsw_sp_sb_cm *cm; + + cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir); + cm->min_buff = min_buff; + cm->max_buff = max_buff; + cm->pool = pool; } + return 0; +} + +static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 pool, enum mlxsw_reg_sbxx_dir dir, + u32 min_buff, u32 max_buff) +{ + char sbpm_pl[MLXSW_REG_SBPM_LEN]; + struct mlxsw_sp_sb_pm *pm; + int err; + + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, + min_buff, max_buff); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl); + if (err) + return err; + + pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); + pm->min_buff = min_buff; + pm->max_buff = max_buff; + return 0; +} + +static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 pool, enum mlxsw_reg_sbxx_dir dir, + struct list_head *bulk_list) +{ + char sbpm_pl[MLXSW_REG_SBPM_LEN]; + + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0); + return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl, + bulk_list, NULL, 0); +} + +static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core, + char *sbpm_pl, size_t sbpm_pl_len, + unsigned long cb_priv) +{ + struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv; + + mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max); +} + +static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 pool, enum mlxsw_reg_sbxx_dir dir, + struct list_head *bulk_list) +{ + char sbpm_pl[MLXSW_REG_SBPM_LEN]; + struct mlxsw_sp_sb_pm *pm; + + pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir); + mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0); + return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl, + bulk_list, + mlxsw_sp_sb_pm_occ_query_cb, + (unsigned long) pm); +} -static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = { - MLXSW_SP_PB(0, 208), - MLXSW_SP_PB(1, 208), - MLXSW_SP_PB(2, 208), - MLXSW_SP_PB(3, 208), - MLXSW_SP_PB(4, 208), - MLXSW_SP_PB(5, 208), - MLXSW_SP_PB(6, 208), - MLXSW_SP_PB(7, 208), - MLXSW_SP_PB(9, 208), +static const u16 mlxsw_sp_pbs[] = { + [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN), + [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU), }; #define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs) +#define MLXSW_SP_PB_UNUSED 8 static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) { @@ -73,194 +177,206 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2); for (i = 0; i < MLXSW_SP_PBS_LEN; i++) { - const struct mlxsw_sp_pb *pb; - - pb = &mlxsw_sp_pbs[i]; - mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size); + if (i == MLXSW_SP_PB_UNUSED) + continue; + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]); } + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, + MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0); return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); } -#define MLXSW_SP_SB_BYTES_PER_CELL 96 +static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + char pptb_pl[MLXSW_REG_PPTB_LEN]; + int i; -struct mlxsw_sp_sb_pool { - u8 pool; - 
enum mlxsw_reg_sbpr_dir dir; - enum mlxsw_reg_sbpr_mode mode; - u32 size; -}; + mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0); + return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), + pptb_pl); +} + +static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err; + + err = mlxsw_sp_port_pb_init(mlxsw_sp_port); + if (err) + return err; + return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port); +} -#define MLXSW_SP_SB_POOL_INGRESS_SIZE \ - ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) / \ - MLXSW_SP_SB_BYTES_PER_CELL) -#define MLXSW_SP_SB_POOL_EGRESS_SIZE \ - ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) / \ - MLXSW_SP_SB_BYTES_PER_CELL) - -#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \ - { \ - .pool = _pool, \ - .dir = _dir, \ - .mode = _mode, \ - .size = _size, \ +#define MLXSW_SP_SB_PR_INGRESS_SIZE \ + (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) +#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000) +#define MLXSW_SP_SB_PR_EGRESS_SIZE \ + (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) + +#define MLXSW_SP_SB_PR(_mode, _size) \ + { \ + .mode = _mode, \ + .size = _size, \ } -#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \ - MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS, \ - MLXSW_REG_SBPR_MODE_DYNAMIC, _size) - -#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \ - MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS, \ - MLXSW_REG_SBPR_MODE_DYNAMIC, _size) - -static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = { - MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE), - MLXSW_SP_SB_POOL_INGRESS(1, 0), - MLXSW_SP_SB_POOL_INGRESS(2, 0), - MLXSW_SP_SB_POOL_INGRESS(3, 0), - MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE), - MLXSW_SP_SB_POOL_EGRESS(1, 0), - MLXSW_SP_SB_POOL_EGRESS(2, 0), - MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_SB_POOL_EGRESS_SIZE), +static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = { + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)), }; -#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools) +#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress) + +static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = { + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, + MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), + MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0), +}; -static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp) +#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress) + +static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_reg_sbxx_dir dir, + const struct mlxsw_sp_sb_pr *prs, + size_t prs_len) { - char sbpr_pl[MLXSW_REG_SBPR_LEN]; int i; int err; - for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) { - const struct mlxsw_sp_sb_pool *pool; + for (i = 0; i < prs_len; i++) { + const struct mlxsw_sp_sb_pr *pr; - pool = &mlxsw_sp_sb_pools[i]; - mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir, - pool->mode, pool->size); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl); + pr = &prs[i]; + err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, + pr->mode, pr->size); if (err) 
return err; } return 0; } -struct mlxsw_sp_sb_cm { - union { - u8 pg; - u8 tc; - } u; - enum mlxsw_reg_sbcm_dir dir; - u32 min_buff; - u32 max_buff; - u8 pool; -}; +static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp) +{ + int err; -#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \ - { \ - .u.pg = _pg_tc, \ - .dir = _dir, \ - .min_buff = _min_buff, \ - .max_buff = _max_buff, \ - .pool = _pool, \ + err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS, + mlxsw_sp_sb_prs_ingress, + MLXSW_SP_SB_PRS_INGRESS_LEN); + if (err) + return err; + return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS, + mlxsw_sp_sb_prs_egress, + MLXSW_SP_SB_PRS_EGRESS_LEN); +} + +#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \ + { \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + .pool = _pool, \ } -#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \ - MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \ - _min_buff, _max_buff, 0) - -#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \ - MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \ - _min_buff, _max_buff, 0) - -#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \ - MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3) - -static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = { - MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8), - MLXSW_SP_SB_CM_INGRESS(1, 0, 0), - MLXSW_SP_SB_CM_INGRESS(2, 0, 0), - MLXSW_SP_SB_CM_INGRESS(3, 0, 0), - MLXSW_SP_SB_CM_INGRESS(4, 0, 0), - MLXSW_SP_SB_CM_INGRESS(5, 0, 0), - MLXSW_SP_SB_CM_INGRESS(6, 0, 0), - MLXSW_SP_SB_CM_INGRESS(7, 0, 0), - MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff), - MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9), - MLXSW_SP_SB_CM_EGRESS(8, 0, 0), - MLXSW_SP_SB_CM_EGRESS(9, 0, 0), - MLXSW_SP_SB_CM_EGRESS(10, 0, 0), - MLXSW_SP_SB_CM_EGRESS(11, 0, 0), - MLXSW_SP_SB_CM_EGRESS(12, 0, 0), - MLXSW_SP_SB_CM_EGRESS(13, 0, 0), - MLXSW_SP_SB_CM_EGRESS(14, 0, 0), - MLXSW_SP_SB_CM_EGRESS(15, 0, 0), - MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff), +static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = { + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0), + MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */ + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3), }; -#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms) +#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress) + +static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = { + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + 
MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(0, 0, 0), + MLXSW_SP_SB_CM(1, 0xff, 0), +}; + +#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress) + +#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0) static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30), - MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31), + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, + MLXSW_SP_CPU_PORT_SB_CM, }; #define MLXSW_SP_CPU_PORT_SB_MCS_LEN \ ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms) -static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, - const struct mlxsw_sp_sb_cm *cms, - size_t cms_len) +static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, + enum mlxsw_reg_sbxx_dir dir, + const struct mlxsw_sp_sb_cm *cms, + size_t cms_len) { - char sbcm_pl[MLXSW_REG_SBCM_LEN]; int i; int err; for (i = 0; i < cms_len; i++) { const struct mlxsw_sp_sb_cm *cm; + if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS) + continue; /* PG number 8 does not exist, skip it */ cm = &cms[i]; - mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir, - cm->min_buff, cm->max_buff, cm->pool); - err = mlxsw_reg_write(mlxsw_sp->core, 
MLXSW_REG(sbcm), sbcm_pl); + err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir, + cm->min_buff, cm->max_buff, + cm->pool); if (err) return err; } @@ -269,105 +385,120 @@ static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port) { - return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, - mlxsw_sp_port->local_port, mlxsw_sp_sb_cms, - MLXSW_SP_SB_CMS_LEN); + int err; + + err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + MLXSW_REG_SBXX_DIR_INGRESS, + mlxsw_sp_sb_cms_ingress, + MLXSW_SP_SB_CMS_INGRESS_LEN); + if (err) + return err; + return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + MLXSW_REG_SBXX_DIR_EGRESS, + mlxsw_sp_sb_cms_egress, + MLXSW_SP_SB_CMS_EGRESS_LEN); } static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp) { - return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms, - MLXSW_SP_CPU_PORT_SB_MCS_LEN); + return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS, + mlxsw_sp_cpu_port_sb_cms, + MLXSW_SP_CPU_PORT_SB_MCS_LEN); } -struct mlxsw_sp_sb_pm { - u8 pool; - enum mlxsw_reg_sbpm_dir dir; - u32 min_buff; - u32 max_buff; +#define MLXSW_SP_SB_PM(_min_buff, _max_buff) \ + { \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + } + +static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = { + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), }; -#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \ - { \ - .pool = _pool, \ - .dir = _dir, \ - .min_buff = _min_buff, \ - .max_buff = _max_buff, \ - } +#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress) -#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \ - MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \ - _min_buff, _max_buff) - -#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \ - MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \ - _min_buff, _max_buff) - -static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = { - MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff), - MLXSW_SP_SB_PM_INGRESS(1, 0, 0), - MLXSW_SP_SB_PM_INGRESS(2, 0, 0), - MLXSW_SP_SB_PM_INGRESS(3, 0, 0), - MLXSW_SP_SB_PM_EGRESS(0, 0, 7), - MLXSW_SP_SB_PM_EGRESS(1, 0, 0), - MLXSW_SP_SB_PM_EGRESS(2, 0, 0), - MLXSW_SP_SB_PM_EGRESS(3, 0, 0), +static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = { + MLXSW_SP_SB_PM(0, 7), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), + MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN), }; -#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms) +#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress) -static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) +static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port, + enum mlxsw_reg_sbxx_dir dir, + const struct mlxsw_sp_sb_pm *pms, + size_t pms_len) { - char sbpm_pl[MLXSW_REG_SBPM_LEN]; int i; int err; - for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) { + for (i = 0; i < pms_len; i++) { const struct mlxsw_sp_sb_pm *pm; - pm = &mlxsw_sp_sb_pms[i]; - mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port, - pm->pool, pm->dir, - pm->min_buff, pm->max_buff); - err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, - MLXSW_REG(sbpm), sbpm_pl); + pm = &pms[i]; + err = 
mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir, + pm->min_buff, pm->max_buff); if (err) return err; } return 0; } +static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err; + + err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + MLXSW_REG_SBXX_DIR_INGRESS, + mlxsw_sp_sb_pms_ingress, + MLXSW_SP_SB_PMS_INGRESS_LEN); + if (err) + return err; + return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp, + mlxsw_sp_port->local_port, + MLXSW_REG_SBXX_DIR_EGRESS, + mlxsw_sp_sb_pms_egress, + MLXSW_SP_SB_PMS_EGRESS_LEN); +} + struct mlxsw_sp_sb_mm { - u8 prio; u32 min_buff; u32 max_buff; u8 pool; }; -#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool) \ - { \ - .prio = _prio, \ - .min_buff = _min_buff, \ - .max_buff = _max_buff, \ - .pool = _pool, \ +#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \ + { \ + .min_buff = _min_buff, \ + .max_buff = _max_buff, \ + .pool = _pool, \ } static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = { - MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), - MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), + MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0), }; #define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms) @@ -382,7 +513,7 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) const struct mlxsw_sp_sb_mm *mc; mc = &mlxsw_sp_sb_mms[i]; - mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff, + mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff, mc->max_buff, mc->pool); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl); if (err) @@ -391,26 +522,39 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp) return 0; } +#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024) + int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp) { int err; - err = mlxsw_sp_sb_pools_init(mlxsw_sp); + err = 
mlxsw_sp_sb_prs_init(mlxsw_sp); if (err) return err; err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp); if (err) return err; err = mlxsw_sp_sb_mms_init(mlxsw_sp); + if (err) + return err; + return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, + MLXSW_SP_SB_SIZE, + MLXSW_SP_SB_POOL_COUNT, + MLXSW_SP_SB_POOL_COUNT, + MLXSW_SP_SB_TC_COUNT, + MLXSW_SP_SB_TC_COUNT); +} - return err; +void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp) +{ + devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0); } int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) { int err; - err = mlxsw_sp_port_pb_init(mlxsw_sp_port); + err = mlxsw_sp_port_headroom_init(mlxsw_sp_port); if (err) return err; err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port); @@ -420,3 +564,394 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port) return err; } + +static u8 pool_get(u16 pool_index) +{ + return pool_index % MLXSW_SP_SB_POOL_COUNT; +} + +static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir) +{ + u16 pool_index; + + pool_index = pool; + if (dir == MLXSW_REG_SBXX_DIR_EGRESS) + pool_index += MLXSW_SP_SB_POOL_COUNT; + return pool_index; +} + +static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index) +{ + return pool_index < MLXSW_SP_SB_POOL_COUNT ? + MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS; +} + +int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, + struct devlink_sb_pool_info *pool_info) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + u8 pool = pool_get(pool_index); + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + + pool_info->pool_type = dir; + pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size); + pool_info->threshold_type = pr->mode; + return 0; +} + +int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core, + unsigned int sb_index, u16 pool_index, u32 size, + enum devlink_sb_threshold_type threshold_type) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + u8 pool = pool_get(pool_index); + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); + enum mlxsw_reg_sbpr_mode mode = threshold_type; + u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size); + + return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size); +} + +#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */ + +static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool, + enum mlxsw_reg_sbxx_dir dir, u32 max_buff) +{ + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + + if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) + return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; + return MLXSW_SP_CELLS_TO_BYTES(max_buff); +} + +static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool, + enum mlxsw_reg_sbxx_dir dir, u32 threshold, + u32 *p_max_buff) +{ + struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir); + + if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) { + int val; + + val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET; + if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN || + val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) + return -EINVAL; + *p_max_buff = val; + } else { + *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold); + } + return 0; +} + +int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_threshold) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp *mlxsw_sp 
= mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pool = pool_get(pool_index); + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); + struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, + pool, dir); + + *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir, + pm->max_buff); + return 0; +} + +int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 threshold) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pool = pool_get(pool_index); + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); + u32 max_buff; + int err; + + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, + threshold, &max_buff); + if (err) + return err; + + return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir, + 0, max_buff); +} + +int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 *p_pool_index, u32 *p_threshold) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pg_buff = tc_index; + enum mlxsw_reg_sbxx_dir dir = pool_type; + struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, + pg_buff, dir); + + *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir, + cm->max_buff); + *p_pool_index = pool_index_get(cm->pool, pool_type); + return 0; +} + +int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u16 pool_index, u32 threshold) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pg_buff = tc_index; + enum mlxsw_reg_sbxx_dir dir = pool_type; + u8 pool = pool_index; + u32 max_buff; + int err; + + err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir, + threshold, &max_buff); + if (err) + return err; + + if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) { + if (pool < MLXSW_SP_SB_POOL_COUNT) + return -EINVAL; + pool -= MLXSW_SP_SB_POOL_COUNT; + } else if (pool >= MLXSW_SP_SB_POOL_COUNT) { + return -EINVAL; + } + return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir, + 0, max_buff, pool); +} + +#define MASKED_COUNT_MAX \ + (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2)) + +struct mlxsw_sp_sb_sr_occ_query_cb_ctx { + u8 masked_count; + u8 local_port_1; +}; + +static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, + char *sbsr_pl, size_t sbsr_pl_len, + unsigned long cb_priv) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; + u8 masked_count; + u8 local_port; + int rec_index = 0; + struct mlxsw_sp_sb_cm *cm; + int i; + + memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx)); + + masked_count = 0; + for (local_port = cb_ctx.local_port_1; + local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + if (!mlxsw_sp->ports[local_port]) + continue; + for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_INGRESS); + mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++, + &cm->occ.cur, &cm->occ.max); + } + if (++masked_count == 
cb_ctx.masked_count) + break; + } + masked_count = 0; + for (local_port = cb_ctx.local_port_1; + local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + if (!mlxsw_sp->ports[local_port]) + continue; + for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_EGRESS); + mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++, + &cm->occ.cur, &cm->occ.max); + } + if (++masked_count == cb_ctx.masked_count) + break; + } +} + +int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, + unsigned int sb_index) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; + unsigned long cb_priv; + LIST_HEAD(bulk_list); + char *sbsr_pl; + u8 masked_count; + u8 local_port_1; + u8 local_port = 0; + int i; + int err; + int err2; + + sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL); + if (!sbsr_pl) + return -ENOMEM; + +next_batch: + local_port++; + local_port_1 = local_port; + masked_count = 0; + mlxsw_reg_sbsr_pack(sbsr_pl, false); + for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); + mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); + } + for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + if (!mlxsw_sp->ports[local_port]) + continue; + mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); + mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); + for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) { + err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_INGRESS, + &bulk_list); + if (err) + goto out; + err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_EGRESS, + &bulk_list); + if (err) + goto out; + } + if (++masked_count == MASKED_COUNT_MAX) + goto do_query; + } + +do_query: + cb_ctx.masked_count = masked_count; + cb_ctx.local_port_1 = local_port_1; + memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx)); + err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl, + &bulk_list, mlxsw_sp_sb_sr_occ_query_cb, + cb_priv); + if (err) + goto out; + if (local_port < MLXSW_PORT_MAX_PORTS) + goto next_batch; + +out: + err2 = mlxsw_reg_trans_bulk_wait(&bulk_list); + if (!err) + err = err2; + kfree(sbsr_pl); + return err; +} + +int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, + unsigned int sb_index) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); + LIST_HEAD(bulk_list); + char *sbsr_pl; + unsigned int masked_count; + u8 local_port = 0; + int i; + int err; + int err2; + + sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL); + if (!sbsr_pl) + return -ENOMEM; + +next_batch: + local_port++; + masked_count = 0; + mlxsw_reg_sbsr_pack(sbsr_pl, true); + for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) { + mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1); + mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1); + } + for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) { + if (!mlxsw_sp->ports[local_port]) + continue; + mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1); + mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); + for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) { + err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_INGRESS, + &bulk_list); + if (err) + goto out; + err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, + MLXSW_REG_SBXX_DIR_EGRESS, + &bulk_list); + if (err) + goto out; + } + if (++masked_count == MASKED_COUNT_MAX) + goto do_query; + } + +do_query: + err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl, + 
&bulk_list, NULL, 0); + if (err) + goto out; + if (local_port < MLXSW_PORT_MAX_PORTS) + goto next_batch; + +out: + err2 = mlxsw_reg_trans_bulk_wait(&bulk_list); + if (!err) + err = err2; + kfree(sbsr_pl); + return err; +} + +int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 pool_index, + u32 *p_cur, u32 *p_max) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pool = pool_get(pool_index); + enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index); + struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, + pool, dir); + + *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur); + *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max); + return 0; +} + +int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port, + unsigned int sb_index, u16 tc_index, + enum devlink_sb_pool_type pool_type, + u32 *p_cur, u32 *p_max) +{ + struct mlxsw_sp_port *mlxsw_sp_port = + mlxsw_core_port_driver_priv(mlxsw_core_port); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 local_port = mlxsw_sp_port->local_port; + u8 pg_buff = tc_index; + enum mlxsw_reg_sbxx_dir dir = pool_type; + struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, + pg_buff, dir); + + *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur); + *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max); + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c new file mode 100644 index 000000000000..0b323661c0b6 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c @@ -0,0 +1,480 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c + * Copyright (c) 2016 Mellanox Technologies. All rights reserved. + * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <linux/netdevice.h> +#include <linux/string.h> +#include <linux/bitops.h> +#include <net/dcbnl.h> + +#include "spectrum.h" +#include "reg.h" + +static u8 mlxsw_sp_dcbnl_getdcbx(struct net_device __always_unused *dev) +{ + return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +} + +static u8 mlxsw_sp_dcbnl_setdcbx(struct net_device __always_unused *dev, + u8 mode) +{ + return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0; +} + +static int mlxsw_sp_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + memcpy(ets, mlxsw_sp_port->dcb.ets, sizeof(*ets)); + + return 0; +} + +static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port, + struct ieee_ets *ets) +{ + struct net_device *dev = mlxsw_sp_port->dev; + bool has_ets_tc = false; + int i, tx_bw_sum = 0; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + break; + case IEEE_8021QAZ_TSA_ETS: + has_ets_tc = true; + tx_bw_sum += ets->tc_tx_bw[i]; + break; + default: + netdev_err(dev, "Only strict priority and ETS are supported\n"); + return -EINVAL; + } + + if (ets->prio_tc[i] >= IEEE_8021QAZ_MAX_TCS) { + netdev_err(dev, "Invalid TC\n"); + return -EINVAL; + } + } + + if (has_ets_tc && tx_bw_sum != 100) { + netdev_err(dev, "Total ETS bandwidth should equal 100\n"); + return -EINVAL; + } + + return 0; +} + +static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port, + u8 *prio_tc) +{ + char pptb_pl[MLXSW_REG_PPTB_LEN]; + int i; + + mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port); + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]); + return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), + pptb_pl); +} + +static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg) +{ + int i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + if (prio_tc[i] == pg) + return true; + return false; +} + +static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port, + u8 *old_prio_tc, u8 *new_prio_tc) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char pbmc_pl[MLXSW_REG_PBMC_LEN]; + int err, i; + + mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); + if (err) + return err; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + u8 pg = old_prio_tc[i]; + + if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg)) + mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0); + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); +} + +static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct ieee_ets *ets) +{ + bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port); + struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets; + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + /* Create the required PGs, but don't destroy existing ones, as + * traffic is still 
directed to them. + */ + err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, + ets->prio_tc, pause_en, + mlxsw_sp_port->dcb.pfc); + if (err) { + netdev_err(dev, "Failed to configure port's headroom\n"); + return err; + } + + err = mlxsw_sp_port_pg_prio_map(mlxsw_sp_port, ets->prio_tc); + if (err) { + netdev_err(dev, "Failed to set PG-priority mapping\n"); + goto err_port_prio_pg_map; + } + + err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc, + ets->prio_tc); + if (err) + netdev_warn(dev, "Failed to remove unused PGs\n"); + + return 0; + +err_port_prio_pg_map: + mlxsw_sp_port_pg_destroy(mlxsw_sp_port, ets->prio_tc, my_ets->prio_tc); + return err; +} + +static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port, + struct ieee_ets *ets) +{ + struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets; + struct net_device *dev = mlxsw_sp_port->dev; + int i, err; + + /* Egress configuration. */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + bool dwrr = ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS; + u8 weight = ets->tc_tx_bw[i]; + + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, + 0, dwrr, weight); + if (err) { + netdev_err(dev, "Failed to link subgroup ETS element %d to group\n", + i); + goto err_port_ets_set; + } + } + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, + ets->prio_tc[i]); + if (err) { + netdev_err(dev, "Failed to map prio %d to TC %d\n", i, + ets->prio_tc[i]); + goto err_port_prio_tc_set; + } + } + + /* Ingress configuration. */ + err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, ets); + if (err) + goto err_port_headroom_set; + + return 0; + +err_port_headroom_set: + i = IEEE_8021QAZ_MAX_TCS; +err_port_prio_tc_set: + for (i--; i >= 0; i--) + mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, my_ets->prio_tc[i]); + i = IEEE_8021QAZ_MAX_TCS; +err_port_ets_set: + for (i--; i >= 0; i--) { + bool dwrr = my_ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS; + u8 weight = my_ets->tc_tx_bw[i]; + + err = mlxsw_sp_port_ets_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i, + 0, dwrr, weight); + } + return err; +} + +static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev, + struct ieee_ets *ets) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + err = mlxsw_sp_port_ets_validate(mlxsw_sp_port, ets); + if (err) + return err; + + err = __mlxsw_sp_dcbnl_ieee_setets(mlxsw_sp_port, ets); + if (err) + return err; + + memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets)); + + return 0; +} + +static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + + memcpy(maxrate, mlxsw_sp_port->dcb.maxrate, sizeof(*maxrate)); + + return 0; +} + +static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev, + struct ieee_maxrate *maxrate) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct ieee_maxrate *my_maxrate = mlxsw_sp_port->dcb.maxrate; + int err, i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, + i, 0, + maxrate->tc_maxrate[i]); + if (err) { + netdev_err(dev, "Failed to set maxrate for TC %d\n", i); + goto err_port_ets_maxrate_set; + } + } + + memcpy(mlxsw_sp_port->dcb.maxrate, maxrate, sizeof(*maxrate)); + + return 0; + +err_port_ets_maxrate_set: + for (i--; i >= 0; i--) + mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, + MLXSW_REG_QEEC_HIERARCY_SUBGROUP, + i, 0, 
my_maxrate->tc_maxrate[i]); + return err; +} + +static int mlxsw_sp_port_pfc_cnt_get(struct mlxsw_sp_port *mlxsw_sp_port, + u8 prio) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct ieee_pfc *my_pfc = mlxsw_sp_port->dcb.pfc; + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + int err; + + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, + MLXSW_REG_PPCNT_PRIO_CNT, prio); + err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl); + if (err) + return err; + + my_pfc->requests[prio] = mlxsw_reg_ppcnt_tx_pause_get(ppcnt_pl); + my_pfc->indications[prio] = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl); + + return 0; +} + +static int mlxsw_sp_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err, i; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + err = mlxsw_sp_port_pfc_cnt_get(mlxsw_sp_port, i); + if (err) { + netdev_err(dev, "Failed to get PFC count for priority %d\n", + i); + return err; + } + } + + memcpy(pfc, mlxsw_sp_port->dcb.pfc, sizeof(*pfc)); + + return 0; +} + +static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct ieee_pfc *pfc) +{ + char pfcc_pl[MLXSW_REG_PFCC_LEN]; + + mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en); + + return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc), + pfcc_pl); +} + +static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + int err; + + if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) { + netdev_err(dev, "PAUSE frames already enabled on port\n"); + return -EINVAL; + } + + err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, + mlxsw_sp_port->dcb.ets->prio_tc, + false, pfc); + if (err) { + netdev_err(dev, "Failed to configure port's headroom for PFC\n"); + return err; + } + + err = mlxsw_sp_port_pfc_set(mlxsw_sp_port, pfc); + if (err) { + netdev_err(dev, "Failed to configure PFC\n"); + goto err_port_pfc_set; + } + + memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc)); + + return 0; + +err_port_pfc_set: + __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, + mlxsw_sp_port->dcb.ets->prio_tc, false, + mlxsw_sp_port->dcb.pfc); + return err; +} + +static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = { + .ieee_getets = mlxsw_sp_dcbnl_ieee_getets, + .ieee_setets = mlxsw_sp_dcbnl_ieee_setets, + .ieee_getmaxrate = mlxsw_sp_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate, + .ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc, + .ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc, + + .getdcbx = mlxsw_sp_dcbnl_getdcbx, + .setdcbx = mlxsw_sp_dcbnl_setdcbx, +}; + +static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->dcb.ets = kzalloc(sizeof(*mlxsw_sp_port->dcb.ets), + GFP_KERNEL); + if (!mlxsw_sp_port->dcb.ets) + return -ENOMEM; + + mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS; + + return 0; +} + +static void mlxsw_sp_port_ets_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ + kfree(mlxsw_sp_port->dcb.ets); +} + +static int mlxsw_sp_port_maxrate_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int i; + + mlxsw_sp_port->dcb.maxrate = kmalloc(sizeof(*mlxsw_sp_port->dcb.maxrate), + GFP_KERNEL); + if (!mlxsw_sp_port->dcb.maxrate) + return -ENOMEM; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + mlxsw_sp_port->dcb.maxrate->tc_maxrate[i] = MLXSW_REG_QEEC_MAS_DIS; + + return 0; +} + +static void 
mlxsw_sp_port_maxrate_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ + kfree(mlxsw_sp_port->dcb.maxrate); +} + +static int mlxsw_sp_port_pfc_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port->dcb.pfc = kzalloc(sizeof(*mlxsw_sp_port->dcb.pfc), + GFP_KERNEL); + if (!mlxsw_sp_port->dcb.pfc) + return -ENOMEM; + + mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS; + + return 0; +} + +static void mlxsw_sp_port_pfc_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ + kfree(mlxsw_sp_port->dcb.pfc); +} + +int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err; + + err = mlxsw_sp_port_ets_init(mlxsw_sp_port); + if (err) + return err; + err = mlxsw_sp_port_maxrate_init(mlxsw_sp_port); + if (err) + goto err_port_maxrate_init; + err = mlxsw_sp_port_pfc_init(mlxsw_sp_port); + if (err) + goto err_port_pfc_init; + + mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops; + + return 0; + +err_port_pfc_init: + mlxsw_sp_port_maxrate_fini(mlxsw_sp_port); +err_port_maxrate_init: + mlxsw_sp_port_ets_fini(mlxsw_sp_port); + return err; +} + +void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) +{ + mlxsw_sp_port_pfc_fini(mlxsw_sp_port); + mlxsw_sp_port_maxrate_fini(mlxsw_sp_port); + mlxsw_sp_port_ets_fini(mlxsw_sp_port); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index e1c74efff51a..3710f19ed6bb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -214,7 +214,15 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin, table_type, range, local_port, set); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); + if (err) + goto err_flood_bm_set; + else + goto buffer_out; +err_flood_bm_set: + mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin, + table_type, range, local_port, !set); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl); buffer_out: kfree(sftr_pl); return err; @@ -1430,8 +1438,8 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp) { - schedule_delayed_work(&mlxsw_sp->fdb_notify.dw, - msecs_to_jiffies(mlxsw_sp->fdb_notify.interval)); + mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw, + msecs_to_jiffies(mlxsw_sp->fdb_notify.interval)); } static void mlxsw_sp_fdb_notify_work(struct work_struct *work) diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 7a60a26759b6..3842eab9449a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -43,7 +43,6 @@ #include <linux/device.h> #include <linux/skbuff.h> #include <linux/if_vlan.h> -#include <net/devlink.h> #include <net/switchdev.h> #include <generated/utsrelease.h> @@ -75,11 +74,11 @@ struct mlxsw_sx_port_pcpu_stats { }; struct mlxsw_sx_port { + struct mlxsw_core_port core_port; /* must be first */ struct net_device *dev; struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats; struct mlxsw_sx *mlxsw_sx; u8 local_port; - struct devlink_port devlink_port; }; /* tx_hdr_version @@ -303,7 +302,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, u64 len; int err; - if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info)) + if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info)) return NETDEV_TX_BUSY; if 
(unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) { @@ -321,7 +320,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb, /* Due to a race we might fail here because of a full queue. In that * unlikely case we simply drop the packet. */ - err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info); + err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info); if (!err) { pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats); @@ -518,7 +517,8 @@ static void mlxsw_sx_port_get_stats(struct net_device *dev, int i; int err; - mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port); + mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port, + MLXSW_REG_PPCNT_IEEE_8023_CNT, 0); err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl); for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++) data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0; @@ -955,9 +955,7 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port, static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) { - struct devlink *devlink = priv_to_devlink(mlxsw_sx->core); struct mlxsw_sx_port *mlxsw_sx_port; - struct devlink_port *devlink_port; struct net_device *dev; bool usable; int err; @@ -1011,14 +1009,6 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) goto port_not_usable; } - devlink_port = &mlxsw_sx_port->devlink_port; - err = devlink_port_register(devlink, devlink_port, local_port); - if (err) { - dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register devlink port\n", - mlxsw_sx_port->local_port); - goto err_devlink_port_register; - } - err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port); if (err) { dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n", @@ -1076,11 +1066,19 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) goto err_register_netdev; } - devlink_port_type_eth_set(devlink_port, dev); + err = mlxsw_core_port_init(mlxsw_sx->core, &mlxsw_sx_port->core_port, + mlxsw_sx_port->local_port, dev, false, 0); + if (err) { + dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n", + mlxsw_sx_port->local_port); + goto err_core_port_init; + } mlxsw_sx->ports[local_port] = mlxsw_sx_port; return 0; +err_core_port_init: + unregister_netdev(dev); err_register_netdev: err_port_mac_learning_mode_set: err_port_stp_state_set: @@ -1089,8 +1087,6 @@ err_port_mtu_set: err_port_speed_set: err_port_swid_set: err_port_system_port_mapping_set: - devlink_port_unregister(&mlxsw_sx_port->devlink_port); -err_devlink_port_register: port_not_usable: err_port_module_check: err_dev_addr_get: @@ -1103,15 +1099,12 @@ err_alloc_stats: static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) { struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port]; - struct devlink_port *devlink_port; if (!mlxsw_sx_port) return; - devlink_port = &mlxsw_sx_port->devlink_port; - devlink_port_type_clear(devlink_port); + mlxsw_core_port_fini(&mlxsw_sx_port->core_port); unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); - devlink_port_unregister(devlink_port); free_percpu(mlxsw_sx_port->pcpu_stats); free_netdev(mlxsw_sx_port->dev); } @@ -1454,10 +1447,10 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx) return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl); } -static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core, +static int mlxsw_sx_init(struct mlxsw_core 
*mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { - struct mlxsw_sx *mlxsw_sx = priv; + struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core); int err; mlxsw_sx->core = mlxsw_core; @@ -1504,9 +1497,9 @@ err_event_register: return err; } -static void mlxsw_sx_fini(void *priv) +static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core) { - struct mlxsw_sx *mlxsw_sx = priv; + struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core); mlxsw_sx_traps_fini(mlxsw_sx); mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE); diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c index a8522d8af95d..20cb85bc0c5f 100644 --- a/drivers/net/ethernet/micrel/ks8695net.c +++ b/drivers/net/ethernet/micrel/ks8695net.c @@ -1354,6 +1354,7 @@ ks8695_probe(struct platform_device *pdev) struct resource *rxirq_res, *txirq_res, *linkirq_res; int ret = 0; int buff_n; + bool inv_mac_addr = false; u32 machigh, maclow; /* Initialise a net_device */ @@ -1456,8 +1457,7 @@ ks8695_probe(struct platform_device *pdev) ndev->dev_addr[5] = maclow & 0xFF; if (!is_valid_ether_addr(ndev->dev_addr)) - dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please " - "set using ifconfig\n", ndev->name); + inv_mac_addr = true; /* In order to be efficient memory-wise, we allocate both * rings in one go. @@ -1520,6 +1520,9 @@ ks8695_probe(struct platform_device *pdev) ret = register_netdev(ndev); if (ret == 0) { + if (inv_mac_addr) + dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please set using ip\n", + ndev->name); dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n", ks8695_port_type(ksp), ndev->dev_addr); } else { diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 75dc46c5fca2..280e761d3a97 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4790,7 +4790,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal) /* Notify the network subsystem that the packet has been sent. 
*/ if (dev) - dev->trans_start = jiffies; + netif_trans_update(dev); } /** @@ -4965,7 +4965,7 @@ static void netdev_tx_timeout(struct net_device *dev) hw_ena_intr(hw); } - dev->trans_start = jiffies; + netif_trans_update(dev); netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c index 86ea17e7ba7b..7066954c39d6 100644 --- a/drivers/net/ethernet/microchip/enc28j60.c +++ b/drivers/net/ethernet/microchip/enc28j60.c @@ -28,11 +28,12 @@ #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/spi/spi.h> +#include <linux/of_net.h> #include "enc28j60_hw.h" #define DRV_NAME "enc28j60" -#define DRV_VERSION "1.01" +#define DRV_VERSION "1.02" #define SPI_OPLEN 1 @@ -89,22 +90,26 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data) { u8 *rx_buf = priv->spi_transfer_buf + 4; u8 *tx_buf = priv->spi_transfer_buf; - struct spi_transfer t = { + struct spi_transfer tx = { .tx_buf = tx_buf, + .len = SPI_OPLEN, + }; + struct spi_transfer rx = { .rx_buf = rx_buf, - .len = SPI_OPLEN + len, + .len = len, }; struct spi_message msg; int ret; tx_buf[0] = ENC28J60_READ_BUF_MEM; - tx_buf[1] = tx_buf[2] = tx_buf[3] = 0; /* don't care */ spi_message_init(&msg); - spi_message_add_tail(&t, &msg); + spi_message_add_tail(&tx, &msg); + spi_message_add_tail(&rx, &msg); + ret = spi_sync(priv->spi, &msg); if (ret == 0) { - memcpy(data, &rx_buf[SPI_OPLEN], len); + memcpy(data, rx_buf, len); ret = msg.status; } if (ret && netif_msg_drv(priv)) @@ -1544,6 +1549,7 @@ static int enc28j60_probe(struct spi_device *spi) { struct net_device *dev; struct enc28j60_net *priv; + const void *macaddr; int ret = 0; if (netif_msg_drv(&debug)) @@ -1575,7 +1581,12 @@ static int enc28j60_probe(struct spi_device *spi) ret = -EIO; goto error_irq; } - eth_hw_addr_random(dev); + + macaddr = of_get_mac_address(spi->dev.of_node); + if (macaddr) + ether_addr_copy(dev->dev_addr, macaddr); + else + eth_hw_addr_random(dev); enc28j60_set_hw_macaddr(dev); /* Board setup must set the relevant edge trigger type; @@ -1630,9 +1641,16 @@ static int enc28j60_remove(struct spi_device *spi) return 0; } +static const struct of_device_id enc28j60_dt_ids[] = { + { .compatible = "microchip,enc28j60" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, enc28j60_dt_ids); + static struct spi_driver enc28j60_driver = { .driver = { - .name = DRV_NAME, + .name = DRV_NAME, + .of_match_table = enc28j60_dt_ids, }, .probe = enc28j60_probe, .remove = enc28j60_remove, diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c index 7df318346b05..42e34076d2de 100644 --- a/drivers/net/ethernet/microchip/encx24j600.c +++ b/drivers/net/ethernet/microchip/encx24j600.c @@ -874,7 +874,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev) netif_stop_queue(dev); /* save the timestamp */ - dev->trans_start = jiffies; + netif_trans_update(dev); /* Remember the skb for deferred processing */ priv->tx_skb = skb; @@ -890,7 +890,7 @@ static void encx24j600_tx_timeout(struct net_device *dev) struct encx24j600_priv *priv = netdev_priv(dev); netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n", - jiffies, jiffies - dev->trans_start); + jiffies, jiffies - dev_trans_start(dev)); dev->stats.tx_errors++; netif_wake_queue(dev); diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 3e67f451f2ab..4367dd6879a2 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ 
b/drivers/net/ethernet/moxa/moxart_ether.c @@ -376,7 +376,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) priv->tx_head = TX_NEXT(tx_head); - ndev->trans_start = jiffies; + netif_trans_update(ndev); ret = NETDEV_TX_OK; out_unlock: spin_unlock_irq(&priv->txlock); diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 270c9eeb7ab6..6d1a956e3f77 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -2668,9 +2668,9 @@ static int myri10ge_close(struct net_device *dev) del_timer_sync(&mgp->watchdog_timer); mgp->running = MYRI10GE_ETH_STOPPING; - local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */ for (i = 0; i < mgp->num_slices; i++) { napi_disable(&mgp->ss[i].napi); + local_bh_disable(); /* myri10ge_ss_lock_napi needs this */ /* Lock the slice to prevent the busy_poll handler from * accessing it. Later when we bring the NIC up, myri10ge_open * resets the slice including this lock. @@ -2679,8 +2679,8 @@ static int myri10ge_close(struct net_device *dev) pr_info("Slice %d locked\n", i); mdelay(1); } + local_bh_enable(); } - local_bh_enable(); netif_carrier_off(dev); netif_tx_stop_all_queues(dev); diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 122c2ee3dfe2..ed89029ff75b 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -1904,7 +1904,7 @@ static void ns_tx_timeout(struct net_device *dev) spin_unlock_irq(&np->lock); enable_irq(irq); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index 1bd419dbda6d..612c7a44b26c 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -174,7 +174,7 @@ static void sonic_tx_timeout(struct net_device *dev) /* Try to restart the adaptor. 
*/ sonic_init(dev); lp->stats.tx_errors++; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 9ba975853ec6..2874dffe77de 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -4021,7 +4021,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) unsigned long flags = 0; u16 vlan_tag = 0; struct fifo_info *fifo = NULL; - int do_spin_lock = 1; int offload_type; int enable_per_list_interrupt = 0; struct config_param *config = &sp->config; @@ -4074,7 +4073,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) queue += sp->udp_fifo_idx; if (skb->len > 1024) enable_per_list_interrupt = 1; - do_spin_lock = 0; } } } @@ -4084,12 +4082,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) [skb->priority & (MAX_TX_FIFOS - 1)]; fifo = &mac_control->fifos[queue]; - if (do_spin_lock) - spin_lock_irqsave(&fifo->tx_lock, flags); - else { - if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) - return NETDEV_TX_LOCKED; - } + spin_lock_irqsave(&fifo->tx_lock, flags); if (sp->config.multiq) { if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 75683fb26734..e744acc18ef4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -59,8 +59,8 @@ netdev_warn((nn)->netdev, fmt, ## args); \ } while (0) -/* Max time to wait for NFP to respond on updates (in ms) */ -#define NFP_NET_POLL_TIMEOUT 5000 +/* Max time to wait for NFP to respond on updates (in seconds) */ +#define NFP_NET_POLL_TIMEOUT 5 /* Bar allocation */ #define NFP_NET_CRTL_BAR 0 @@ -298,6 +298,8 @@ struct nfp_net_rx_buf { * @rxds: Virtual address of FL/RX ring in host memory * @dma: DMA address of the FL/RX ring * @size: Size, in bytes, of the FL/RX ring (needed to free) + * @bufsz: Buffer allocation size for convenience of management routines + * (NOTE: this is in second cache line, do not use on fast path!) */ struct nfp_net_rx_ring { struct nfp_net_r_vector *r_vec; @@ -319,6 +321,7 @@ struct nfp_net_rx_ring { dma_addr_t dma; unsigned int size; + unsigned int bufsz; } ____cacheline_aligned; /** @@ -444,6 +447,10 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver, * @shared_name: Name for shared interrupt * @me_freq_mhz: ME clock_freq (MHz) * @reconfig_lock: Protects HW reconfiguration request regs/machinery + * @reconfig_posted: Pending reconfig bits coming from async sources + * @reconfig_timer_active: Timer for reading reconfiguration results is pending + * @reconfig_sync_present: Some thread is performing synchronous reconfig + * @reconfig_timer: Timer for async reading of reconfig results * @link_up: Is the link up? 
* @link_status_lock: Protects @link_up and ensures atomicity with BAR reading * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter @@ -472,6 +479,9 @@ struct nfp_net { u32 rx_offset; + struct nfp_net_tx_ring *tx_rings; + struct nfp_net_rx_ring *rx_rings; + #ifdef CONFIG_PCI_IOV unsigned int num_vfs; struct vf_data_storage *vfinfo; @@ -504,9 +514,6 @@ struct nfp_net { int txd_cnt; int rxd_cnt; - struct nfp_net_tx_ring tx_rings[NFP_NET_MAX_TX_RINGS]; - struct nfp_net_rx_ring rx_rings[NFP_NET_MAX_RX_RINGS]; - u8 num_irqs; u8 num_r_vecs; struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS]; @@ -528,6 +535,10 @@ struct nfp_net { spinlock_t link_status_lock; spinlock_t reconfig_lock; + u32 reconfig_posted; + bool reconfig_timer_active; + bool reconfig_sync_present; + struct timer_list reconfig_timer; u32 rx_coalesce_usecs; u32 rx_coalesce_max_frames; @@ -721,6 +732,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn); void nfp_net_coalesce_write_cfg(struct nfp_net *nn); int nfp_net_irqs_alloc(struct nfp_net *nn); void nfp_net_irqs_disable(struct nfp_net *nn); +int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt); #ifdef CONFIG_NFP_NET_DEBUG void nfp_net_debugfs_create(void); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 43c618bafdb6..fa47c14c743a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -80,6 +80,116 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, put_unaligned_le32(reg, fw_ver); } +/* Firmware reconfig + * + * Firmware reconfig may take a while so we have two versions of it - + * synchronous and asynchronous (posted). All synchronous callers are holding + * RTNL so we don't have to worry about serializing them. + */ +static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update) +{ + nn_writel(nn, NFP_NET_CFG_UPDATE, update); + /* ensure update is written before pinging HW */ + nn_pci_flush(nn); + nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1); +} + +/* Pass 0 as update to run posted reconfigs. */ +static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update) +{ + update |= nn->reconfig_posted; + nn->reconfig_posted = 0; + + nfp_net_reconfig_start(nn, update); + + nn->reconfig_timer_active = true; + mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ); +} + +static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check) +{ + u32 reg; + + reg = nn_readl(nn, NFP_NET_CFG_UPDATE); + if (reg == 0) + return true; + if (reg & NFP_NET_CFG_UPDATE_ERR) { + nn_err(nn, "Reconfig error: 0x%08x\n", reg); + return true; + } else if (last_check) { + nn_err(nn, "Reconfig timeout: 0x%08x\n", reg); + return true; + } + + return false; +} + +static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline) +{ + bool timed_out = false; + + /* Poll update field, waiting for NFP to ack the config */ + while (!nfp_net_reconfig_check_done(nn, timed_out)) { + msleep(1); + timed_out = time_is_before_eq_jiffies(deadline); + } + + if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR) + return -EIO; + + return timed_out ? 
-EIO : 0; +} + +static void nfp_net_reconfig_timer(unsigned long data) +{ + struct nfp_net *nn = (void *)data; + + spin_lock_bh(&nn->reconfig_lock); + + nn->reconfig_timer_active = false; + + /* If sync caller is present it will take over from us */ + if (nn->reconfig_sync_present) + goto done; + + /* Read reconfig status and report errors */ + nfp_net_reconfig_check_done(nn, true); + + if (nn->reconfig_posted) + nfp_net_reconfig_start_async(nn, 0); +done: + spin_unlock_bh(&nn->reconfig_lock); +} + +/** + * nfp_net_reconfig_post() - Post async reconfig request + * @nn: NFP Net device to reconfigure + * @update: The value for the update field in the BAR config + * + * Record FW reconfiguration request. Reconfiguration will be kicked off + * whenever reconfiguration machinery is idle. Multiple requests can be + * merged together! + */ +static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update) +{ + spin_lock_bh(&nn->reconfig_lock); + + /* Sync caller will kick off async reconf when it's done, just post */ + if (nn->reconfig_sync_present) { + nn->reconfig_posted |= update; + goto done; + } + + /* Opportunistically check if the previous command is done */ + if (!nn->reconfig_timer_active || + nfp_net_reconfig_check_done(nn, false)) + nfp_net_reconfig_start_async(nn, update); + else + nn->reconfig_posted |= update; +done: + spin_unlock_bh(&nn->reconfig_lock); +} + /** * nfp_net_reconfig() - Reconfigure the firmware * @nn: NFP Net device to reconfigure @@ -93,35 +203,45 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, */ int nfp_net_reconfig(struct nfp_net *nn, u32 update) { - int cnt, ret = 0; - u32 new; + bool cancelled_timer = false; + u32 pre_posted_requests; + int ret; spin_lock_bh(&nn->reconfig_lock); - nn_writel(nn, NFP_NET_CFG_UPDATE, update); - /* ensure update is written before pinging HW */ - nn_pci_flush(nn); - nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1); + nn->reconfig_sync_present = true; - /* Poll update field, waiting for NFP to ack the config */ - for (cnt = 0; ; cnt++) { - new = nn_readl(nn, NFP_NET_CFG_UPDATE); - if (new == 0) - break; - if (new & NFP_NET_CFG_UPDATE_ERR) { - nn_err(nn, "Reconfig error: 0x%08x\n", new); - ret = -EIO; - break; - } else if (cnt >= NFP_NET_POLL_TIMEOUT) { - nn_err(nn, "Reconfig timeout for 0x%08x after %dms\n", - update, cnt); - ret = -EIO; - break; - } - mdelay(1); + if (nn->reconfig_timer_active) { + del_timer(&nn->reconfig_timer); + nn->reconfig_timer_active = false; + cancelled_timer = true; + } + pre_posted_requests = nn->reconfig_posted; + nn->reconfig_posted = 0; + + spin_unlock_bh(&nn->reconfig_lock); + + if (cancelled_timer) + nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires); + + /* Run the posted reconfigs which were issued before we started */ + if (pre_posted_requests) { + nfp_net_reconfig_start(nn, pre_posted_requests); + nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); } + nfp_net_reconfig_start(nn, update); + ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT); + + spin_lock_bh(&nn->reconfig_lock); + + if (nn->reconfig_posted) + nfp_net_reconfig_start_async(nn, 0); + + nn->reconfig_sync_present = false; + spin_unlock_bh(&nn->reconfig_lock); + return ret; } @@ -347,12 +467,18 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data) /** * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring * @tx_ring: TX ring structure + * @r_vec: IRQ vector servicing this ring + * @idx: Ring index */ -static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring) +static void 
+nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring, + struct nfp_net_r_vector *r_vec, unsigned int idx) { - struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net *nn = r_vec->nfp_net; + tx_ring->idx = idx; + tx_ring->r_vec = r_vec; + tx_ring->qcidx = tx_ring->idx * nn->stride_tx; tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx); } @@ -360,12 +486,18 @@ static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring) /** * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring * @rx_ring: RX ring structure + * @r_vec: IRQ vector servicing this ring + * @idx: Ring index */ -static void nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring) +static void +nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring, + struct nfp_net_r_vector *r_vec, unsigned int idx) { - struct nfp_net_r_vector *r_vec = rx_ring->r_vec; struct nfp_net *nn = r_vec->nfp_net; + rx_ring->idx = idx; + rx_ring->r_vec = r_vec; + rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx; rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1); @@ -401,16 +533,6 @@ static void nfp_net_irqs_assign(struct net_device *netdev) r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r; cpumask_set_cpu(r, &r_vec->affinity_mask); - - r_vec->tx_ring = &nn->tx_rings[r]; - nn->tx_rings[r].idx = r; - nn->tx_rings[r].r_vec = r_vec; - nfp_net_tx_ring_init(r_vec->tx_ring); - - r_vec->rx_ring = &nn->rx_rings[r]; - nn->rx_rings[r].idx = r; - nn->rx_rings[r].r_vec = r_vec; - nfp_net_rx_ring_init(r_vec->rx_ring); } } @@ -865,61 +987,59 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring) } /** - * nfp_net_tx_flush() - Free any untransmitted buffers currently on the TX ring - * @tx_ring: TX ring structure + * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers + * @nn: NFP Net device + * @tx_ring: TX ring structure * * Assumes that the device is stopped */ -static void nfp_net_tx_flush(struct nfp_net_tx_ring *tx_ring) +static void +nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) { - struct nfp_net_r_vector *r_vec = tx_ring->r_vec; - struct nfp_net *nn = r_vec->nfp_net; - struct pci_dev *pdev = nn->pdev; const struct skb_frag_struct *frag; struct netdev_queue *nd_q; - struct sk_buff *skb; - int nr_frags; - int fidx; - int idx; + struct pci_dev *pdev = nn->pdev; while (tx_ring->rd_p != tx_ring->wr_p) { - idx = tx_ring->rd_p % tx_ring->cnt; + int nr_frags, fidx, idx; + struct sk_buff *skb; + idx = tx_ring->rd_p % tx_ring->cnt; skb = tx_ring->txbufs[idx].skb; - if (skb) { - nr_frags = skb_shinfo(skb)->nr_frags; - fidx = tx_ring->txbufs[idx].fidx; - - if (fidx == -1) { - /* unmap head */ - dma_unmap_single(&pdev->dev, - tx_ring->txbufs[idx].dma_addr, - skb_headlen(skb), - DMA_TO_DEVICE); - } else { - /* unmap fragment */ - frag = &skb_shinfo(skb)->frags[fidx]; - dma_unmap_page(&pdev->dev, - tx_ring->txbufs[idx].dma_addr, - skb_frag_size(frag), - DMA_TO_DEVICE); - } - - /* check for last gather fragment */ - if (fidx == nr_frags - 1) - dev_kfree_skb_any(skb); - - tx_ring->txbufs[idx].dma_addr = 0; - tx_ring->txbufs[idx].skb = NULL; - tx_ring->txbufs[idx].fidx = -2; + nr_frags = skb_shinfo(skb)->nr_frags; + fidx = tx_ring->txbufs[idx].fidx; + + if (fidx == -1) { + /* unmap head */ + dma_unmap_single(&pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_headlen(skb), DMA_TO_DEVICE); + } else { + /* unmap fragment */ + frag = &skb_shinfo(skb)->frags[fidx]; + dma_unmap_page(&pdev->dev, + tx_ring->txbufs[idx].dma_addr, + skb_frag_size(frag), DMA_TO_DEVICE); } - 
memset(&tx_ring->txds[idx], 0, sizeof(tx_ring->txds[idx])); + /* check for last gather fragment */ + if (fidx == nr_frags - 1) + dev_kfree_skb_any(skb); + + tx_ring->txbufs[idx].dma_addr = 0; + tx_ring->txbufs[idx].skb = NULL; + tx_ring->txbufs[idx].fidx = -2; tx_ring->qcp_rd_p++; tx_ring->rd_p++; } + memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt); + tx_ring->wr_p = 0; + tx_ring->rd_p = 0; + tx_ring->qcp_rd_p = 0; + tx_ring->wr_ptr_add = 0; + nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); netdev_tx_reset_queue(nd_q); } @@ -957,25 +1077,27 @@ static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring) * nfp_net_rx_alloc_one() - Allocate and map skb for RX * @rx_ring: RX ring structure of the skb * @dma_addr: Pointer to storage for DMA address (output param) + * @fl_bufsz: size of freelist buffers * * This function will allocate a new skb and map it for DMA. * * Return: allocated skb or NULL on failure. */ static struct sk_buff * -nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr) +nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr, + unsigned int fl_bufsz) { struct nfp_net *nn = rx_ring->r_vec->nfp_net; struct sk_buff *skb; - skb = netdev_alloc_skb(nn->netdev, nn->fl_bufsz); + skb = netdev_alloc_skb(nn->netdev, fl_bufsz); if (!skb) { nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n"); return NULL; } *dma_addr = dma_map_single(&nn->pdev->dev, skb->data, - nn->fl_bufsz, DMA_FROM_DEVICE); + fl_bufsz, DMA_FROM_DEVICE); if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { dev_kfree_skb_any(skb); nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); @@ -1020,62 +1142,101 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring, } /** - * nfp_net_rx_flush() - Free any buffers currently on the RX ring - * @rx_ring: RX ring to remove buffers from + * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable + * @rx_ring: RX ring structure * - * Assumes that the device is stopped + * Warning: Do *not* call if ring buffers were never put on the FW freelist + * (i.e. device was not enabled)! */ -static void nfp_net_rx_flush(struct nfp_net_rx_ring *rx_ring) +static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring) { - struct nfp_net *nn = rx_ring->r_vec->nfp_net; - struct pci_dev *pdev = nn->pdev; - int idx; + unsigned int wr_idx, last_idx; - while (rx_ring->rd_p != rx_ring->wr_p) { - idx = rx_ring->rd_p % rx_ring->cnt; + /* Move the empty entry to the end of the list */ + wr_idx = rx_ring->wr_p % rx_ring->cnt; + last_idx = rx_ring->cnt - 1; + rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr; + rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb; + rx_ring->rxbufs[last_idx].dma_addr = 0; + rx_ring->rxbufs[last_idx].skb = NULL; - if (rx_ring->rxbufs[idx].skb) { - dma_unmap_single(&pdev->dev, - rx_ring->rxbufs[idx].dma_addr, - nn->fl_bufsz, DMA_FROM_DEVICE); - dev_kfree_skb_any(rx_ring->rxbufs[idx].skb); - rx_ring->rxbufs[idx].dma_addr = 0; - rx_ring->rxbufs[idx].skb = NULL; - } + memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt); + rx_ring->wr_p = 0; + rx_ring->rd_p = 0; + rx_ring->wr_ptr_add = 0; +} + +/** + * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring + * @nn: NFP Net device + * @rx_ring: RX ring to remove buffers from + * + * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1) + * entries. After device is disabled nfp_net_rx_ring_reset() must be called + * to restore required ring geometry. 
+ */ +static void +nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring) +{ + struct pci_dev *pdev = nn->pdev; + unsigned int i; - memset(&rx_ring->rxds[idx], 0, sizeof(rx_ring->rxds[idx])); + for (i = 0; i < rx_ring->cnt - 1; i++) { + /* NULL skb can only happen when initial filling of the ring + * fails to allocate enough buffers and calls here to free + * already allocated ones. + */ + if (!rx_ring->rxbufs[i].skb) + continue; - rx_ring->rd_p++; + dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr, + rx_ring->bufsz, DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_ring->rxbufs[i].skb); + rx_ring->rxbufs[i].dma_addr = 0; + rx_ring->rxbufs[i].skb = NULL; } } /** - * nfp_net_rx_fill_freelist() - Attempt filling freelist with RX buffers - * @rx_ring: RX ring to fill - * - * Try to fill as many buffers as possible into freelist. Return - * number of buffers added. - * - * Return: Number of freelist buffers added. + * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW) + * @nn: NFP Net device + * @rx_ring: RX ring to fill with buffers */ -static int nfp_net_rx_fill_freelist(struct nfp_net_rx_ring *rx_ring) +static int +nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring) { - struct sk_buff *skb; - dma_addr_t dma_addr; + struct nfp_net_rx_buf *rxbufs; + unsigned int i; + + rxbufs = rx_ring->rxbufs; - while (nfp_net_rx_space(rx_ring)) { - skb = nfp_net_rx_alloc_one(rx_ring, &dma_addr); - if (!skb) { - nfp_net_rx_flush(rx_ring); + for (i = 0; i < rx_ring->cnt - 1; i++) { + rxbufs[i].skb = + nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr, + rx_ring->bufsz); + if (!rxbufs[i].skb) { + nfp_net_rx_ring_bufs_free(nn, rx_ring); return -ENOMEM; } - nfp_net_rx_give_one(rx_ring, skb, dma_addr); } return 0; } /** + * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW + * @rx_ring: RX ring to fill + */ +static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring) +{ + unsigned int i; + + for (i = 0; i < rx_ring->cnt - 1; i++) + nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb, + rx_ring->rxbufs[i].dma_addr); +} + +/** * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors * @flags: RX descriptor flags field in CPU byte order */ @@ -1240,7 +1401,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) skb = rx_ring->rxbufs[idx].skb; - new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr); + new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr, + nn->fl_bufsz); if (!new_skb) { nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb, rx_ring->rxbufs[idx].dma_addr); @@ -1256,23 +1418,25 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr); + /* < meta_len > + * <-- [rx_offset] --> + * --------------------------------------------------------- + * | [XX] | metadata | packet | XXXX | + * --------------------------------------------------------- + * <---------------- data_len ---------------> + * + * The rx_offset is fixed for all packets, the meta_len can vary + * on a packet by packet basis. If rx_offset is set to zero + * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the + * buffer and is immediately followed by the packet (no [XX]). 
+ */ meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; data_len = le16_to_cpu(rxd->rxd.data_len); - if (WARN_ON_ONCE(data_len > nn->fl_bufsz)) { - dev_kfree_skb_any(skb); - continue; - } - - if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) { - /* The packet data starts after the metadata */ + if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) skb_reserve(skb, meta_len); - } else { - /* The packet data starts at a fixed offset */ + else skb_reserve(skb, nn->rx_offset); - } - - /* Adjust the SKB for the dynamic meta data pre-pended */ skb_put(skb, data_len - meta_len); nfp_net_set_hash(nn->netdev, skb, rxd); @@ -1349,10 +1513,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) struct nfp_net *nn = r_vec->nfp_net; struct pci_dev *pdev = nn->pdev; - nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), 0); - nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), 0); - nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), 0); - kfree(tx_ring->txbufs); if (tx_ring->txds) @@ -1360,11 +1520,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) tx_ring->txds, tx_ring->dma); tx_ring->cnt = 0; - tx_ring->wr_p = 0; - tx_ring->rd_p = 0; - tx_ring->qcp_rd_p = 0; - tx_ring->wr_ptr_add = 0; - tx_ring->txbufs = NULL; tx_ring->txds = NULL; tx_ring->dma = 0; @@ -1374,17 +1529,18 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring) /** * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring * @tx_ring: TX Ring structure to allocate + * @cnt: Ring buffer count * * Return: 0 on success, negative errno otherwise. */ -static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring) +static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net *nn = r_vec->nfp_net; struct pci_dev *pdev = nn->pdev; int sz; - tx_ring->cnt = nn->txd_cnt; + tx_ring->cnt = cnt; tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt; tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size, @@ -1397,11 +1553,6 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring) if (!tx_ring->txbufs) goto err_alloc; - /* Write the DMA address, size and MSI-X info to the device */ - nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), tx_ring->dma); - nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), ilog2(tx_ring->cnt)); - nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), r_vec->irq_idx); - netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx); nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n", @@ -1415,6 +1566,59 @@ err_alloc: return -ENOMEM; } +static struct nfp_net_tx_ring * +nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt) +{ + struct nfp_net_tx_ring *rings; + unsigned int r; + + rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL); + if (!rings) + return NULL; + + for (r = 0; r < nn->num_tx_rings; r++) { + nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r); + + if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt)) + goto err_free_prev; + } + + return rings; + +err_free_prev: + while (r--) + nfp_net_tx_ring_free(&rings[r]); + kfree(rings); + return NULL; +} + +static struct nfp_net_tx_ring * +nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings) +{ + struct nfp_net_tx_ring *old = nn->tx_rings; + unsigned int r; + + for (r = 0; r < nn->num_tx_rings; r++) + old[r].r_vec->tx_ring = &rings[r]; + + nn->tx_rings = rings; + return old; +} + +static void +nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings) 
+{ + unsigned int r; + + if (!rings) + return; + + for (r = 0; r < nn->num_tx_rings; r++) + nfp_net_tx_ring_free(&rings[r]); + + kfree(rings); +} + /** * nfp_net_rx_ring_free() - Free resources allocated to a RX ring * @rx_ring: RX ring to free @@ -1425,10 +1629,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) struct nfp_net *nn = r_vec->nfp_net; struct pci_dev *pdev = nn->pdev; - nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), 0); - nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), 0); - nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), 0); - kfree(rx_ring->rxbufs); if (rx_ring->rxds) @@ -1436,10 +1636,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) rx_ring->rxds, rx_ring->dma); rx_ring->cnt = 0; - rx_ring->wr_p = 0; - rx_ring->rd_p = 0; - rx_ring->wr_ptr_add = 0; - rx_ring->rxbufs = NULL; rx_ring->rxds = NULL; rx_ring->dma = 0; @@ -1449,17 +1645,22 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) /** * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring * @rx_ring: RX ring to allocate + * @fl_bufsz: Size of buffers to allocate + * @cnt: Ring buffer count * * Return: 0 on success, negative errno otherwise. */ -static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring) +static int +nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz, + u32 cnt) { struct nfp_net_r_vector *r_vec = rx_ring->r_vec; struct nfp_net *nn = r_vec->nfp_net; struct pci_dev *pdev = nn->pdev; int sz; - rx_ring->cnt = nn->rxd_cnt; + rx_ring->cnt = cnt; + rx_ring->bufsz = fl_bufsz; rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size, @@ -1472,11 +1673,6 @@ static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring) if (!rx_ring->rxbufs) goto err_alloc; - /* Write the DMA address, size and MSI-X info to the device */ - nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), rx_ring->dma); - nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), ilog2(rx_ring->cnt)); - nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), r_vec->irq_idx); - nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n", rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx, rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds); @@ -1488,91 +1684,109 @@ err_alloc: return -ENOMEM; } -static void __nfp_net_free_rings(struct nfp_net *nn, unsigned int n_free) +static struct nfp_net_rx_ring * +nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz, + u32 buf_cnt) { - struct nfp_net_r_vector *r_vec; - struct msix_entry *entry; + struct nfp_net_rx_ring *rings; + unsigned int r; - while (n_free--) { - r_vec = &nn->r_vecs[n_free]; - entry = &nn->irq_entries[r_vec->irq_idx]; + rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL); + if (!rings) + return NULL; - nfp_net_rx_ring_free(r_vec->rx_ring); - nfp_net_tx_ring_free(r_vec->tx_ring); + for (r = 0; r < nn->num_rx_rings; r++) { + nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r); - irq_set_affinity_hint(entry->vector, NULL); - free_irq(entry->vector, r_vec); + if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt)) + goto err_free_prev; - netif_napi_del(&r_vec->napi); + if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r])) + goto err_free_ring; } + + return rings; + +err_free_prev: + while (r--) { + nfp_net_rx_ring_bufs_free(nn, &rings[r]); +err_free_ring: + nfp_net_rx_ring_free(&rings[r]); + } + kfree(rings); + return NULL; } -/** - * nfp_net_free_rings() - Free all ring resources - * @nn: NFP Net 
device to reconfigure - */ -static void nfp_net_free_rings(struct nfp_net *nn) +static struct nfp_net_rx_ring * +nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings) { - __nfp_net_free_rings(nn, nn->num_r_vecs); + struct nfp_net_rx_ring *old = nn->rx_rings; + unsigned int r; + + for (r = 0; r < nn->num_rx_rings; r++) + old[r].r_vec->rx_ring = &rings[r]; + + nn->rx_rings = rings; + return old; } -/** - * nfp_net_alloc_rings() - Allocate resources for RX and TX rings - * @nn: NFP Net device to reconfigure - * - * Return: 0 on success or negative errno on error. - */ -static int nfp_net_alloc_rings(struct nfp_net *nn) +static void +nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings) { - struct nfp_net_r_vector *r_vec; - struct msix_entry *entry; - int err; - int r; + unsigned int r; + + if (!rings) + return; for (r = 0; r < nn->num_r_vecs; r++) { - r_vec = &nn->r_vecs[r]; - entry = &nn->irq_entries[r_vec->irq_idx]; - - /* Setup NAPI */ - netif_napi_add(nn->netdev, &r_vec->napi, - nfp_net_poll, NAPI_POLL_WEIGHT); - - snprintf(r_vec->name, sizeof(r_vec->name), - "%s-rxtx-%d", nn->netdev->name, r); - err = request_irq(entry->vector, r_vec->handler, 0, - r_vec->name, r_vec); - if (err) { - nn_dbg(nn, "Error requesting IRQ %d\n", entry->vector); - goto err_napi_del; - } + nfp_net_rx_ring_bufs_free(nn, &rings[r]); + nfp_net_rx_ring_free(&rings[r]); + } - irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask); + kfree(rings); +} - nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", - r, entry->vector, entry->entry); +static int +nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, + int idx) +{ + struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx]; + int err; - /* Allocate TX ring resources */ - err = nfp_net_tx_ring_alloc(r_vec->tx_ring); - if (err) - goto err_free_irq; + r_vec->tx_ring = &nn->tx_rings[idx]; + nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx); - /* Allocate RX ring resources */ - err = nfp_net_rx_ring_alloc(r_vec->rx_ring); - if (err) - goto err_free_tx; + r_vec->rx_ring = &nn->rx_rings[idx]; + nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx); + + snprintf(r_vec->name, sizeof(r_vec->name), + "%s-rxtx-%d", nn->netdev->name, idx); + err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec); + if (err) { + nn_err(nn, "Error requesting IRQ %d\n", entry->vector); + return err; } + disable_irq(entry->vector); + + /* Setup NAPI */ + netif_napi_add(nn->netdev, &r_vec->napi, + nfp_net_poll, NAPI_POLL_WEIGHT); + + irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask); + + nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry); return 0; +} + +static void +nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) +{ + struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx]; -err_free_tx: - nfp_net_tx_ring_free(r_vec->tx_ring); -err_free_irq: irq_set_affinity_hint(entry->vector, NULL); - free_irq(entry->vector, r_vec); -err_napi_del: netif_napi_del(&r_vec->napi); - __nfp_net_free_rings(nn, r); - return err; + free_irq(entry->vector, r_vec); } /** @@ -1646,6 +1860,17 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac) get_unaligned_be16(nn->netdev->dev_addr + 4) << 16); } +static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx) +{ + nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0); + nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0); + nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0); + + nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0); + 
nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0); + nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0); +} + /** * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP * @nn: NFP Net device to reconfigure @@ -1653,6 +1878,7 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac) static void nfp_net_clear_config_and_disable(struct nfp_net *nn) { u32 new_ctrl, update; + unsigned int r; int err; new_ctrl = nn->ctrl; @@ -1669,79 +1895,40 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn) nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); err = nfp_net_reconfig(nn, update); - if (err) { + if (err) nn_err(nn, "Could not disable device: %d\n", err); - return; + + for (r = 0; r < nn->num_r_vecs; r++) { + nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring); + nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring); + nfp_net_vec_clear_ring_data(nn, r); } nn->ctrl = new_ctrl; } -/** - * nfp_net_start_vec() - Start ring vector - * @nn: NFP Net device structure - * @r_vec: Ring vector to be started - */ -static int nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec) +static void +nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, + unsigned int idx) { - unsigned int irq_vec; - int err = 0; - - irq_vec = nn->irq_entries[r_vec->irq_idx].vector; - - disable_irq(irq_vec); - - err = nfp_net_rx_fill_freelist(r_vec->rx_ring); - if (err) { - nn_err(nn, "RV%02d: couldn't allocate enough buffers\n", - r_vec->irq_idx); - goto out; - } - - napi_enable(&r_vec->napi); -out: - enable_irq(irq_vec); + /* Write the DMA address, size and MSI-X info to the device */ + nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma); + nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt)); + nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx); - return err; + nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma); + nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt)); + nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx); } -static int nfp_net_netdev_open(struct net_device *netdev) +static int __nfp_net_set_config_and_enable(struct nfp_net *nn) { - struct nfp_net *nn = netdev_priv(netdev); - int err, r; - u32 update = 0; - u32 new_ctrl; - - if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) { - nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl); - return -EBUSY; - } + u32 new_ctrl, update = 0; + unsigned int r; + int err; new_ctrl = nn->ctrl; - /* Step 1: Allocate resources for rings and the like - * - Request interrupts - * - Allocate RX and TX ring resources - * - Setup initial RSS table - */ - err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn", - nn->exn_name, sizeof(nn->exn_name), - NFP_NET_IRQ_EXN_IDX, nn->exn_handler); - if (err) - return err; - - err = nfp_net_alloc_rings(nn); - if (err) - goto err_free_exn; - - err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings); - if (err) - goto err_free_rings; - - err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings); - if (err) - goto err_free_rings; - if (nn->cap & NFP_NET_CFG_CTRL_RSS) { nfp_net_rss_write_key(nn); nfp_net_rss_write_itbl(nn); @@ -1756,22 +1943,18 @@ static int nfp_net_netdev_open(struct net_device *netdev) update |= NFP_NET_CFG_UPDATE_IRQMOD; } - /* Step 2: Configure the NFP - * - Enable rings from 0 to tx_rings/rx_rings - 1. 
- * - Write MAC address (in case it changed) - * - Set the MTU - * - Set the Freelist buffer size - * - Enable the FW - */ + for (r = 0; r < nn->num_r_vecs; r++) + nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r); + nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ? 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1); nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ? 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1); - nfp_net_write_mac_addr(nn, netdev->dev_addr); + nfp_net_write_mac_addr(nn, nn->netdev->dev_addr); - nn_writel(nn, NFP_NET_CFG_MTU, netdev->mtu); + nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu); nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz); /* Enable device */ @@ -1784,69 +1967,213 @@ static int nfp_net_netdev_open(struct net_device *netdev) nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); err = nfp_net_reconfig(nn, update); - if (err) - goto err_clear_config; nn->ctrl = new_ctrl; + for (r = 0; r < nn->num_r_vecs; r++) + nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring); + /* Since reconfiguration requests while NFP is down are ignored we * have to wipe the entire VXLAN configuration and reinitialize it. */ if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) { memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports)); memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt)); - vxlan_get_rx_port(netdev); + vxlan_get_rx_port(nn->netdev); } - /* Step 3: Enable for kernel - * - put some freelist descriptors on each RX ring - * - enable NAPI on each ring - * - enable all TX queues - * - set link state - */ + return err; +} + +/** + * nfp_net_set_config_and_enable() - Write control BAR and enable NFP + * @nn: NFP Net device to reconfigure + */ +static int nfp_net_set_config_and_enable(struct nfp_net *nn) +{ + int err; + + err = __nfp_net_set_config_and_enable(nn); + if (err) + nfp_net_clear_config_and_disable(nn); + + return err; +} + +/** + * nfp_net_open_stack() - Start the device from stack's perspective + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_open_stack(struct nfp_net *nn) +{ + unsigned int r; + for (r = 0; r < nn->num_r_vecs; r++) { - err = nfp_net_start_vec(nn, &nn->r_vecs[r]); - if (err) - goto err_disable_napi; + napi_enable(&nn->r_vecs[r].napi); + enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector); } - netif_tx_wake_all_queues(netdev); + netif_tx_wake_all_queues(nn->netdev); + enable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); + nfp_net_read_link_status(nn); +} + +static int nfp_net_netdev_open(struct net_device *netdev) +{ + struct nfp_net *nn = netdev_priv(netdev); + int err, r; + + if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) { + nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl); + return -EBUSY; + } + + /* Step 1: Allocate resources for rings and the like + * - Request interrupts + * - Allocate RX and TX ring resources + * - Setup initial RSS table + */ + err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn", + nn->exn_name, sizeof(nn->exn_name), + NFP_NET_IRQ_EXN_IDX, nn->exn_handler); + if (err) + return err; err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc", nn->lsc_name, sizeof(nn->lsc_name), NFP_NET_IRQ_LSC_IDX, nn->lsc_handler); if (err) - goto err_stop_tx; - nfp_net_read_link_status(nn); + goto err_free_exn; + disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); - return 0; + nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings), + GFP_KERNEL); + if (!nn->rx_rings) + goto err_free_lsc; + nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings), + GFP_KERNEL); + if 
(!nn->tx_rings) + goto err_free_rx_rings; -err_stop_tx: - netif_tx_disable(netdev); - for (r = 0; r < nn->num_r_vecs; r++) - nfp_net_tx_flush(nn->r_vecs[r].tx_ring); -err_disable_napi: - while (r--) { - napi_disable(&nn->r_vecs[r].napi); - nfp_net_rx_flush(nn->r_vecs[r].rx_ring); + for (r = 0; r < nn->num_r_vecs; r++) { + err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); + if (err) + goto err_free_prev_vecs; + + err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt); + if (err) + goto err_cleanup_vec_p; + + err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring, + nn->fl_bufsz, nn->rxd_cnt); + if (err) + goto err_free_tx_ring_p; + + err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring); + if (err) + goto err_flush_rx_ring_p; } -err_clear_config: - nfp_net_clear_config_and_disable(nn); + + err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings); + if (err) + goto err_free_rings; + + err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings); + if (err) + goto err_free_rings; + + /* Step 2: Configure the NFP + * - Enable rings from 0 to tx_rings/rx_rings - 1. + * - Write MAC address (in case it changed) + * - Set the MTU + * - Set the Freelist buffer size + * - Enable the FW + */ + err = nfp_net_set_config_and_enable(nn); + if (err) + goto err_free_rings; + + /* Step 3: Enable for kernel + * - put some freelist descriptors on each RX ring + * - enable NAPI on each ring + * - enable all TX queues + * - set link state + */ + nfp_net_open_stack(nn); + + return 0; + err_free_rings: - nfp_net_free_rings(nn); + r = nn->num_r_vecs; +err_free_prev_vecs: + while (r--) { + nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring); +err_flush_rx_ring_p: + nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring); +err_free_tx_ring_p: + nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring); +err_cleanup_vec_p: + nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); + } + kfree(nn->tx_rings); +err_free_rx_rings: + kfree(nn->rx_rings); +err_free_lsc: + nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); err_free_exn: nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); return err; } /** + * nfp_net_close_stack() - Quiesce the stack (part of close) + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_close_stack(struct nfp_net *nn) +{ + unsigned int r; + + disable_irq(nn->irq_entries[NFP_NET_CFG_LSC].vector); + netif_carrier_off(nn->netdev); + nn->link_up = false; + + for (r = 0; r < nn->num_r_vecs; r++) { + disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector); + napi_disable(&nn->r_vecs[r].napi); + } + + netif_tx_disable(nn->netdev); +} + +/** + * nfp_net_close_free_all() - Free all runtime resources + * @nn: NFP Net device to reconfigure + */ +static void nfp_net_close_free_all(struct nfp_net *nn) +{ + unsigned int r; + + for (r = 0; r < nn->num_r_vecs; r++) { + nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring); + nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring); + nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring); + nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); + } + + kfree(nn->rx_rings); + kfree(nn->tx_rings); + + nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); + nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); +} + +/** * nfp_net_netdev_close() - Called when the device is downed * @netdev: netdev structure */ static int nfp_net_netdev_close(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - int r; if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) { nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl); @@ -1855,14 +2182,7 @@ static int 
nfp_net_netdev_close(struct net_device *netdev) /* Step 1: Disable RX and TX rings from the Linux kernel perspective */ - nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); - netif_carrier_off(netdev); - nn->link_up = false; - - for (r = 0; r < nn->num_r_vecs; r++) - napi_disable(&nn->r_vecs[r].napi); - - netif_tx_disable(netdev); + nfp_net_close_stack(nn); /* Step 2: Tell NFP */ @@ -1870,13 +2190,7 @@ static int nfp_net_netdev_close(struct net_device *netdev) /* Step 3: Free resources */ - for (r = 0; r < nn->num_r_vecs; r++) { - nfp_net_rx_flush(nn->r_vecs[r].rx_ring); - nfp_net_tx_flush(nn->r_vecs[r].tx_ring); - } - - nfp_net_free_rings(nn); - nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); + nfp_net_close_free_all(nn); nn_dbg(nn, "%s down", netdev->name); return 0; @@ -1902,37 +2216,139 @@ static void nfp_net_set_rx_mode(struct net_device *netdev) return; nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl); - if (nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN)) - return; + nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN); nn->ctrl = new_ctrl; } static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) { + unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz; struct nfp_net *nn = netdev_priv(netdev); - u32 tmp; - - nn_dbg(nn, "New MTU = %d\n", new_mtu); + struct nfp_net_rx_ring *tmp_rings; + int err; if (new_mtu < 68 || new_mtu > nn->max_mtu) { nn_err(nn, "New MTU (%d) is not valid\n", new_mtu); return -EINVAL; } + old_mtu = netdev->mtu; + old_fl_bufsz = nn->fl_bufsz; + new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu; + + if (!netif_running(netdev)) { + netdev->mtu = new_mtu; + nn->fl_bufsz = new_fl_bufsz; + return 0; + } + + /* Prepare new rings */ + tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz, + nn->rxd_cnt); + if (!tmp_rings) + return -ENOMEM; + + /* Stop device, swap in new rings, try to start the firmware */ + nfp_net_close_stack(nn); + nfp_net_clear_config_and_disable(nn); + + tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings); + netdev->mtu = new_mtu; + nn->fl_bufsz = new_fl_bufsz; - /* Freelist buffer size rounded up to the nearest 1K */ - tmp = new_mtu + ETH_HLEN + VLAN_HLEN + NFP_NET_MAX_PREPEND; - nn->fl_bufsz = roundup(tmp, 1024); + err = nfp_net_set_config_and_enable(nn); + if (err) { + const int err_new = err; - /* restart if running */ - if (netif_running(netdev)) { - nfp_net_netdev_close(netdev); - nfp_net_netdev_open(netdev); + /* Try with old configuration and old rings */ + tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings); + + netdev->mtu = old_mtu; + nn->fl_bufsz = old_fl_bufsz; + + err = __nfp_net_set_config_and_enable(nn); + if (err) + nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n", + err_new, err); } - return 0; + nfp_net_shadow_rx_rings_free(nn, tmp_rings); + + nfp_net_open_stack(nn); + + return err; +} + +int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt) +{ + struct nfp_net_tx_ring *tx_rings = NULL; + struct nfp_net_rx_ring *rx_rings = NULL; + u32 old_rxd_cnt, old_txd_cnt; + int err; + + if (!netif_running(nn->netdev)) { + nn->rxd_cnt = rxd_cnt; + nn->txd_cnt = txd_cnt; + return 0; + } + + old_rxd_cnt = nn->rxd_cnt; + old_txd_cnt = nn->txd_cnt; + + /* Prepare new rings */ + if (nn->rxd_cnt != rxd_cnt) { + rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz, + rxd_cnt); + if (!rx_rings) + return -ENOMEM; + } + if (nn->txd_cnt != txd_cnt) { + tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt); + if (!tx_rings) { + 
nfp_net_shadow_rx_rings_free(nn, rx_rings); + return -ENOMEM; + } + } + + /* Stop device, swap in new rings, try to start the firmware */ + nfp_net_close_stack(nn); + nfp_net_clear_config_and_disable(nn); + + if (rx_rings) + rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings); + if (tx_rings) + tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings); + + nn->rxd_cnt = rxd_cnt; + nn->txd_cnt = txd_cnt; + + err = nfp_net_set_config_and_enable(nn); + if (err) { + const int err_new = err; + + /* Try with old configuration and old rings */ + if (rx_rings) + rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings); + if (tx_rings) + tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings); + + nn->rxd_cnt = old_rxd_cnt; + nn->txd_cnt = old_txd_cnt; + + err = __nfp_net_set_config_and_enable(nn); + if (err) + nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n", + err_new, err); + } + + nfp_net_shadow_rx_rings_free(nn, rx_rings); + nfp_net_shadow_tx_rings_free(nn, tx_rings); + + nfp_net_open_stack(nn); + + return err; } static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev, @@ -2108,7 +2524,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port) be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 | be16_to_cpu(nn->vxlan_ports[i])); - nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN); + nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN); } /** @@ -2254,6 +2670,9 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev, spin_lock_init(&nn->reconfig_lock); spin_lock_init(&nn->link_status_lock); + setup_timer(&nn->reconfig_timer, + nfp_net_reconfig_timer, (unsigned long)nn); + return nn; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 8692003aeed8..ad6c4e31cedd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -81,14 +81,10 @@ /** * @NFP_NET_TXR_MAX: Maximum number of TX rings - * @NFP_NET_TXR_MASK: Mask for TX rings * @NFP_NET_RXR_MAX: Maximum number of RX rings - * @NFP_NET_RXR_MASK: Mask for RX rings */ #define NFP_NET_TXR_MAX 64 -#define NFP_NET_TXR_MASK (NFP_NET_TXR_MAX - 1) #define NFP_NET_RXR_MAX 64 -#define NFP_NET_RXR_MASK (NFP_NET_RXR_MAX - 1) /** * Read/Write config words (0x0000 - 0x002c) @@ -152,9 +148,9 @@ * @NFP_NET_CFG_VERSION: Firmware version number * @NFP_NET_CFG_STS: Status * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL) - * @NFP_NET_MAX_TXRINGS: Maximum number of TX rings - * @NFP_NET_MAX_RXRINGS: Maximum number of RX rings - * @NFP_NET_MAX_MTU: Maximum support MTU + * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings + * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings + * @NFP_NET_CFG_MAX_MTU: Maximum support MTU * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only) * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only) * diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 4c97c713121c..f7c9a5bc4aa3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -40,8 +40,9 @@ static struct dentry *nfp_dir; static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) { - struct nfp_net_rx_ring *rx_ring = file->private; int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt; + struct nfp_net_r_vector *r_vec = file->private; + struct nfp_net_rx_ring *rx_ring; struct 
nfp_net_rx_desc *rxd; struct sk_buff *skb; struct nfp_net *nn; @@ -49,9 +50,10 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) rtnl_lock(); - if (!rx_ring->r_vec || !rx_ring->r_vec->nfp_net) + if (!r_vec->nfp_net || !r_vec->rx_ring) goto out; - nn = rx_ring->r_vec->nfp_net; + nn = r_vec->nfp_net; + rx_ring = r_vec->rx_ring; if (!netif_running(nn->netdev)) goto out; @@ -115,7 +117,8 @@ static const struct file_operations nfp_rx_q_fops = { static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) { - struct nfp_net_tx_ring *tx_ring = file->private; + struct nfp_net_r_vector *r_vec = file->private; + struct nfp_net_tx_ring *tx_ring; struct nfp_net_tx_desc *txd; int d_rd_p, d_wr_p, txd_cnt; struct sk_buff *skb; @@ -124,9 +127,10 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) rtnl_lock(); - if (!tx_ring->r_vec || !tx_ring->r_vec->nfp_net) + if (!r_vec->nfp_net || !r_vec->tx_ring) goto out; - nn = tx_ring->r_vec->nfp_net; + nn = r_vec->nfp_net; + tx_ring = r_vec->tx_ring; if (!netif_running(nn->netdev)) goto out; @@ -183,7 +187,7 @@ static const struct file_operations nfp_tx_q_fops = { void nfp_net_debugfs_adapter_add(struct nfp_net *nn) { - static struct dentry *queues, *tx, *rx; + struct dentry *queues, *tx, *rx; char int_name[16]; int i; @@ -196,7 +200,7 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn) /* Create queue debugging sub-tree */ queues = debugfs_create_dir("queue", nn->debugfs_dir); - if (IS_ERR_OR_NULL(nn->debugfs_dir)) + if (IS_ERR_OR_NULL(queues)) return; rx = debugfs_create_dir("rx", queues); @@ -207,13 +211,13 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn) for (i = 0; i < nn->num_rx_rings; i++) { sprintf(int_name, "%d", i); debugfs_create_file(int_name, S_IRUSR, rx, - &nn->rx_rings[i], &nfp_rx_q_fops); + &nn->r_vecs[i], &nfp_rx_q_fops); } for (i = 0; i < nn->num_tx_rings; i++) { sprintf(int_name, "%d", i); debugfs_create_file(int_name, S_IRUSR, tx, - &nn->tx_rings[i], &nfp_tx_q_fops); + &nn->r_vecs[i], &nfp_tx_q_fops); } } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 9a4084a68db5..ccfef1f17627 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -153,37 +153,25 @@ static int nfp_net_set_ringparam(struct net_device *netdev, struct nfp_net *nn = netdev_priv(netdev); u32 rxd_cnt, txd_cnt; - if (netif_running(netdev)) { - /* Some NIC drivers allow reconfiguration on the fly, - * some down the interface, change and then up it - * again. For now we don't allow changes when the - * device is up. - */ - nn_warn(nn, "Can't change rings while device is up\n"); - return -EBUSY; - } - /* We don't have separate queues/rings for small/large frames. 
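The -EBUSY early return deleted above is what used to forbid resizing a live device; the replacement, nfp_net_set_ring_size() added earlier in this patch, reconfigures in place using a prepare/swap/rollback sequence. A minimal sketch of the pattern, with all names invented for illustration (the real driver uses the nfp_net_shadow_*_rings_prepare/swap/free helpers bracketed by nfp_net_close_stack() and nfp_net_open_stack()):

struct rings;			/* stands in for struct nfp_net_rx_ring */
struct dev_state;		/* stands in for struct nfp_net */

/* Assumed helpers, mirroring the nfp ones introduced in this patch. */
void quiesce(struct dev_state *ds);
void unquiesce(struct dev_state *ds);
struct rings *swap_in(struct dev_state *ds, struct rings *r);
int apply_config(struct dev_state *ds);
void free_rings(struct rings *r);

int live_resize(struct dev_state *ds, struct rings *new_rings)
{
	struct rings *old;
	int err;

	quiesce(ds);			/* stop TX queues and NAPI first */
	old = swap_in(ds, new_rings);	/* returns the previous ring set */

	err = apply_config(ds);
	if (err)
		new_rings = swap_in(ds, old);	/* old rings still intact */
	else
		new_rings = old;		/* the losing set is freed */

	free_rings(new_rings);
	unquiesce(ds);
	return err;
}

The key property is that the old rings are not freed until the firmware has accepted the new configuration, so a failure can be rolled back without any further allocation.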
*/ if (ring->rx_mini_pending || ring->rx_jumbo_pending) return -EINVAL; /* Round up to supported values */ rxd_cnt = roundup_pow_of_two(ring->rx_pending); - rxd_cnt = max_t(u32, rxd_cnt, NFP_NET_MIN_RX_DESCS); - rxd_cnt = min_t(u32, rxd_cnt, NFP_NET_MAX_RX_DESCS); - txd_cnt = roundup_pow_of_two(ring->tx_pending); - txd_cnt = max_t(u32, txd_cnt, NFP_NET_MIN_TX_DESCS); - txd_cnt = min_t(u32, txd_cnt, NFP_NET_MAX_TX_DESCS); - if (nn->rxd_cnt != rxd_cnt || nn->txd_cnt != txd_cnt) - nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", - nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); + if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS || + txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS) + return -EINVAL; - nn->rxd_cnt = rxd_cnt; - nn->txd_cnt = txd_cnt; + if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt) + return 0; - return 0; + nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", + nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); + + return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } static void nfp_net_get_strings(struct net_device *netdev, diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c index 9fbc30264237..adbc47f2d132 100644 --- a/drivers/net/ethernet/netx-eth.c +++ b/drivers/net/ethernet/netx-eth.c @@ -313,7 +313,8 @@ static int netx_eth_enable(struct net_device *ndev) { struct netx_eth_priv *priv = netdev_priv(ndev); unsigned int mac4321, mac65; - int running, i; + int running, i, ret; + bool inv_mac_addr = false; ndev->netdev_ops = &netx_eth_netdev_ops; ndev->watchdog_timeo = msecs_to_jiffies(5000); @@ -358,15 +359,18 @@ static int netx_eth_enable(struct net_device *ndev) xc_start(priv->xc); if (!is_valid_ether_addr(ndev->dev_addr)) - printk("%s: Invalid ethernet MAC address. Please " - "set using ifconfig\n", ndev->name); + inv_mac_addr = true; for (i=2; i<=18; i++) pfifo_push(EMPTY_PTR_FIFO(priv->id), FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id)); - return register_netdev(ndev); + ret = register_netdev(ndev); + if (inv_mac_addr) + printk("%s: Invalid ethernet MAC address. 
Please set using ip\n", + ndev->name); + return ret; } static int netx_eth_drv_probe(struct platform_device *pdev) diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 52d9a94aebb9..87b7b814778b 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -476,7 +476,7 @@ static void w90p910_reset_mac(struct net_device *dev) w90p910_init_desc(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ ether->cur_tx = 0x0; ether->finish_tx = 0x0; ether->cur_rx = 0x0; @@ -490,7 +490,7 @@ static void w90p910_reset_mac(struct net_device *dev) w90p910_trigger_tx(dev); w90p910_trigger_rx(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ if (netif_queue_stopped(dev)) netif_wake_queue(dev); diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 2a55d6d53ee6..8d710a3b4db0 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -481,7 +481,6 @@ struct pch_gbe_buffer { /** * struct pch_gbe_tx_ring - tx ring information - * @tx_lock: spinlock structs * @desc: pointer to the descriptor ring memory * @dma: physical address of the descriptor ring * @size: length of descriptor ring in bytes @@ -491,7 +490,6 @@ struct pch_gbe_buffer { * @buffer_info: array of buffer information structs */ struct pch_gbe_tx_ring { - spinlock_t tx_lock; struct pch_gbe_tx_desc *desc; dma_addr_t dma; unsigned int size; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 3b98b263bad0..3cd87a41ac92 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1640,7 +1640,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, cleaned_count); if (cleaned_count > 0) { /*skip this if nothing cleaned*/ /* Recover from running out of Tx resources in xmit_frame */ - spin_lock(&tx_ring->tx_lock); + netif_tx_lock(adapter->netdev); if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev)))) { netif_wake_queue(adapter->netdev); @@ -1652,7 +1652,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, netdev_dbg(adapter->netdev, "next_to_clean : %d\n", tx_ring->next_to_clean); - spin_unlock(&tx_ring->tx_lock); + netif_tx_unlock(adapter->netdev); } return cleaned; } @@ -1805,7 +1805,6 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - spin_lock_init(&tx_ring->tx_lock); for (desNo = 0; desNo < tx_ring->count; desNo++) { tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo); @@ -2135,15 +2134,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; - unsigned long flags; - if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { - /* Collision - tell upper layer to requeue */ - return NETDEV_TX_LOCKED; - } if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { netif_stop_queue(netdev); - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); netdev_dbg(netdev, "Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n", tx_ring->next_to_use, tx_ring->next_to_clean); @@ -2152,7 +2145,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* CRC,ITAG no 
support */ pch_gbe_tx_queue(adapter, tx_ring, skb); - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 13d88a6025c8..91be2f02ef1c 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -1144,7 +1144,7 @@ static void hamachi_tx_timeout(struct net_device *dev) hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); /* Trigger an immediate transmit demand. */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; /* Restart the chip's Tx/Rx processes . */ diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index fa2db41e02f8..fb1d1031b091 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -714,7 +714,7 @@ static void yellowfin_tx_timeout(struct net_device *dev) if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) netif_wake_queue (dev); /* Typical path */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; } diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index ddcfcab034c2..680d8c736d2b 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -98,9 +98,40 @@ config QED ---help--- This enables the support for ... +config QED_SRIOV + bool "QLogic QED 25/40/100Gb SR-IOV support" + depends on QED && PCI_IOV + default y + ---help--- + This configuration parameter enables Single Root Input Output + Virtualization support for QED devices. + This allows for virtual function acceleration in virtualized + environments. + config QEDE tristate "QLogic QED 25/40/100Gb Ethernet NIC" depends on QED ---help--- This enables the support for ... + +config QEDE_VXLAN + bool "Virtual eXtensible Local Area Network support" + default n + depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m) + ---help--- + This enables hardware offload support for VXLAN protocol over + qede module. Say Y here if you want to enable hardware offload + support for Virtual eXtensible Local Area Network (VXLAN) + in the driver. + +config QEDE_GENEVE + bool "Generic Network Virtualization Encapsulation (GENEVE) support" + depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m) + ---help--- + This allows one to create GENEVE virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. GENEVE is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to enable hardware offload support for + Generic Network Virtualization Encapsulation (GENEVE) in the driver. 
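The !(QEDE=y && VXLAN=m) clause above encodes the usual Kconfig rule for optional offloads: a built-in driver cannot call symbols exported by a module, so the one combination that must be excluded is qede built in while vxlan is modular. Driver code then typically consumes such a bool gate via IS_ENABLED(); a minimal sketch of the idiom (the function is invented for illustration, not qede's actual API):

#include <linux/kconfig.h>
#include <linux/types.h>

#if IS_ENABLED(CONFIG_QEDE_VXLAN)
void example_add_vxlan_port(u16 port)
{
	/* program the UDP port into the device tunnel-offload table */
}
#else
/* Compiles away entirely when the offload is configured out. */
static inline void example_add_vxlan_port(u16 port) { }
#endif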
+ endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c index db80eb1c6d4f..2b10f1bcd151 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c @@ -1015,20 +1015,24 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base, { int i, v, addr; __le32 *ptr32; + int ret; addr = base; ptr32 = buf; for (i = 0; i < size / sizeof(u32); i++) { - if (netxen_rom_fast_read(adapter, addr, &v) == -1) - return -1; + ret = netxen_rom_fast_read(adapter, addr, &v); + if (ret) + return ret; + *ptr32 = cpu_to_le32(v); ptr32++; addr += sizeof(u32); } if ((char *)buf + size > (char *)ptr32) { __le32 local; - if (netxen_rom_fast_read(adapter, addr, &v) == -1) - return -1; + ret = netxen_rom_fast_read(adapter, addr, &v); + if (ret) + return ret; local = cpu_to_le32(v); memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32); } @@ -1940,7 +1944,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) if (adapter->phy_read && adapter->phy_read(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, - &autoneg) != 0) + &autoneg) == 0) adapter->link_autoneg = autoneg; } else goto link_down; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index fd362b6923f4..7a0281a36c28 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -852,7 +852,8 @@ netxen_check_options(struct netxen_adapter *adapter) ptr32 = (__le32 *)&serial_num; offset = NX_FW_SERIAL_NUM_OFFSET; for (i = 0; i < 8; i++) { - if (netxen_rom_fast_read(adapter, offset, &val) == -1) { + err = netxen_rom_fast_read(adapter, offset, &val); + if (err) { dev_err(&pdev->dev, "error reading board info\n"); adapter->driver_mismatch = 1; return; @@ -2285,7 +2286,7 @@ static void netxen_tx_timeout_task(struct work_struct *work) goto request_reset; } } - adapter->netdev->trans_start = jiffies; + netif_trans_update(adapter->netdev); rtnl_unlock(); return; diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 5c2fd57236fe..d1f157e439cf 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -1,4 +1,6 @@ obj-$(CONFIG_QED) := qed.o qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ - qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o + qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ + qed_selftest.o qed_dcbx.o +qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index fcb8e9ba51d9..1042f2af854a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -26,12 +26,14 @@ #include "qed_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; -#define DRV_MODULE_VERSION "8.7.0.0" +#define DRV_MODULE_VERSION "8.7.1.20" #define MAX_HWFNS_PER_DEVICE (4) #define NAME_SIZE 16 #define VER_SIZE 16 +#define QED_WFQ_UNIT 100 + /* cau states */ enum qed_coalescing_mode { QED_COAL_MODE_DISABLE, @@ -74,6 +76,51 @@ struct qed_rt_data { bool *b_valid; }; +enum qed_tunn_mode { + QED_MODE_L2GENEVE_TUNN, + QED_MODE_IPGENEVE_TUNN, + QED_MODE_L2GRE_TUNN, + QED_MODE_IPGRE_TUNN, + QED_MODE_VXLAN_TUNN, +}; + +enum qed_tunn_clss { + QED_TUNN_CLSS_MAC_VLAN, + QED_TUNN_CLSS_MAC_VNI, + 
QED_TUNN_CLSS_INNER_MAC_VLAN, + QED_TUNN_CLSS_INNER_MAC_VNI, + MAX_QED_TUNN_CLSS, +}; + +struct qed_tunn_start_params { + unsigned long tunn_mode; + u16 vxlan_udp_port; + u16 geneve_udp_port; + u8 update_vxlan_udp_port; + u8 update_geneve_udp_port; + u8 tunn_clss_vxlan; + u8 tunn_clss_l2geneve; + u8 tunn_clss_ipgeneve; + u8 tunn_clss_l2gre; + u8 tunn_clss_ipgre; +}; + +struct qed_tunn_update_params { + unsigned long tunn_mode_update_mask; + unsigned long tunn_mode; + u16 vxlan_udp_port; + u16 geneve_udp_port; + u8 update_rx_pf_clss; + u8 update_tx_pf_clss; + u8 update_vxlan_udp_port; + u8 update_geneve_udp_port; + u8 tunn_clss_vxlan; + u8 tunn_clss_l2geneve; + u8 tunn_clss_ipgeneve; + u8 tunn_clss_l2gre; + u8 tunn_clss_ipgre; +}; + /* The PCI personality is not quite synonymous to protocol ID: * 1. All personalities need CORE connections * 2. The Ethernet personality may support also the RoCE protocol @@ -105,6 +152,7 @@ enum QED_RESOURCES { enum QED_FEATURE { QED_PF_L2_QUE, + QED_VF, QED_MAX_FEATURES, }; @@ -192,6 +240,12 @@ struct qed_dmae_info { struct dmae_cmd *p_dmae_cmd; }; +struct qed_wfq_data { + /* when feature is configured for at least 1 vport */ + u32 min_speed; + bool configured; +}; + struct qed_qm_info { struct init_qm_pq_params *qm_pq_params; struct init_qm_vport_params *qm_vport_params; @@ -212,6 +266,7 @@ struct qed_qm_info { bool vport_wfq_en; u8 pf_wfq; u32 pf_rl; + struct qed_wfq_data *wfq_data; }; struct storm_stats { @@ -256,6 +311,8 @@ struct qed_hwfn { bool first_on_engine; bool hw_init_done; + u8 num_funcs_on_engine; + /* BAR access */ void __iomem *regview; void __iomem *doorbells; @@ -306,8 +363,12 @@ struct qed_hwfn { /* True if the driver requests for the link */ bool b_drv_link_init; + struct qed_vf_iov *vf_iov_info; + struct qed_pf_iov *pf_iov_info; struct qed_mcp_info *mcp_info; + struct qed_dcbx_info *p_dcbx_info; + struct qed_hw_cid_data *p_tx_cids; struct qed_hw_cid_data *p_rx_cids; @@ -322,6 +383,12 @@ struct qed_hwfn { struct qed_simd_fp_handler simd_proto_handler[64]; +#ifdef CONFIG_QED_SRIOV + struct workqueue_struct *iov_wq; + struct delayed_work iov_task; + unsigned long iov_task_flags; +#endif + struct z_stream_s *stream; }; @@ -430,6 +497,13 @@ struct qed_dev { u8 num_hwfns; struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE]; + /* SRIOV */ + struct qed_hw_sriov_info *p_iov_info; +#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info) + + unsigned long tunn_mode; + + bool b_is_vf; u32 drv_type; struct qed_eth_stats *reset_stats; @@ -459,6 +533,8 @@ struct qed_dev { const struct firmware *firmware; }; +#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB +#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB #define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB #define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB @@ -480,6 +556,10 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, #define PURE_LB_TC 8 +int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate); + +void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) /* Other Linux specific common definitions */ @@ -507,6 +587,4 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn, int qed_slowpath_irq_req(struct qed_hwfn *hwfn); -#define QED_ETH_INTERFACE_VERSION 300 - #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index fc767c07a264..ac284c58d8c2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -24,11 +24,13 @@ #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_reg_addr.h" +#include "qed_sriov.h" /* Max number of connection types in HW (DQ/CDU etc.) */ #define MAX_CONN_TYPES PROTOCOLID_COMMON #define NUM_TASK_TYPES 2 #define NUM_TASK_PF_SEGMENTS 4 +#define NUM_TASK_VF_SEGMENTS 1 /* QM constants */ #define QM_PQ_ELEMENT_SIZE 4 /* in bytes */ @@ -63,10 +65,12 @@ union conn_context { struct qed_conn_type_cfg { u32 cid_count; u32 cid_start; + u32 cids_per_vf; }; /* ILT Client configuration, Per connection type (protocol) resources. */ #define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) +#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2) #define CDUC_BLK (0) enum ilt_clients { @@ -97,6 +101,10 @@ struct qed_ilt_client_cfg { /* ILT client blocks for PF */ struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS]; u32 pf_total_lines; + + /* ILT client blocks for VFs */ + struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS]; + u32 vf_total_lines; }; /* Per Path - @@ -123,6 +131,11 @@ struct qed_cxt_mngr { /* computed ILT structure */ struct qed_ilt_client_cfg clients[ILT_CLI_MAX]; + /* total number of VFs for this hwfn - + * ALL VFs are symmetric in terms of HW resources + */ + u32 vf_count; + /* Acquired CIDs */ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES]; @@ -131,37 +144,60 @@ struct qed_cxt_mngr { u32 pf_start_line; }; -static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr) -{ - u32 type, pf_cids = 0; +/* counts the iids for the CDU/CDUC ILT client configuration */ +struct qed_cdu_iids { + u32 pf_cids; + u32 per_vf_cids; +}; - for (type = 0; type < MAX_CONN_TYPES; type++) - pf_cids += p_mngr->conn_cfg[type].cid_count; +static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr, + struct qed_cdu_iids *iids) +{ + u32 type; - return pf_cids; + for (type = 0; type < MAX_CONN_TYPES; type++) { + iids->pf_cids += p_mngr->conn_cfg[type].cid_count; + iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + } } static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, struct qed_qm_iids *iids) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; - int type; + u32 vf_cids = 0, type; - for (type = 0; type < MAX_CONN_TYPES; type++) + for (type = 0; type < MAX_CONN_TYPES; type++) { iids->cids += p_mngr->conn_cfg[type].cid_count; + vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + } - DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids); + iids->vf_cids += vf_cids * p_mngr->vf_count; + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "iids: CIDS %08x vf_cids %08x\n", + iids->cids, iids->vf_cids); } /* set the iids count per protocol */ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, enum protocol_type type, - u32 cid_count) + u32 cid_count, u32 vf_cid_cnt) { struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type]; p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN); + p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN); +} + +u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type, + u32 *vf_cid) +{ + if (vf_cid) + *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf; + + return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; } static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli, @@ -210,10 +246,12 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *p_cli; struct qed_ilt_cli_blk *p_blk; - u32 curr_line, total, pf_cids; + struct qed_cdu_iids 
cdu_iids; struct qed_qm_iids qm_iids; + u32 curr_line, total, i; memset(&qm_iids, 0, sizeof(qm_iids)); + memset(&cdu_iids, 0, sizeof(cdu_iids)); p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); @@ -224,14 +262,16 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) /* CDUC */ p_cli = &p_mngr->clients[ILT_CLI_CDUC]; curr_line = p_mngr->pf_start_line; + + /* CDUC PF */ p_cli->pf_total_lines = 0; /* get the counters for the CDUC and QM clients */ - pf_cids = qed_cxt_cdu_iids(p_mngr); + qed_cxt_cdu_iids(p_mngr, &cdu_iids); p_blk = &p_cli->pf_blks[CDUC_BLK]; - total = pf_cids * CONN_CXT_SIZE(p_hwfn); + total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, CONN_CXT_SIZE(p_hwfn)); @@ -239,17 +279,36 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn) qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); p_cli->pf_total_lines = curr_line - p_blk->start_line; + /* CDUC VF */ + p_blk = &p_cli->vf_blks[CDUC_BLK]; + total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn); + + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total, CONN_CXT_SIZE(p_hwfn)); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); + p_cli->vf_total_lines = curr_line - p_blk->start_line; + + for (i = 1; i < p_mngr->vf_count; i++) + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUC); + /* QM */ p_cli = &p_mngr->clients[ILT_CLI_QM]; p_blk = &p_cli->pf_blks[0]; qed_cxt_qm_iids(p_hwfn, &qm_iids); - total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0, - p_hwfn->qm_info.num_pqs, 0); - - DP_VERBOSE(p_hwfn, QED_MSG_ILT, - "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n", - qm_iids.cids, p_hwfn->qm_info.num_pqs, total); + total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, + qm_iids.vf_cids, 0, + p_hwfn->qm_info.num_pqs, + p_hwfn->qm_info.num_vf_pqs); + + DP_VERBOSE(p_hwfn, + QED_MSG_ILT, + "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n", + qm_iids.cids, + qm_iids.vf_cids, + p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total); qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000, @@ -358,7 +417,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; struct qed_ilt_client_cfg *clients = p_mngr->clients; struct qed_ilt_cli_blk *p_blk; - u32 size, i, j; + u32 size, i, j, k; int rc; size = qed_cxt_ilt_shadow_size(clients); @@ -383,6 +442,16 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) if (rc != 0) goto ilt_shadow_fail; } + for (k = 0; k < p_mngr->vf_count; k++) { + for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) { + u32 lines = clients[i].vf_total_lines * k; + + p_blk = &clients[i].vf_blks[j]; + rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines); + if (rc != 0) + goto ilt_shadow_fail; + } + } } return 0; @@ -467,6 +536,9 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) for (i = 0; i < ILT_CLI_MAX; i++) p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; + if (p_hwfn->cdev->p_iov_info) + p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs; + /* Set the cxt mangr pointer priori to further allocations */ p_hwfn->p_cxt_mngr = p_mngr; @@ -579,8 +651,10 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn) params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; params.is_first_pf = p_hwfn->first_on_engine; params.num_pf_cids = iids.cids; + params.num_vf_cids = iids.vf_cids; params.start_pq = qm_info->start_pq; - params.num_pf_pqs = qm_info->num_pqs; + params.num_pf_pqs = qm_info->num_pqs - 
qm_info->num_vf_pqs; + params.num_vf_pqs = qm_info->num_vf_pqs; params.start_vport = qm_info->start_vport; params.num_vports = qm_info->num_vports; params.pf_wfq = qm_info->pf_wfq; @@ -610,26 +684,55 @@ static int qed_cm_init_pf(struct qed_hwfn *p_hwfn) static void qed_dq_init_pf(struct qed_hwfn *p_hwfn) { struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; - u32 dq_pf_max_cid = 0; + u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0; dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid); + dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid); + dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid); + dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid); + dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid); + dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid); + dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid); + dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid); + dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid); - /* 5 - PF */ + dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid); + dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT); STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid); + + /* Connection types 6 & 7 are not in use, yet they must be configured + * as the highest possible connection. Not configuring them means the + * defaults will be used, and with a large number of cids a bug may + * occur, if the defaults will be smaller than dq_pf_max_cid / + * dq_vf_max_cid. 
+ */ + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid); + + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid); } static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) @@ -653,6 +756,38 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) } } +static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *p_cli; + u32 blk_factor; + + /* For simplicty we set the 'block' to be an ILT page */ + if (p_hwfn->cdev->p_iov_info) { + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_VF_BASE_RT_OFFSET, + p_iov->first_vf_in_pf); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET, + p_iov->first_vf_in_pf + p_iov->total_vfs); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET, + blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } +} + /* ILT (PSWRQ2) PF */ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) { @@ -662,6 +797,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) u32 line, rt_offst, i; qed_ilt_bounds_init(p_hwfn); + qed_ilt_vf_bounds_init(p_hwfn); p_mngr = p_hwfn->p_cxt_mngr; p_shdw = p_mngr->ilt_shadow; @@ -839,10 +975,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn) /* Set the number of required CORE connections */ u32 core_cids = 1; /* SPQ */ - qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids); + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, - p_params->num_cons); + p_params->num_cons, 1); return 0; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index c8e1f5e5c42b..234c0fa8db2a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -51,6 +51,9 @@ enum qed_cxt_elem_type { QED_ELEM_TASK }; +u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type, u32 *vf_cid); + /** * @brief qed_cxt_set_pf_params - Set the PF params for cxt init * @@ -128,6 +131,16 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn); void qed_qm_init_pf(struct qed_hwfn *p_hwfn); /** + * @brief Reconfigures QM pf on the fly + * + * @param p_hwfn + * @param p_ptt + * + * @return int + */ +int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** * @brief qed_cxt_release - Release a cid * * @param p_hwfn diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c new file mode 100644 index 000000000000..cbf58e1f9333 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -0,0 +1,562 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/bitops.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dcbx.h" +#include "qed_hsi.h" +#include "qed_sp.h" + +#define QED_DCBX_MAX_MIB_READ_TRY (100) +#define QED_ETH_TYPE_DEFAULT (0) +#define QED_ETH_TYPE_ROCE (0x8915) +#define QED_UDP_PORT_TYPE_ROCE_V2 (0x12B7) +#define QED_ETH_TYPE_FCOE (0x8906) +#define QED_TCP_PORT_ISCSI (0xCBC) + +#define QED_DCBX_INVALID_PRIORITY 0xFF + +/* Get Traffic Class from priority traffic class table, 4 bits represent + * the traffic class corresponding to the priority. + */ +#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \ + ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7) + +static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = { + {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT}, + {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT}, + {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT}, + {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT}, + {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH} +}; + +static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) +{ + return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == + DCBX_APP_SF_ETHTYPE); +} + +static bool qed_dcbx_app_port(u32 app_info_bitmap) +{ + return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == + DCBX_APP_SF_PORT); +} + +static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) +{ + return !!(qed_dcbx_app_ethtype(app_info_bitmap) && + proto_id == QED_ETH_TYPE_DEFAULT); +} + +static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id) +{ + return !!(qed_dcbx_app_port(app_info_bitmap) && + proto_id == QED_TCP_PORT_ISCSI); +} + +static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id) +{ + return !!(qed_dcbx_app_ethtype(app_info_bitmap) && + proto_id == QED_ETH_TYPE_FCOE); +} + +static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id) +{ + return !!(qed_dcbx_app_ethtype(app_info_bitmap) && + proto_id == QED_ETH_TYPE_ROCE); +} + +static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id) +{ + return !!(qed_dcbx_app_port(app_info_bitmap) && + proto_id == QED_UDP_PORT_TYPE_ROCE_V2); +} + +static void +qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) +{ + enum dcbx_protocol_type id; + int i; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n", + p_data->dcbx_enabled); + + for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) { + id = qed_dcbx_app_update[i].id; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n", + qed_dcbx_app_update[i].name, p_data->arr[id].update, + p_data->arr[id].enable, p_data->arr[id].priority, + p_data->arr[id].tc, p_hwfn->hw_info.num_tc); + } +} + +static void +qed_dcbx_set_params(struct qed_dcbx_results *p_data, + struct qed_hw_info *p_info, + bool enable, + bool update, + u8 prio, + u8 tc, + enum dcbx_protocol_type type, + enum qed_pci_personality personality) +{ + /* PF update ramrod data */ + p_data->arr[type].update = update; + p_data->arr[type].enable = enable; + p_data->arr[type].priority = prio; + p_data->arr[type].tc = tc; + + /* QM reconf data */ + if (p_info->personality == personality) { + if (personality == QED_PCI_ETH) + p_info->non_offload_tc = tc; + else + p_info->offload_tc = tc; + } +} + +/* Update app protocol data and hw_info fields with the TLV info */ +static void +qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, + struct qed_hwfn *p_hwfn, + 
bool enable, + bool update, + u8 prio, u8 tc, enum dcbx_protocol_type type) +{ + struct qed_hw_info *p_info = &p_hwfn->hw_info; + enum qed_pci_personality personality; + enum dcbx_protocol_type id; + char *name; + int i; + + for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) { + id = qed_dcbx_app_update[i].id; + + if (type != id) + continue; + + personality = qed_dcbx_app_update[i].personality; + name = qed_dcbx_app_update[i].name; + + qed_dcbx_set_params(p_data, p_info, enable, update, + prio, tc, type, personality); + } +} + +static bool +qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, + u32 app_prio_bitmap, + u16 id, enum dcbx_protocol_type *type) +{ + if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) { + *type = DCBX_PROTOCOL_FCOE; + } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) { + *type = DCBX_PROTOCOL_ROCE; + } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) { + *type = DCBX_PROTOCOL_ISCSI; + } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) { + *type = DCBX_PROTOCOL_ETH; + } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) { + *type = DCBX_PROTOCOL_ROCE_V2; + } else { + *type = DCBX_MAX_PROTOCOL_TYPE; + DP_ERR(p_hwfn, + "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n", + id, app_prio_bitmap); + return false; + } + + return true; +} + +/* Parse app TLV's to update TC information in hw_info structure for + * reconfiguring QM. Get protocol specific data for PF update ramrod command. + */ +static int +qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, + struct qed_dcbx_results *p_data, + struct dcbx_app_priority_entry *p_tbl, + u32 pri_tc_tbl, int count, bool dcbx_enabled) +{ + u8 tc, priority, priority_map; + enum dcbx_protocol_type type; + u16 protocol_id; + bool enable; + int i; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); + + /* Parse APP TLV */ + for (i = 0; i < count; i++) { + protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_PROTOCOL_ID); + priority_map = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_PRI_MAP); + priority = ffs(priority_map) - 1; + if (priority < 0) { + DP_ERR(p_hwfn, "Invalid priority\n"); + return -EINVAL; + } + + tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); + if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, + protocol_id, &type)) { + /* ETH always have the enable bit reset, as it gets + * vlan information per packet. For other protocols, + * should be set according to the dcbx_enabled + * indication, but we only got here if there was an + * app tlv for the protocol, so dcbx must be enabled. + */ + enable = !!(type == DCBX_PROTOCOL_ETH); + + qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, + priority, tc, type); + } + } + + /* If RoCE-V2 TLV is not detected, driver need to use RoCE app + * data for RoCE-v2 not the default app data. + */ + if (!p_data->arr[DCBX_PROTOCOL_ROCE_V2].update && + p_data->arr[DCBX_PROTOCOL_ROCE].update) { + tc = p_data->arr[DCBX_PROTOCOL_ROCE].tc; + priority = p_data->arr[DCBX_PROTOCOL_ROCE].priority; + qed_dcbx_update_app_info(p_data, p_hwfn, true, true, + priority, tc, DCBX_PROTOCOL_ROCE_V2); + } + + /* Update ramrod protocol data and hw_info fields + * with default info when corresponding APP TLV's are not detected. + * The enabled field has a different logic for ethernet as only for + * ethernet dcb should disabled by default, as the information arrives + * from the OS (unless an explicit app tlv was present). 
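For reference, a worked example of the two lookups qed_dcbx_process_tlv() performs for each APP TLV entry (the table value is invented; QED_DCBX_PRIO2TC() is the macro defined at the top of this file):

	u8 priority = ffs(0x08) - 1;	/* bitmap 0x08 -> bit 3 -> priority 3 */
	u8 tc = QED_DCBX_PRIO2TC(0x01234567, priority);
					/* (0x01234567 >> ((7 - 3) * 4)) & 0x7 = 3 */

pri_tc_tbl keeps priority 7 in its lowest nibble, which is why the macro shifts by (7 - prio) * 4. Note also that priority is declared u8 in this function, so the priority < 0 test above cannot fire as written; an all-zero priority_map, where ffs() returns 0, wraps to 255 instead of going negative.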
+ */ + tc = p_data->arr[DCBX_PROTOCOL_ETH].tc; + priority = p_data->arr[DCBX_PROTOCOL_ETH].priority; + for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) { + if (p_data->arr[type].update) + continue; + + enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled; + qed_dcbx_update_app_info(p_data, p_hwfn, enable, true, + priority, tc, type); + } + + return 0; +} + +/* Parse app TLV's to update TC information in hw_info structure for + * reconfiguring QM. Get protocol specific data for PF update ramrod command. + */ +static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) +{ + struct dcbx_app_priority_feature *p_app; + struct dcbx_app_priority_entry *p_tbl; + struct qed_dcbx_results data = { 0 }; + struct dcbx_ets_feature *p_ets; + struct qed_hw_info *p_info; + u32 pri_tc_tbl, flags; + bool dcbx_enabled; + int num_entries; + int rc = 0; + + /* If DCBx version is non zero, then negotiation was + * successfully performed + */ + flags = p_hwfn->p_dcbx_info->operational.flags; + dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); + + p_app = &p_hwfn->p_dcbx_info->operational.features.app; + p_tbl = p_app->app_pri_tbl; + + p_ets = &p_hwfn->p_dcbx_info->operational.features.ets; + pri_tc_tbl = p_ets->pri_tc_tbl[0]; + + p_info = &p_hwfn->hw_info; + num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); + + rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, + num_entries, dcbx_enabled); + if (rc) + return rc; + + p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); + data.pf_id = p_hwfn->rel_pf_id; + data.dcbx_enabled = dcbx_enabled; + + qed_dcbx_dp_protocol(p_hwfn, &data); + + memcpy(&p_hwfn->p_dcbx_info->results, &data, + sizeof(struct qed_dcbx_results)); + + return 0; +} + +static int +qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_dcbx_mib_meta_data *p_data, + enum qed_mib_read_type type) +{ + u32 prefix_seq_num, suffix_seq_num; + int read_count = 0; + int rc = 0; + + /* The data is considered to be valid only if both sequence numbers are + * the same. 
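The prefix/suffix counters make qed_dcbx_copy_mib() a seqlock-style torn-read detector: the management firmware is expected to bump the prefix counter before rewriting the MIB and the suffix counter afterwards, so equal values mean the whole copy landed between updates. A self-contained sketch of the same retry loop (structure layout and names invented; the driver itself copies through its qed_memcpy_from() helper):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

#define MIB_READ_MAX_TRIES	100	/* cf. QED_DCBX_MAX_MIB_READ_TRY */

struct mib_snapshot {
	u32 prefix_seq;
	u32 body[16];			/* stand-in for the MIB payload */
	u32 suffix_seq;
};

int mib_read_consistent(const void __iomem *src, struct mib_snapshot *dst)
{
	int tries = 0;

	do {
		memcpy_fromio(dst, src, sizeof(*dst));
	} while (dst->prefix_seq != dst->suffix_seq &&
		 ++tries < MIB_READ_MAX_TRIES);

	return dst->prefix_seq == dst->suffix_seq ? 0 : -EIO;
}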
+ */ + do { + if (type == QED_DCBX_REMOTE_LLDP_MIB) { + qed_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote, + p_data->addr, p_data->size); + prefix_seq_num = p_data->lldp_remote->prefix_seq_num; + suffix_seq_num = p_data->lldp_remote->suffix_seq_num; + } else { + qed_memcpy_from(p_hwfn, p_ptt, p_data->mib, + p_data->addr, p_data->size); + prefix_seq_num = p_data->mib->prefix_seq_num; + suffix_seq_num = p_data->mib->suffix_seq_num; + } + read_count++; + + DP_VERBOSE(p_hwfn, + QED_MSG_DCB, + "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n", + type, read_count, prefix_seq_num, suffix_seq_num); + } while ((prefix_seq_num != suffix_seq_num) && + (read_count < QED_DCBX_MAX_MIB_READ_TRY)); + + if (read_count >= QED_DCBX_MAX_MIB_READ_TRY) { + DP_ERR(p_hwfn, + "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n", + type, read_count, prefix_seq_num, suffix_seq_num); + rc = -EIO; + } + + return rc; +} + +static int +qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, + lldp_config_params); + data.lldp_local = p_hwfn->p_dcbx_info->lldp_local; + data.size = sizeof(struct lldp_config_params_s); + qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size); + + return rc; +} + +static int +qed_dcbx_read_remote_lldp_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_mib_read_type type) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, + lldp_status_params); + data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote; + data.size = sizeof(struct lldp_status_params_s); + rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static int +qed_dcbx_read_operational_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_mib_read_type type) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, operational_dcbx_mib); + data.mib = &p_hwfn->p_dcbx_info->operational; + data.size = sizeof(struct dcbx_mib); + rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static int +qed_dcbx_read_remote_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_mib_read_type type) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, remote_dcbx_mib); + data.mib = &p_hwfn->p_dcbx_info->remote; + data.size = sizeof(struct dcbx_mib); + rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static int +qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, local_admin_dcbx_mib); + data.local_admin = &p_hwfn->p_dcbx_info->local_admin; + data.size = sizeof(struct dcbx_local_params); + qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size); + + return rc; +} + +static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_mib_read_type type) +{ + int rc = -EINVAL; + + switch (type) { + case QED_DCBX_OPERATIONAL_MIB: + rc = 
qed_dcbx_read_operational_mib(p_hwfn, p_ptt, type); + break; + case QED_DCBX_REMOTE_MIB: + rc = qed_dcbx_read_remote_mib(p_hwfn, p_ptt, type); + break; + case QED_DCBX_LOCAL_MIB: + rc = qed_dcbx_read_local_mib(p_hwfn, p_ptt); + break; + case QED_DCBX_REMOTE_LLDP_MIB: + rc = qed_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type); + break; + case QED_DCBX_LOCAL_LLDP_MIB: + rc = qed_dcbx_read_local_lldp_mib(p_hwfn, p_ptt); + break; + default: + DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); + } + + return rc; +} + +/* Read updated MIB. + * Reconfigure QM and invoke PF update ramrod command if operational MIB + * change is detected. + */ +int +qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_mib_read_type type) +{ + int rc = 0; + + rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type); + if (rc) + return rc; + + if (type == QED_DCBX_OPERATIONAL_MIB) { + rc = qed_dcbx_process_mib_info(p_hwfn); + if (!rc) { + /* reconfigure tcs of QM queues according + * to negotiation results + */ + qed_qm_reconf(p_hwfn, p_ptt); + + /* update storm FW with negotiation results */ + qed_sp_pf_update(p_hwfn); + } + } + + return rc; +} + +int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn) +{ + int rc = 0; + + p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL); + if (!p_hwfn->p_dcbx_info) { + DP_NOTICE(p_hwfn, + "Failed to allocate 'struct qed_dcbx_info'\n"); + rc = -ENOMEM; + } + + return rc; +} + +void qed_dcbx_info_free(struct qed_hwfn *p_hwfn, + struct qed_dcbx_info *p_dcbx_info) +{ + kfree(p_hwfn->p_dcbx_info); +} + +static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, + struct qed_dcbx_results *p_src, + enum dcbx_protocol_type type) +{ + p_data->dcb_enable_flag = p_src->arr[type].enable; + p_data->dcb_priority = p_src->arr[type].priority; + p_data->dcb_tc = p_src->arr[type].tc; +} + +/* Set pf update ramrod command params */ +void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, + struct pf_update_ramrod_data *p_dest) +{ + struct protocol_dcb_data *p_dcb_data; + bool update_flag = false; + + p_dest->pf_id = p_src->pf_id; + + update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update; + p_dest->update_fcoe_dcb_data_flag = update_flag; + + update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update; + p_dest->update_roce_dcb_data_flag = update_flag; + update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update; + p_dest->update_roce_dcb_data_flag = update_flag; + + update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update; + p_dest->update_iscsi_dcb_data_flag = update_flag; + update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update; + p_dest->update_eth_dcb_data_flag = update_flag; + + p_dcb_data = &p_dest->fcoe_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE); + p_dcb_data = &p_dest->roce_dcb_data; + + if (p_src->arr[DCBX_PROTOCOL_ROCE].update) + qed_dcbx_update_protocol_data(p_dcb_data, p_src, + DCBX_PROTOCOL_ROCE); + if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update) + qed_dcbx_update_protocol_data(p_dcb_data, p_src, + DCBX_PROTOCOL_ROCE_V2); + + p_dcb_data = &p_dest->iscsi_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI); + p_dcb_data = &p_dest->eth_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h new file mode 100644 index 000000000000..e7f834dbda2d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -0,0 +1,80 @@ +/* QLogic qed NIC Driver + 
* Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_DCBX_H +#define _QED_DCBX_H +#include <linux/types.h> +#include <linux/slab.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" + +#define DCBX_CONFIG_MAX_APP_PROTOCOL 4 + +enum qed_mib_read_type { + QED_DCBX_OPERATIONAL_MIB, + QED_DCBX_REMOTE_MIB, + QED_DCBX_LOCAL_MIB, + QED_DCBX_REMOTE_LLDP_MIB, + QED_DCBX_LOCAL_LLDP_MIB +}; + +struct qed_dcbx_app_data { + bool enable; /* DCB enabled */ + bool update; /* Update indication */ + u8 priority; /* Priority */ + u8 tc; /* Traffic Class */ +}; + +struct qed_dcbx_results { + bool dcbx_enabled; + u8 pf_id; + struct qed_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE]; +}; + +struct qed_dcbx_app_metadata { + enum dcbx_protocol_type id; + char *name; + enum qed_pci_personality personality; +}; + +#define QED_MFW_GET_FIELD(name, field) \ + (((name) & (field ## _MASK)) >> (field ## _SHIFT)) + +struct qed_dcbx_info { + struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS]; + struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS]; + struct dcbx_local_params local_admin; + struct qed_dcbx_results results; + struct dcbx_mib operational; + struct dcbx_mib remote; + u8 dcbx_cap; +}; + +struct qed_dcbx_mib_meta_data { + struct lldp_config_params_s *lldp_local; + struct lldp_status_params_s *lldp_remote; + struct dcbx_local_params *local_admin; + struct dcbx_mib *mib; + size_t size; + u32 addr; +}; + +/* QED local interface routines */ +int +qed_dcbx_mib_update_event(struct qed_hwfn *, + struct qed_ptt *, enum qed_mib_read_type); + +int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn); +void qed_dcbx_info_free(struct qed_hwfn *, struct qed_dcbx_info *); +void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, + struct pf_update_ramrod_data *p_dest); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index b7d100f6bd6f..089016f46f26 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -22,6 +22,7 @@ #include <linux/qed/qed_if.h> #include "qed.h" #include "qed_cxt.h" +#include "qed_dcbx.h" #include "qed_dev_api.h" #include "qed_hsi.h" #include "qed_hw.h" @@ -30,6 +31,11 @@ #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_vf.h" + +static spinlock_t qm_lock; +static bool qm_lock_init = false; /* API common to all protocols */ enum BAR_ID { @@ -40,10 +46,14 @@ enum BAR_ID { static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) { - u32 bar_reg = (bar_id == BAR_ID_0 ? - PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); - u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); + u32 bar_reg = (bar_id == BAR_ID_0 ? 
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); + u32 val; + + if (IS_VF(p_hwfn->cdev)) + return 1 << 17; + val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); if (val) return 1 << (val + 15); @@ -105,12 +115,17 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn) qm_info->qm_vport_params = NULL; kfree(qm_info->qm_port_params); qm_info->qm_port_params = NULL; + kfree(qm_info->wfq_data); + qm_info->wfq_data = NULL; } void qed_resc_free(struct qed_dev *cdev) { int i; + if (IS_VF(cdev)) + return; + kfree(cdev->fw_data); cdev->fw_data = NULL; @@ -134,20 +149,27 @@ void qed_resc_free(struct qed_dev *cdev) qed_eq_free(p_hwfn, p_hwfn->p_eq); qed_consq_free(p_hwfn, p_hwfn->p_consq); qed_int_free(p_hwfn); + qed_iov_free(p_hwfn); qed_dmae_info_free(p_hwfn); + qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info); } } static int qed_init_qm_info(struct qed_hwfn *p_hwfn) { + u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0; struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct init_qm_port_params *p_qm_port; - u8 num_vports, i, vport_id, num_ports; u16 num_pqs, multi_cos_tcs = 1; + u16 num_vfs = 0; +#ifdef CONFIG_QED_SRIOV + if (p_hwfn->cdev->p_iov_info) + num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; +#endif memset(qm_info, 0, sizeof(*qm_info)); - num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */ + num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */ num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT); /* Sanity checking that setup requires legal number of resources */ @@ -175,25 +197,50 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) if (!qm_info->qm_port_params) goto alloc_err; + qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data), + GFP_KERNEL); + if (!qm_info->wfq_data) + goto alloc_err; + vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); /* First init per-TC PQs */ for (i = 0; i < multi_cos_tcs; i++) { - struct init_qm_pq_params *params = &qm_info->qm_pq_params[i]; - - params->vport_id = vport_id; - params->tc_id = p_hwfn->hw_info.non_offload_tc; - params->wrr_group = 1; + struct init_qm_pq_params *params = + &qm_info->qm_pq_params[curr_queue++]; + + if (p_hwfn->hw_info.personality == QED_PCI_ETH) { + params->vport_id = vport_id; + params->tc_id = p_hwfn->hw_info.non_offload_tc; + params->wrr_group = 1; + } else { + params->vport_id = vport_id; + params->tc_id = p_hwfn->hw_info.offload_tc; + params->wrr_group = 1; + } } /* Then init pure-LB PQ */ - qm_info->pure_lb_pq = i; - qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); - qm_info->qm_pq_params[i].tc_id = PURE_LB_TC; - qm_info->qm_pq_params[i].wrr_group = 1; - i++; + qm_info->pure_lb_pq = curr_queue; + qm_info->qm_pq_params[curr_queue].vport_id = + (u8) RESC_START(p_hwfn, QED_VPORT); + qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC; + qm_info->qm_pq_params[curr_queue].wrr_group = 1; + curr_queue++; qm_info->offload_pq = 0; + /* Then init per-VF PQs */ + vf_offset = curr_queue; + for (i = 0; i < num_vfs; i++) { + /* First vport is used by the PF */ + qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1; + qm_info->qm_pq_params[curr_queue].tc_id = + p_hwfn->hw_info.non_offload_tc; + qm_info->qm_pq_params[curr_queue].wrr_group = 1; + curr_queue++; + } + + qm_info->vf_queues_offset = vf_offset; qm_info->num_pqs = num_pqs; qm_info->num_vports = num_vports; @@ -211,29 +258,91 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); - qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); + 
qm_info->num_vf_pqs = num_vfs; + qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT); + + for (i = 0; i < qm_info->num_vports; i++) + qm_info->qm_vport_params[i].vport_wfq = 1; qm_info->pf_wfq = 0; qm_info->pf_rl = 0; qm_info->vport_rl_en = 1; + qm_info->vport_wfq_en = 1; return 0; alloc_err: DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n"); - kfree(qm_info->qm_pq_params); - kfree(qm_info->qm_vport_params); - kfree(qm_info->qm_port_params); - + qed_qm_info_free(p_hwfn); return -ENOMEM; } +/* This function reconfigures the QM pf on the fly. + * For this purpose we: + * 1. reconfigure the QM database + * 2. set new values to runtime array + * 3. send an sdm_qm_cmd through the rbc interface to stop the QM + * 4. activate init tool in QM_PF stage + * 5. send an sdm_qm_cmd through the rbc interface to release the QM + */ +int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + bool b_rc; + int rc; + + /* qm_info is allocated in qed_init_qm_info() which is already called + * from qed_resc_alloc() or previous call of qed_qm_reconf(). + * The allocated size may change each init, so we free it before next + * allocation. + */ + qed_qm_info_free(p_hwfn); + + /* initialize qed's qm data structure */ + rc = qed_init_qm_info(p_hwfn); + if (rc) + return rc; + + /* stop PF's qm queues */ + spin_lock_bh(&qm_lock); + b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, + qm_info->start_pq, qm_info->num_pqs); + spin_unlock_bh(&qm_lock); + if (!b_rc) + return -EINVAL; + + /* clear the QM_PF runtime phase leftovers from previous init */ + qed_init_clear_rt_data(p_hwfn); + + /* prepare QM portion of runtime array */ + qed_qm_init_pf(p_hwfn); + + /* activate init tool on runtime array */ + rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, + p_hwfn->hw_info.hw_mode); + if (rc) + return rc; + + /* start PF's qm queues */ + spin_lock_bh(&qm_lock); + b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, + qm_info->start_pq, qm_info->num_pqs); + spin_unlock_bh(&qm_lock); + if (!b_rc) + return -EINVAL; + + return 0; +} + int qed_resc_alloc(struct qed_dev *cdev) { struct qed_consq *p_consq; struct qed_eq *p_eq; int i, rc = 0; + if (IS_VF(cdev)) + return rc; + cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); if (!cdev->fw_data) return -ENOMEM; @@ -308,6 +417,10 @@ int qed_resc_alloc(struct qed_dev *cdev) if (rc) goto alloc_err; + rc = qed_iov_alloc(p_hwfn); + if (rc) + goto alloc_err; + /* EQ */ p_eq = qed_eq_alloc(p_hwfn, 256); if (!p_eq) { @@ -330,6 +443,14 @@ int qed_resc_alloc(struct qed_dev *cdev) "Failed to allocate memory for dmae_info structure\n"); goto alloc_err; } + + /* DCBX initialization */ + rc = qed_dcbx_info_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to allocate memory for dcbx structure\n"); + goto alloc_err; + } } cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); @@ -350,6 +471,9 @@ void qed_resc_setup(struct qed_dev *cdev) { int i; + if (IS_VF(cdev)) + return; + for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -365,14 +489,15 @@ void qed_resc_setup(struct qed_dev *cdev) p_hwfn->mcp_info->mfw_mb_length); qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); + + qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt); } } #define FINAL_CLEANUP_POLL_CNT (100) #define FINAL_CLEANUP_POLL_TIME (10) int qed_final_cleanup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 id) + struct qed_ptt *p_ptt, u16 id, bool is_vf) { u32 command = 0, addr, count =
FINAL_CLEANUP_POLL_CNT; int rc = -EBUSY; @@ -380,6 +505,9 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); + if (is_vf) + id += 0x10; + command |= X_FINAL_CLEANUP_AGG_INT << SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; @@ -492,7 +620,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_common_rt_init_params params; struct qed_dev *cdev = p_hwfn->cdev; + u32 concrete_fid; int rc = 0; + u8 vf_id; qed_init_cau_rt_data(cdev); @@ -542,6 +672,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, 0x20b4, qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10); + for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) { + concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); + qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid); + qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); + } + /* pretend to original PF */ + qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + return rc; } @@ -558,6 +696,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn, static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_tunn_start_params *p_tunn, int hw_mode, bool b_hw_start, enum qed_int_mode int_mode, @@ -574,7 +713,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; /* Update rate limit once we'll actually have a link */ - p_hwfn->qm_info.pf_rl = 100; + p_hwfn->qm_info.pf_rl = 100000; } qed_cxt_hw_init_pf(p_hwfn); @@ -603,7 +742,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); /* Cleanup chip from previous driver if such remains exist */ - rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id); + rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); if (rc != 0) return rc; @@ -625,7 +764,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, qed_int_igu_enable(p_hwfn, p_ptt, int_mode); /* send function start command */ - rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode); + rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode, + allow_npar_tx_switch); if (rc) DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); } @@ -672,6 +812,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, } int qed_hw_init(struct qed_dev *cdev, + struct qed_tunn_start_params *p_tunn, bool b_hw_start, enum qed_int_mode int_mode, bool allow_npar_tx_switch, @@ -680,13 +821,20 @@ int qed_hw_init(struct qed_dev *cdev, u32 load_code, param; int rc, mfw_rc, i; - rc = qed_init_fw_data(cdev, bin_fw_data); - if (rc != 0) - return rc; + if (IS_PF(cdev)) { + rc = qed_init_fw_data(cdev, bin_fw_data); + if (rc != 0) + return rc; + } for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + if (IS_VF(cdev)) { + p_hwfn->b_int_enabled = 1; + continue; + } + /* Enable DMAE in PXP */ rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); @@ -708,6 +856,11 @@ int qed_hw_init(struct qed_dev *cdev, p_hwfn->first_on_engine = (load_code == FW_MSG_CODE_DRV_LOAD_ENGINE); + if (!qm_lock_init) { + spin_lock_init(&qm_lock); + qm_lock_init = true; + } + switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, @@ -724,7 +877,7 @@ int qed_hw_init(struct qed_dev *cdev, /* Fall into */ case FW_MSG_CODE_DRV_LOAD_FUNCTION: rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, - p_hwfn->hw_info.hw_mode, + p_tunn, 
p_hwfn->hw_info.hw_mode, b_hw_start, int_mode, allow_npar_tx_switch); break; @@ -749,6 +902,20 @@ int qed_hw_init(struct qed_dev *cdev, return mfw_rc; } + /* send DCBX attention request command */ + DP_VERBOSE(p_hwfn, + QED_MSG_DCB, + "sending phony dcbx set command to trigger DCBx attention handling\n"); + mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_SET_DCBX, + 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, + &load_code, ¶m); + if (mfw_rc) { + DP_NOTICE(p_hwfn, + "Failed to send DCBX attention request\n"); + return mfw_rc; + } + p_hwfn->hw_init_done = true; } @@ -811,6 +978,11 @@ int qed_hw_stop(struct qed_dev *cdev) DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); + if (IS_VF(cdev)) { + qed_vf_pf_int_cleanup(p_hwfn); + continue; + } + /* mark the hw as uninitialized... */ p_hwfn->hw_init_done = false; @@ -842,15 +1014,16 @@ int qed_hw_stop(struct qed_dev *cdev) usleep_range(1000, 2000); } - /* Disable DMAE in PXP - in CMT, this should only be done for - * first hw-function, and only after all transactions have - * stopped for all active hw-functions. - */ - t_rc = qed_change_pci_hwfn(&cdev->hwfns[0], - cdev->hwfns[0].p_main_ptt, - false); - if (t_rc != 0) - rc = t_rc; + if (IS_PF(cdev)) { + /* Disable DMAE in PXP - in CMT, this should only be done for + * first hw-function, and only after all transactions have + * stopped for all active hw-functions. + */ + t_rc = qed_change_pci_hwfn(&cdev->hwfns[0], + cdev->hwfns[0].p_main_ptt, false); + if (t_rc != 0) + rc = t_rc; + } return rc; } @@ -861,7 +1034,12 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) for_each_hwfn(cdev, j) { struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; - struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + + if (IS_VF(cdev)) { + qed_vf_pf_int_cleanup(p_hwfn); + continue; + } DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, @@ -885,6 +1063,9 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) { + if (IS_VF(p_hwfn->cdev)) + return; + /* Re-open incoming traffic */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); @@ -914,6 +1095,13 @@ int qed_hw_reset(struct qed_dev *cdev) for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + if (IS_VF(cdev)) { + rc = qed_vf_pf_reset(p_hwfn); + if (rc) + return rc; + continue; + } + DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n"); /* Check for incorrect states */ @@ -1009,13 +1197,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) { u32 *resc_start = p_hwfn->hw_info.resc_start; + u8 num_funcs = p_hwfn->num_funcs_on_engine; u32 *resc_num = p_hwfn->hw_info.resc_num; struct qed_sb_cnt_info sb_cnt_info; - int num_funcs, i; - - num_funcs = MAX_NUM_PFS_BB; + int i, max_vf_vlan_filters; memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + +#ifdef CONFIG_QED_SRIOV + max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS; +#else + max_vf_vlan_filters = 0; +#endif + qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); resc_num[QED_SB] = min_t(u32, @@ -1220,6 +1414,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); } +static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 reg_function_hide, tmp, eng_mask; + u8 num_funcs; + + num_funcs = MAX_NUM_PFS_BB; + + /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values + * in the other bits are selected. 
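
The hw-init hunk earlier in this chunk sends a "phony" DCBX set command so the management firmware raises a DCBX attention. A hedged sketch of that mailbox pattern (one command word, one parameter word, two output words); the helper name is illustrative, and DRV_MB_PARAM_DCBX_NOTIFY_SHIFT's numeric value is not shown in this section:

/* Kernel-context sketch, assuming the qed headers above are included.
 * Mirrors the qed_mcp_cmd() call in the hw-init hunk: request a DCBX
 * notification without actually changing DCBX configuration.
 */
static int example_request_dcbx_notify(struct qed_hwfn *p_hwfn)
{
        u32 resp = 0, param = 0;

        return qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                           DRV_MSG_CODE_SET_DCBX,
                           1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
                           &resp, &param);
}
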
+ * Bits 1-15 are for functions 1-15, respectively, and their value is + * '0' only for enabled functions (function 0 always exists and + * enabled). + * In case of CMT, only the "even" functions are enabled, and thus the + * number of functions for both hwfns is learnt from the same bits. + */ + reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); + + if (reg_function_hide & 0x1) { + if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) { + num_funcs = 0; + eng_mask = 0xaaaa; + } else { + num_funcs = 1; + eng_mask = 0x5554; + } + + /* Get the number of the enabled functions on the engine */ + tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; + while (tmp) { + if (tmp & 0x1) + num_funcs++; + tmp >>= 0x1; + } + } + + p_hwfn->num_funcs_on_engine = num_funcs; + + DP_VERBOSE(p_hwfn, + NETIF_MSG_PROBE, + "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n", + p_hwfn->rel_pf_id, + p_hwfn->abs_pf_id, + p_hwfn->num_funcs_on_engine); +} + static int qed_get_hw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -1228,6 +1467,13 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, u32 port_mode; int rc; + /* Since all information is common, only first hwfns should do this */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = qed_iov_hw_info(p_hwfn); + if (rc) + return rc; + } + /* Read the port mode */ port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0); @@ -1271,6 +1517,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.personality = protocol; } + qed_get_num_funcs(p_hwfn, p_ptt); + qed_hw_get_resc(p_hwfn); return rc; @@ -1336,6 +1584,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, p_hwfn->regview = p_regview; p_hwfn->doorbells = p_doorbells; + if (IS_VF(p_hwfn->cdev)) + return qed_vf_hw_prepare(p_hwfn); + /* Validate that chip access is feasible */ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { DP_ERR(p_hwfn, @@ -1387,6 +1638,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, return rc; err2: + if (IS_LEAD_HWFN(p_hwfn)) + qed_iov_free_hw_info(p_hwfn->cdev); qed_mcp_free(p_hwfn); err1: qed_hw_hwfn_free(p_hwfn); @@ -1401,7 +1654,8 @@ int qed_hw_prepare(struct qed_dev *cdev, int rc; /* Store the precompiled init data ptrs */ - qed_init_iro_array(cdev); + if (IS_PF(cdev)) + qed_init_iro_array(cdev); /* Initialize the first hwfn - will learn number of hwfns */ rc = qed_hw_prepare_single(p_hwfn, @@ -1433,9 +1687,11 @@ int qed_hw_prepare(struct qed_dev *cdev, * initiliazed hwfn 0. */ if (rc) { - qed_init_free(p_hwfn); - qed_mcp_free(p_hwfn); - qed_hw_hwfn_free(p_hwfn); + if (IS_PF(cdev)) { + qed_init_free(p_hwfn); + qed_mcp_free(p_hwfn); + qed_hw_hwfn_free(p_hwfn); + } } } @@ -1449,10 +1705,17 @@ void qed_hw_remove(struct qed_dev *cdev) for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + if (IS_VF(cdev)) { + qed_vf_pf_release(p_hwfn); + continue; + } + qed_init_free(p_hwfn); qed_hw_hwfn_free(p_hwfn); qed_mcp_free(p_hwfn); } + + qed_iov_free_hw_info(cdev); } int qed_chain_alloc(struct qed_dev *cdev, @@ -1593,3 +1856,388 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, return 0; } + +/* Calculate final WFQ values for all vports and configure them. 
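
qed_get_num_funcs() above counts enabled functions by inverting the hide bits and masking to the engine's functions. A standalone restatement of that counting step; 'base' is 1 when function 0 is known to exist, 0 for the second path in CMT, mirroring the two cases in the hunk:

#include <stdint.h>

/* Count enabled PFs on the engine from a FUNCTION_HIDE-style value.
 * eng_mask selects the relevant function bits (0x5554 or 0xaaaa in
 * the hunk above); a '0' bit means the function is enabled.
 */
static uint8_t count_enabled_funcs(uint32_t reg_function_hide,
                                   uint32_t eng_mask, uint8_t base)
{
        uint32_t tmp = ~reg_function_hide & eng_mask;
        uint8_t num_funcs = base;

        while (tmp) {
                num_funcs += tmp & 0x1;
                tmp >>= 1;
        }
        return num_funcs;
}

/* e.g. count_enabled_funcs(0xffff, 0x5554, 1) == 1: only PF0 present */
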
+ * After this configuration each vport will have + * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT) + */ +static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 min_pf_rate) +{ + struct init_qm_vport_params *vport_params; + int i; + + vport_params = p_hwfn->qm_info.qm_vport_params; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; + + vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) / + min_pf_rate; + qed_init_vport_wfq(p_hwfn, p_ptt, + vport_params[i].first_tx_pq_id, + vport_params[i].vport_wfq); + } +} + +static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn, + u32 min_pf_rate) + +{ + int i; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) + p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; +} + +static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 min_pf_rate) +{ + struct init_qm_vport_params *vport_params; + int i; + + vport_params = p_hwfn->qm_info.qm_vport_params; + + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + qed_init_wfq_default_param(p_hwfn, min_pf_rate); + qed_init_vport_wfq(p_hwfn, p_ptt, + vport_params[i].first_tx_pq_id, + vport_params[i].vport_wfq); + } +} + +/* This function performs several validations for WFQ + * configuration and required min rate for a given vport + * 1. req_rate must be greater than one percent of min_pf_rate. + * 2. req_rate should not cause other vports [not configured for WFQ explicitly] + * rates to get less than one percent of min_pf_rate. + * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate. + */ +static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, + u16 vport_id, u32 req_rate, + u32 min_pf_rate) +{ + u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; + int non_requested_count = 0, req_count = 0, i, num_vports; + + num_vports = p_hwfn->qm_info.num_vports; + + /* Accounting for the vports which are configured for WFQ explicitly */ + for (i = 0; i < num_vports; i++) { + u32 tmp_speed; + + if ((i != vport_id) && + p_hwfn->qm_info.wfq_data[i].configured) { + req_count++; + tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; + total_req_min_rate += tmp_speed; + } + } + + /* Include current vport data as well */ + req_count++; + total_req_min_rate += req_rate; + non_requested_count = num_vports - req_count; + + if (req_rate < min_pf_rate / QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", + vport_id, req_rate, min_pf_rate); + return -EINVAL; + } + + if (num_vports > QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Number of vports is greater than %d\n", + QED_WFQ_UNIT); + return -EINVAL; + } + + if (total_req_min_rate > min_pf_rate) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", + total_req_min_rate, min_pf_rate); + return -EINVAL; + } + + total_left_rate = min_pf_rate - total_req_min_rate; + + left_rate_per_vp = total_left_rate / non_requested_count; + if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", + left_rate_per_vp, min_pf_rate); + return -EINVAL; + } + + p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; + 
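
The WFQ comment and qed_init_wfq_param() above boil down to two pieces of integer arithmetic: deriving a weight from a requested speed, and three range checks before accepting a request. A compact standalone restatement (rates in Mbps; QED_WFQ_UNIT is assumed to be the percentage granularity, 100):

#include <stdint.h>

#define WFQ_UNIT 100 /* assumed value of QED_WFQ_UNIT */

/* Weight derivation used once a request passes validation:
 *   min_rate ~= min_pf_rate * weight / WFQ_UNIT
 * hence weight = wfq_speed * WFQ_UNIT / min_pf_rate.
 */
static uint32_t wfq_weight(uint32_t wfq_speed, uint32_t min_pf_rate)
{
        return (wfq_speed * WFQ_UNIT) / min_pf_rate;
}

/* The three validations above, over plain integers: the request is at
 * least one WFQ unit, the sum of all requests fits in the PF min rate,
 * and the remainder still gives each unconfigured vport one unit.
 */
static int wfq_request_ok(uint32_t req_rate, uint32_t total_req_min_rate,
                          uint32_t min_pf_rate, uint32_t num_vports,
                          uint32_t non_requested_count)
{
        uint32_t unit = min_pf_rate / WFQ_UNIT;

        if (req_rate < unit || num_vports > WFQ_UNIT)
                return -1;
        if (total_req_min_rate > min_pf_rate)
                return -1;
        if (non_requested_count &&
            (min_pf_rate - total_req_min_rate) / non_requested_count < unit)
                return -1;
        return 0;
}
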
p_hwfn->qm_info.wfq_data[vport_id].configured = true; + + for (i = 0; i < num_vports; i++) { + if (p_hwfn->qm_info.wfq_data[i].configured) + continue; + + p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; + } + + return 0; +} + +static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 vp_id, u32 rate) +{ + struct qed_mcp_link_state *p_link; + int rc = 0; + + p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output; + + if (!p_link->min_pf_rate) { + p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; + p_hwfn->qm_info.wfq_data[vp_id].configured = true; + return rc; + } + + rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); + + if (rc == 0) + qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, + p_link->min_pf_rate); + else + DP_NOTICE(p_hwfn, + "Validation failed while configuring min rate\n"); + + return rc; +} + +static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 min_pf_rate) +{ + bool use_wfq = false; + int rc = 0; + u16 i; + + /* Validate all pre configured vports for wfq */ + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + u32 rate; + + if (!p_hwfn->qm_info.wfq_data[i].configured) + continue; + + rate = p_hwfn->qm_info.wfq_data[i].min_speed; + use_wfq = true; + + rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate); + if (rc) { + DP_NOTICE(p_hwfn, + "WFQ validation failed while configuring min rate\n"); + break; + } + } + + if (!rc && use_wfq) + qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); + else + qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); + + return rc; +} + +/* Main API for qed clients to configure vport min rate. + * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] + * rate - Speed in Mbps needs to be assigned to a given vport. + */ +int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) +{ + int i, rc = -EINVAL; + + /* Currently not supported; Might change in future */ + if (cdev->num_hwfns > 1) { + DP_NOTICE(cdev, + "WFQ configuration is not supported for this device\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_ptt *p_ptt; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); + + if (!rc) { + qed_ptt_release(p_hwfn, p_ptt); + return rc; + } + + qed_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + +/* API to configure WFQ from mcp link change */ +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + __qed_configure_vp_wfq_on_link_change(p_hwfn, + p_hwfn->p_dpc_ptt, + min_pf_rate); + } +} + +int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 max_bw) +{ + int rc = 0; + + p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; + + if (!p_link->line_speed && (max_bw != 100)) + return rc; + + p_link->speed = (p_link->line_speed * max_bw) / 100; + p_hwfn->qm_info.pf_rl = p_link->speed; + + /* Since the limiter also affects Tx-switched traffic, we don't want it + * to limit such traffic in case there's no actual limit. + * In that case, set limit to imaginary high boundary. 
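
__qed_configure_pf_max_bandwidth() above scales the link speed by a [1-100] percentage and, at 100, lifts the limiter to the 100000 sentinel so Tx-switched traffic is not throttled. A minimal sketch of that computation, followed by the caller-side pairing with the min-bandwidth API from the next hunk (both calls return 0 or a negative errno, e.g. -EBUSY when no PTT window is free):

#include <stdint.h>

#define RL_UNLIMITED 100000 /* "no real limit" sentinel, as above */

/* Translate a [1-100] percentage of line speed into a rate limit. */
static uint32_t pf_rate_limit(uint32_t line_speed_mbps, uint8_t max_bw)
{
        if (max_bw == 100)
                return RL_UNLIMITED;
        return (line_speed_mbps * max_bw) / 100;
}

/* Illustrative caller (kernel context, qed headers assumed): clamp a
 * PF to at most 40% and guarantee at least 10% of line speed.
 */
static int example_set_pf_bandwidth(struct qed_dev *cdev)
{
        int rc;

        rc = qed_configure_pf_max_bandwidth(cdev, 40);
        if (rc)
                return rc;

        return qed_configure_pf_min_bandwidth(cdev, 10);
}
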
+ */ + if (max_bw == 100) + p_hwfn->qm_info.pf_rl = 100000; + + rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + p_hwfn->qm_info.pf_rl); + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configured MAX bandwidth to be %08x Mb/sec\n", + p_link->speed); + + return rc; +} + +/* Main API to configure PF max bandwidth where bw range is [1 - 100] */ +int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw) +{ + int i, rc = -EINVAL; + + if (max_bw < 1 || max_bw > 100) { + DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); + struct qed_mcp_link_state *p_link; + struct qed_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, + p_link, max_bw); + + qed_ptt_release(p_hwfn, p_ptt); + + if (rc) + break; + } + + return rc; +} + +int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 min_bw) +{ + int rc = 0; + + p_hwfn->mcp_info->func_info.bandwidth_min = min_bw; + p_hwfn->qm_info.pf_wfq = min_bw; + + if (!p_link->line_speed) + return rc; + + p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100; + + rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw); + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configured MIN bandwidth to be %d Mb/sec\n", + p_link->min_pf_rate); + + return rc; +} + +/* Main API to configure PF min bandwidth where bw range is [1-100] */ +int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw) +{ + int i, rc = -EINVAL; + + if (min_bw < 1 || min_bw > 100) { + DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); + struct qed_mcp_link_state *p_link; + struct qed_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, + p_link, min_bw); + if (rc) { + qed_ptt_release(p_hwfn, p_ptt); + return rc; + } + + if (p_link->min_pf_rate) { + u32 min_rate = p_link->min_pf_rate; + + rc = __qed_configure_vp_wfq_on_link_change(p_hwfn, + p_ptt, + min_rate); + } + + qed_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + +void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_link_state *p_link; + + p_link = &p_hwfn->mcp_info->link_output; + + if (p_link->min_pf_rate) + qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, + p_link->min_pf_rate); + + memset(p_hwfn->qm_info.wfq_data, 0, + sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index d6c7ddf4f4d4..dde364d6f502 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev); * @brief qed_hw_init - * * @param cdev + * @param p_tunn * @param b_hw_start * @param int_mode - interrupt mode [msix, inta, etc.] to use. 
* @param allow_npar_tx_switch - npar tx switching to be used @@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev); * @return int */ int qed_hw_init(struct qed_dev *cdev, + struct qed_tunn_start_params *p_tunn, bool b_hw_start, enum qed_int_mode int_mode, bool allow_npar_tx_switch, @@ -180,11 +182,15 @@ enum qed_dmae_address_type_t { * used mostly to write a zeroed buffer to destination address * using DMA */ -#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001 -#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008 +#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001 +#define QED_DMAE_FLAG_VF_SRC 0x00000002 +#define QED_DMAE_FLAG_VF_DST 0x00000004 +#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008 struct qed_dmae_params { - u32 flags; /* consists of QED_DMAE_FLAG_* values */ + u32 flags; /* consists of QED_DMAE_FLAG_* values */ + u8 src_vfid; + u8 dst_vfid; }; /** @@ -207,6 +213,23 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn, u32 flags); /** + * @brief qed_dmae_host2host - copy data from a source address + * to a destination address (for SRIOV) using the given ptt + * + * @param p_hwfn + * @param p_ptt + * @param source_addr + * @param dest_addr + * @param size_in_dwords + * @param p_params + */ +int qed_dmae_host2host(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, struct qed_dmae_params *p_params); + +/** * @brief qed_chain_alloc - Allocate and initialize a chain * * @param p_hwfn @@ -280,11 +303,11 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, * @param p_hwfn * @param p_ptt * @param id - For PF, engine-relative. For VF, PF-relative. + * @param is_vf - true iff cleanup is made for a VF. * * @return int */ int qed_final_cleanup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u16 id); + struct qed_ptt *p_ptt, u16 id, bool is_vf); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index a368f5e71d95..9afc15fdbb02 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -29,9 +29,9 @@ struct qed_ptt; enum common_event_opcode { COMMON_EVENT_PF_START, COMMON_EVENT_PF_STOP, - COMMON_EVENT_RESERVED, - COMMON_EVENT_RESERVED2, - COMMON_EVENT_RESERVED3, + COMMON_EVENT_VF_START, + COMMON_EVENT_VF_STOP, + COMMON_EVENT_VF_PF_CHANNEL, COMMON_EVENT_RESERVED4, COMMON_EVENT_RESERVED5, COMMON_EVENT_RESERVED6, @@ -44,9 +44,9 @@ enum common_ramrod_cmd_id { COMMON_RAMROD_UNUSED, COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, - COMMON_RAMROD_RESERVED, - COMMON_RAMROD_RESERVED2, - COMMON_RAMROD_RESERVED3, + COMMON_RAMROD_VF_START, + COMMON_RAMROD_VF_STOP, + COMMON_RAMROD_PF_UPDATE, COMMON_RAMROD_EMPTY, MAX_COMMON_RAMROD_CMD_ID }; @@ -573,6 +573,14 @@ union event_ring_element { struct event_ring_next_addr next_addr; }; +struct mstorm_non_trigger_vf_zone { + struct eth_mstorm_per_queue_stat eth_queue_stat; +}; + +struct mstorm_vf_zone { + struct mstorm_non_trigger_vf_zone non_trigger; +}; + enum personality_type { BAD_PERSONALITY_TYP, PERSONALITY_RESERVED, @@ -626,6 +634,59 @@ struct pf_start_ramrod_data { u8 reserved0[4]; }; +/* Data for port update ramrod */ +struct protocol_dcb_data { + u8 dcb_enable_flag; + u8 dcb_priority; + u8 dcb_tc; + u8 reserved; +}; + +/* tunnel configuration */ +struct pf_update_tunnel_config { + u8 update_rx_pf_clss; + u8 update_tx_pf_clss; + u8 set_vxlan_udp_port_flg; + u8 set_geneve_udp_port_flg; + u8 tx_enable_vxlan; + u8 tx_enable_l2geneve; + u8
tx_enable_ipgeneve; + u8 tx_enable_l2gre; + u8 tx_enable_ipgre; + u8 tunnel_clss_vxlan; + u8 tunnel_clss_l2geneve; + u8 tunnel_clss_ipgeneve; + u8 tunnel_clss_l2gre; + u8 tunnel_clss_ipgre; + __le16 vxlan_udp_port; + __le16 geneve_udp_port; + __le16 reserved[3]; +}; + +struct pf_update_ramrod_data { + u8 pf_id; + u8 update_eth_dcb_data_flag; + u8 update_fcoe_dcb_data_flag; + u8 update_iscsi_dcb_data_flag; + u8 update_roce_dcb_data_flag; + u8 update_mf_vlan_flag; + __le16 mf_vlan; + struct protocol_dcb_data eth_dcb_data; + struct protocol_dcb_data fcoe_dcb_data; + struct protocol_dcb_data iscsi_dcb_data; + struct protocol_dcb_data roce_dcb_data; + struct pf_update_tunnel_config tunnel_config; +}; + +/* Tunnel classification scheme */ +enum tunnel_clss { + TUNNEL_CLSS_MAC_VLAN = 0, + TUNNEL_CLSS_MAC_VNI, + TUNNEL_CLSS_INNER_MAC_VLAN, + TUNNEL_CLSS_INNER_MAC_VNI, + MAX_TUNNEL_CLSS +}; + enum ports_mode { ENGX2_PORTX1 /* 2 engines x 1 port */, ENGX2_PORTX2 /* 2 engines x 2 ports */, @@ -635,6 +696,16 @@ enum ports_mode { MAX_PORTS_MODE }; +struct pstorm_non_trigger_vf_zone { + struct eth_pstorm_per_queue_stat eth_queue_stat; + struct regpair reserved[2]; +}; + +struct pstorm_vf_zone { + struct pstorm_non_trigger_vf_zone non_trigger; + struct regpair reserved[7]; +}; + /* Ramrod Header of SPQE */ struct ramrod_header { __le32 cid /* Slowpath Connection CID */; @@ -664,6 +735,36 @@ struct tstorm_per_port_stat { struct regpair preroce_irregular_pkt; }; +struct ustorm_non_trigger_vf_zone { + struct eth_ustorm_per_queue_stat eth_queue_stat; + struct regpair vf_pf_msg_addr; +}; + +struct ustorm_trigger_vf_zone { + u8 vf_pf_msg_valid; + u8 reserved[7]; +}; + +struct ustorm_vf_zone { + struct ustorm_non_trigger_vf_zone non_trigger; + struct ustorm_trigger_vf_zone trigger; +}; + +struct vf_start_ramrod_data { + u8 vf_id; + u8 enable_flr_ack; + __le16 opaque_fid; + u8 personality; + u8 reserved[3]; +}; + +struct vf_stop_ramrod_data { + u8 vf_id; + u8 reserved0; + __le16 reserved1; + __le32 reserved2; +}; + struct atten_status_block { __le32 atten_bits; __le32 atten_ack; @@ -990,7 +1091,7 @@ enum init_phases { PHASE_ENGINE, PHASE_PORT, PHASE_PF, - PHASE_RESERVED, + PHASE_VF, PHASE_QM_PF, MAX_INIT_PHASES }; @@ -1603,6 +1704,19 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, u16 start_pq, u16 num_pqs); +void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 dest_port); +void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool vxlan_enable); +void qed_set_gre_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool eth_gre_enable, + bool ip_gre_enable); +void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 dest_port); +void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool eth_geneve_enable, + bool ip_geneve_enable); + /* Ystorm flow control mode. 
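
The qed_set_vxlan_dest_port()/qed_set_vxlan_enable() prototypes just declared configure hardware tunnel parsing. A hedged usage sketch (kernel context, qed headers assumed; the helper name is illustrative; 4789 is the IANA-assigned VXLAN UDP port):

/* Enable VXLAN classification on the port; p_hwfn/p_ptt are assumed
 * valid and acquired by the caller.
 */
static void example_enable_vxlan(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt)
{
        qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
        qed_set_vxlan_enable(p_hwfn, p_ptt, true);
}
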
Use enum fw_flow_ctrl_mode */ #define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) #define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) @@ -3788,7 +3902,7 @@ struct public_drv_mb { #define DRV_MSG_CODE_SET_LLDP 0x24000000 #define DRV_MSG_CODE_SET_DCBX 0x25000000 - +#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 #define DRV_MSG_CODE_NIG_DRAIN 0x30000000 #define DRV_MSG_CODE_INITIATE_FLR 0x02000000 @@ -3808,6 +3922,7 @@ struct public_drv_mb { #define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000 #define DRV_MSG_CODE_SET_VERSION 0x000f0000 +#define DRV_MSG_CODE_BIST_TEST 0x001e0000 #define DRV_MSG_CODE_SET_LED_MODE 0x00200000 #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff @@ -3865,6 +3980,18 @@ struct public_drv_mb { #define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 #define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 +#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0 +#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 +#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 + +#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 +#define DRV_MB_PARAM_BIST_RC_PASSED 1 +#define DRV_MB_PARAM_BIST_RC_FAILED 2 +#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 + +#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 +#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 @@ -5067,4 +5194,8 @@ struct hw_set_image { struct hw_set_info hw_sets[1]; }; +int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u8 pf_id, u16 pf_wfq); +int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index a95a3e4b3101..0ada7fdb91bc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -23,6 +23,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_reg_addr.h" +#include "qed_sriov.h" #define QED_BAR_ACQUIRE_TIMEOUT 1000 @@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, quota = min_t(size_t, n - done, PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE); - qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); - hw_offset = qed_ptt_get_bar_addr(p_ptt); + if (IS_PF(p_hwfn->cdev)) { + qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); + hw_offset = qed_ptt_get_bar_addr(p_ptt); + } else { + hw_offset = hw_addr + done; + } dw_count = quota / 4; host_addr = (u32 *)((u8 *)addr + done); @@ -338,14 +343,25 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, *(u32 *)&p_ptt->pxp.pretend); } +u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid) +{ + u32 concrete_fid = 0; + + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id); + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid); + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1); + + return concrete_fid; +} + /* DMAE */ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, const u8 is_src_type_grc, const u8 is_dst_type_grc, struct qed_dmae_params *p_params) { + u16 opcode_b = 0; u32 opcode = 0; - u16 opcodeB = 0; /* Whether the source is the PCIe or the GRC. 
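
The new DRV_MSG_CODE_BIST_TEST mailbox command above carries the test selector in the low byte of the parameter word, per the SHIFT/MASK pair defined with it. A standalone packing sketch using those values:

#include <stdint.h>

#define BIST_TEST_INDEX_SHIFT 0          /* DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT */
#define BIST_TEST_INDEX_MASK  0x000000FF /* DRV_MB_PARAM_BIST_TEST_INDEX_MASK */
#define BIST_REGISTER_TEST    1          /* DRV_MB_PARAM_BIST_REGISTER_TEST */
#define BIST_CLOCK_TEST       2          /* DRV_MB_PARAM_BIST_CLOCK_TEST */

/* Pack the BIST test selector into the mailbox param word. */
static uint32_t bist_param(uint8_t test_index)
{
        return ((uint32_t)test_index << BIST_TEST_INDEX_SHIFT) &
               BIST_TEST_INDEX_MASK;
}

/* e.g. bist_param(BIST_REGISTER_TEST) == 0x00000001 */
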
* 0- The source is the PCIe @@ -387,14 +403,24 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT); - opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK << - DMAE_CMD_SRC_VF_ID_SHIFT); + /* SRC/DST VFID: all 1's - pf, otherwise VF id */ + if (p_params->flags & QED_DMAE_FLAG_VF_SRC) { + opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT; + opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT; + } else { + opcode_b |= DMAE_CMD_SRC_VF_ID_MASK << + DMAE_CMD_SRC_VF_ID_SHIFT; + } - opcodeB |= (DMAE_CMD_DST_VF_ID_MASK << - DMAE_CMD_DST_VF_ID_SHIFT); + if (p_params->flags & QED_DMAE_FLAG_VF_DST) { + opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT; + opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT; + } else { + opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT; + } p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode); - p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB); + p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b); } u32 qed_dmae_idx_to_go_cmd(u8 idx) @@ -742,6 +768,28 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, return rc; } +int +qed_dmae_host2host(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, struct qed_dmae_params *p_params) +{ + int rc; + + mutex_lock(&(p_hwfn->dmae_info.mutex)); + + rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr, + dest_addr, + QED_DMAE_ADDRESS_HOST_PHYS, + QED_DMAE_ADDRESS_HOST_PHYS, + size_in_dwords, p_params); + + mutex_unlock(&(p_hwfn->dmae_info.mutex)); + + return rc; +} + u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, enum protocol_type proto, union qed_qm_pq_params *p_params) @@ -765,6 +813,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, break; case PROTOCOLID_ETH: pq_id = p_params->eth.tc; + if (p_params->eth.is_vf) + pq_id += p_hwfn->qm_info.vf_queues_offset + + p_params->eth.vf_id; break; default: pq_id = 0; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index e56d433793be..4367363ade40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -221,6 +221,16 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** + * @brief qed_vfid_to_concrete - build a concrete FID for a + * given VF ID + * + * @param p_hwfn + * @param p_ptt + * @param vfid + */ +u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid); + +/** * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd * this is declared here since other files will require it. 
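
qed_dmae_host2host() above performs a host-to-host DMAE copy, and the opcode hunk shows how QED_DMAE_FLAG_VF_SRC/QED_DMAE_FLAG_VF_DST make the src_vfid/dst_vfid fields meaningful (all 1's otherwise means PF). A hedged caller sketch (kernel context; only 'example_copy_to_vf' is invented; error handling is up to the caller):

/* Copy 'len_dw' dwords from a PF-owned DMA buffer into a VF-owned
 * buffer, tagging the destination with the VF's id.
 */
static int example_copy_to_vf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                              dma_addr_t src, dma_addr_t dst,
                              u32 len_dw, u8 vfid)
{
        struct qed_dmae_params params = { 0 };

        params.flags = QED_DMAE_FLAG_VF_DST; /* dst_vfid is meaningful */
        params.dst_vfid = vfid;

        return qed_dmae_host2host(p_hwfn, p_ptt, src, dst, len_dw, &params);
}
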
* @param idx diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index f55ebdc3c832..e8a3b9da59b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -712,6 +712,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, return 0; } +int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 pf_id, u16 pf_wfq) +{ + u32 inc_val = QM_WFQ_INC_VAL(pf_wfq); + + if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration"); + return -1; + } + + qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val); + return 0; +} + int qed_init_pf_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, @@ -732,6 +747,31 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, return 0; } +int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 first_tx_pq_id[NUM_OF_TCS], + u16 vport_wfq) +{ + u32 inc_val = QM_WFQ_INC_VAL(vport_wfq); + u8 tc; + + if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration"); + return -1; + } + + for (tc = 0; tc < NUM_OF_TCS; tc++) { + u16 vport_pq_id = first_tx_pq_id[tc]; + + if (vport_pq_id != QM_INVALID_PQ_ID) + qed_wr(p_hwfn, p_ptt, + QM_REG_WFQVPWEIGHT + vport_pq_id * 4, + inc_val); + } + + return 0; +} + int qed_init_vport_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vport_id, @@ -788,3 +828,130 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, return true; } + +static void +qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable) +{ + if (enable) + set_bit(bit, var); + else + clear_bit(bit, var); +} + +#define PRS_ETH_TUNN_FIC_FORMAT -188897008 + +void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 dest_port) +{ + qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port); + qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port); + qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port); +} + +void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool vxlan_enable) +{ + unsigned long reg_val = 0; + u8 shift; + + reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, vxlan_enable); + + qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + + if (reg_val) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + PRS_ETH_TUNN_FIC_FORMAT); + + reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); + shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, vxlan_enable); + + qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); + + qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, + vxlan_enable ? 
1 : 0); +} + +void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool eth_gre_enable, bool ip_gre_enable) +{ + unsigned long reg_val = 0; + u8 shift; + + reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, eth_gre_enable); + + shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, ip_gre_enable); + qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + if (reg_val) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + PRS_ETH_TUNN_FIC_FORMAT); + + reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); + shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, eth_gre_enable); + + shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, ip_gre_enable); + qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); + + qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, + eth_gre_enable ? 1 : 0); + qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, + ip_gre_enable ? 1 : 0); +} + +void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 dest_port) +{ + qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); + qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); + qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port); +} + +void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool eth_geneve_enable, + bool ip_geneve_enable) +{ + unsigned long reg_val = 0; + u8 shift; + + reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, eth_geneve_enable); + + shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT; + qed_set_tunnel_type_enable_bit(®_val, shift, ip_geneve_enable); + + qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + if (reg_val) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + PRS_ETH_TUNN_FIC_FORMAT); + + qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, + eth_geneve_enable ? 1 : 0); + qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0); + + /* comp ver */ + reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0; + qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val); + qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val); + qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val); + + /* EDPM with geneve tunnel not supported in BB_B0 */ + if (QED_IS_BB_B0(p_hwfn->cdev)) + return; + + qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN, + eth_geneve_enable ? 1 : 0); + qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN, + ip_geneve_enable ? 
1 : 0); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 3269b3610e03..d358c3bb1308 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -18,6 +18,7 @@ #include "qed_hw.h" #include "qed_init_ops.h" #include "qed_reg_addr.h" +#include "qed_sriov.h" #define QED_INIT_MAX_POLL_COUNT 100 #define QED_INIT_POLL_PERIOD_US 500 @@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn) { struct qed_rt_data *rt_data = &p_hwfn->rt_data; + if (IS_VF(p_hwfn->cdev)) + return 0; + rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE, GFP_KERNEL); if (!rt_data->b_valid) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 2017b0121f5f..09a6ad3d22dd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -26,6 +26,8 @@ #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_vf.h" struct qed_pi_info { qed_int_comp_cb_t comp_cb; @@ -2513,6 +2515,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, u32 sb_offset; u32 pi_offset; + if (IS_VF(p_hwfn->cdev)) + return; + sb_offset = igu_sb_id * PIS_PER_SB; memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); @@ -2542,8 +2547,9 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, sb_info->sb_ack = 0; memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); - qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, - sb_info->igu_sb_id, 0, 0); + if (IS_PF(p_hwfn->cdev)) + qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, + sb_info->igu_sb_id, 0, 0); } /** @@ -2563,8 +2569,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, /* Assuming continuous set of IGU SBs dedicated for given PF */ if (sb_id == QED_SP_SB_ID) igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; - else + else if (IS_PF(p_hwfn->cdev)) igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; + else + igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n", (sb_id == QED_SP_SB_ID) ? 
"DSB" : "non-DSB", igu_sb_id); @@ -2594,9 +2602,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, /* The igu address will hold the absolute address that needs to be * written to for a specific status block */ - sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_IGU_CMD + - (sb_info->igu_sb_id << 3); + if (IS_PF(p_hwfn->cdev)) { + sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + (sb_info->igu_sb_id << 3); + } else { + sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + + PXP_VF_BAR0_START_IGU + + ((IGU_CMD_INT_ACK_BASE + + sb_info->igu_sb_id) << 3); + } sb_info->flags |= QED_SB_INFO_INIT; @@ -2783,24 +2798,20 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, { p_hwfn->b_int_enabled = 0; + if (IS_VF(p_hwfn->cdev)) + return; + qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); } #define IGU_CLEANUP_SLEEP_LENGTH (1000) -void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 sb_id, - bool cleanup_set, - u16 opaque_fid - ) +static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 sb_id, bool cleanup_set, u16 opaque_fid) { + u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id; u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; - u32 data = 0; - u32 cmd_ctrl = 0; - u32 val = 0; - u32 sb_bit = 0; - u32 sb_bit_addr = 0; /* Set the data field */ SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); @@ -2845,11 +2856,9 @@ void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u32 sb_id, - u16 opaque, - bool b_set) + u32 sb_id, u16 opaque, bool b_set) { - int pi; + int pi, i; /* Set */ if (b_set) @@ -2858,6 +2867,22 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, /* Clear */ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); + /* Wait for the IGU SB to cleanup */ + for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { + u32 val; + + val = qed_rd(p_hwfn, p_ptt, + IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4)); + if (val & (1 << (sb_id % 32))) + usleep_range(10, 20); + else + break; + } + if (i == IGU_CLEANUP_SLEEP_LENGTH) + DP_NOTICE(p_hwfn, + "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", + sb_id); + /* Clear the CAU for the SB */ for (pi = 0; pi < 12; pi++) qed_wr(p_hwfn, p_ptt, @@ -2866,13 +2891,11 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - bool b_set, - bool b_slowpath) + bool b_set, bool b_slowpath) { u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; - u32 sb_id = 0; - u32 val = 0; + u32 sb_id = 0, val = 0; val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; @@ -2888,14 +2911,14 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.opaque_fid, b_set); - if (b_slowpath) { - sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU cleaning slowpath SB [%d]\n", sb_id); - qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, - p_hwfn->hw_info.opaque_fid, - b_set); - } + if (!b_slowpath) + return; + + sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "IGU cleaning slowpath SB [%d]\n", sb_id); + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, + p_hwfn->hw_info.opaque_fid, b_set); } static 
u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, @@ -2935,9 +2958,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_igu_info *p_igu_info; + u32 val, min_vf = 0, max_vf = 0; + u16 sb_id, last_iov_sb_id = 0; struct qed_igu_block *blk; - u32 val; - u16 sb_id; u16 prev_sb_id = 0xFF; p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); @@ -2947,12 +2970,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, p_igu_info = p_hwfn->hw_info.p_igu_info; - /* Initialize base sb / sb cnt for PFs */ + /* Initialize base sb / sb cnt for PFs and VFs */ p_igu_info->igu_base_sb = 0xffff; p_igu_info->igu_sb_cnt = 0; p_igu_info->igu_dsb_id = 0xffff; p_igu_info->igu_base_sb_iov = 0xffff; + if (p_hwfn->cdev->p_iov_info) { + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + + min_vf = p_iov->first_vf_in_pf; + max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; + } + for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); sb_id++) { blk = &p_igu_info->igu_map.igu_blocks[sb_id]; @@ -2986,14 +3016,43 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, (p_igu_info->igu_sb_cnt)++; } } + } else { + if ((blk->function_id >= min_vf) && + (blk->function_id < max_vf)) { + /* Available for VFs of this PF */ + if (p_igu_info->igu_base_sb_iov == 0xffff) { + p_igu_info->igu_base_sb_iov = sb_id; + } else if (last_iov_sb_id != sb_id - 1) { + if (!val) { + DP_VERBOSE(p_hwfn->cdev, + NETIF_MSG_INTR, + "First uninitialized IGU CAM entry at index 0x%04x\n", + sb_id); + } else { + DP_NOTICE(p_hwfn->cdev, + "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n", + p_hwfn->rel_pf_id, + last_iov_sb_id, + sb_id); } + break; + } + blk->status |= QED_IGU_STATUS_FREE; + p_hwfn->hw_info.p_igu_info->free_blks++; + last_iov_sb_id = sb_id; + } } } - - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n", - p_igu_info->igu_base_sb, - p_igu_info->igu_sb_cnt, - p_igu_info->igu_dsb_id); + p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; + + DP_VERBOSE( + p_hwfn, + NETIF_MSG_INTR, + "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n", + p_igu_info->igu_base_sb, + p_igu_info->igu_base_sb_iov, + p_igu_info->igu_sb_cnt, + p_igu_info->igu_sb_cnt_iov, + p_igu_info->igu_dsb_id); if (p_igu_info->igu_base_sb == 0xffff || p_igu_info->igu_dsb_id == 0xffff || @@ -3116,6 +3175,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, p_sb_cnt_info->sb_free_blk = info->free_blks; } +u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) +{ + struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + + /* Determine origin of SB id */ + if ((sb_id >= p_info->igu_base_sb) && + (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) { + return sb_id - p_info->igu_base_sb; + } else if ((sb_id >= p_info->igu_base_sb_iov) && + (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { + return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt; + } else { + DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id); + return 0; + } +} + void qed_int_disable_post_isr_release(struct qed_dev *cdev) { int i; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index c57f2e680770..20b468637504 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -20,6 +20,12 @@ #define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */ #define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR 
mode enable */ #define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */ +/* Fields of IGU VF CONFIGRATION REGISTER */ +#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */ +#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */ +#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ +#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */ +#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */ /* Igu control commands */ @@ -292,26 +298,8 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); * @param p_hwfn * @param p_ptt * @param sb_id - igu status block id - * @param cleanup_set - set(1) / clear(0) - * @param opaque_fid - the function for which to perform - * cleanup, for example a PF on behalf of - * its VFs. - */ -void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 sb_id, - bool cleanup_set, - u16 opaque_fid); - -/** - * @brief Status block cleanup. Should be called for each status - * block that will be used -> both PF / VF - * - * @param p_hwfn - * @param p_ptt - * @param sb_id - igu status block id * @param opaque - opaque fid of the sb owner. - * @param cleanup_set - set(1) / clear(0) + * @param b_set - set(1) / clear(0) */ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -365,6 +353,16 @@ void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** + * @brief - Returns an Rx queue index appropriate for usage with given SB. + * + * @param p_hwfn + * @param sb_id - absolute index of SB + * + * @return index of Rx queue + */ +u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); + +/** * @brief - Enable Interrupt & Attention for hw function * * @param p_hwfn diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 3f35c6ca9252..8fba87dd48af 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -31,137 +31,25 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_l2.h" #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_sriov.h" -enum qed_rss_caps { - QED_RSS_IPV4 = 0x1, - QED_RSS_IPV6 = 0x2, - QED_RSS_IPV4_TCP = 0x4, - QED_RSS_IPV6_TCP = 0x8, - QED_RSS_IPV4_UDP = 0x10, - QED_RSS_IPV6_UDP = 0x20, -}; - -/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */ -#define QED_RSS_IND_TABLE_SIZE 128 -#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */ - -struct qed_rss_params { - u8 update_rss_config; - u8 rss_enable; - u8 rss_eng_id; - u8 update_rss_capabilities; - u8 update_rss_ind_table; - u8 update_rss_key; - u8 rss_caps; - u8 rss_table_size_log; - u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE]; - u32 rss_key[QED_RSS_KEY_SIZE]; -}; - -enum qed_filter_opcode { - QED_FILTER_ADD, - QED_FILTER_REMOVE, - QED_FILTER_MOVE, - QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */ - QED_FILTER_FLUSH, /* Removes all filters */ -}; - -enum qed_filter_ucast_type { - QED_FILTER_MAC, - QED_FILTER_VLAN, - QED_FILTER_MAC_VLAN, - QED_FILTER_INNER_MAC, - QED_FILTER_INNER_VLAN, - QED_FILTER_INNER_PAIR, - QED_FILTER_INNER_MAC_VNI_PAIR, - QED_FILTER_MAC_VNI_PAIR, - QED_FILTER_VNI, -}; - -struct qed_filter_ucast { - enum qed_filter_opcode opcode; - enum qed_filter_ucast_type type; - u8 is_rx_filter; - u8 is_tx_filter; - u8 vport_to_add_to; - u8 vport_to_remove_from; - unsigned char mac[ETH_ALEN]; - u8 assert_on_error; - u16 vlan; - u32 vni; -}; - -struct qed_filter_mcast { - /* MOVE is not supported for multicast */ - 
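
The IGU_VF_CONF_* fields above combine into a single VF interrupt-configuration word. A standalone packing sketch using the values shown; whether the driver masks the parent PF before or after shifting is an assumption here:

#include <stdint.h>

#define VF_CONF_FUNC_EN      (0x1 << 0) /* IGU_VF_CONF_FUNC_EN */
#define VF_CONF_MSI_MSIX_EN  (0x1 << 1) /* IGU_VF_CONF_MSI_MSIX_EN */
#define VF_CONF_PARENT_MASK  0xF        /* IGU_VF_CONF_PARENT_MASK */
#define VF_CONF_PARENT_SHIFT 5          /* IGU_VF_CONF_PARENT_SHIFT */

/* Build a VF IGU configuration: function enabled, MSI/MSI-X enabled,
 * owned by 'parent_pf'.
 */
static uint32_t igu_vf_conf(uint8_t parent_pf)
{
        return VF_CONF_FUNC_EN | VF_CONF_MSI_MSIX_EN |
               (((uint32_t)parent_pf & VF_CONF_PARENT_MASK) <<
                VF_CONF_PARENT_SHIFT);
}
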
enum qed_filter_opcode opcode; - u8 vport_to_add_to; - u8 vport_to_remove_from; - u8 num_mc_addrs; -#define QED_MAX_MC_ADDRS 64 - unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN]; -}; - -struct qed_filter_accept_flags { - u8 update_rx_mode_config; - u8 update_tx_mode_config; - u8 rx_accept_filter; - u8 tx_accept_filter; -#define QED_ACCEPT_NONE 0x01 -#define QED_ACCEPT_UCAST_MATCHED 0x02 -#define QED_ACCEPT_UCAST_UNMATCHED 0x04 -#define QED_ACCEPT_MCAST_MATCHED 0x08 -#define QED_ACCEPT_MCAST_UNMATCHED 0x10 -#define QED_ACCEPT_BCAST 0x20 -}; - -struct qed_sp_vport_update_params { - u16 opaque_fid; - u8 vport_id; - u8 update_vport_active_rx_flg; - u8 vport_active_rx_flg; - u8 update_vport_active_tx_flg; - u8 vport_active_tx_flg; - u8 update_approx_mcast_flg; - u8 update_accept_any_vlan_flg; - u8 accept_any_vlan; - unsigned long bins[8]; - struct qed_rss_params *rss_params; - struct qed_filter_accept_flags accept_flags; -}; - -enum qed_tpa_mode { - QED_TPA_MODE_NONE, - QED_TPA_MODE_UNUSED, - QED_TPA_MODE_GRO, - QED_TPA_MODE_MAX -}; - -struct qed_sp_vport_start_params { - enum qed_tpa_mode tpa_mode; - bool remove_inner_vlan; - bool drop_ttl0; - u8 max_buffers_per_cqe; - u32 concrete_fid; - u16 opaque_fid; - u8 vport_id; - u16 mtu; -}; #define QED_MAX_SGES_NUM 16 #define CRC32_POLY 0x1edc6f41 -static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, - struct qed_sp_vport_start_params *p_params) +int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params) { struct vport_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; + u8 abs_vport_id = 0; int rc = -EINVAL; u16 rx_mode = 0; - u8 abs_vport_id = 0; rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc != 0) @@ -211,6 +99,8 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, break; } + p_ramrod->tx_switching_en = p_params->tx_switching; + /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, p_params->concrete_fid); @@ -218,6 +108,21 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } +int qed_sp_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params) +{ + if (IS_VF(p_hwfn->cdev)) { + return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id, + p_params->mtu, + p_params->remove_inner_vlan, + p_params->tpa_mode, + p_params->max_buffers_per_cqe, + p_params->only_untagged); + } + + return qed_sp_eth_vport_start(p_hwfn, p_params); +} + static int qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, struct vport_update_ramrod_data *p_ramrod, @@ -363,6 +268,38 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, } static void +qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct qed_sge_tpa_params *p_params) +{ + struct eth_vport_tpa_param *p_tpa; + + if (!p_params) { + p_ramrod->common.update_tpa_param_flg = 0; + p_ramrod->common.update_tpa_en_flg = 0; + p_ramrod->common.update_tpa_param_flg = 0; + return; + } + + p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg; + p_tpa = &p_ramrod->tpa_param; + p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg; + p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg; + p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg; + p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg; + + p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg; + p_tpa->max_buff_num = 
p_params->max_buffers_per_cqe; + p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg; + p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg; + p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg; + p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num; + p_tpa->tpa_max_size = p_params->tpa_max_size; + p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start; + p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont; +} + +static void qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, struct vport_update_ramrod_data *p_ramrod, struct qed_sp_vport_update_params *p_params) @@ -383,20 +320,24 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, } } -static int -qed_sp_vport_update(struct qed_hwfn *p_hwfn, - struct qed_sp_vport_update_params *p_params, - enum spq_mode comp_mode, - struct qed_spq_comp_cb *p_comp_data) +int qed_sp_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) { struct qed_rss_params *p_rss_params = p_params->rss_params; struct vport_update_ramrod_data_cmn *p_cmn; struct qed_sp_init_data init_data; struct vport_update_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; - u8 abs_vport_id = 0; + u8 abs_vport_id = 0, val; int rc = -EINVAL; + if (IS_VF(p_hwfn->cdev)) { + rc = qed_vf_pf_vport_update(p_hwfn, p_params); + return rc; + } + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc != 0) return rc; @@ -425,6 +366,27 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, p_cmn->accept_any_vlan = p_params->accept_any_vlan; p_cmn->update_accept_any_vlan_flg = p_params->update_accept_any_vlan_flg; + + p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; + val = p_params->update_inner_vlan_removal_flg; + p_cmn->update_inner_vlan_removal_en_flg = val; + + p_cmn->default_vlan_en = p_params->default_vlan_enable_flg; + val = p_params->update_default_vlan_enable_flg; + p_cmn->update_default_vlan_en_flg = val; + + p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan); + p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg; + + p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg; + + p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; + p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; + + p_cmn->anti_spoofing_en = p_params->anti_spoofing_en; + val = p_params->update_anti_spoofing_en_flg; + p_ramrod->common.update_anti_spoofing_en_flg = val; + rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); if (rc) { /* Return spq entry which is taken in qed_sp_init_request()*/ @@ -436,12 +398,11 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); + qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params); return qed_spq_post(p_hwfn, p_ent, NULL); } -static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u8 vport_id) +int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id) { struct vport_stop_ramrod_data *p_ramrod; struct qed_sp_init_data init_data; @@ -449,6 +410,9 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u8 abs_vport_id = 0; int rc; + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_vport_stop(p_hwfn); + rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); if (rc != 0) return rc; @@ -470,13 +434,26 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, return 
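The vport-update fields added above (default VLAN, tx-switching, anti-spoofing) are all driven through qed_sp_vport_update(); an assumed usage sketch for enabling anti-spoofing on one vport (QED_SPQ_MODE_EBLOCK is the existing blocking completion mode from qed_sp.h):

static int example_vport_set_anti_spoof(struct qed_hwfn *p_hwfn,
					u16 opaque_fid, u8 vport_id, bool en)
{
	struct qed_sp_vport_update_params params;

	memset(&params, 0, sizeof(params));
	params.opaque_fid = opaque_fid;
	params.vport_id = vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = en ? 1 : 0;

	return qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
}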
qed_spq_post(p_hwfn, p_ent, NULL); } +static int +qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn, + struct qed_filter_accept_flags *p_accept_flags) +{ + struct qed_sp_vport_update_params s_params; + + memset(&s_params, 0, sizeof(s_params)); + memcpy(&s_params.accept_flags, p_accept_flags, + sizeof(struct qed_filter_accept_flags)); + + return qed_vf_pf_vport_update(p_hwfn, &s_params); +} + static int qed_filter_accept_cmd(struct qed_dev *cdev, u8 vport, struct qed_filter_accept_flags accept_flags, u8 update_accept_any_vlan, u8 accept_any_vlan, - enum spq_mode comp_mode, - struct qed_spq_comp_cb *p_comp_data) + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) { struct qed_sp_vport_update_params vport_update_params; int i, rc; @@ -493,6 +470,13 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + if (IS_VF(cdev)) { + rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags); + if (rc) + return rc; + continue; + } + rc = qed_sp_vport_update(p_hwfn, &vport_update_params, comp_mode, p_comp_data); if (rc != 0) { @@ -527,16 +511,14 @@ static int qed_sp_release_queue_cid( return 0; } -static int -qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *params, - u8 stats_id, - u16 bd_max_bytes, - dma_addr_t bd_chain_phys_addr, - dma_addr_t cqe_pbl_addr, - u16 cqe_pbl_size) +int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *params, + u8 stats_id, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size) { struct rx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -605,8 +587,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, - u16 cqe_pbl_size, - void __iomem **pp_prod) + u16 cqe_pbl_size, void __iomem **pp_prod) { struct qed_hw_cid_data *p_rx_cid; u64 init_prod_val = 0; @@ -614,6 +595,16 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, u8 abs_stats_id = 0; int rc; + if (IS_VF(p_hwfn->cdev)) { + return qed_vf_pf_rxq_start(p_hwfn, + params->queue_id, + params->sb, + params->sb_idx, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size, pp_prod); + } + rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue); if (rc != 0) return rc; @@ -656,10 +647,59 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn, return rc; } -static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, - u16 rx_queue_id, - bool eq_completion_only, - bool cqe_completion) +int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, + u16 rx_queue_id, + u8 num_rxqs, + u8 complete_cqe_flg, + u8 complete_event_flg, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct rx_queue_update_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + struct qed_hw_cid_data *p_rx_cid; + u16 qid, abs_rx_q_id = 0; + int rc = -EINVAL; + u8 i; + + memset(&init_data, 0, sizeof(init_data)); + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + for (i = 0; i < num_rxqs; i++) { + qid = rx_queue_id + i; + p_rx_cid = &p_hwfn->p_rx_cids[qid]; + + /* Get SPQ entry */ + init_data.cid = p_rx_cid->cid; + init_data.opaque_fid = p_rx_cid->opaque_fid; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_QUEUE_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + 
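qed_vf_pf_accept_flags() above reuses the vport-update path to carry only the rx/tx accept filters. Each filter word is a bitmask of the QED_ACCEPT_* defines; a hedged sketch of composing a promiscuous-style rx filter (assumed usage, not taken from the patch):

static void example_rx_accept_all(struct qed_filter_accept_flags *p_flags)
{
	memset(p_flags, 0, sizeof(*p_flags));
	p_flags->update_rx_mode_config = 1;
	p_flags->rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
				    QED_ACCEPT_UCAST_UNMATCHED |
				    QED_ACCEPT_MCAST_MATCHED |
				    QED_ACCEPT_MCAST_UNMATCHED |
				    QED_ACCEPT_BCAST;
}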
p_ramrod = &p_ent->ramrod.rx_queue_update; + + qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); + qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); + p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id); + p_ramrod->complete_cqe_flg = complete_cqe_flg; + p_ramrod->complete_event_flg = complete_event_flg; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + } + + return rc; +} + +int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + u16 rx_queue_id, + bool eq_completion_only, bool cqe_completion) { struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; struct rx_queue_stop_ramrod_data *p_ramrod = NULL; @@ -668,6 +708,9 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, u16 abs_rx_q_id = 0; int rc = -EINVAL; + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion); + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_rx_cid->cid; @@ -703,15 +746,14 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, return qed_sp_release_queue_cid(p_hwfn, p_rx_cid); } -static int -qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - u32 cid, - struct qed_queue_start_common_params *p_params, - u8 stats_id, - dma_addr_t pbl_addr, - u16 pbl_size, - union qed_qm_pq_params *p_pq_params) +int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *p_params, + u8 stats_id, + dma_addr_t pbl_addr, + u16 pbl_size, + union qed_qm_pq_params *p_pq_params) { struct tx_queue_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -765,14 +807,21 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, u16 opaque_fid, struct qed_queue_start_common_params *p_params, dma_addr_t pbl_addr, - u16 pbl_size, - void __iomem **pp_doorbell) + u16 pbl_size, void __iomem **pp_doorbell) { struct qed_hw_cid_data *p_tx_cid; union qed_qm_pq_params pq_params; u8 abs_stats_id = 0; int rc; + if (IS_VF(p_hwfn->cdev)) { + return qed_vf_pf_txq_start(p_hwfn, + p_params->queue_id, + p_params->sb, + p_params->sb_idx, + pbl_addr, pbl_size, pp_doorbell); + } + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id); if (rc) return rc; @@ -813,14 +862,16 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn, return rc; } -static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, - u16 tx_queue_id) +int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id) { struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id); + /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); init_data.cid = p_tx_cid->cid; @@ -1016,11 +1067,11 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, return 0; } -static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, - u16 opaque_fid, - struct qed_filter_ucast *p_filter_cmd, - enum spq_mode comp_mode, - struct qed_spq_comp_cb *p_comp_data) +int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) { struct vport_filter_update_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -1118,7 +1169,7 @@ static inline u32 qed_crc32c_le(u32 seed, return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0); } -static u8 qed_mcast_bin_from_mac(u8 *mac) +u8 qed_mcast_bin_from_mac(u8 *mac) 
{ u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac, ETH_ALEN); @@ -1201,11 +1252,10 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, return qed_spq_post(p_hwfn, p_ent, NULL); } -static int -qed_filter_mcast_cmd(struct qed_dev *cdev, - struct qed_filter_mcast *p_filter_cmd, - enum spq_mode comp_mode, - struct qed_spq_comp_cb *p_comp_data) +static int qed_filter_mcast_cmd(struct qed_dev *cdev, + struct qed_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) { int rc = 0; int i; @@ -1221,8 +1271,10 @@ qed_filter_mcast_cmd(struct qed_dev *cdev, u16 opaque_fid; - if (rc != 0) - break; + if (IS_VF(cdev)) { + qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); + continue; + } opaque_fid = p_hwfn->hw_info.opaque_fid; @@ -1247,8 +1299,10 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; u16 opaque_fid; - if (rc != 0) - break; + if (IS_VF(cdev)) { + rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); + continue; + } opaque_fid = p_hwfn->hw_info.opaque_fid; @@ -1257,6 +1311,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, p_filter_cmd, comp_mode, p_comp_data); + if (rc != 0) + break; } return rc; @@ -1265,12 +1321,19 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, /* Statistics related code */ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, u32 *p_addr, - u32 *p_len, - u16 statistics_bin) + u32 *p_len, u16 statistics_bin) { - *p_addr = BAR0_MAP_REG_PSDM_RAM + - PSTORM_QUEUE_STAT_OFFSET(statistics_bin); - *p_len = sizeof(struct eth_pstorm_per_queue_stat); + if (IS_PF(p_hwfn->cdev)) { + *p_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_pstorm_per_queue_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.pstats.address; + *p_len = p_resp->pfdev_info.stats_info.pstats.len; + } } static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, @@ -1285,32 +1348,15 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, statistics_bin); memset(&pstats, 0, sizeof(pstats)); - qed_memcpy_from(p_hwfn, p_ptt, &pstats, - pstats_addr, pstats_len); - - p_stats->tx_ucast_bytes += - HILO_64_REGPAIR(pstats.sent_ucast_bytes); - p_stats->tx_mcast_bytes += - HILO_64_REGPAIR(pstats.sent_mcast_bytes); - p_stats->tx_bcast_bytes += - HILO_64_REGPAIR(pstats.sent_bcast_bytes); - p_stats->tx_ucast_pkts += - HILO_64_REGPAIR(pstats.sent_ucast_pkts); - p_stats->tx_mcast_pkts += - HILO_64_REGPAIR(pstats.sent_mcast_pkts); - p_stats->tx_bcast_pkts += - HILO_64_REGPAIR(pstats.sent_bcast_pkts); - p_stats->tx_err_drop_pkts += - HILO_64_REGPAIR(pstats.error_drop_pkts); -} - -static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn, - u32 *p_addr, - u32 *p_len) -{ - *p_addr = BAR0_MAP_REG_TSDM_RAM + - TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); - *p_len = sizeof(struct tstorm_per_port_stat); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); + + p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); + p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); + p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); + p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); + p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); + p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); + p_stats->tx_err_drop_pkts += 
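qed_mcast_bin_from_mac(), made non-static above for the IOV code, hashes a MAC into an approximate-multicast bin via CRC32C; a typical caller marks each address's bin in the bins[] bitmap of the vport-update params, roughly like this sketch:

static void example_fill_mcast_bins(struct qed_sp_vport_update_params *p,
				    struct qed_filter_mcast *p_cmd)
{
	int i;

	p->update_approx_mcast_flg = 1;
	for (i = 0; i < p_cmd->num_mc_addrs; i++)
		__set_bit(qed_mcast_bin_from_mac(p_cmd->mac[i]), p->bins);
}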
HILO_64_REGPAIR(pstats.error_drop_pkts); } static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, @@ -1318,14 +1364,23 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_eth_stats *p_stats, u16 statistics_bin) { - u32 tstats_addr = 0, tstats_len = 0; struct tstorm_per_port_stat tstats; + u32 tstats_addr, tstats_len; + + if (IS_PF(p_hwfn->cdev)) { + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); + tstats_len = sizeof(struct tstorm_per_port_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; - __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len); + tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; + tstats_len = p_resp->pfdev_info.stats_info.tstats.len; + } memset(&tstats, 0, sizeof(tstats)); - qed_memcpy_from(p_hwfn, p_ptt, &tstats, - tstats_addr, tstats_len); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); p_stats->mftag_filter_discards += HILO_64_REGPAIR(tstats.mftag_filter_discard); @@ -1335,12 +1390,19 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, u32 *p_addr, - u32 *p_len, - u16 statistics_bin) + u32 *p_len, u16 statistics_bin) { - *p_addr = BAR0_MAP_REG_USDM_RAM + - USTORM_QUEUE_STAT_OFFSET(statistics_bin); - *p_len = sizeof(struct eth_ustorm_per_queue_stat); + if (IS_PF(p_hwfn->cdev)) { + *p_addr = BAR0_MAP_REG_USDM_RAM + + USTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_ustorm_per_queue_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.ustats.address; + *p_len = p_resp->pfdev_info.stats_info.ustats.len; + } } static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, @@ -1355,31 +1417,31 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, statistics_bin); memset(&ustats, 0, sizeof(ustats)); - qed_memcpy_from(p_hwfn, p_ptt, &ustats, - ustats_addr, ustats_len); - - p_stats->rx_ucast_bytes += - HILO_64_REGPAIR(ustats.rcv_ucast_bytes); - p_stats->rx_mcast_bytes += - HILO_64_REGPAIR(ustats.rcv_mcast_bytes); - p_stats->rx_bcast_bytes += - HILO_64_REGPAIR(ustats.rcv_bcast_bytes); - p_stats->rx_ucast_pkts += - HILO_64_REGPAIR(ustats.rcv_ucast_pkts); - p_stats->rx_mcast_pkts += - HILO_64_REGPAIR(ustats.rcv_mcast_pkts); - p_stats->rx_bcast_pkts += - HILO_64_REGPAIR(ustats.rcv_bcast_pkts); + qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); + + p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); + p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); + p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); + p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); + p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); + p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); } static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, u32 *p_addr, - u32 *p_len, - u16 statistics_bin) + u32 *p_len, u16 statistics_bin) { - *p_addr = BAR0_MAP_REG_MSDM_RAM + - MSTORM_QUEUE_STAT_OFFSET(statistics_bin); - *p_len = sizeof(struct eth_mstorm_per_queue_stat); + if (IS_PF(p_hwfn->cdev)) { + *p_addr = BAR0_MAP_REG_MSDM_RAM + + MSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_mstorm_per_queue_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + 
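Each counter accumulated in these stats helpers is a {hi,lo} pair of 32-bit registers that HILO_64_REGPAIR() folds into a u64. The fold is roughly the following (a sketch for orientation only; the real macros live in qed_hsi.h):

#define EXAMPLE_HILO_64_REGPAIR(regpair) \
	((((u64)(regpair).hi) << 32) + (regpair).lo)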
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.mstats.address; + *p_len = p_resp->pfdev_info.stats_info.mstats.len; + } } static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, @@ -1394,21 +1456,17 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, statistics_bin); memset(&mstats, 0, sizeof(mstats)); - qed_memcpy_from(p_hwfn, p_ptt, &mstats, - mstats_addr, mstats_len); + qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len); - p_stats->no_buff_discards += - HILO_64_REGPAIR(mstats.no_buff_discard); + p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard); p_stats->packet_too_big_discard += HILO_64_REGPAIR(mstats.packet_too_big_discard); - p_stats->ttl0_discard += - HILO_64_REGPAIR(mstats.ttl0_discard); + p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); p_stats->tpa_coalesced_pkts += HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); p_stats->tpa_coalesced_events += HILO_64_REGPAIR(mstats.tpa_coalesced_events); - p_stats->tpa_aborts_num += - HILO_64_REGPAIR(mstats.tpa_aborts_num); + p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num); p_stats->tpa_coalesced_bytes += HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); } @@ -1428,16 +1486,16 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, sizeof(port_stats)); p_stats->rx_64_byte_packets += port_stats.pmm.r64; - p_stats->rx_127_byte_packets += port_stats.pmm.r127; - p_stats->rx_255_byte_packets += port_stats.pmm.r255; - p_stats->rx_511_byte_packets += port_stats.pmm.r511; - p_stats->rx_1023_byte_packets += port_stats.pmm.r1023; - p_stats->rx_1518_byte_packets += port_stats.pmm.r1518; - p_stats->rx_1522_byte_packets += port_stats.pmm.r1522; - p_stats->rx_2047_byte_packets += port_stats.pmm.r2047; - p_stats->rx_4095_byte_packets += port_stats.pmm.r4095; - p_stats->rx_9216_byte_packets += port_stats.pmm.r9216; - p_stats->rx_16383_byte_packets += port_stats.pmm.r16383; + p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127; + p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255; + p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511; + p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023; + p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518; + p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522; + p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047; + p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095; + p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216; + p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383; p_stats->rx_crc_errors += port_stats.pmm.rfcs; p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; p_stats->rx_pause_frames += port_stats.pmm.rxpf; @@ -1481,44 +1539,49 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_eth_stats *stats, - u16 statistics_bin) + u16 statistics_bin, bool b_get_port_stats) { __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); - if (p_hwfn->mcp_info) + if (b_get_port_stats && p_hwfn->mcp_info) __qed_get_vport_port_stats(p_hwfn, p_ptt, stats); } static void _qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) { - u8 fw_vport = 0; - int i; + u8 fw_vport = 0; + int i; 
memset(stats, 0, sizeof(*stats)); for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - struct qed_ptt *p_ptt; - - /* The main vport index is relative first */ - if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { - DP_ERR(p_hwfn, "No vport available!\n"); - continue; + struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) + : NULL; + + if (IS_PF(cdev)) { + /* The main vport index is relative first */ + if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { + DP_ERR(p_hwfn, "No vport available!\n"); + goto out; + } } - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) { + if (IS_PF(cdev) && !p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); continue; } - __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport); + __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, + IS_PF(cdev) ? true : false); - qed_ptt_release(p_hwfn, p_ptt); +out: + if (IS_PF(cdev) && p_ptt) + qed_ptt_release(p_hwfn, p_ptt); } } @@ -1552,10 +1615,11 @@ void qed_reset_vport_stats(struct qed_dev *cdev) struct eth_mstorm_per_queue_stat mstats; struct eth_ustorm_per_queue_stat ustats; struct eth_pstorm_per_queue_stat pstats; - struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) + : NULL; u32 addr = 0, len = 0; - if (!p_ptt) { + if (IS_PF(cdev) && !p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); continue; } @@ -1572,7 +1636,8 @@ void qed_reset_vport_stats(struct qed_dev *cdev) __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); - qed_ptt_release(p_hwfn, p_ptt); + if (IS_PF(cdev)) + qed_ptt_release(p_hwfn, p_ptt); } /* PORT statistics are not necessarily reset, so we need to @@ -1593,32 +1658,61 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, info->num_tc = 1; - if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { - for_each_hwfn(cdev, i) - info->num_queues += FEAT_NUM(&cdev->hwfns[i], - QED_PF_L2_QUE); - if (cdev->int_params.fp_msix_cnt) - info->num_queues = min_t(u8, info->num_queues, - cdev->int_params.fp_msix_cnt); + if (IS_PF(cdev)) { + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + for_each_hwfn(cdev, i) + info->num_queues += + FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); + if (cdev->int_params.fp_msix_cnt) + info->num_queues = + min_t(u8, info->num_queues, + cdev->int_params.fp_msix_cnt); + } else { + info->num_queues = cdev->num_hwfns; + } + + info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN); + ether_addr_copy(info->port_mac, + cdev->hwfns[0].hw_info.hw_mac_addr); } else { - info->num_queues = cdev->num_hwfns; - } + qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues); + if (cdev->num_hwfns > 1) { + u8 queues = 0; - info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN); - ether_addr_copy(info->port_mac, - cdev->hwfns[0].hw_info.hw_mac_addr); + qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues); + info->num_queues += queues; + } + + qed_vf_get_num_vlan_filters(&cdev->hwfns[0], + &info->num_vlan_filters); + qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac); + } qed_fill_dev_info(cdev, &info->common); + if (IS_VF(cdev)) + memset(info->common.hw_mac, 0, ETH_ALEN); + return 0; } static void qed_register_eth_ops(struct qed_dev *cdev, - struct qed_eth_cb_ops *ops, - void *cookie) + struct qed_eth_cb_ops *ops, void *cookie) +{ + cdev->protocol_ops.eth = ops; + cdev->ops_cookie = cookie; + + /* For VF, we start bulletin reading */ + if (IS_VF(cdev)) + qed_vf_start_iov_wq(cdev); +} + +static bool qed_check_mac(struct qed_dev *cdev, u8 *mac) { - 
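A VF has no PTT window of its own, which is why the stats paths above acquire and release a PTT only under IS_PF(); the guard pattern in isolation (a sketch, assuming VF flows simply carry p_ptt == NULL):

static void example_ptt_guard(struct qed_dev *cdev, struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) : NULL;

	if (IS_PF(cdev) && !p_ptt)
		return;	/* only a PF needs (and can fail to get) the window */

	/* PF accesses go through the window; VF flows pass p_ptt == NULL */

	if (IS_PF(cdev) && p_ptt)
		qed_ptt_release(p_hwfn, p_ptt);
}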
cdev->protocol_ops.eth = ops; - cdev->ops_cookie = cookie; + if (IS_PF(cdev)) + return true; + + return qed_vf_check_mac(&cdev->hwfns[0], mac); } static int qed_start_vport(struct qed_dev *cdev, @@ -1633,6 +1727,7 @@ static int qed_start_vport(struct qed_dev *cdev, start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO : QED_TPA_MODE_NONE; start.remove_inner_vlan = params->remove_inner_vlan; + start.only_untagged = true; /* untagged only */ start.drop_ttl0 = params->drop_ttl0; start.opaque_fid = p_hwfn->hw_info.opaque_fid; start.concrete_fid = p_hwfn->hw_info.concrete_fid; @@ -1699,6 +1794,8 @@ static int qed_update_vport(struct qed_dev *cdev, params->update_vport_active_flg; sp_params.vport_active_rx_flg = params->vport_active_flg; sp_params.vport_active_tx_flg = params->vport_active_flg; + sp_params.update_tx_switching_flg = params->update_tx_switching_flg; + sp_params.tx_switching_flg = params->tx_switching_flg; sp_params.accept_any_vlan = params->accept_any_vlan; sp_params.update_accept_any_vlan_flg = params->update_accept_any_vlan_flg; @@ -1744,9 +1841,7 @@ static int qed_update_vport(struct qed_dev *cdev, sp_rss_params.update_rss_capabilities = 1; sp_rss_params.update_rss_ind_table = 1; sp_rss_params.update_rss_key = 1; - sp_rss_params.rss_caps = QED_RSS_IPV4 | - QED_RSS_IPV6 | - QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP; + sp_rss_params.rss_caps = params->rss_params.rss_caps; sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */ memcpy(sp_rss_params.rss_ind_table, params->rss_params.rss_ind_table, @@ -1899,6 +1994,39 @@ static int qed_stop_txq(struct qed_dev *cdev, return 0; } +static int qed_tunn_configure(struct qed_dev *cdev, + struct qed_tunn_params *tunn_params) +{ + struct qed_tunn_update_params tunn_info; + int i, rc; + + if (IS_VF(cdev)) + return 0; + + memset(&tunn_info, 0, sizeof(tunn_info)); + if (tunn_params->update_vxlan_port == 1) { + tunn_info.update_vxlan_udp_port = 1; + tunn_info.vxlan_udp_port = tunn_params->vxlan_port; + } + + if (tunn_params->update_geneve_port == 1) { + tunn_info.update_geneve_udp_port = 1; + tunn_info.geneve_udp_port = tunn_params->geneve_port; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + + rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info, + QED_SPQ_MODE_EBLOCK, NULL); + + if (rc) + return rc; + } + + return 0; +} + static int qed_configure_filter_rx_mode(struct qed_dev *cdev, enum qed_filter_rx_mode_type type) { @@ -2026,10 +2154,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev, cqe); } +#ifdef CONFIG_QED_SRIOV +extern const struct qed_iov_hv_ops qed_iov_ops_pass; +#endif + static const struct qed_eth_ops qed_eth_ops_pass = { .common = &qed_common_ops_pass, +#ifdef CONFIG_QED_SRIOV + .iov = &qed_iov_ops_pass, +#endif .fill_dev_info = &qed_fill_eth_dev_info, .register_ops = &qed_register_eth_ops, + .check_mac = &qed_check_mac, .vport_start = &qed_start_vport, .vport_stop = &qed_stop_vport, .vport_update = &qed_update_vport, @@ -2041,16 +2177,11 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .fastpath_stop = &qed_fastpath_stop, .eth_cqe_completion = &qed_fp_cqe_completion, .get_vport_stats = &qed_get_vport_stats, + .tunn_config = &qed_tunn_configure, }; -const struct qed_eth_ops *qed_get_eth_ops(u32 version) +const struct qed_eth_ops *qed_get_eth_ops(void) { - if (version != QED_ETH_INTERFACE_VERSION) { - pr_notice("Cannot supply ethtool operations [%08x != %08x]\n", - version, QED_ETH_INTERFACE_VERSION); - return NULL; - } - return &qed_eth_ops_pass; } EXPORT_SYMBOL(qed_get_eth_ops); diff --git 
a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h new file mode 100644 index 000000000000..002114543451 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -0,0 +1,239 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ +#ifndef _QED_L2_H +#define _QED_L2_H +#include <linux/types.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/qed/qed_eth_if.h> +#include "qed.h" +#include "qed_hw.h" +#include "qed_sp.h" + +struct qed_sge_tpa_params { + u8 max_buffers_per_cqe; + + u8 update_tpa_en_flg; + u8 tpa_ipv4_en_flg; + u8 tpa_ipv6_en_flg; + u8 tpa_ipv4_tunn_en_flg; + u8 tpa_ipv6_tunn_en_flg; + + u8 update_tpa_param_flg; + u8 tpa_pkt_split_flg; + u8 tpa_hdr_data_split_flg; + u8 tpa_gro_consistent_flg; + u8 tpa_max_aggs_num; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; +}; + +enum qed_filter_opcode { + QED_FILTER_ADD, + QED_FILTER_REMOVE, + QED_FILTER_MOVE, + QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */ + QED_FILTER_FLUSH, /* Removes all filters */ +}; + +enum qed_filter_ucast_type { + QED_FILTER_MAC, + QED_FILTER_VLAN, + QED_FILTER_MAC_VLAN, + QED_FILTER_INNER_MAC, + QED_FILTER_INNER_VLAN, + QED_FILTER_INNER_PAIR, + QED_FILTER_INNER_MAC_VNI_PAIR, + QED_FILTER_MAC_VNI_PAIR, + QED_FILTER_VNI, +}; + +struct qed_filter_ucast { + enum qed_filter_opcode opcode; + enum qed_filter_ucast_type type; + u8 is_rx_filter; + u8 is_tx_filter; + u8 vport_to_add_to; + u8 vport_to_remove_from; + unsigned char mac[ETH_ALEN]; + u8 assert_on_error; + u16 vlan; + u32 vni; +}; + +struct qed_filter_mcast { + /* MOVE is not supported for multicast */ + enum qed_filter_opcode opcode; + u8 vport_to_add_to; + u8 vport_to_remove_from; + u8 num_mc_addrs; +#define QED_MAX_MC_ADDRS 64 + unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN]; +}; + +int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + u16 rx_queue_id, + bool eq_completion_only, bool cqe_completion); + +int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id); + +enum qed_tpa_mode { + QED_TPA_MODE_NONE, + QED_TPA_MODE_UNUSED, + QED_TPA_MODE_GRO, + QED_TPA_MODE_MAX +}; + +struct qed_sp_vport_start_params { + enum qed_tpa_mode tpa_mode; + bool remove_inner_vlan; + bool tx_switching; + bool only_untagged; + bool drop_ttl0; + u8 max_buffers_per_cqe; + u32 concrete_fid; + u16 opaque_fid; + u8 vport_id; + u16 mtu; +}; + +int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params); + +struct qed_rss_params { + u8 update_rss_config; + u8 rss_enable; + u8 rss_eng_id; + u8 update_rss_capabilities; + u8 update_rss_ind_table; + u8 update_rss_key; + u8 rss_caps; + u8 rss_table_size_log; + u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE]; + u32 rss_key[QED_RSS_KEY_SIZE]; +}; + +struct qed_filter_accept_flags { + u8 update_rx_mode_config; + u8 update_tx_mode_config; + u8 rx_accept_filter; + u8 tx_accept_filter; +#define QED_ACCEPT_NONE 0x01 +#define QED_ACCEPT_UCAST_MATCHED 0x02 +#define QED_ACCEPT_UCAST_UNMATCHED 0x04 +#define QED_ACCEPT_MCAST_MATCHED 0x08 +#define QED_ACCEPT_MCAST_UNMATCHED 0x10 +#define QED_ACCEPT_BCAST 0x20 +}; + +struct qed_sp_vport_update_params { + u16 opaque_fid; + u8 vport_id; + u8 update_vport_active_rx_flg; + u8 vport_active_rx_flg; + u8 update_vport_active_tx_flg; + 
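/* An assumed example of a request under the qed_filter_ucast type above:
 * adding one MAC to a vport on both rx and tx would set
 * .opcode = QED_FILTER_ADD, .type = QED_FILTER_MAC, .is_rx_filter = 1,
 * .is_tx_filter = 1, .vport_to_add_to = <vport>, and copy the address
 * into .mac[] -- then go through qed_sp_eth_filter_ucast().
 */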
u8 vport_active_tx_flg; + u8 update_inner_vlan_removal_flg; + u8 inner_vlan_removal_flg; + u8 silent_vlan_removal_flg; + u8 update_default_vlan_enable_flg; + u8 default_vlan_enable_flg; + u8 update_default_vlan_flg; + u16 default_vlan; + u8 update_tx_switching_flg; + u8 tx_switching_flg; + u8 update_approx_mcast_flg; + u8 update_anti_spoofing_en_flg; + u8 anti_spoofing_en; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + unsigned long bins[8]; + struct qed_rss_params *rss_params; + struct qed_filter_accept_flags accept_flags; + struct qed_sge_tpa_params *sge_tpa_params; +}; + +int qed_sp_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); + +/** + * @brief qed_sp_vport_stop - + * + * This ramrod closes a VPort after all its RX and TX queues are terminated. + * An Assert is generated if any queues are left open. + * + * @param p_hwfn + * @param opaque_fid + * @param vport_id VPort ID + * + * @return int + */ +int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id); + +int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); + +/** + * @brief qed_sp_eth_rx_queues_update - + * + * This ramrod updates an RX queue. It is used for setting the active state + * of the queue and updating the TPA and SGE parameters. + * + * @note At the moment - only used by non-Linux VFs. + * + * @param p_hwfn + * @param rx_queue_id RX Queue ID + * @param num_rxqs Allow to update multiple rx + * queues, from rx_queue_id to + * (rx_queue_id + num_rxqs) + * @param complete_cqe_flg Post completion to the CQE Ring if set + * @param complete_event_flg Post completion to the Event Ring if set + * + * @return int + */ + +int +qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, + u16 rx_queue_id, + u8 num_rxqs, + u8 complete_cqe_flg, + u8 complete_event_flg, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); + +int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params); + +int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *params, + u8 stats_id, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); + +int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *p_params, + u8 stats_id, + dma_addr_t pbl_addr, + u16 pbl_size, + union qed_qm_pq_params *p_pq_params); + +u8 qed_mcast_bin_from_mac(u8 *mac); + +#endif /* _QED_L2_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 26d40db07ddd..8b22f87033ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -24,10 +24,12 @@ #include <linux/qed/qed_if.h> #include "qed.h" +#include "qed_sriov.h" #include "qed_sp.h" #include "qed_dev_api.h" #include "qed_mcp.h" #include "qed_hw.h" +#include "qed_selftest.h" static char version[] = "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; @@ -124,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev, goto err1; } - if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { DP_NOTICE(cdev, "No memory region found in bar #2\n"); rc = -EIO; goto err1; @@ -156,7 
+158,7 @@ static int qed_init_pci(struct qed_dev *cdev, } cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); - if (cdev->pci_params.pm_cap == 0) + if (IS_PF(cdev) && !cdev->pci_params.pm_cap) DP_NOTICE(cdev, "Cannot find power management capability\n"); rc = qed_set_coherency_mask(cdev); @@ -174,12 +176,14 @@ static int qed_init_pci(struct qed_dev *cdev, goto err2; } - cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); - cdev->db_size = pci_resource_len(cdev->pdev, 2); - cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); - if (!cdev->doorbells) { - DP_NOTICE(cdev, "Cannot map doorbell space\n"); - return -ENOMEM; + if (IS_PF(cdev)) { + cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); + cdev->db_size = pci_resource_len(cdev->pdev, 2); + cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); + if (!cdev->doorbells) { + DP_NOTICE(cdev, "Cannot map doorbell space\n"); + return -ENOMEM; + } } return 0; @@ -206,20 +210,33 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); - dev_info->fw_major = FW_MAJOR_VERSION; - dev_info->fw_minor = FW_MINOR_VERSION; - dev_info->fw_rev = FW_REVISION_VERSION; - dev_info->fw_eng = FW_ENGINEERING_VERSION; - dev_info->mf_mode = cdev->mf_mode; + if (IS_PF(cdev)) { + dev_info->fw_major = FW_MAJOR_VERSION; + dev_info->fw_minor = FW_MINOR_VERSION; + dev_info->fw_rev = FW_REVISION_VERSION; + dev_info->fw_eng = FW_ENGINEERING_VERSION; + dev_info->mf_mode = cdev->mf_mode; + dev_info->tx_switching = true; + } else { + qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, + &dev_info->fw_minor, &dev_info->fw_rev, + &dev_info->fw_eng); + } - qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev); + if (IS_PF(cdev)) { + ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); + if (ptt) { + qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, + &dev_info->mfw_rev, NULL); - ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); - if (ptt) { - qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, - &dev_info->flash_size); + qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, + &dev_info->flash_size); - qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); + qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); + } + } else { + qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, + &dev_info->mfw_rev, NULL); } return 0; @@ -256,9 +273,7 @@ static int qed_set_power_state(struct qed_dev *cdev, /* probing */ static struct qed_dev *qed_probe(struct pci_dev *pdev, - enum qed_protocol protocol, - u32 dp_module, - u8 dp_level) + struct qed_probe_params *params) { struct qed_dev *cdev; int rc; @@ -267,9 +282,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, if (!cdev) goto err0; - cdev->protocol = protocol; + cdev->protocol = params->protocol; + + if (params->is_vf) + cdev->b_is_vf = true; - qed_init_dp(cdev, dp_module, dp_level); + qed_init_dp(cdev, params->dp_module, params->dp_level); rc = qed_init_pci(cdev, pdev); if (rc) { @@ -663,6 +681,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, return 0; } +static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) +{ + int rc; + + memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); + cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; + + qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), + &cdev->int_params.in.num_vectors); + if (cdev->num_hwfns > 1) { + u8 vectors = 0; + + qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); + cdev->int_params.in.num_vectors += vectors; + } + + /* We want 
a minimum of one fastpath vector per vf hwfn */ + cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; + + rc = qed_set_int_mode(cdev, true); + if (rc) + return rc; + + cdev->int_params.fp_msix_base = 0; + cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; + + return 0; +} + u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf) { @@ -744,39 +791,62 @@ static void qed_update_pf_params(struct qed_dev *cdev, static int qed_slowpath_start(struct qed_dev *cdev, struct qed_slowpath_params *params) { + struct qed_tunn_start_params tunn_info; struct qed_mcp_drv_version drv_version; const u8 *data = NULL; struct qed_hwfn *hwfn; - int rc; + int rc = -EINVAL; - rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, - &cdev->pdev->dev); - if (rc) { - DP_NOTICE(cdev, - "Failed to find fw file - /lib/firmware/%s\n", - QED_FW_FILE_NAME); + if (qed_iov_wq_start(cdev)) goto err; + + if (IS_PF(cdev)) { + rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, + &cdev->pdev->dev); + if (rc) { + DP_NOTICE(cdev, + "Failed to find fw file - /lib/firmware/%s\n", + QED_FW_FILE_NAME); + goto err; + } } rc = qed_nic_setup(cdev); if (rc) goto err; - rc = qed_slowpath_setup_int(cdev, params->int_mode); + if (IS_PF(cdev)) + rc = qed_slowpath_setup_int(cdev, params->int_mode); + else + rc = qed_slowpath_vf_setup_int(cdev); if (rc) goto err1; - /* Allocate stream for unzipping */ - rc = qed_alloc_stream_mem(cdev); - if (rc) { - DP_NOTICE(cdev, "Failed to allocate stream memory\n"); - goto err2; + if (IS_PF(cdev)) { + /* Allocate stream for unzipping */ + rc = qed_alloc_stream_mem(cdev); + if (rc) { + DP_NOTICE(cdev, "Failed to allocate stream memory\n"); + goto err2; + } + + data = cdev->firmware->data; } - /* Start the slowpath */ - data = cdev->firmware->data; + memset(&tunn_info, 0, sizeof(tunn_info)); + tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN | + 1 << QED_MODE_L2GRE_TUNN | + 1 << QED_MODE_IPGRE_TUNN | + 1 << QED_MODE_L2GENEVE_TUNN | + 1 << QED_MODE_IPGENEVE_TUNN; + + tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN; - rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode, + /* Start the slowpath */ + rc = qed_hw_init(cdev, &tunn_info, true, + cdev->int_params.out.int_mode, true, data); if (rc) goto err2; @@ -784,18 +854,20 @@ static int qed_slowpath_start(struct qed_dev *cdev, DP_INFO(cdev, "HW initialization and function start completed successfully\n"); - hwfn = QED_LEADING_HWFN(cdev); - drv_version.version = (params->drv_major << 24) | - (params->drv_minor << 16) | - (params->drv_rev << 8) | - (params->drv_eng); - strlcpy(drv_version.name, params->name, - MCP_DRV_VER_STR_SIZE - 4); - rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, - &drv_version); - if (rc) { - DP_NOTICE(cdev, "Failed sending drv version command\n"); - return rc; + if (IS_PF(cdev)) { + hwfn = QED_LEADING_HWFN(cdev); + drv_version.version = (params->drv_major << 24) | + (params->drv_minor << 16) | + (params->drv_rev << 8) | + (params->drv_eng); + strlcpy(drv_version.name, params->name, + MCP_DRV_VER_STR_SIZE - 4); + rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, + &drv_version); + if (rc) { + DP_NOTICE(cdev, "Failed sending drv version command\n"); + return rc; + } } qed_reset_vport_stats(cdev); @@ -804,13 +876,17 @@ static int qed_slowpath_start(struct qed_dev *cdev, err2: qed_hw_timers_stop_all(cdev); - qed_slowpath_irq_free(cdev); + if 
(IS_PF(cdev)) + qed_slowpath_irq_free(cdev); qed_free_stream_mem(cdev); qed_disable_msix(cdev); err1: qed_resc_free(cdev); err: - release_firmware(cdev->firmware); + if (IS_PF(cdev)) + release_firmware(cdev->firmware); + + qed_iov_wq_stop(cdev, false); return rc; } @@ -820,15 +896,21 @@ static int qed_slowpath_stop(struct qed_dev *cdev) if (!cdev) return -ENODEV; - qed_free_stream_mem(cdev); + if (IS_PF(cdev)) { + qed_free_stream_mem(cdev); + qed_sriov_disable(cdev, true); - qed_nic_stop(cdev); - qed_slowpath_irq_free(cdev); + qed_nic_stop(cdev); + qed_slowpath_irq_free(cdev); + } qed_disable_msix(cdev); qed_nic_reset(cdev); - release_firmware(cdev->firmware); + qed_iov_wq_stop(cdev, true); + + if (IS_PF(cdev)) + release_firmware(cdev->firmware); return 0; } @@ -902,6 +984,11 @@ static u32 qed_sb_release(struct qed_dev *cdev, return rc; } +static bool qed_can_link_change(struct qed_dev *cdev) +{ + return true; +} + static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) { @@ -913,6 +1000,9 @@ static int qed_set_link(struct qed_dev *cdev, if (!cdev) return -ENODEV; + if (IS_VF(cdev)) + return 0; + /* The link should be set only once per PF */ hwfn = &cdev->hwfns[0]; @@ -944,6 +1034,39 @@ static int qed_set_link(struct qed_dev *cdev, } if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) link_params->speed.forced_speed = params->forced_speed; + if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { + if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) + link_params->pause.autoneg = true; + else + link_params->pause.autoneg = false; + if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) + link_params->pause.forced_rx = true; + else + link_params->pause.forced_rx = false; + if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) + link_params->pause.forced_tx = true; + else + link_params->pause.forced_tx = false; + } + if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { + switch (params->loopback_mode) { + case QED_LINK_LOOPBACK_INT_PHY: + link_params->loopback_mode = PMM_LOOPBACK_INT_PHY; + break; + case QED_LINK_LOOPBACK_EXT_PHY: + link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY; + break; + case QED_LINK_LOOPBACK_EXT: + link_params->loopback_mode = PMM_LOOPBACK_EXT; + break; + case QED_LINK_LOOPBACK_MAC: + link_params->loopback_mode = PMM_LOOPBACK_MAC; + break; + default: + link_params->loopback_mode = PMM_LOOPBACK_NONE; + break; + } + } rc = qed_mcp_set_link(hwfn, ptt, params->link_up); @@ -991,10 +1114,16 @@ static void qed_fill_link(struct qed_hwfn *hwfn, memset(if_link, 0, sizeof(*if_link)); /* Prepare source inputs */ - memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params)); - memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); - memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), - sizeof(link_caps)); + if (IS_PF(hwfn->cdev)) { + memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params)); + memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); + memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), + sizeof(link_caps)); + } else { + qed_vf_get_link_params(hwfn, &params); + qed_vf_get_link_state(hwfn, &link); + qed_vf_get_link_caps(hwfn, &link_caps); + } /* Set the link parameters to pass to protocol driver */ if (link.link_up) @@ -1096,7 +1225,12 @@ static void qed_fill_link(struct qed_hwfn *hwfn, static void qed_get_current_link(struct qed_dev *cdev, struct qed_link_output *if_link) { + int i; + qed_fill_link(&cdev->hwfns[0], if_link); + + for_each_hwfn(cdev, i) + qed_inform_vf_link_state(&cdev->hwfns[i]); 
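The pause/loopback overrides introduced above are consumed from the protocol driver through the common ops; an assumed qede-style caller enabling pause autonegotiation (a sketch, not part of the patch):

static int example_set_pause_autoneg(struct qed_dev *cdev,
				     const struct qed_common_ops *ops)
{
	struct qed_link_params link_params;

	memset(&link_params, 0, sizeof(link_params));
	link_params.override_flags = QED_LINK_OVERRIDE_PAUSE_CONFIG;
	link_params.pause_config = QED_LINK_PAUSE_AUTONEG_ENABLE;
	link_params.link_up = true;

	return ops->set_link(cdev, &link_params);
}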
} void qed_link_update(struct qed_hwfn *hwfn) @@ -1106,6 +1240,7 @@ void qed_link_update(struct qed_hwfn *hwfn) struct qed_link_output if_link; qed_fill_link(hwfn, &if_link); + qed_inform_vf_link_state(hwfn); if (IS_LEAD_HWFN(hwfn) && cookie) op->link_update(cookie, &if_link); @@ -1117,6 +1252,9 @@ static int qed_drain(struct qed_dev *cdev) struct qed_ptt *ptt; int i, rc; + if (IS_VF(cdev)) + return 0; + for_each_hwfn(cdev, i) { hwfn = &cdev->hwfns[i]; ptt = qed_ptt_acquire(hwfn); @@ -1150,7 +1288,15 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) return status; } +struct qed_selftest_ops qed_selftest_ops_pass = { + .selftest_memory = &qed_selftest_memory, + .selftest_interrupt = &qed_selftest_interrupt, + .selftest_register = &qed_selftest_register, + .selftest_clock = &qed_selftest_clock, +}; + const struct qed_common_ops qed_common_ops_pass = { + .selftest = &qed_selftest_ops_pass, .probe = &qed_probe, .remove = &qed_remove, .set_power_state = &qed_set_power_state, @@ -1164,6 +1310,7 @@ const struct qed_common_ops qed_common_ops_pass = { .sb_release = &qed_sb_release, .simd_handler_config = &qed_simd_handler_config, .simd_handler_clean = &qed_simd_handler_clean, + .can_link_change = &qed_can_link_change, .set_link = &qed_set_link, .get_link = &qed_get_current_link, .drain = &qed_drain, @@ -1172,14 +1319,3 @@ const struct qed_common_ops qed_common_ops_pass = { .chain_free = &qed_chain_free, .set_led = &qed_set_led, }; - -u32 qed_get_protocol_version(enum qed_protocol protocol) -{ - switch (protocol) { - case QED_PROTOCOL_ETH: - return QED_ETH_INTERFACE_VERSION; - default: - return 0; - } -} -EXPORT_SYMBOL(qed_get_protocol_version); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index b89c9a8e1655..1182361798b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -15,10 +15,13 @@ #include <linux/spinlock.h> #include <linux/string.h> #include "qed.h" +#include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" +#include "qed_sriov.h" + #define CHIP_MCP_RESP_ITER_US 10 #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ @@ -440,6 +443,75 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, return 0; } +static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 path_addr = SECTION_ADDR(mfw_path_offsize, + QED_PATH_ID(p_hwfn)); + u32 disabled_vfs[VF_MAX_STATIC / 32]; + int i; + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Reading Disabled VF information from [offset %08x], path_addr %08x\n", + mfw_path_offsize, path_addr); + + for (i = 0; i < (VF_MAX_STATIC / 32); i++) { + disabled_vfs[i] = qed_rd(p_hwfn, p_ptt, + path_addr + + offsetof(struct public_path, + mcp_vf_disabled) + + sizeof(u32) * i); + DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), + "FLR-ed VFs [%08x,...,%08x] - %08x\n", + i * 32, (i + 1) * 32 - 1, disabled_vfs[i]); + } + + if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs)) + qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG); +} + +int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *vfs_to_ack) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 func_addr = SECTION_ADDR(mfw_func_offsize, + MCP_PF_ID(p_hwfn)); + struct 
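qed_mcp_handle_vf_flr() above reads one 32-bit word per 32 VFs from the MFW path section; walking the resulting bitmap per VF looks roughly like the following sketch (the per-VF action here is only illustrative):

static void example_walk_flr_bitmap(struct qed_hwfn *p_hwfn, u32 *disabled_vfs)
{
	int i;

	for (i = 0; i < VF_MAX_STATIC; i++)
		if (disabled_vfs[i / 32] & BIT(i % 32))
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF %d needs FLR cleanup\n", i);
}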
qed_mcp_mb_params mb_params; + union drv_union_data union_data; + int rc; + int i; + + for (i = 0; i < (VF_MAX_STATIC / 32); i++) + DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), + "Acking VFs [%08x,...,%08x] - %08x\n", + i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]); + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE; + memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8); + mb_params.p_data_src = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n"); + return -EBUSY; + } + + /* Clear the ACK bits */ + for (i = 0; i < (VF_MAX_STATIC / 32); i++) + qed_wr(p_hwfn, p_ptt, + func_addr + + offsetof(struct public_func, drv_ack_vf_disabled) + + i * sizeof(u32), 0); + + return rc; +} + static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -472,6 +544,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, bool b_reset) { struct qed_mcp_link_state *p_link; + u8 max_bw, min_bw; u32 status = 0; p_link = &p_hwfn->mcp_info->link_output; @@ -527,17 +600,20 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, p_link->speed = 0; } - /* Correct speed according to bandwidth allocation */ - if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) { - p_link->speed = p_link->speed * - p_hwfn->mcp_info->func_info.bandwidth_max / - 100; - qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, - p_link->speed); - DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Configured MAX bandwidth to be %08x Mb/sec\n", - p_link->speed); - } + if (p_link->link_up && p_link->speed) + p_link->line_speed = p_link->speed; + else + p_link->line_speed = 0; + + max_bw = p_hwfn->mcp_info->func_info.bandwidth_max; + min_bw = p_hwfn->mcp_info->func_info.bandwidth_min; + + /* Max bandwidth configuration */ + __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw); + + /* Min bandwidth configuration */ + __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); + qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate); p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); p_link->an_complete = !!(status & @@ -648,6 +724,77 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, return 0; } +static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, + struct public_func *p_shmem_info) +{ + struct qed_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + + p_info->bandwidth_min = (p_shmem_info->config & + FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT; + if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. Set to 1\n", + p_info->bandwidth_min); + p_info->bandwidth_min = 1; + } + + p_info->bandwidth_max = (p_shmem_info->config & + FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. 
Set to 100\n", + p_info->bandwidth_max); + p_info->bandwidth_max = 100; + } +} + +static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct public_func *p_data, + int pfid) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); + u32 i, size; + + memset(p_data, 0, sizeof(*p_data)); + + size = min_t(u32, sizeof(*p_data), + QED_SECTION_SIZE(mfw_path_offsize)); + for (i = 0; i < size / sizeof(u32); i++) + ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, + func_addr + (i << 2)); + return size; +} + +static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_mcp_function_info *p_info; + struct public_func shmem_info; + u32 resp = 0, param = 0; + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + + qed_read_pf_bandwidth(p_hwfn, &shmem_info); + + p_info = &p_hwfn->mcp_info->func_info; + + qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min); + qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max); + + /* Acknowledge the MFW */ + qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, + &param); +} + int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -676,9 +823,27 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_LINK_CHANGE: qed_mcp_handle_link_change(p_hwfn, p_ptt, false); break; + case MFW_DRV_MSG_VF_DISABLED: + qed_mcp_handle_vf_flr(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_LLDP_DATA_UPDATED: + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_REMOTE_LLDP_MIB); + break; + case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_REMOTE_MIB); + break; + case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_OPERATIONAL_MIB); + break; case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_BW_UPDATE: + qed_mcp_update_bw(p_hwfn, p_ptt); + break; default: DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i); rc = -EINVAL; @@ -709,26 +874,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, return rc; } -int qed_mcp_get_mfw_ver(struct qed_dev *cdev, - u32 *p_mfw_ver) +int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_mfw_ver, u32 *p_running_bundle_id) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; - struct qed_ptt *p_ptt; u32 global_offsize; - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) - return -EBUSY; + if (IS_VF(p_hwfn->cdev)) { + if (p_hwfn->vf_iov_info) { + struct pfvf_acquire_resp_tlv *p_resp; + + p_resp = &p_hwfn->vf_iov_info->acquire_resp; + *p_mfw_ver = p_resp->pfdev_info.mfw_ver; + return 0; + } else { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF requested MFW version prior to ACQUIRE\n"); + return -EINVAL; + } + } global_offsize = qed_rd(p_hwfn, p_ptt, - SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> - public_base, + SECTION_OFFSIZE_ADDR(p_hwfn-> - mcp_info->public_base, PUBLIC_GLOBAL)); - *p_mfw_ver = qed_rd(p_hwfn, p_ptt, - SECTION_ADDR(global_offsize, 0) + - offsetof(struct public_global, mfw_ver)); - - qed_ptt_release(p_hwfn, p_ptt); + *p_mfw_ver = + qed_rd(p_hwfn, p_ptt, + SECTION_ADDR(global_offsize, + 0) + offsetof(struct public_global, mfw_ver)); + + if (p_running_bundle_id != NULL) { + *p_running_bundle_id = qed_rd(p_hwfn, p_ptt, + SECTION_ADDR(global_offsize, 0) + + offsetof(struct 
public_global, + running_bundle_id)); + } return 0; } @@ -739,6 +920,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; struct qed_ptt *p_ptt; + if (IS_VF(cdev)) + return -EINVAL; + if (!qed_mcp_is_init(p_hwfn)) { DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); return -EBUSY; @@ -758,28 +942,6 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, return 0; } -static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - struct public_func *p_data, - int pfid) -{ - u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, - PUBLIC_FUNC); - u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); - u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); - u32 i, size; - - memset(p_data, 0, sizeof(*p_data)); - - size = min_t(u32, sizeof(*p_data), - QED_SECTION_SIZE(mfw_path_offsize)); - for (i = 0; i < size / sizeof(u32); i++) - ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, - func_addr + (i << 2)); - - return size; -} - static int qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, struct public_func *p_info, @@ -818,26 +980,7 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, return -EINVAL; } - - info->bandwidth_min = (shmem_info.config & - FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT; - if (info->bandwidth_min < 1 || info->bandwidth_min > 100) { - DP_INFO(p_hwfn, - "bandwidth minimum out of bounds [%02x]. Set to 1\n", - info->bandwidth_min); - info->bandwidth_min = 1; - } - - info->bandwidth_max = (shmem_info.config & - FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; - if (info->bandwidth_max < 1 || info->bandwidth_max > 100) { - DP_INFO(p_hwfn, - "bandwidth maximum out of bounds [%02x]. Set to 100\n", - info->bandwidth_max); - info->bandwidth_max = 100; - } + qed_read_pf_bandwidth(p_hwfn, &shmem_info); if (shmem_info.mac_upper || shmem_info.mac_lower) { info->mac[0] = (u8)(shmem_info.mac_upper >> 8); @@ -914,6 +1057,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, { u32 flash_size; + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; @@ -924,6 +1070,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, return 0; } +int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 vf_id, u8 num) +{ + u32 resp = 0, param = 0, rc_param = 0; + int rc; + + /* Only Leader can configure MSIX, and need to take CMT into account */ + if (!IS_LEAD_HWFN(p_hwfn)) + return 0; + num *= p_hwfn->cdev->num_hwfns; + + param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) & + DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; + param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) & + DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, + &resp, &rc_param); + + if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { + DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id); + rc = -EINVAL; + } else { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", + num, vf_id); + } + + return rc; +} + int qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -938,9 +1115,10 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, p_drv_version = &union_data.drv_version; p_drv_version->version = p_ver->version; + for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) { val = cpu_to_be32(p_ver->name[i]); - *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val; + *(__be32 
*)&p_drv_version->name[i * sizeof(u32)] = val; } memset(&mb_params, 0, sizeof(mb_params)); @@ -979,3 +1157,45 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return rc; } + +int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 drv_mb_param = 0, rsp, param; + int rc = 0; + + drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, &param); + + if (rc) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (param != DRV_MB_PARAM_BIST_RC_PASSED)) + rc = -EAGAIN; + + return rc; +} + +int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 drv_mb_param, rsp, param; + int rc = 0; + + drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, &param); + + if (rc) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (param != DRV_MB_PARAM_BIST_RC_PASSED)) + rc = -EAGAIN; + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 50917a2131a5..6dd59eb7f4c6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -40,7 +40,15 @@ struct qed_mcp_link_capabilities { struct qed_mcp_link_state { bool link_up; - u32 speed; /* In Mb/s */ + u32 min_pf_rate; + + /* Actual link speed in Mb/s */ + u32 line_speed; + + /* PF max speed in Mb/s, deduced from line_speed + * according to PF max bandwidth configuration. + */ + u32 speed; bool full_duplex; bool an; @@ -141,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, /** * @brief Get the management firmware version value * - * @param cdev - qed dev pointer - * @param mfw_ver - mfw version value + * @param p_hwfn + * @param p_ptt + * @param p_mfw_ver - mfw version value + * @param p_running_bundle_id - image id in nvram; Optional. * - * @return int - 0 - operation was successul. + * @return int - 0 - operation was successful. */ -int qed_mcp_get_mfw_ver(struct qed_dev *cdev, - u32 *mfw_ver); +int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_mfw_ver, u32 *p_running_bundle_id); /** * @brief Get media type value of the port. @@ -237,6 +248,28 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_led_mode mode); +/** + * @brief Bist register test + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * @brief Bist clock test + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * + * @return int - 0 - operation was successful. + */ +int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + /* Using hwfn number (and not pf_num) is required since in CMT mode, * same pf_num may be used by two different hwfn * TODO - this shouldn't really be in .h file, but until all fields @@ -360,6 +393,18 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** + * @brief Ack to mfw that driver finished FLR process for VFs + * + * @param p_hwfn + * @param p_ptt + * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks. + * + * @return int - 0 upon success. 
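The BIST helpers above follow the driver/MFW mailbox convention used throughout qed_mcp.c: pack a field into the request parameter with a SHIFT/MASK pair, issue the command, then validate both the masked firmware message code and the returned parameter. Below is a minimal userspace C sketch of that pattern; the constant values and the mcp_cmd() stub are hypothetical stand-ins, not the real qed API.

#include <stdint.h>
#include <stdio.h>

#define BIST_TEST_INDEX_SHIFT 0
#define BIST_TEST_INDEX_MASK  0x000000ffU
#define FW_MSG_CODE_MASK      0xffff0000U
#define FW_MSG_CODE_OK        0x00160000U	/* hypothetical value */
#define BIST_RC_PASSED        1U

/* Stub standing in for the real mailbox transaction. */
static int mcp_cmd(uint32_t cmd, uint32_t param, uint32_t *rsp, uint32_t *fw_param)
{
	(void)cmd; (void)param;
	*rsp = FW_MSG_CODE_OK;
	*fw_param = BIST_RC_PASSED;
	return 0;
}

static int bist_test(uint32_t test_index)
{
	uint32_t param, rsp, fw_param;
	int rc;

	/* Pack the test index into its field of the mailbox parameter. */
	param = (test_index << BIST_TEST_INDEX_SHIFT) & BIST_TEST_INDEX_MASK;
	rc = mcp_cmd(/* DRV_MSG_CODE_BIST_TEST */ 0, param, &rsp, &fw_param);
	if (rc)
		return rc;

	/* Both the message code and the returned parameter must match. */
	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK || fw_param != BIST_RC_PASSED)
		return -1;	/* -EAGAIN in the kernel code */
	return 0;
}

int main(void)
{
	printf("bist: %d\n", bist_test(2));
	return 0;
}

The two-part response check matters because the MFW can complete the mailbox transaction successfully and still report a test failure in the parameter.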
+ */ +int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *vfs_to_ack); + +/** * @brief - calls during init to read shmem of all function-related info. * * @param p_hwfn @@ -389,4 +434,27 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, */ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); +/** + * @brief request MFW to configure MSI-X for a VF + * + * @param p_hwfn + * @param p_ptt + * @param vf_id - absolute inside engine + * @param num_sbs - number of entries to request + * + * @return int + */ +int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 vf_id, u8 num); + +int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw); +int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw); +int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 max_bw); +int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 min_bw); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index c15b1622e636..3a6c506f0d71 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -39,6 +39,10 @@ 0x2aae04UL #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \ 0x2aa16cUL +#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \ + 0x2aa118UL +#define PSWHST_REG_ZONE_PERMISSION_TABLE \ + 0x2a0800UL #define BAR0_MAP_REG_MSDM_RAM \ 0x1d00000UL #define BAR0_MAP_REG_USDM_RAM \ @@ -77,6 +81,8 @@ 0x2f2eb0UL #define DORQ_REG_PF_DB_ENABLE \ 0x100508UL +#define DORQ_REG_VF_USAGE_CNT \ + 0x1009c4UL #define QM_REG_PF_EN \ 0x2f2ea4UL #define TCFC_REG_STRONG_ENABLE_PF \ @@ -111,6 +117,8 @@ 0x009778UL #define MISCS_REG_CHIP_METAL \ 0x009774UL +#define MISCS_REG_FUNCTION_HIDE \ + 0x0096f0UL #define BRB_REG_HEADER_SIZE \ 0x340804UL #define BTB_REG_HEADER_SIZE \ @@ -119,6 +127,8 @@ 0x1c0708UL #define CCFC_REG_ACTIVITY_COUNTER \ 0x2e8800UL +#define CCFC_REG_STRONG_ENABLE_VF \ + 0x2e070cUL #define CDU_REG_CID_ADDR_PARAMS \ 0x580900UL #define DBG_REG_CLIENT_ENABLE \ @@ -161,6 +171,10 @@ 0x040200UL #define PBF_REG_INIT \ 0xd80000UL +#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \ + 0xd806c8UL +#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \ + 0xd806ccUL #define PTU_REG_ATC_INIT_ARRAY \ 0x560000UL #define PCM_REG_INIT \ @@ -385,6 +399,8 @@ 0x1d0000UL #define IGU_REG_PF_CONFIGURATION \ 0x180800UL +#define IGU_REG_VF_CONFIGURATION \ + 0x180804UL #define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \ 0x00849cUL #define MISC_REG_AEU_AFTER_INVERT_1_IGU \ @@ -411,6 +427,10 @@ 0x1 << 0) #define IGU_REG_MAPPING_MEMORY \ 0x184000UL +#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \ + 0x180408UL +#define IGU_REG_WRITE_DONE_PENDING \ + 0x180900UL #define MISCS_REG_GENERIC_POR_0 \ 0x0096d4UL #define MCP_REG_NVM_CFG4 \ @@ -427,4 +447,37 @@ 0x2aae60UL #define PGLUE_B_REG_PF_BAR1_SIZE \ 0x2aae64UL +#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL +#define PRS_REG_GRE_PROTOCOL 0x1f0734UL +#define PRS_REG_VXLAN_PORT 0x1f0738UL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL +#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL + +#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0) +#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0 +#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE (0x1 << 1) +#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1 +#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2) +#define 
NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2 + +#define NIG_REG_VXLAN_PORT 0x50105cUL +#define PBF_REG_VXLAN_PORT 0xd80518UL +#define PBF_REG_NGE_PORT 0xd8051cUL +#define PRS_REG_NGE_PORT 0x1f086cUL +#define NIG_REG_NGE_PORT 0x508b38UL + +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL +#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL + +#define NIG_REG_NGE_IP_ENABLE 0x508b28UL +#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL +#define NIG_REG_NGE_COMP_VER 0x508b30UL +#define PBF_REG_NGE_COMP_VER 0xd80524UL +#define PRS_REG_NGE_COMP_VER 0x1f0878UL + +#define QM_REG_WFQPFWEIGHT 0x2f4e80UL +#define QM_REG_WFQVPWEIGHT 0x2fa000UL #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c new file mode 100644 index 000000000000..a342bfe4280d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -0,0 +1,76 @@ +#include "qed.h" +#include "qed_dev_api.h" +#include "qed_mcp.h" +#include "qed_sp.h" + +int qed_selftest_memory(struct qed_dev *cdev) +{ + int rc = 0, i; + + for_each_hwfn(cdev, i) { + rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]); + if (rc) + return rc; + } + + return rc; +} + +int qed_selftest_interrupt(struct qed_dev *cdev) +{ + int rc = 0, i; + + for_each_hwfn(cdev, i) { + rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]); + if (rc) + return rc; + } + + return rc; +} + +int qed_selftest_register(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc = 0, i; + + /* although performed by MCP, this test is per engine */ + for_each_hwfn(cdev, i) { + p_hwfn = &cdev->hwfns[i]; + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "failed to acquire ptt\n"); + return -EBUSY; + } + rc = qed_mcp_bist_register_test(p_hwfn, p_ptt); + qed_ptt_release(p_hwfn, p_ptt); + if (rc) + break; + } + + return rc; +} + +int qed_selftest_clock(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc = 0, i; + + /* although performed by MCP, this test is per engine */ + for_each_hwfn(cdev, i) { + p_hwfn = &cdev->hwfns[i]; + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "failed to acquire ptt\n"); + return -EBUSY; + } + rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt); + qed_ptt_release(p_hwfn, p_ptt); + if (rc) + break; + } + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h new file mode 100644 index 000000000000..50eb0b49950f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -0,0 +1,40 @@ +#ifndef _QED_SELFTEST_API_H +#define _QED_SELFTEST_API_H +#include <linux/types.h> + +/** + * @brief qed_selftest_memory - Perform memory test + * + * @param cdev + * + * @return int + */ +int qed_selftest_memory(struct qed_dev *cdev); + +/** + * @brief qed_selftest_interrupt - Perform interrupt test + * + * @param cdev + * + * @return int + */ +int qed_selftest_interrupt(struct qed_dev *cdev); + +/** + * @brief qed_selftest_register - Perform register test + * + * @param cdev + * + * @return int + */ +int qed_selftest_register(struct qed_dev *cdev); + +/** + * @brief qed_selftest_clock - Perform clock test + * + * @param cdev + * + * @return int + */ +int qed_selftest_clock(struct qed_dev *cdev); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 
d39f914b66ee..ea4e9ce53e0a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -52,6 +52,7 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, union ramrod_data { struct pf_start_ramrod_data pf_start; + struct pf_update_ramrod_data pf_update; struct rx_queue_start_ramrod_data rx_queue_start; struct rx_queue_update_ramrod_data rx_queue_update; struct rx_queue_stop_ramrod_data rx_queue_stop; @@ -61,6 +62,9 @@ union ramrod_data { struct vport_stop_ramrod_data vport_stop; struct vport_update_ramrod_data vport_update; struct vport_filter_update_ramrod_data vport_filter_update; + + struct vf_start_ramrod_data vf_start; + struct vf_stop_ramrod_data vf_stop; }; #define EQ_MAX_CREDIT 0xffffffff @@ -338,13 +342,29 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * to the internal RAM of the UStorm by the Function Start Ramrod. * * @param p_hwfn + * @param p_tunn * @param mode + * @param allow_npar_tx_switch * * @return int */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, - enum qed_mf_mode mode); + struct qed_tunn_start_params *p_tunn, + enum qed_mf_mode mode, bool allow_npar_tx_switch); + +/** + * @brief qed_sp_pf_update - PF Function Update Ramrod + * + * This ramrod updates function-related parameters. Every parameter can be + * updated independently, according to configuration flags. + * + * @param p_hwfn + * + * @return int + */ + +int qed_sp_pf_update(struct qed_hwfn *p_hwfn); /** * @brief qed_sp_pf_stop - PF Function Stop Ramrod @@ -362,4 +382,18 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); +int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, + struct qed_tunn_update_params *p_tunn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); +/** + * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod + * + * @param p_hwfn + * + * @return int + */ + +int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn); + #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 1c06c37d4c3d..67f6ce3c84c8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -15,11 +15,13 @@ #include "qed.h" #include <linux/qed/qed_chain.h> #include "qed_cxt.h" +#include "qed_dcbx.h" #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_sriov.h" int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, @@ -87,8 +89,218 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, return 0; } +static enum tunnel_clss qed_tunn_get_clss_type(u8 type) +{ + switch (type) { + case QED_TUNN_CLSS_MAC_VLAN: + return TUNNEL_CLSS_MAC_VLAN; + case QED_TUNN_CLSS_MAC_VNI: + return TUNNEL_CLSS_MAC_VNI; + case QED_TUNN_CLSS_INNER_MAC_VLAN: + return TUNNEL_CLSS_INNER_MAC_VLAN; + case QED_TUNN_CLSS_INNER_MAC_VNI: + return TUNNEL_CLSS_INNER_MAC_VNI; + default: + return TUNNEL_CLSS_MAC_VLAN; + } +} + +static void +qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn, + struct qed_tunn_update_params *p_src, + struct pf_update_tunnel_config *p_tunn_cfg) +{ + unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode; + unsigned long update_mask = p_src->tunn_mode_update_mask; + unsigned long tunn_mode = p_src->tunn_mode; + unsigned long new_tunn_mode = 0; + + if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) { + if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) + __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode); + } else { + if 
(test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode)) + __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode); + } + + if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) { + if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) + __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode); + } else { + if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode)) + __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode); + } + + if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) { + if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) + __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode); + } else { + if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode)) + __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode); + } + + if (p_src->update_geneve_udp_port) { + p_tunn_cfg->set_geneve_udp_port_flg = 1; + p_tunn_cfg->geneve_udp_port = + cpu_to_le16(p_src->geneve_udp_port); + } + + if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) { + if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) + __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode); + } else { + if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode)) + __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode); + } + + if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) { + if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) + __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode); + } else { + if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode)) + __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode); + } + + p_src->tunn_mode = new_tunn_mode; +} + +static void +qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn, + struct qed_tunn_update_params *p_src, + struct pf_update_tunnel_config *p_tunn_cfg) +{ + unsigned long tunn_mode = p_src->tunn_mode; + enum tunnel_clss type; + + qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg); + p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss; + p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan); + p_tunn_cfg->tunnel_clss_vxlan = type; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre); + p_tunn_cfg->tunnel_clss_l2gre = type; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre); + p_tunn_cfg->tunnel_clss_ipgre = type; + + if (p_src->update_vxlan_udp_port) { + p_tunn_cfg->set_vxlan_udp_port_flg = 1; + p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port); + } + + if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2gre = 1; + + if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgre = 1; + + if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_vxlan = 1; + + if (p_src->update_geneve_udp_port) { + p_tunn_cfg->set_geneve_udp_port_flg = 1; + p_tunn_cfg->geneve_udp_port = + cpu_to_le16(p_src->geneve_udp_port); + } + + if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2geneve = 1; + + if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgeneve = 1; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve); + p_tunn_cfg->tunnel_clss_l2geneve = type; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve); + p_tunn_cfg->tunnel_clss_ipgeneve = type; +} + +static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + unsigned long tunn_mode) +{ + u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0; + u8 l2geneve_enable = 0, ipgeneve_enable = 0; + + if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) + l2gre_enable = 1; + + if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) + ipgre_enable = 1; + + if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) + vxlan_enable = 1; + + 
qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable); + qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable); + + if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) + l2geneve_enable = 1; + + if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) + ipgeneve_enable = 1; + + qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable, + ipgeneve_enable); +} + +static void +qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, + struct qed_tunn_start_params *p_src, + struct pf_start_tunnel_config *p_tunn_cfg) +{ + unsigned long tunn_mode; + enum tunnel_clss type; + + if (!p_src) + return; + + tunn_mode = p_src->tunn_mode; + type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan); + p_tunn_cfg->tunnel_clss_vxlan = type; + type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre); + p_tunn_cfg->tunnel_clss_l2gre = type; + type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre); + p_tunn_cfg->tunnel_clss_ipgre = type; + + if (p_src->update_vxlan_udp_port) { + p_tunn_cfg->set_vxlan_udp_port_flg = 1; + p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port); + } + + if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2gre = 1; + + if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgre = 1; + + if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_vxlan = 1; + + if (p_src->update_geneve_udp_port) { + p_tunn_cfg->set_geneve_udp_port_flg = 1; + p_tunn_cfg->geneve_udp_port = + cpu_to_le16(p_src->geneve_udp_port); + } + + if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2geneve = 1; + + if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgeneve = 1; + + type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve); + p_tunn_cfg->tunnel_clss_l2geneve = type; + type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve); + p_tunn_cfg->tunnel_clss_ipgeneve = type; +} + int qed_sp_pf_start(struct qed_hwfn *p_hwfn, - enum qed_mf_mode mode) + struct qed_tunn_start_params *p_tunn, + enum qed_mf_mode mode, bool allow_npar_tx_switch) { struct pf_start_ramrod_data *p_ramrod = NULL; u16 sb = qed_int_get_sp_sb_id(p_hwfn); @@ -143,16 +355,103 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, p_hwfn->p_consq->chain.pbl.p_phys_table); + qed_tunn_set_pf_start_params(p_hwfn, p_tunn, + &p_ramrod->tunnel_config); p_hwfn->hw_info.personality = PERSONALITY_ETH; + if (IS_MF_SI(p_hwfn)) + p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; + + if (p_hwfn->cdev->p_iov_info) { + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + + p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf; + p_ramrod->num_vfs = (u8) p_iov->total_vfs; + } + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", sb, sb_index, p_ramrod->outer_tag); + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + if (p_tunn) { + qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->tunn_mode); + p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode; + } + + return rc; +} + +int qed_sp_pf_update(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + 
qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results, + &p_ent->ramrod.pf_update); + return qed_spq_post(p_hwfn, p_ent, NULL); } +/* Set pf update ramrod command params */ +int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, + struct qed_tunn_update_params *p_tunn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + qed_tunn_set_pf_update_params(p_hwfn, p_tunn, + &p_ent->ramrod.pf_update.tunnel_config); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + if (p_tunn->update_vxlan_udp_port) + qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->vxlan_udp_port); + if (p_tunn->update_geneve_udp_port) + qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->geneve_udp_port); + + qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode); + p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode; + + return rc; +} + int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) { struct qed_spq_entry *p_ent = NULL; @@ -173,3 +472,24 @@ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) return qed_spq_post(p_hwfn, p_ent, NULL); } + +int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 89469d5aae25..acac6626a1b2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -27,6 +27,7 @@ #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_sriov.h" /*************************************************************************** * Structures & Definitions @@ -242,10 +243,17 @@ static int qed_async_event_completion(struct qed_hwfn *p_hwfn, struct event_ring_entry *p_eqe) { - DP_NOTICE(p_hwfn, - "Unknown Async completion for protocol: %d\n", - p_eqe->protocol_id); - return -EINVAL; + switch (p_eqe->protocol_id) { + case PROTOCOLID_COMMON: + return qed_sriov_eqe_event(p_hwfn, + p_eqe->opcode, + p_eqe->echo, &p_eqe->data); + default: + DP_NOTICE(p_hwfn, + "Unknown Async completion for protocol: %d\n", + p_eqe->protocol_id); + return -EINVAL; + } } /*************************************************************************** @@ -379,6 +387,9 @@ static int qed_cqe_completion( struct eth_slow_path_rx_cqe *cqe, enum protocol_type protocol) { + if (IS_VF(p_hwfn->cdev)) + return 0; + /* @@@tmp - it's possible we'll eventually want to handle some * actual commands that can arrive here, but for now this is only * used to complete the ramrod using the echo value on the cqe diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c new file mode 
100644 index 000000000000..c325ee857ecd --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -0,0 +1,3613 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/etherdevice.h> +#include <linux/crc32.h> +#include <linux/qed/qed_iov_if.h> +#include "qed_cxt.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_int.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_vf.h" + +/* IOV ramrods */ +static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, + u32 concrete_vfid, u16 opaque_vfid) +{ + struct vf_start_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_vfid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_VF_START, + PROTOCOLID_COMMON, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vf_start; + + p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); + p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid); + + p_ramrod->personality = PERSONALITY_ETH; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, + u32 concrete_vfid, u16 opaque_vfid) +{ + struct vf_stop_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_vfid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_VF_STOP, + PROTOCOLID_COMMON, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vf_stop; + + p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, + int rel_vf_id, bool b_enabled_only) +{ + if (!p_hwfn->pf_iov_info) { + DP_NOTICE(p_hwfn->cdev, "No iov info\n"); + return false; + } + + if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) || + (rel_vf_id < 0)) + return false; + + if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && + b_enabled_only) + return false; + + return true; +} + +static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only) +{ + struct qed_vf_info *vf = NULL; + + if (!p_hwfn->pf_iov_info) { + DP_NOTICE(p_hwfn->cdev, "No iov info\n"); + return NULL; + } + + if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only)) + vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; + else + DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n", + relative_vf_id); + + return vf; +} + +int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, + int vfid, struct qed_ptt *p_ptt) +{ + struct qed_bulletin_content *p_bulletin; + int crc_size = sizeof(p_bulletin->crc); + struct qed_dmae_params params; + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + if (!p_vf) + return -EINVAL; + + if (!p_vf->vf_bulletin) + return -EINVAL; + + p_bulletin = 
p_vf->bulletin.p_virt; + + /* Increment bulletin board version and compute crc */ + p_bulletin->version++; + p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, + p_vf->bulletin.size - crc_size); + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", + p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); + + /* propagate bulletin board via dmae to vm memory */ + memset(&params, 0, sizeof(params)); + params.flags = QED_DMAE_FLAG_VF_DST; + params.dst_vfid = p_vf->abs_vf_id; + return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, + p_vf->vf_bulletin, p_vf->bulletin.size / 4, + &params); +} + +static int qed_iov_pci_cfg_info(struct qed_dev *cdev) +{ + struct qed_hw_sriov_info *iov = cdev->p_iov_info; + int pos = iov->pos; + + DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos); + pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); + + pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); + if (iov->num_vfs) { + DP_VERBOSE(cdev, + QED_MSG_IOV, + "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n"); + iov->num_vfs = 0; + } + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_VF_OFFSET, &iov->offset); + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_VF_STRIDE, &iov->stride); + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); + + pci_read_config_dword(cdev->pdev, + pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); + + pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap); + + pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); + + DP_VERBOSE(cdev, + QED_MSG_IOV, + "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", + iov->nres, + iov->cap, + iov->ctrl, + iov->total_vfs, + iov->initial_vfs, + iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); + + /* Some sanity checks */ + if (iov->num_vfs > NUM_OF_VFS(cdev) || + iov->total_vfs > NUM_OF_VFS(cdev)) { + /* This can happen only due to a bug. 
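The bulletin-board posting in qed_iov_post_vf_bulletin() above bumps a version counter and checksums everything after the leading crc field, so a VF can detect a torn or stale snapshot. A self-contained sketch of that scheme follows; crc32_le() here is a plain reflected CRC-32 (polynomial 0xEDB88320) assumed to behave like the kernel's crc32(), and the struct layout is illustrative only.

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct bulletin {
	uint32_t crc;		/* must stay first; excluded from the checksum */
	uint32_t version;
	uint8_t  mac[6];
};

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320U & -(crc & 1));
	}
	return ~crc;
}

static void post_bulletin(struct bulletin *b)
{
	const size_t crc_size = sizeof(b->crc);

	b->version++;	/* readers poll for a version change */
	b->crc = crc32_le(0, (const uint8_t *)b + crc_size,
			  sizeof(*b) - crc_size);
}

int main(void)
{
	struct bulletin b;

	memset(&b, 0, sizeof(b));
	post_bulletin(&b);
	printf("version %u crc 0x%08x\n", b.version, b.crc);
	return 0;
}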
In this case we set + * num_vfs to zero to avoid memory corruption in the code that + * assumes max number of vfs + */ + DP_NOTICE(cdev, + "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n", + iov->num_vfs); + + iov->num_vfs = 0; + iov->total_vfs = 0; + } + + return 0; +} + +static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_igu_block *p_sb; + u16 sb_id; + u32 val; + + if (!p_hwfn->hw_info.p_igu_info) { + DP_ERR(p_hwfn, + "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n"); + return; + } + + for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); + sb_id++) { + p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; + if ((p_sb->status & QED_IGU_STATUS_FREE) && + !(p_sb->status & QED_IGU_STATUS_PF)) { + val = qed_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sb_id * 4); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); + qed_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + 4 * sb_id, val); + } + } +} + +static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) +{ + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + struct qed_bulletin_content *p_bulletin_virt; + dma_addr_t req_p, rply_p, bulletin_p; + union pfvf_tlvs *p_reply_virt_addr; + union vfpf_tlvs *p_req_virt_addr; + u8 idx = 0; + + memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); + + p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; + req_p = p_iov_info->mbx_msg_phys_addr; + p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; + rply_p = p_iov_info->mbx_reply_phys_addr; + p_bulletin_virt = p_iov_info->p_bulletins; + bulletin_p = p_iov_info->bulletins_phys; + if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { + DP_ERR(p_hwfn, + "qed_iov_setup_vfdb called without allocating mem first\n"); + return; + } + + for (idx = 0; idx < p_iov->total_vfs; idx++) { + struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; + u32 concrete; + + vf->vf_mbx.req_virt = p_req_virt_addr + idx; + vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); + vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; + vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); + + vf->state = VF_STOPPED; + vf->b_init = false; + + vf->bulletin.phys = idx * + sizeof(struct qed_bulletin_content) + + bulletin_p; + vf->bulletin.p_virt = p_bulletin_virt + idx; + vf->bulletin.size = sizeof(struct qed_bulletin_content); + + vf->relative_vf_id = idx; + vf->abs_vf_id = idx + p_iov->first_vf_in_pf; + concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id); + vf->concrete_fid = concrete; + vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | + (vf->abs_vf_id << 8); + vf->vport_id = idx + 1; + } +} + +static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) +{ + struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + void **p_v_addr; + u16 num_vfs = 0; + + num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "qed_iov_allocate_vfdb for %d VFs\n", num_vfs); + + /* Allocate PF Mailbox buffer (per-VF) */ + p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; + p_v_addr = &p_iov_info->mbx_msg_virt_addr; + *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_msg_size, + &p_iov_info->mbx_msg_phys_addr, + GFP_KERNEL); + if (!*p_v_addr) + return -ENOMEM; + + /* Allocate PF Mailbox Reply buffer (per-VF) */ + p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; + p_v_addr = &p_iov_info->mbx_reply_virt_addr; + *p_v_addr = 
dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_reply_size, + &p_iov_info->mbx_reply_phys_addr, + GFP_KERNEL); + if (!*p_v_addr) + return -ENOMEM; + + p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * + num_vfs; + p_v_addr = &p_iov_info->p_bulletins; + *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->bulletins_size, + &p_iov_info->bulletins_phys, + GFP_KERNEL); + if (!*p_v_addr) + return -ENOMEM; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", + p_iov_info->mbx_msg_virt_addr, + (u64) p_iov_info->mbx_msg_phys_addr, + p_iov_info->mbx_reply_virt_addr, + (u64) p_iov_info->mbx_reply_phys_addr, + p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys); + + return 0; +} + +static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn) +{ + struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + + if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_msg_size, + p_iov_info->mbx_msg_virt_addr, + p_iov_info->mbx_msg_phys_addr); + + if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_reply_size, + p_iov_info->mbx_reply_virt_addr, + p_iov_info->mbx_reply_phys_addr); + + if (p_iov_info->p_bulletins) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->bulletins_size, + p_iov_info->p_bulletins, + p_iov_info->bulletins_phys); +} + +int qed_iov_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_pf_iov *p_sriov; + + if (!IS_PF_SRIOV(p_hwfn)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "No SR-IOV - no need for IOV db\n"); + return 0; + } + + p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL); + if (!p_sriov) { + DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n"); + return -ENOMEM; + } + + p_hwfn->pf_iov_info = p_sriov; + + return qed_iov_allocate_vfdb(p_hwfn); +} + +void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) + return; + + qed_iov_setup_vfdb(p_hwfn); + qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt); +} + +void qed_iov_free(struct qed_hwfn *p_hwfn) +{ + if (IS_PF_SRIOV_ALLOC(p_hwfn)) { + qed_iov_free_vfdb(p_hwfn); + kfree(p_hwfn->pf_iov_info); + } +} + +void qed_iov_free_hw_info(struct qed_dev *cdev) +{ + kfree(cdev->p_iov_info); + cdev->p_iov_info = NULL; +} + +int qed_iov_hw_info(struct qed_hwfn *p_hwfn) +{ + struct qed_dev *cdev = p_hwfn->cdev; + int pos; + int rc; + + if (IS_VF(p_hwfn->cdev)) + return 0; + + /* Learn the PCI configuration */ + pos = pci_find_ext_capability(p_hwfn->cdev->pdev, + PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n"); + return 0; + } + + /* Allocate a new struct for IOV information */ + cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL); + if (!cdev->p_iov_info) { + DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n"); + return -ENOMEM; + } + cdev->p_iov_info->pos = pos; + + rc = qed_iov_pci_cfg_info(cdev); + if (rc) + return rc; + + /* We want PF IOV to be synonymous with the existence of p_iov_info; + * In case the capability is published but there are no VFs, simply + * de-allocate the struct. 
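qed_iov_allocate_vfdb() and qed_iov_setup_vfdb() above carve a few large coherent allocations into per-VF slots: VF idx gets element idx of the virtual array plus the matching byte offset into the DMA address. A compact sketch of that slicing, with simplified stand-in types (the real buffers come from dma_alloc_coherent()):

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

struct vf_mbx {
	void       *req_virt;
	dma_addr_t  req_phys;
};

struct vfpf_msg { uint8_t raw[1024]; };	/* stands in for union vfpf_tlvs */

int main(void)
{
	int total_vfs = 4;
	struct vfpf_msg *msgs = calloc(total_vfs, sizeof(*msgs));
	dma_addr_t msgs_phys = 0x10000000ULL;	/* would come from dma_alloc_coherent() */
	struct vf_mbx mbx[4];

	if (!msgs)
		return 1;

	for (int idx = 0; idx < total_vfs; idx++) {
		/* Same index into both address spaces keeps them in lockstep. */
		mbx[idx].req_virt = &msgs[idx];
		mbx[idx].req_phys = msgs_phys + idx * sizeof(struct vfpf_msg);
		printf("VF%d: virt %p phys 0x%llx\n", idx, mbx[idx].req_virt,
		       (unsigned long long)mbx[idx].req_phys);
	}
	free(msgs);
	return 0;
}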
+ */ + if (!cdev->p_iov_info->total_vfs) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "IOV capabilities, but no VFs are published\n"); + kfree(cdev->p_iov_info); + cdev->p_iov_info = NULL; + return 0; + } + + /* Calculate the first VF index - this is a bit tricky; Basically, + * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin + * after the first engine's VFs. + */ + cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset + + p_hwfn->abs_pf_id - 16; + if (QED_PATH_ID(p_hwfn)) + cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "First VF in hwfn 0x%08x\n", + cdev->p_iov_info->first_vf_in_pf); + + return 0; +} + +static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) +{ + /* Check PF supports sriov */ + if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || + !IS_PF_SRIOV_ALLOC(p_hwfn)) + return false; + + /* Check VF validity */ + if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true)) + return false; + + return true; +} + +static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, + u16 rel_vf_id, u8 to_disable) +{ + struct qed_vf_info *vf; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!vf) + continue; + + vf->to_disable = to_disable; + } +} + +void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable) +{ + u16 i; + + if (!IS_QED_SRIOV(cdev)) + return; + + for (i = 0; i < cdev->p_iov_info->total_vfs; i++) + qed_iov_set_vf_to_disable(cdev, i, to_disable); +} + +static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 abs_vfid) +{ + qed_wr(p_hwfn, p_ptt, + PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, + 1 << (abs_vfid & 0x1f)); +} + +static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_vf_info *vf) +{ + int i; + + /* Set VF masks and configuration - pretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + + qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); + + /* unpretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + + /* iterate over all queues, clear sb consumer */ + for (i = 0; i < vf->num_sbs; i++) + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, + vf->igu_sbs[i], + vf->opaque_fid, true); +} + +static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf, bool enable) +{ + u32 igu_vf_conf; + + qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + + igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); + + if (enable) + igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; + else + igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; + + qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); + + /* unpretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); +} + +static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; + int rc; + + if (vf->to_disable) + return 0; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Enable internal access for vf %x [abs %x]\n", + vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); + + qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); + + qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); + + rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); + if (rc) + return rc; + + qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid); + + SET_FIELD(igu_vf_conf, 
IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); + STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); + + qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, + p_hwfn->hw_info.hw_mode); + + /* unpretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); + + if (vf->state != VF_STOPPED) { + DP_NOTICE(p_hwfn, "VF[%02x] is already started\n", + vf->abs_vf_id); + return -EINVAL; + } + + /* Start VF */ + rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid); + if (rc) + DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); + + vf->state = VF_FREE; + + return rc; +} + +/** + * @brief qed_iov_config_perm_table - configure the permission + * zone table. + * In E4, queue zone permission table size is 320x9. There + * are 320 VF queues for single engine device (256 for dual + * engine device), and each entry has the following format: + * {Valid, VF[7:0]} + * @param p_hwfn + * @param p_ptt + * @param vf + * @param enable + */ +static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf, u8 enable) +{ + u32 reg_addr, val; + u16 qzone_id = 0; + int qid; + + for (qid = 0; qid < vf->num_rxqs; qid++) { + qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, + &qzone_id); + + reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; + val = enable ? (vf->abs_vf_id | (1 << 8)) : 0; + qed_wr(p_hwfn, p_ptt, reg_addr, val); + } +} + +static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + /* Reset vf in IGU - interrupts are still disabled */ + qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); + + qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1); + + /* Permission Table */ + qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true); +} + +static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf, u16 num_rx_queues) +{ + struct qed_igu_block *igu_blocks; + int qid = 0, igu_id = 0; + u32 val = 0; + + igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks; + + if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) + num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks; + p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues; + + SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); + SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); + + while ((qid < num_rx_queues) && + (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) { + if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) { + struct cau_sb_entry sb_entry; + + vf->igu_sbs[qid] = (u16)igu_id; + igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE; + + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); + + qed_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id, + val); + + /* Configure igu sb in CAU which were marked valid */ + qed_init_cau_sb_entry(p_hwfn, &sb_entry, + p_hwfn->rel_pf_id, + vf->abs_vf_id, 1); + qed_dmae_host2grc(p_hwfn, p_ptt, + (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + igu_id * sizeof(u64), 2, 0); + qid++; + } + igu_id++; + } + + vf->num_sbs = (u8) num_rx_queues; + + return vf->num_sbs; +} + +static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + int idx, igu_id; + u32 addr, val; + + /* Invalidate igu CAM lines and mark them as free */ + for (idx = 0; idx < vf->num_sbs; idx++) { + igu_id = vf->igu_sbs[idx]; + addr = IGU_REG_MAPPING_MEMORY 
+ sizeof(u32) * igu_id; + + val = qed_rd(p_hwfn, p_ptt, addr); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); + qed_wr(p_hwfn, p_ptt, addr, val); + + p_info->igu_map.igu_blocks[igu_id].status |= + QED_IGU_STATUS_FREE; + + p_hwfn->hw_info.p_igu_info->free_blks++; + } + + vf->num_sbs = 0; +} + +static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 rel_vf_id, u16 num_rx_queues) +{ + u8 num_of_vf_avaiable_chains = 0; + struct qed_vf_info *vf = NULL; + int rc = 0; + u32 cids; + u8 i; + + vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!vf) { + DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n"); + return -EINVAL; + } + + if (vf->b_init) { + DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id); + return -EINVAL; + } + + /* Limit number of queues according to number of CIDs */ + qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", + vf->relative_vf_id, num_rx_queues, (u16) cids); + num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids)); + + num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn, + p_ptt, + vf, + num_rx_queues); + if (!num_of_vf_avaiable_chains) { + DP_ERR(p_hwfn, "no available igu sbs\n"); + return -ENOMEM; + } + + /* Choose queue number and index ranges */ + vf->num_rxqs = num_of_vf_avaiable_chains; + vf->num_txqs = num_of_vf_avaiable_chains; + + for (i = 0; i < vf->num_rxqs; i++) { + u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn, + vf->igu_sbs[i]); + + if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) { + DP_NOTICE(p_hwfn, + "VF[%d] will require utilizing of out-of-bounds queues - %04x\n", + vf->relative_vf_id, queue_id); + return -EINVAL; + } + + /* CIDs are per-VF, so no problem having them 0-based. 
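The SB assignment in qed_iov_alloc_vf_igu_sbs() above is essentially a linear scan of a status bank: hand out IGU blocks flagged free, clear the flag, and record the block index per VF queue. A minimal sketch with illustrative flag values and sizes:

#include <stdint.h>
#include <stdio.h>

#define IGU_STATUS_FREE 0x01
#define MAP_SIZE 16
#define MAX_QUEUES 8

int main(void)
{
	uint8_t status[MAP_SIZE] = {
		0, IGU_STATUS_FREE, IGU_STATUS_FREE, 0,
		IGU_STATUS_FREE, 0, IGU_STATUS_FREE, 0,
	};
	uint16_t igu_sbs[MAX_QUEUES];
	int num_rx_queues = 3, qid = 0;

	for (int igu_id = 0; qid < num_rx_queues && igu_id < MAP_SIZE; igu_id++) {
		if (!(status[igu_id] & IGU_STATUS_FREE))
			continue;
		status[igu_id] &= ~IGU_STATUS_FREE;	/* block now owned by the VF */
		igu_sbs[qid++] = (uint16_t)igu_id;
	}

	printf("allocated %d SBs:", qid);
	for (int i = 0; i < qid; i++)
		printf(" %u", igu_sbs[i]);
	printf("\n");
	return 0;
}

Note that the kernel code also caps the request at the number of free blocks up front, so a partial allocation is reported back to the VF rather than treated as a hard failure.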
*/ + vf->vf_queues[i].fw_rx_qid = queue_id; + vf->vf_queues[i].fw_tx_qid = queue_id; + vf->vf_queues[i].fw_cid = i; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n", + vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i); + } + rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); + if (!rc) { + vf->b_init = true; + + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->cdev->p_iov_info->num_vfs++; + } + + return rc; +} + +static void qed_iov_set_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *params, + struct qed_mcp_link_state *link, + struct qed_mcp_link_capabilities *p_caps) +{ + struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, + vfid, + false); + struct qed_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + p_bulletin->req_autoneg = params->speed.autoneg; + p_bulletin->req_adv_speed = params->speed.advertised_speeds; + p_bulletin->req_forced_speed = params->speed.forced_speed; + p_bulletin->req_autoneg_pause = params->pause.autoneg; + p_bulletin->req_forced_rx = params->pause.forced_rx; + p_bulletin->req_forced_tx = params->pause.forced_tx; + p_bulletin->req_loopback = params->loopback_mode; + + p_bulletin->link_up = link->link_up; + p_bulletin->speed = link->speed; + p_bulletin->full_duplex = link->full_duplex; + p_bulletin->autoneg = link->an; + p_bulletin->autoneg_complete = link->an_complete; + p_bulletin->parallel_detection = link->parallel_detection; + p_bulletin->pfc_enabled = link->pfc_enabled; + p_bulletin->partner_adv_speed = link->partner_adv_speed; + p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; + p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; + p_bulletin->partner_adv_pause = link->partner_adv_pause; + p_bulletin->sfp_tx_fault = link->sfp_tx_fault; + + p_bulletin->capability_speed = p_caps->speed_capabilities; +} + +static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 rel_vf_id) +{ + struct qed_mcp_link_capabilities caps; + struct qed_mcp_link_params params; + struct qed_mcp_link_state link; + struct qed_vf_info *vf = NULL; + int rc = 0; + + vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!vf) { + DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n"); + return -EINVAL; + } + + if (vf->bulletin.p_virt) + memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); + + memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); + + /* Get the link configuration back in bulletin so + * that when VFs are re-enabled they get the actual + * link configuration. 
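Taken together, the checks above imply a small VF lifecycle: VF_STOPPED becomes VF_FREE once the VF_START ramrod is sent, VF_FREE becomes VF_ACQUIRED after the mailbox handshake, and release sends VF_STOP only when the VF is not already stopped. The sketch below reconstructs that state machine from the checks in this patch; it is not copied from a qed header.

#include <stdio.h>

enum vf_state { VF_STOPPED, VF_FREE, VF_ACQUIRED };

static int vf_start(enum vf_state *s)
{
	if (*s != VF_STOPPED)
		return -1;	/* "VF is already started" */
	*s = VF_FREE;		/* VF_START ramrod sent */
	return 0;
}

static int vf_acquire(enum vf_state *s)
{
	if (*s != VF_FREE)
		return -1;
	*s = VF_ACQUIRED;	/* ACQUIRE handshake completed */
	return 0;
}

static void vf_release(enum vf_state *s)
{
	if (*s != VF_STOPPED)	/* send the VF_STOP ramrod only if needed */
		*s = VF_STOPPED;
}

int main(void)
{
	enum vf_state s = VF_STOPPED;

	printf("start: %d\n", vf_start(&s));
	printf("acquire: %d\n", vf_acquire(&s));
	vf_release(&s);
	printf("final state: %d\n", s);
	return 0;
}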
+ */ + memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params)); + memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link)); + memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); + qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps); + + if (vf->state != VF_STOPPED) { + /* Stopping the VF */ + rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid); + + if (rc != 0) { + DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", + rc); + return rc; + } + + vf->state = VF_STOPPED; + } + + /* disabling interrupts and resetting permission table was done during + * vf-close, however, we could get here without going through vf_close + */ + /* Disable Interrupts for VF */ + qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); + + /* Reset Permission table */ + qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); + + vf->num_rxqs = 0; + vf->num_txqs = 0; + qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); + + if (vf->b_init) { + vf->b_init = false; + + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->cdev->p_iov_info->num_vfs--; + } + + return 0; +} + +static bool qed_iov_tlv_supported(u16 tlvtype) +{ + return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; +} + +/* place a given tlv on the tlv buffer, continuing current tlv list */ +void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) +{ + struct channel_tlv *tl = (struct channel_tlv *)*offset; + + tl->type = type; + tl->length = length; + + /* Offset should keep pointing to next TLV (the end of the last) */ + *offset += length; + + /* Return a pointer to the start of the added tlv */ + return *offset - length; +} + +/* list the types and lengths of the tlvs on the buffer */ +void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list) +{ + u16 i = 1, total_length = 0; + struct channel_tlv *tlv; + + do { + tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); + + /* output tlv */ + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "TLV number %d: type %d, length %d\n", + i, tlv->type, tlv->length); + + if (tlv->type == CHANNEL_TLV_LIST_END) + return; + + /* Validate entry - protect against malicious VFs */ + if (!tlv->length) { + DP_NOTICE(p_hwfn, "TLV of length 0 found\n"); + return; + } + + total_length += tlv->length; + + if (total_length >= sizeof(struct tlv_buffer_size)) { + DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n"); + return; + } + + i++; + } while (1); +} + +static void qed_iov_send_response(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf, + u16 length, u8 status) +{ + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct qed_dmae_params params; + u8 eng_vf_id; + + mbx->reply_virt->default_resp.hdr.status = status; + + qed_dp_tlv_list(p_hwfn, mbx->reply_virt); + + eng_vf_id = p_vf->abs_vf_id; + + memset(&params, 0, sizeof(struct qed_dmae_params)); + params.flags = QED_DMAE_FLAG_VF_DST; + params.dst_vfid = eng_vf_id; + + qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), + mbx->req_virt->first_tlv.reply_address + + sizeof(u64), + (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, + &params); + + qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, + mbx->req_virt->first_tlv.reply_address, + sizeof(u64) / 4, &params); + + REG_WR(p_hwfn, + GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); +} + +static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, + enum qed_iov_vport_update_flag flag) +{ + switch (flag) { + case QED_IOV_VP_UPDATE_ACTIVATE: + return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + case QED_IOV_VP_UPDATE_VLAN_STRIP: + return 
CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; + case QED_IOV_VP_UPDATE_TX_SWITCH: + return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + case QED_IOV_VP_UPDATE_MCAST: + return CHANNEL_TLV_VPORT_UPDATE_MCAST; + case QED_IOV_VP_UPDATE_ACCEPT_PARAM: + return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + case QED_IOV_VP_UPDATE_RSS: + return CHANNEL_TLV_VPORT_UPDATE_RSS; + case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: + return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + case QED_IOV_VP_UPDATE_SGE_TPA: + return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; + default: + return 0; + } +} + +static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_iov_vf_mbx *p_mbx, + u8 status, + u16 tlvs_mask, u16 tlvs_accepted) +{ + struct pfvf_def_resp_tlv *resp; + u16 size, total_len, i; + + memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); + p_mbx->offset = (u8 *)p_mbx->reply_virt; + size = sizeof(struct pfvf_def_resp_tlv); + total_len = size; + + qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); + + /* Prepare response for all extended tlvs if they are found by PF */ + for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { + if (!(tlvs_mask & (1 << i))) + continue; + + resp = qed_add_tlv(p_hwfn, &p_mbx->offset, + qed_iov_vport_to_tlv(p_hwfn, i), size); + + if (tlvs_accepted & (1 << i)) + resp->hdr.status = status; + else + resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - vport_update response: TLV %d, status %02x\n", + p_vf->relative_vf_id, + qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); + + total_len += size; + } + + qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + return total_len; +} + +static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf_info, + u16 type, u16 length, u8 status) +{ + struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; + + mbx->offset = (u8 *)mbx->reply_virt; + + qed_add_tlv(p_hwfn, &mbx->offset, type, length); + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); +} + +struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only) +{ + struct qed_vf_info *vf = NULL; + + vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); + if (!vf) + return NULL; + + return &vf->p_vf_info; +} + +void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) +{ + struct qed_public_vf_info *vf_info; + + vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false); + + if (!vf_info) + return; + + /* Clear the VF mac */ + memset(vf_info->mac, 0, ETH_ALEN); +} + +static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf) +{ + u32 i; + + p_vf->vf_bulletin = 0; + p_vf->vport_instance = 0; + p_vf->num_mac_filters = 0; + p_vf->num_vlan_filters = 0; + p_vf->configured_features = 0; + + /* If VF previously requested less resources, go back to default */ + p_vf->num_rxqs = p_vf->num_sbs; + p_vf->num_txqs = p_vf->num_sbs; + + p_vf->num_active_rxqs = 0; + + for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) + p_vf->vf_queues[i].rxq_active = 0; + + memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); + qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); +} + +static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct 
pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; + struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; + struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; + u8 i, vfpf_status = PFVF_STATUS_SUCCESS; + struct pf_vf_resc *resc = &resp->resc; + + /* Validate FW compatibility */ + if (req->vfdev_info.fw_major != FW_MAJOR_VERSION || + req->vfdev_info.fw_minor != FW_MINOR_VERSION || + req->vfdev_info.fw_revision != FW_REVISION_VERSION || + req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) { + DP_INFO(p_hwfn, + "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n", + vf->abs_vf_id, + req->vfdev_info.fw_major, + req->vfdev_info.fw_minor, + req->vfdev_info.fw_revision, + req->vfdev_info.fw_engineering, + FW_MAJOR_VERSION, + FW_MINOR_VERSION, + FW_REVISION_VERSION, FW_ENGINEERING_VERSION); + vfpf_status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + /* On 100g PFs, prevent old VFs from loading */ + if ((p_hwfn->cdev->num_hwfns > 1) && + !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { + DP_INFO(p_hwfn, + "VF[%d] is running an old driver that doesn't support 100g\n", + vf->abs_vf_id); + vfpf_status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + memset(resp, 0, sizeof(*resp)); + + /* Fill in vf info stuff */ + vf->opaque_fid = req->vfdev_info.opaque_fid; + vf->num_mac_filters = 1; + vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; + + vf->vf_bulletin = req->bulletin_addr; + vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? + vf->bulletin.size : req->bulletin_size; + + /* fill in pfdev info */ + pfdev_info->chip_num = p_hwfn->cdev->chip_num; + pfdev_info->db_size = 0; + pfdev_info->indices_per_sb = PIS_PER_SB; + + pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | + PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; + if (p_hwfn->cdev->num_hwfns > 1) + pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; + + pfdev_info->stats_info.mstats.address = + PXP_VF_BAR0_START_MSDM_ZONE_B + + offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat); + pfdev_info->stats_info.mstats.len = + sizeof(struct eth_mstorm_per_queue_stat); + + pfdev_info->stats_info.ustats.address = + PXP_VF_BAR0_START_USDM_ZONE_B + + offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat); + pfdev_info->stats_info.ustats.len = + sizeof(struct eth_ustorm_per_queue_stat); + + pfdev_info->stats_info.pstats.address = + PXP_VF_BAR0_START_PSDM_ZONE_B + + offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat); + pfdev_info->stats_info.pstats.len = + sizeof(struct eth_pstorm_per_queue_stat); + + pfdev_info->stats_info.tstats.address = 0; + pfdev_info->stats_info.tstats.len = 0; + + memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); + + pfdev_info->fw_major = FW_MAJOR_VERSION; + pfdev_info->fw_minor = FW_MINOR_VERSION; + pfdev_info->fw_rev = FW_REVISION_VERSION; + pfdev_info->fw_eng = FW_ENGINEERING_VERSION; + pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; + qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); + + pfdev_info->dev_type = p_hwfn->cdev->type; + pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; + + resc->num_rxqs = vf->num_rxqs; + resc->num_txqs = vf->num_txqs; + resc->num_sbs = vf->num_sbs; + for (i = 0; i < resc->num_sbs; i++) { + resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i]; + resc->hw_sbs[i].sb_qid = 0; + } + + for (i = 0; i < resc->num_rxqs; i++) { + qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid, + (u16 *)&resc->hw_qid[i]); + resc->cid[i] = vf->vf_queues[i].fw_cid; + } 
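The PF/VF channel that qed_add_tlv() and qed_dp_tlv_list() maintain above is a flat {type, length} list closed by a LIST_END entry, and the PF-side walker must reject zero-length entries and walks that overrun the buffer; both guards protect against malicious VFs. A self-contained sketch of such a parser, with illustrative sizes and type values:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TLV_LIST_END 0
#define BUF_SIZE 256

struct channel_tlv {
	uint16_t type;
	uint16_t length;	/* covers header plus payload */
};

static int walk_tlvs(const uint8_t *buf)
{
	size_t total = 0;

	for (;;) {
		struct channel_tlv tlv;

		if (total + sizeof(tlv) > BUF_SIZE)
			return -1;	/* malformed: header past buffer end */
		memcpy(&tlv, buf + total, sizeof(tlv));
		if (tlv.type == TLV_LIST_END)
			return 0;
		if (!tlv.length)
			return -1;	/* malformed: would loop forever */
		printf("tlv type %u len %u\n", tlv.type, tlv.length);
		total += tlv.length;
		if (total >= BUF_SIZE)
			return -1;	/* malformed: overruns the buffer */
	}
}

int main(void)
{
	uint8_t buf[BUF_SIZE] = { 0 };
	struct channel_tlv t = { .type = 7, .length = 16 };

	memcpy(buf, &t, sizeof(t));	/* one TLV, then LIST_END (zeroed) */
	return walk_tlvs(buf);
}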
+	resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
+				      req->resc_request.num_mac_filters);
+	resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
+				       req->resc_request.num_vlan_filters);
+
+	/* This isn't really required as VF isn't limited, but some VFs might
+	 * actually test this value, so need to provide it.
+	 */
+	resc->num_mc_filters = req->resc_request.num_mc_filters;
+
+	/* Fill agreed size of bulletin board in response */
+	resp->bulletin_size = vf->bulletin.size;
+	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_IOV,
+		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
+		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
+		   vf->abs_vf_id,
+		   resp->pfdev_info.chip_num,
+		   resp->pfdev_info.db_size,
+		   resp->pfdev_info.indices_per_sb,
+		   resp->pfdev_info.capabilities,
+		   resc->num_rxqs,
+		   resc->num_txqs,
+		   resc->num_sbs,
+		   resc->num_mac_filters,
+		   resc->num_vlan_filters);
+	vf->state = VF_ACQUIRED;
+
+	/* Prepare Response */
+out:
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
+}
+
+static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
+				  struct qed_vf_info *p_vf, bool val)
+{
+	struct qed_sp_vport_update_params params;
+	int rc;
+
+	if (val == p_vf->spoof_chk) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Spoofchk value[%d] is already configured\n", val);
+		return 0;
+	}
+
+	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
+	params.opaque_fid = p_vf->opaque_fid;
+	params.vport_id = p_vf->vport_id;
+	params.update_anti_spoofing_en_flg = 1;
+	params.anti_spoofing_en = val;
+
+	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+	if (!rc) {
+		p_vf->spoof_chk = val;
+		p_vf->req_spoofchk_val = p_vf->spoof_chk;
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Spoofchk val[%d] configured\n", val);
+	} else {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+			   val, p_vf->relative_vf_id);
+	}
+
+	return rc;
+}
+
+static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
+					    struct qed_vf_info *p_vf)
+{
+	struct qed_filter_ucast filter;
+	int rc = 0;
+	int i;
+
+	memset(&filter, 0, sizeof(filter));
+	filter.is_rx_filter = 1;
+	filter.is_tx_filter = 1;
+	filter.vport_to_add_to = p_vf->vport_id;
+	filter.opcode = QED_FILTER_ADD;
+
+	/* Reconfigure vlans */
+	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+		if (!p_vf->shadow_config.vlans[i].used)
+			continue;
+
+		filter.type = QED_FILTER_VLAN;
+		filter.vlan = p_vf->shadow_config.vlans[i].vid;
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_IOV,
+			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+			   filter.vlan, p_vf->relative_vf_id);
+		rc = qed_sp_eth_filter_ucast(p_hwfn,
+					     p_vf->opaque_fid,
+					     &filter,
+					     QED_SPQ_MODE_CB, NULL);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
+				  filter.vlan, p_vf->relative_vf_id);
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int
+qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
+				   struct qed_vf_info *p_vf, u64 events)
+{
+	int rc = 0;
+
+	if ((events & (1 << VLAN_ADDR_FORCED)) &&
+	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
+		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+
+	return rc;
+}
+
+static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
+					  struct qed_vf_info *p_vf, u64 events)
+{
+	int rc = 0;
+	struct qed_filter_ucast filter;
+
+	if (!p_vf->vport_instance)
+		return -EINVAL;
+
+	if (events & (1 << MAC_ADDR_FORCED)) {
+		/* Since there's no way [currently] of removing the MAC,
+		 * we can always assume this means we need to force it.
+		 */
+		memset(&filter, 0, sizeof(filter));
+		filter.type = QED_FILTER_MAC;
+		filter.opcode = QED_FILTER_REPLACE;
+		filter.is_rx_filter = 1;
+		filter.is_tx_filter = 1;
+		filter.vport_to_add_to = p_vf->vport_id;
+		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
+
+		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+					     &filter, QED_SPQ_MODE_CB, NULL);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "PF failed to configure MAC for VF\n");
+			return rc;
+		}
+
+		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+	}
+
+	if (events & (1 << VLAN_ADDR_FORCED)) {
+		struct qed_sp_vport_update_params vport_update;
+		u8 removal;
+		int i;
+
+		memset(&filter, 0, sizeof(filter));
+		filter.type = QED_FILTER_VLAN;
+		filter.is_rx_filter = 1;
+		filter.is_tx_filter = 1;
+		filter.vport_to_add_to = p_vf->vport_id;
+		filter.vlan = p_vf->bulletin.p_virt->pvid;
+		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
+					      QED_FILTER_FLUSH;
+
+		/* Send the ramrod */
+		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+					     &filter, QED_SPQ_MODE_CB, NULL);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "PF failed to configure VLAN for VF\n");
+			return rc;
+		}
+
+		/* Update the default-vlan & silent vlan stripping */
+		memset(&vport_update, 0, sizeof(vport_update));
+		vport_update.opaque_fid = p_vf->opaque_fid;
+		vport_update.vport_id = p_vf->vport_id;
+		vport_update.update_default_vlan_enable_flg = 1;
+		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+		vport_update.update_default_vlan_flg = 1;
+		vport_update.default_vlan = filter.vlan;
+
+		vport_update.update_inner_vlan_removal_flg = 1;
+		removal = filter.vlan ? 1
+				      : p_vf->shadow_config.inner_vlan_removal;
+		vport_update.inner_vlan_removal_flg = removal;
+		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+		rc = qed_sp_vport_update(p_hwfn,
+					 &vport_update,
+					 QED_SPQ_MODE_EBLOCK, NULL);
+		if (rc) {
+			DP_NOTICE(p_hwfn,
+				  "PF failed to configure VF vport for vlan\n");
+			return rc;
+		}
+
+		/* Update all the Rx queues */
+		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+			u16 qid;
+
+			if (!p_vf->vf_queues[i].rxq_active)
+				continue;
+
+			qid = p_vf->vf_queues[i].fw_rx_qid;
+
+			rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
+							 1, 0, 1,
+							 QED_SPQ_MODE_EBLOCK,
+							 NULL);
+			if (rc) {
+				DP_NOTICE(p_hwfn,
+					  "Failed to send Rx update for queue[0x%04x]\n",
+					  qid);
+				return rc;
+			}
+		}
+
+		if (filter.vlan)
+			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+		else
+			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+	}
+
+	/* If forced features are terminated, we need to configure the shadow
+	 * configuration back again.
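+	 * E.g., once a forced PVID is cleared, the VLAN filters the VF had
+	 * registered in its shadow_config are re-applied by
+	 * qed_iov_reconfigure_unicast_shadow() below.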
+	 */
+	if (events)
+		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+	return rc;
+}
+
+static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
+				       struct qed_ptt *p_ptt,
+				       struct qed_vf_info *vf)
+{
+	struct qed_sp_vport_start_params params = { 0 };
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	struct vfpf_vport_start_tlv *start;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct qed_vf_info *vf_info;
+	u64 *p_bitmap;
+	int sb_id;
+	int rc;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+	if (!vf_info) {
+		DP_NOTICE(p_hwfn->cdev,
+			  "Failed to get VF info, invalid vfid [%d]\n",
+			  vf->relative_vf_id);
+		return;
+	}
+
+	vf->state = VF_ENABLED;
+	start = &mbx->req_virt->start_vport;
+
+	/* Initialize Status block in CAU */
+	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+		if (!start->sb_addr[sb_id]) {
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "VF[%d] did not fill the address of SB %d\n",
+				   vf->relative_vf_id, sb_id);
+			break;
+		}
+
+		qed_int_cau_conf_sb(p_hwfn, p_ptt,
+				    start->sb_addr[sb_id],
+				    vf->igu_sbs[sb_id],
+				    vf->abs_vf_id, 1);
+	}
+	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+	vf->mtu = start->mtu;
+	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+	/* Take into consideration configuration forced by hypervisor;
+	 * If none is configured, use the supplied VF values [for old
+	 * vfs that would still be fine, since they passed '0' as padding].
+	 */
+	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+		u8 vf_req = start->only_untagged;
+
+		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+	}
+
+	params.tpa_mode = start->tpa_mode;
+	params.remove_inner_vlan = start->inner_vlan_removal;
+	params.tx_switching = true;
+
+	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+	params.drop_ttl0 = false;
+	params.concrete_fid = vf->concrete_fid;
+	params.opaque_fid = vf->opaque_fid;
+	params.vport_id = vf->vport_id;
+	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+	params.mtu = vf->mtu;
+
+	rc = qed_sp_eth_vport_start(p_hwfn, &params);
+	if (rc != 0) {
+		DP_ERR(p_hwfn,
+		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+		status = PFVF_STATUS_FAILURE;
+	} else {
+		vf->vport_instance++;
+
+		/* Force configuration if needed on the newly opened vport */
+		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+
+		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+	}
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+			     sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
+				      struct qed_ptt *p_ptt,
+				      struct qed_vf_info *vf)
+{
+	u8 status = PFVF_STATUS_SUCCESS;
+	int rc;
+
+	vf->vport_instance--;
+	vf->spoof_chk = false;
+
+	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+	if (rc != 0) {
+		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
+		       rc);
+		status = PFVF_STATUS_FAILURE;
+	}
+
+	/* Forget the configuration on the vport */
+	vf->configured_features = 0;
+	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+			     sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+#define TSTORM_QZONE_START	PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev)	(TSTORM_QZONE_START + \
+				 (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+
+static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
+					  struct qed_ptt *p_ptt,
+					  struct qed_vf_info *vf, u8 status)
+{
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	struct pfvf_start_queue_resp_tlv *p_tlv;
+	struct vfpf_start_rxq_tlv *req;
+
+	mbx->offset = (u8 *)mbx->reply_virt;
+
+	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
+			    sizeof(*p_tlv));
+	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+		    sizeof(struct channel_list_end_tlv));
+
+	/* Update the TLV with the response */
+	if (status == PFVF_STATUS_SUCCESS) {
+		u16 hw_qid = 0;
+
+		req = &mbx->req_virt->start_rxq;
+		qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
+				&hw_qid);
+
+		p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
+				hw_qid * MSTORM_QZONE_SIZE +
+				offsetof(struct mstorm_eth_queue_zone,
+					 rx_producers);
+	}
+
+	qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_vf_info *vf)
+{
+	struct qed_queue_start_common_params params;
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct vfpf_start_rxq_tlv *req;
+	int rc;
+
+	memset(&params, 0, sizeof(params));
+	req = &mbx->req_virt->start_rxq;
+	params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
+	params.vport_id = vf->vport_id;
+	params.sb = req->hw_sb;
+	params.sb_idx = req->sb_index;
+
+	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
+					 vf->vf_queues[req->rx_qid].fw_cid,
+					 &params,
+					 vf->abs_vf_id + 0x10,
+					 req->bd_max_bytes,
+					 req->rxq_addr,
+					 req->cqe_pbl_addr, req->cqe_pbl_size);
+
+	if (rc) {
+		status = PFVF_STATUS_FAILURE;
+	} else {
+		vf->vf_queues[req->rx_qid].rxq_active = true;
+		vf->num_active_rxqs++;
+	}
+
+	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+}
+
+static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_vf_info *vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+	struct qed_queue_start_common_params params;
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	union qed_qm_pq_params pq_params;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct vfpf_start_txq_tlv *req;
+	int rc;
+
+	/* Prepare the parameters which would choose the right PQ */
+	memset(&pq_params, 0, sizeof(pq_params));
+	pq_params.eth.is_vf = 1;
+	pq_params.eth.vf_id = vf->relative_vf_id;
+
+	memset(&params, 0, sizeof(params));
+	req = &mbx->req_virt->start_txq;
+	params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
+	params.vport_id = vf->vport_id;
+	params.sb = req->hw_sb;
+	params.sb_idx = req->sb_index;
+
+	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+					 vf->opaque_fid,
+					 vf->vf_queues[req->tx_qid].fw_cid,
+					 &params,
+					 vf->abs_vf_id + 0x10,
+					 req->pbl_addr,
+					 req->pbl_size, &pq_params);
+
+	if (rc)
+		status = PFVF_STATUS_FAILURE;
+	else
+		vf->vf_queues[req->tx_qid].txq_active = true;
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
+			     length, status);
+}
+
+static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
+				struct qed_vf_info *vf,
+				u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+{
+	int rc = 0;
+	int qid;
+
+	if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+		return -EINVAL;
+
+	for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
+		if (vf->vf_queues[qid].rxq_active) {
+			rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+						      vf->vf_queues[qid].fw_rx_qid,
+						      false, cqe_completion);
+
+			if (rc)
+				return rc;
+		}
+		vf->vf_queues[qid].rxq_active = false;
+		vf->num_active_rxqs--;
+	}
+
+	return rc;
+}
+
+static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
+				struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+{
+	int rc = 0;
+	int qid;
+
+	if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+		return -EINVAL;
+
+	for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
+		if (vf->vf_queues[qid].txq_active) {
+			rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+						      vf->vf_queues[qid].fw_tx_qid);
+
+			if (rc)
+				return rc;
+		}
+		vf->vf_queues[qid].txq_active = false;
+	}
+	return rc;
+}
+
+static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_vf_info *vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct vfpf_stop_rxqs_tlv *req;
+	int rc;
+
+	/* We give the option of starting from qid != 0, in this case we
+	 * need to make sure that qid + num_qs doesn't exceed the actual
+	 * number of queues that exist.
+	 */
+	req = &mbx->req_virt->stop_rxqs;
+	rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+				  req->num_rxqs, req->cqe_completion);
+	if (rc)
+		status = PFVF_STATUS_FAILURE;
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+			     length, status);
+}
+
+static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt,
+				     struct qed_vf_info *vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct vfpf_stop_txqs_tlv *req;
+	int rc;
+
+	/* We give the option of starting from qid != 0, in this case we
+	 * need to make sure that qid + num_qs doesn't exceed the actual
+	 * number of queues that exist.
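+	 * E.g., tx_qid=14 with num_txqs=4 would overrun the vf_queues array
+	 * and is rejected with -EINVAL by qed_iov_vf_stop_txqs(), while
+	 * inactive queues inside a valid range are silently skipped.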
+	 */
+	req = &mbx->req_virt->stop_txqs;
+	rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
+	if (rc)
+		status = PFVF_STATUS_FAILURE;
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+			     length, status);
+}
+
+static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
+				       struct qed_ptt *p_ptt,
+				       struct qed_vf_info *vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	struct vfpf_update_rxq_tlv *req;
+	u8 status = PFVF_STATUS_SUCCESS;
+	u8 complete_event_flg;
+	u8 complete_cqe_flg;
+	u16 qid;
+	int rc;
+	u8 i;
+
+	req = &mbx->req_virt->update_rxq;
+	complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+	complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+	for (i = 0; i < req->num_rxqs; i++) {
+		qid = req->rx_qid + i;
+
+		if (!vf->vf_queues[qid].rxq_active) {
+			DP_NOTICE(p_hwfn, "VF rx_qid = %d isn't active!\n",
+				  qid);
+			status = PFVF_STATUS_FAILURE;
+			break;
+		}
+
+		rc = qed_sp_eth_rx_queues_update(p_hwfn,
+						 vf->vf_queues[qid].fw_rx_qid,
+						 1,
+						 complete_cqe_flg,
+						 complete_event_flg,
+						 QED_SPQ_MODE_EBLOCK, NULL);
+
+		if (rc) {
+			status = PFVF_STATUS_FAILURE;
+			break;
+		}
+	}
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+			     length, status);
+}
+
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+			       void *p_tlvs_list, u16 req_type)
+{
+	struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+	int len = 0;
+
+	do {
+		if (!p_tlv->length) {
+			DP_NOTICE(p_hwfn, "Zero length TLV found\n");
+			return NULL;
+		}
+
+		if (p_tlv->type == req_type) {
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "Extended tlv type %d, length %d found\n",
+				   p_tlv->type, p_tlv->length);
+			return p_tlv;
+		}
+
+		len += p_tlv->length;
+		p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
+
+		if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+			DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
+			return NULL;
+		}
+	} while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+	return NULL;
+}
+
+static void
+qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
+			    struct qed_sp_vport_update_params *p_data,
+			    struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+	struct vfpf_vport_update_activate_tlv *p_act_tlv;
+	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+
+	p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+		    qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+	if (!p_act_tlv)
+		return;
+
+	p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+	p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+	p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+	p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
+}
+
+static void
+qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
+			     struct qed_sp_vport_update_params *p_data,
+			     struct qed_vf_info *p_vf,
+			     struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+	struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+
+	p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+		     qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+	if (!p_vlan_tlv)
+		return;
+
+	p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+	/* Ignore the VF request if we're forcing a vlan */
+	if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+		p_data->update_inner_vlan_removal_flg = 1;
+		p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+	}
+
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+static void
+qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + + p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, + tlv); + if (!p_tx_switch_tlv) + return; + + p_data->update_tx_switching_flg = 1; + p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; +} + +static void +qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; + + p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_mcast_tlv) + return; + + p_data->update_approx_mcast_flg = 1; + memcpy(p_data->bins, p_mcast_tlv->bins, + sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; +} + +static void +qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; + struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + + p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_accept_tlv) + return; + + p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; + p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; + p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; + p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; +} + +static void +qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + + p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, + tlv); + if (!p_accept_any_vlan) + return; + + p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; + p_data->update_accept_any_vlan_flg = + p_accept_any_vlan->update_accept_any_vlan_flg; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; +} + +static void +qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, + struct qed_vf_info *vf, + struct qed_sp_vport_update_params *p_data, + struct qed_rss_params *p_rss, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_rss_tlv *p_rss_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; + u16 i, q_idx, max_q_idx; + u16 table_size; + + p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_rss_tlv) { + p_data->rss_params = NULL; + return; + } + + memset(p_rss, 0, sizeof(struct qed_rss_params)); + + p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_CONFIG_FLAG); + p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_CAPS_FLAG); + p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_IND_TABLE_FLAG); + 
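+	/* The rest of the TLV is parsed below; the indirection table is
+	 * translated from VF-relative queue indices to absolute FW rx queue
+	 * ids, with out-of-range or inactive entries falling back to the id
+	 * of VF queue 0.
+	 */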
+	p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
+				   VFPF_UPDATE_RSS_KEY_FLAG);
+
+	p_rss->rss_enable = p_rss_tlv->rss_enable;
+	p_rss->rss_eng_id = vf->relative_vf_id + 1;
+	p_rss->rss_caps = p_rss_tlv->rss_caps;
+	p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+	memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
+	       sizeof(p_rss->rss_ind_table));
+	memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
+
+	table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
+			   (1 << p_rss_tlv->rss_table_size_log));
+
+	max_q_idx = ARRAY_SIZE(vf->vf_queues);
+
+	for (i = 0; i < table_size; i++) {
+		u16 index = vf->vf_queues[0].fw_rx_qid;
+
+		q_idx = p_rss->rss_ind_table[i];
+		if (q_idx >= max_q_idx)
+			DP_NOTICE(p_hwfn,
+				  "rss_ind_table[%d] = %d, rxq is out of range\n",
+				  i, q_idx);
+		else if (!vf->vf_queues[q_idx].rxq_active)
+			DP_NOTICE(p_hwfn,
+				  "rss_ind_table[%d] = %d, rxq is not active\n",
+				  i, q_idx);
+		else
+			index = vf->vf_queues[q_idx].fw_rx_qid;
+		p_rss->rss_ind_table[i] = index;
+	}
+
+	p_data->rss_params = p_rss;
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+}
+
+static void
+qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
+				struct qed_vf_info *vf,
+				struct qed_sp_vport_update_params *p_data,
+				struct qed_sge_tpa_params *p_sge_tpa,
+				struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+	struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+	u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+
+	p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+			qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+	if (!p_sge_tpa_tlv) {
+		p_data->sge_tpa_params = NULL;
+		return;
+	}
+
+	memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
+
+	p_sge_tpa->update_tpa_en_flg =
+	    !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+	p_sge_tpa->update_tpa_param_flg =
+	    !!(p_sge_tpa_tlv->update_sge_tpa_flags &
+	       VFPF_UPDATE_TPA_PARAM_FLAG);
+
+	p_sge_tpa->tpa_ipv4_en_flg =
+	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+	p_sge_tpa->tpa_ipv6_en_flg =
+	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+	p_sge_tpa->tpa_pkt_split_flg =
+	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+	p_sge_tpa->tpa_hdr_data_split_flg =
+	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+	p_sge_tpa->tpa_gro_consistent_flg =
+	    !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+	p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
+	p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
+	p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
+	p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
+	p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
+
+	p_data->sge_tpa_params = p_sge_tpa;
+
+	*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
+}
+
+static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
+					struct qed_ptt *p_ptt,
+					struct qed_vf_info *vf)
+{
+	struct qed_sp_vport_update_params params;
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	struct qed_sge_tpa_params sge_tpa_params;
+	struct qed_rss_params rss_params;
+	u8 status = PFVF_STATUS_SUCCESS;
+	u16 tlvs_mask = 0;
+	u16 length;
+	int rc;
+
+	memset(&params, 0, sizeof(params));
+	params.opaque_fid = vf->opaque_fid;
+	params.vport_id = vf->vport_id;
+	params.rss_params = NULL;
+
+	/* Search for extended tlvs list and update values
+	 * from VF in struct qed_sp_vport_update_params.
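+	 * Each helper scans the request for its own TLV type; when the TLV
+	 * is present it copies the fields into 'params' and sets the
+	 * matching QED_IOV_VP_UPDATE_* bit in tlvs_mask.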
+	 */
+	qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+	qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+	qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+	qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+	qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+	qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
+				    mbx, &tlvs_mask);
+	qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+	qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+					&sge_tpa_params, mbx, &tlvs_mask);
+
+	/* Just log a message if no extended TLV was found in the buffer.
+	 * Once every feature of the vport update ramrod is requested by VFs
+	 * as an extended TLV, an empty request could instead be failed in
+	 * the response.
+	 */
+	if (!tlvs_mask) {
+		DP_NOTICE(p_hwfn,
+			  "No feature tlvs found for vport update\n");
+		status = PFVF_STATUS_NOT_SUPPORTED;
+		goto out;
+	}
+
+	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+
+	if (rc)
+		status = PFVF_STATUS_FAILURE;
+
+out:
+	length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+						  tlvs_mask, tlvs_mask);
+	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
+					    struct qed_vf_info *p_vf,
+					    struct qed_filter_ucast *p_params)
+{
+	int i;
+
+	if (p_params->type == QED_FILTER_MAC)
+		return 0;
+
+	/* First remove entries and then add new ones */
+	if (p_params->opcode == QED_FILTER_REMOVE) {
+		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+			if (p_vf->shadow_config.vlans[i].used &&
+			    p_vf->shadow_config.vlans[i].vid ==
+			    p_params->vlan) {
+				p_vf->shadow_config.vlans[i].used = false;
+				break;
+			}
+		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "VF [%d] - Tries to remove a non-existent vlan\n",
+				   p_vf->relative_vf_id);
+			return -EINVAL;
+		}
+	} else if (p_params->opcode == QED_FILTER_REPLACE ||
+		   p_params->opcode == QED_FILTER_FLUSH) {
+		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+			p_vf->shadow_config.vlans[i].used = false;
+	}
+
+	/* In forced mode, we're willing to remove entries - but we don't add
+	 * new ones.
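+	 * E.g., while a PVID is forced, a QED_FILTER_ADD request returns
+	 * here without being recorded in shadow_config.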
+	 */
+	if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+		return 0;
+
+	if (p_params->opcode == QED_FILTER_ADD ||
+	    p_params->opcode == QED_FILTER_REPLACE) {
+		for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+			if (p_vf->shadow_config.vlans[i].used)
+				continue;
+
+			p_vf->shadow_config.vlans[i].used = true;
+			p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+			break;
+		}
+
+		if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "VF [%d] - Tries to configure more than %d vlan filters\n",
+				   p_vf->relative_vf_id,
+				   QED_ETH_VF_NUM_VLAN_FILTERS + 1);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+		      int vfid, struct qed_filter_ucast *params)
+{
+	struct qed_public_vf_info *vf;
+
+	vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+	if (!vf)
+		return -EINVAL;
+
+	/* No real decision to make; Store the configured MAC */
+	if (params->type == QED_FILTER_MAC ||
+	    params->type == QED_FILTER_MAC_VLAN)
+		ether_addr_copy(vf->mac, params->mac);
+
+	return 0;
+}
+
+static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
+					struct qed_ptt *p_ptt,
+					struct qed_vf_info *vf)
+{
+	struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+	struct vfpf_ucast_filter_tlv *req;
+	u8 status = PFVF_STATUS_SUCCESS;
+	struct qed_filter_ucast params;
+	int rc;
+
+	/* Prepare the unicast filter params */
+	memset(&params, 0, sizeof(struct qed_filter_ucast));
+	req = &mbx->req_virt->ucast_filter;
+	params.opcode = (enum qed_filter_opcode)req->opcode;
+	params.type = (enum qed_filter_ucast_type)req->type;
+
+	params.is_rx_filter = 1;
+	params.is_tx_filter = 1;
+	params.vport_to_remove_from = vf->vport_id;
+	params.vport_to_add_to = vf->vport_id;
+	memcpy(params.mac, req->mac, ETH_ALEN);
+	params.vlan = req->vlan;
+
+	DP_VERBOSE(p_hwfn,
+		   QED_MSG_IOV,
+		   "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+		   vf->abs_vf_id, params.opcode, params.type,
+		   params.is_rx_filter ? "RX" : "",
+		   params.is_tx_filter ? "TX" : "",
+		   params.vport_to_add_to,
+		   params.mac[0], params.mac[1],
+		   params.mac[2], params.mac[3],
+		   params.mac[4], params.mac[5], params.vlan);
+
+	if (!vf->vport_instance) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_IOV,
+			   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+			   vf->abs_vf_id);
+		status = PFVF_STATUS_FAILURE;
+		goto out;
+	}
+
+	/* Update shadow copy of the VF configuration */
+	if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
+		status = PFVF_STATUS_FAILURE;
+		goto out;
+	}
+
+	/* Determine if the unicast filtering is acceptable to the PF */
+	if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+	    (params.type == QED_FILTER_VLAN ||
+	     params.type == QED_FILTER_MAC_VLAN)) {
+		/* Once VLAN is forced or PVID is set, do not allow
+		 * to add/replace any further VLANs.
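+		 * Such requests are answered with PFVF_STATUS_FORCED rather
+		 * than a plain failure, so the VF can tell the refusal apart
+		 * from a real error.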
+		 */
+		if (params.opcode == QED_FILTER_ADD ||
+		    params.opcode == QED_FILTER_REPLACE)
+			status = PFVF_STATUS_FORCED;
+		goto out;
+	}
+
+	if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+	    (params.type == QED_FILTER_MAC ||
+	     params.type == QED_FILTER_MAC_VLAN)) {
+		if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
+		    (params.opcode != QED_FILTER_ADD &&
+		     params.opcode != QED_FILTER_REPLACE))
+			status = PFVF_STATUS_FORCED;
+		goto out;
+	}
+
+	rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
+	if (rc) {
+		status = PFVF_STATUS_FAILURE;
+		goto out;
+	}
+
+	rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+				     QED_SPQ_MODE_CB, NULL);
+	if (rc)
+		status = PFVF_STATUS_FAILURE;
+
+out:
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+			     sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
+				       struct qed_ptt *p_ptt,
+				       struct qed_vf_info *vf)
+{
+	int i;
+
+	/* Reset the SBs */
+	for (i = 0; i < vf->num_sbs; i++)
+		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+						vf->igu_sbs[i],
+						vf->opaque_fid, false);
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+			     sizeof(struct pfvf_def_resp_tlv),
+			     PFVF_STATUS_SUCCESS);
+}
+
+static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
+				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+	u8 status = PFVF_STATUS_SUCCESS;
+
+	/* Disable Interrupts for VF */
+	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+	/* Reset Permission table */
+	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+			     length, status);
+}
+
+static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
+				   struct qed_ptt *p_ptt,
+				   struct qed_vf_info *p_vf)
+{
+	u16 length = sizeof(struct pfvf_def_resp_tlv);
+
+	qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+	qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+			     length, PFVF_STATUS_SUCCESS);
+}
+
+static int
+qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
+			 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+	int cnt;
+	u32 val;
+
+	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+
+	for (cnt = 0; cnt < 50; cnt++) {
+		val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+		if (!val)
+			break;
+		msleep(20);
+	}
+	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+	if (cnt == 50) {
+		DP_ERR(p_hwfn,
+		       "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+		       p_vf->abs_vf_id, val);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
+			struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+	u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+	int i, cnt;
+
+	/* Read initial consumers & producers */
+	for (i = 0; i < MAX_NUM_VOQS; i++) {
+		u32 prod;
+
+		cons[i] = qed_rd(p_hwfn, p_ptt,
+				 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+				 i * 0x40);
+		prod = qed_rd(p_hwfn, p_ptt,
+			      PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
+			      i * 0x40);
+		distance[i] = prod - cons[i];
+	}
+
+	/* Wait for consumers to pass the producers */
+	i = 0;
+	for (cnt = 0; cnt < 50; cnt++) {
+		for (; i < MAX_NUM_VOQS; i++) {
+			u32 tmp;
+
+			tmp = qed_rd(p_hwfn, p_ptt,
+				     PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+				     i * 0x40);
+			if (distance[i] > tmp - cons[i])
+				break;
+		}
+
+		if (i == MAX_NUM_VOQS)
+			break;
+
+		msleep(20);
+	}
+
+	if (cnt == 50) {
+		DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
+		       p_vf->abs_vf_id, i);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int
+qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
+		    struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+	int rc;
+
+	rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+	if (rc)
+		return rc;
+
+	rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int
+qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
+			       struct qed_ptt *p_ptt,
+			       u16 rel_vf_id, u32 *ack_vfs)
+{
+	struct qed_vf_info *p_vf;
+	int rc = 0;
+
+	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+	if (!p_vf)
+		return 0;
+
+	if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+	    (1ULL << (rel_vf_id % 64))) {
+		u16 vfid = p_vf->abs_vf_id;
+
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "VF[%d] - Handling FLR\n", vfid);
+
+		qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+		/* If VF isn't active, no need for anything but SW */
+		if (!p_vf->b_init)
+			goto cleanup;
+
+		rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+		if (rc)
+			goto cleanup;
+
+		rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
+		if (rc) {
+			DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n",
+			       vfid);
+			return rc;
+		}
+
+		/* VF_STOPPED has to be set only after final cleanup
+		 * but prior to re-enabling the VF.
+		 */
+		p_vf->state = VF_STOPPED;
+
+		rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+		if (rc) {
+			DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
+			       vfid);
+			return rc;
+		}
+cleanup:
+		/* Mark VF for ack and clean pending state */
+		if (p_vf->state == VF_RESET)
+			p_vf->state = VF_STOPPED;
+		ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+		p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+		    ~(1ULL << (rel_vf_id % 64));
+		p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
+		    ~(1ULL << (rel_vf_id % 64));
+	}
+
+	return rc;
+}
+
+int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	u32 ack_vfs[VF_MAX_STATIC / 32];
+	int rc = 0;
+	u16 i;
+
+	memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+	/* Since BRB <-> PRS interface can't be tested as part of the flr
+	 * polling due to HW limitations, simply sleep a bit. And since
+	 * there's no need to wait per-vf, do it before looping.
+	 */
+	msleep(100);
+
+	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
+		qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
+
+	rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+	return rc;
+}
+
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+	u16 i, found = 0;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
+	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "[%08x,...,%08x]: %08x\n",
+			   i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+	if (!p_hwfn->cdev->p_iov_info) {
+		DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
+		return 0;
+	}
+
+	/* Mark VFs */
+	for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
+		struct qed_vf_info *p_vf;
+		u8 vfid;
+
+		p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
+		if (!p_vf)
+			continue;
+
+		vfid = p_vf->abs_vf_id;
+		if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+			u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+			u16 rel_vf_id = p_vf->relative_vf_id;
+
+			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+				   "VF[%d] [rel %d] got FLR-ed\n",
+				   vfid, rel_vf_id);
+
+			p_vf->state = VF_RESET;
+
+			/* No need to lock here, since pending_flr should
+			 * only change here and before ACKing MFW. Since
+			 * MFW will not trigger an additional attention for
+			 * VF flr until ACKs, we're safe.
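+			 * The bitmap is addressed as p_flr[id / 64],
+			 * bit (id % 64), matching the layout of the
+			 * pending_events array.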
+ */ + p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); + found = 1; + } + } + + return found; +} + +static void qed_iov_get_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *p_params, + struct qed_mcp_link_state *p_link, + struct qed_mcp_link_capabilities *p_caps) +{ + struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, + vfid, + false); + struct qed_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + + if (p_params) + __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); + if (p_link) + __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); + if (p_caps) + __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); +} + +static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, int vfid) +{ + struct qed_iov_vf_mbx *mbx; + struct qed_vf_info *p_vf; + int i; + + p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true); + if (!p_vf) + return; + + mbx = &p_vf->vf_mbx; + + /* qed_iov_process_mbx_request */ + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id); + + mbx->first_tlv = mbx->req_virt->first_tlv; + + /* check if tlv type is known */ + if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { + switch (mbx->first_tlv.tl.type) { + case CHANNEL_TLV_ACQUIRE: + qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_START: + qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_TEARDOWN: + qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_START_RXQ: + qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_START_TXQ: + qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_STOP_RXQS: + qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_STOP_TXQS: + qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UPDATE_RXQ: + qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_UPDATE: + qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UCAST_FILTER: + qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_CLOSE: + qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_INT_CLEANUP: + qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_RELEASE: + qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); + break; + } + } else { + /* unknown TLV - this may belong to a VF driver from the future + * - a version written after this PF driver was written, which + * supports features unknown as of yet. Too bad since we don't + * support them. Or this may be because someone wrote a crappy + * VF driver and is sending garbage over the channel. + */ + DP_ERR(p_hwfn, + "unknown TLV. type %d length %d. 
first 20 bytes of mailbox buffer:\n",
+		       mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
+
+		for (i = 0; i < 20; i++) {
+			DP_VERBOSE(p_hwfn,
+				   QED_MSG_IOV,
+				   "%x ",
+				   mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
+		}
+	}
+}
+
+void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+	u64 add_bit = 1ULL << (vfid % 64);
+
+	p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+}
+
+static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
+						    u64 *events)
+{
+	u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+
+	memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+	memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+}
+
+static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
+			      u16 abs_vfid, struct regpair *vf_msg)
+{
+	u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+	struct qed_vf_info *p_vf;
+
+	if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+		DP_VERBOSE(p_hwfn,
+			   QED_MSG_IOV,
+			   "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
+			   abs_vfid);
+		return 0;
+	}
+	p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+	/* Store the physical address of the request so that the handler
+	 * can later copy the message from it.
+	 */
+	p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+
+	/* Mark the event and schedule the workqueue */
+	qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+	qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
+
+	return 0;
+}
+
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+			u8 opcode, __le16 echo, union event_ring_data *data)
+{
+	switch (opcode) {
+	case COMMON_EVENT_VF_PF_CHANNEL:
+		return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
+					  &data->vf_pf_channel.msg_addr);
+	default:
+		DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
+			opcode);
+		return -EINVAL;
+	}
+}
+
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+	u16 i;
+
+	if (!p_iov)
+		goto out;
+
+	for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
+			return i;
+
+out:
+	return MAX_NUM_VFS;
+}
+
+static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
+			       int vfid)
+{
+	struct qed_dmae_params params;
+	struct qed_vf_info *vf_info;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!vf_info)
+		return -EINVAL;
+
+	memset(&params, 0, sizeof(struct qed_dmae_params));
+	params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
+	params.src_vfid = vf_info->abs_vf_id;
+
+	if (qed_dmae_host2host(p_hwfn, ptt,
+			       vf_info->vf_mbx.pending_req,
+			       vf_info->vf_mbx.req_phys,
+			       sizeof(union vfpf_tlvs) / 4, &params)) {
+		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+			   "Failed to copy message from VF 0x%02x\n", vfid);
+
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
+					    u8 *mac, int vfid)
+{
+	struct qed_vf_info *vf_info;
+	u64 feature;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+	if (!vf_info) {
+		DP_NOTICE(p_hwfn->cdev,
+			  "Cannot set forced MAC, invalid vfid [%d]\n", vfid);
+		return;
+	}
+
+	feature = 1 << MAC_ADDR_FORCED;
+	memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+	vf_info->bulletin.p_virt->valid_bitmap |= feature;
+	/* Forced MAC will disable MAC_ADDR */
+	vf_info->bulletin.p_virt->valid_bitmap &=
+	    ~(1 << VFPF_BULLETIN_MAC_ADDR);
+
+	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+				      u16 pvid, int vfid)
+{
+	struct qed_vf_info *vf_info;
+	u64 feature;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!vf_info) {
+		DP_NOTICE(p_hwfn->cdev,
+			  "Cannot set forced VLAN, invalid vfid [%d]\n", vfid);
+		return;
+	}
+
+	feature = 1 << VLAN_ADDR_FORCED;
+	vf_info->bulletin.p_virt->pvid = pvid;
+	if (pvid)
+		vf_info->bulletin.p_virt->valid_bitmap |= feature;
+	else
+		vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+	qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
+{
+	struct qed_vf_info *p_vf_info;
+
+	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!p_vf_info)
+		return false;
+
+	return !!p_vf_info->vport_instance;
+}
+
+bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+{
+	struct qed_vf_info *p_vf_info;
+
+	p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!p_vf_info)
+		return true;
+
+	return p_vf_info->state == VF_STOPPED;
+}
+
+static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
+{
+	struct qed_vf_info *vf_info;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!vf_info)
+		return false;
+
+	return vf_info->spoof_chk;
+}
+
+int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+{
+	struct qed_vf_info *vf;
+	int rc = -EINVAL;
+
+	if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+		DP_NOTICE(p_hwfn,
+			  "SR-IOV sanity check failed, can't set spoofchk\n");
+		goto out;
+	}
+
+	vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!vf)
+		goto out;
+
+	if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+		/* After VF VPORT start PF will configure spoof check */
+		vf->req_spoofchk_val = val;
+		rc = 0;
+		goto out;
+	}
+
+	rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+	return rc;
+}
+
+static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
+					   u16 rel_vf_id)
+{
+	struct qed_vf_info *p_vf;
+
+	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+	if (!p_vf || !p_vf->bulletin.p_virt)
+		return NULL;
+
+	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+		return NULL;
+
+	return p_vf->bulletin.p_virt->mac;
+}
+
+u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+	struct qed_vf_info *p_vf;
+
+	p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+	if (!p_vf || !p_vf->bulletin.p_virt)
+		return 0;
+
+	if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+		return 0;
+
+	return p_vf->bulletin.p_virt->pvid;
+}
+
+static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
+				     struct qed_ptt *p_ptt, int vfid, int val)
+{
+	struct qed_vf_info *vf;
+	u8 abs_vp_id = 0;
+	int rc;
+
+	vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+	if (!vf)
+		return -EINVAL;
+
+	rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+	if (rc)
+		return rc;
+
+	return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+}
+
+int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+{
+	struct qed_vf_info *vf;
+	u8 vport_id;
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+			DP_NOTICE(p_hwfn,
+				  "SR-IOV sanity check failed, can't set min rate\n");
+			return -EINVAL;
+		}
+	}
+
+	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+	vport_id = vf->vport_id;
+
+	return qed_configure_vport_wfq(cdev, vport_id, rate);
+}
+
+static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
+{
+	struct qed_wfq_data *vf_vp_wfq;
+	struct qed_vf_info *vf_info;
+
+	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+	if (!vf_info)
+		return 0;
+
+	vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+	if (vf_vp_wfq->configured)
+		return vf_vp_wfq->min_speed;
+	else
+		return 0;
+}
+
+/**
+ * qed_schedule_iov - schedules IOV task for VF and PF
+ * @hwfn: hardware function pointer
+ * @flag: IOV flag for VF/PF
+ */
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
+{
+	smp_mb__before_atomic();
+	set_bit(flag, &hwfn->iov_task_flags);
+	smp_mb__after_atomic();
+	DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+	queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
+}
+
+void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+	int i;
+
+	for_each_hwfn(cdev, i)
+		queue_delayed_work(cdev->hwfns[i].iov_wq,
+				   &cdev->hwfns[i].iov_task, 0);
+}
+
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+	int i, j;
+
+	for_each_hwfn(cdev, i)
+		if (cdev->hwfns[i].iov_wq)
+			flush_workqueue(cdev->hwfns[i].iov_wq);
+
+	/* Mark VFs for disablement */
+	qed_iov_set_vfs_to_disable(cdev, true);
+
+	if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
+		pci_disable_sriov(cdev->pdev);
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+		/* Failure to acquire the ptt in 100g creates an odd error
+		 * where the first engine has already released IOV.
+		 */
+		if (!ptt) {
+			DP_ERR(hwfn, "Failed to acquire ptt\n");
+			return -EBUSY;
+		}
+
+		/* Clean WFQ db and configure equal weight for all vports */
+		qed_clean_wfq_db(hwfn, ptt);
+
+		qed_for_each_vf(hwfn, j) {
+			int k;
+
+			if (!qed_iov_is_valid_vfid(hwfn, j, true))
+				continue;
+
+			/* Wait until VF is disabled before releasing */
+			for (k = 0; k < 100; k++) {
+				if (!qed_iov_is_vf_stopped(hwfn, j))
+					msleep(20);
+				else
+					break;
+			}
+
+			if (k < 100)
+				qed_iov_release_hw_for_vf(&cdev->hwfns[i],
+							  ptt, j);
+			else
+				DP_ERR(hwfn,
+				       "Timeout waiting for VF's FLR to end\n");
+		}
+
+		qed_ptt_release(hwfn, ptt);
+	}
+
+	qed_iov_set_vfs_to_disable(cdev, false);
+
+	return 0;
+}
+
+static int qed_sriov_enable(struct qed_dev *cdev, int num)
+{
+	struct qed_sb_cnt_info sb_cnt_info;
+	int i, j, rc;
+
+	if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
+		DP_NOTICE(cdev, "Can start at most %d VFs\n",
+			  RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
+		return -EINVAL;
+	}
+
+	/* Initialize HW for VF access */
+	for_each_hwfn(cdev, j) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[j];
+		struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+		int num_sbs = 0, limit = 16;
+
+		if (!ptt) {
+			DP_ERR(hwfn, "Failed to acquire ptt\n");
+			rc = -EBUSY;
+			goto err;
+		}
+
+		if (IS_MF_DEFAULT(hwfn))
+			limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
+
+		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+		qed_int_get_num_sbs(hwfn, &sb_cnt_info);
+		num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
+
+		for (i = 0; i < num; i++) {
+			if (!qed_iov_is_valid_vfid(hwfn, i, false))
+				continue;
+
+			rc = qed_iov_init_hw_for_vf(hwfn,
+						    ptt, i, num_sbs / num);
+			if (rc) {
+				DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
+				qed_ptt_release(hwfn, ptt);
+				goto err;
+			}
+		}
+
+		qed_ptt_release(hwfn, ptt);
+	}
+
+	/* Enable SRIOV PCIe functions */
+	rc = pci_enable_sriov(cdev->pdev, num);
+	if (rc) {
+		DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
+		goto err;
+	}
+
+	return num;
+
+err:
+	qed_sriov_disable(cdev, false);
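+	/* The disable path above tears down any partially-initialized VFs
+	 * before the error is propagated to the caller.
+	 */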
+	return rc;
+}
+
+static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
+{
+	if (!IS_QED_SRIOV(cdev)) {
+		DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (num_vfs_param)
+		return qed_sriov_enable(cdev, num_vfs_param);
+	else
+		return qed_sriov_disable(cdev, true);
+}
+
+static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
+{
+	int i;
+
+	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "Cannot set a VF MAC; SR-IOV is not enabled\n");
+		return -EINVAL;
+	}
+
+	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+		return -EINVAL;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+		struct qed_public_vf_info *vf_info;
+
+		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+		if (!vf_info)
+			continue;
+
+		/* Set the forced MAC, and schedule the IOV task */
+		ether_addr_copy(vf_info->forced_mac, mac);
+		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+	}
+
+	return 0;
+}
+
+static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
+{
+	int i;
+
+	if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "Cannot set a VF VLAN; SR-IOV is not enabled\n");
+		return -EINVAL;
+	}
+
+	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "Cannot set VF[%d] VLAN (VF is not active)\n", vfid);
+		return -EINVAL;
+	}
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+		struct qed_public_vf_info *vf_info;
+
+		vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+		if (!vf_info)
+			continue;
+
+		/* Set the forced vlan, and schedule the IOV task */
+		vf_info->forced_vlan = vid;
+		qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+	}
+
+	return 0;
+}
+
+static int qed_get_vf_config(struct qed_dev *cdev,
+			     int vf_id, struct ifla_vf_info *ivi)
+{
+	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+	struct qed_public_vf_info *vf_info;
+	struct qed_mcp_link_state link;
+	u32 tx_rate;
+
+	/* Sanitize request */
+	if (IS_VF(cdev))
+		return -EINVAL;
+
+	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "VF index [%d] isn't active\n", vf_id);
+		return -EINVAL;
+	}
+
+	vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+
+	qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+
+	/* Fill information about VF */
+	ivi->vf = vf_id;
+
+	if (is_valid_ether_addr(vf_info->forced_mac))
+		ether_addr_copy(ivi->mac, vf_info->forced_mac);
+	else
+		ether_addr_copy(ivi->mac, vf_info->mac);
+
+	ivi->vlan = vf_info->forced_vlan;
+	ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
+	ivi->linkstate = vf_info->link_state;
+	tx_rate = vf_info->tx_rate;
+	ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
+	ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+
+	return 0;
+}
+
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+	struct qed_mcp_link_capabilities caps;
+	struct qed_mcp_link_params params;
+	struct qed_mcp_link_state link;
+	int i;
+
+	if (!hwfn->pf_iov_info)
+		return;
+
+	/* Update bulletin of all future possible VFs with link configuration */
+	for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
+		struct qed_public_vf_info *vf_info;
+
+		vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
+		if (!vf_info)
+			continue;
+
+		memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+		memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+		memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+		       sizeof(caps));
+
+		/* Modify link according to the VF's configured link state */
+		switch (vf_info->link_state) {
+		case IFLA_VF_LINK_STATE_DISABLE:
+			link.link_up = false;
+			break;
+		case IFLA_VF_LINK_STATE_ENABLE:
+			link.link_up = true;
+			/* Set speed according to maximum supported by HW;
+			 * that is 40G for regular devices and 100G for CMT
+			 * mode devices.
+			 */
+			link.speed = (hwfn->cdev->num_hwfns > 1) ?
+				     100000 : 40000;
+			break;
+		default:
+			/* In auto mode pass PF link image to VF */
+			break;
+		}
+
+		if (link.link_up && vf_info->tx_rate) {
+			struct qed_ptt *ptt;
+			int rate;
+
+			rate = min_t(int, vf_info->tx_rate, link.speed);
+
+			ptt = qed_ptt_acquire(hwfn);
+			if (!ptt) {
+				DP_NOTICE(hwfn, "Failed to acquire PTT\n");
+				return;
+			}
+
+			if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
+				vf_info->tx_rate = rate;
+				link.speed = rate;
+			}
+
+			qed_ptt_release(hwfn, ptt);
+		}
+
+		qed_iov_set_link(hwfn, i, &params, &link, &caps);
+	}
+
+	qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+}
+
+static int qed_set_vf_link_state(struct qed_dev *cdev,
+				 int vf_id, int link_state)
+{
+	int i;
+
+	/* Sanitize request */
+	if (IS_VF(cdev))
+		return -EINVAL;
+
+	if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+		DP_VERBOSE(cdev, QED_MSG_IOV,
+			   "VF index [%d] isn't active\n", vf_id);
+		return -EINVAL;
+	}
+
+	/* Handle configuration of link state */
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+		struct qed_public_vf_info *vf;
+
+		vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+		if (!vf)
+			continue;
+
+		if (vf->link_state == link_state)
+			continue;
+
+		vf->link_state = link_state;
+		qed_inform_vf_link_state(&cdev->hwfns[i]);
+	}
+
+	return 0;
+}
+
+static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
+{
+	int i, rc = -EINVAL;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
+{
+	int i;
+
+	for_each_hwfn(cdev, i) {
+		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+		struct qed_public_vf_info *vf;
+
+		if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+			DP_NOTICE(p_hwfn,
+				  "SR-IOV sanity check failed, can't set tx rate\n");
+			return -EINVAL;
+		}
+
+		vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
+
+		vf->tx_rate = rate;
+
+		qed_inform_vf_link_state(p_hwfn);
+	}
+
+	return 0;
+}
+
+static int qed_set_vf_rate(struct qed_dev *cdev,
+			   int vfid, u32 min_rate, u32 max_rate)
+{
+	int rc_min = 0, rc_max = 0;
+
+	if (max_rate)
+		rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
+
+	if (min_rate)
+		rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
+
+	if (rc_max | rc_min)
+		return -EINVAL;
+
+	return
0; +} + +static void qed_handle_vf_msg(struct qed_hwfn *hwfn) +{ + u64 events[QED_VF_ARRAY_LENGTH]; + struct qed_ptt *ptt; + int i; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Can't acquire PTT; re-scheduling\n"); + qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); + return; + } + + qed_iov_pf_get_and_clear_pending_events(hwfn, events); + + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", + events[0], events[1], events[2]); + + qed_for_each_vf(hwfn, i) { + /* Skip VFs with no pending messages */ + if (!(events[i / 64] & (1ULL << (i % 64)))) + continue; + + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", + i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); + + /* Copy VF's message to PF's request buffer for that VF */ + if (qed_iov_copy_vf_msg(hwfn, ptt, i)) + continue; + + qed_iov_process_mbx_req(hwfn, ptt, i); + } + + qed_ptt_release(hwfn, ptt); +} + +static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) +{ + int i; + + qed_for_each_vf(hwfn, i) { + struct qed_public_vf_info *info; + bool update = false; + u8 *mac; + + info = qed_iov_get_public_vf_info(hwfn, i, true); + if (!info) + continue; + + /* Update data on bulletin board */ + mac = qed_iov_bulletin_get_forced_mac(hwfn, i); + if (is_valid_ether_addr(info->forced_mac) && + (!mac || !ether_addr_equal(mac, info->forced_mac))) { + DP_VERBOSE(hwfn, + QED_MSG_IOV, + "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", + i, + hwfn->cdev->p_iov_info->first_vf_in_pf + i); + + /* Update bulletin board with forced MAC */ + qed_iov_bulletin_set_forced_mac(hwfn, + info->forced_mac, i); + update = true; + } + + if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ + info->forced_vlan) { + DP_VERBOSE(hwfn, + QED_MSG_IOV, + "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", + info->forced_vlan, + i, + hwfn->cdev->p_iov_info->first_vf_in_pf + i); + qed_iov_bulletin_set_forced_vlan(hwfn, + info->forced_vlan, i); + update = true; + } + + if (update) + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } +} + +static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) +{ + struct qed_ptt *ptt; + int i; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + return; + } + + qed_for_each_vf(hwfn, i) + qed_iov_post_vf_bulletin(hwfn, i, ptt); + + qed_ptt_release(hwfn, ptt); +} + +void qed_iov_pf_task(struct work_struct *work) +{ + struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, + iov_task.work); + int rc; + + if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) + return; + + if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + + if (!ptt) { + qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); + return; + } + + rc = qed_iov_vf_flr_cleanup(hwfn, ptt); + if (rc) + qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); + + qed_ptt_release(hwfn, ptt); + } + + if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) + qed_handle_vf_msg(hwfn); + + if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, + &hwfn->iov_task_flags)) + qed_handle_pf_set_vf_unicast(hwfn); + + if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, + &hwfn->iov_task_flags)) + qed_handle_bulletin_post(hwfn); +} + +void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) +{ + int i; + + for_each_hwfn(cdev, i) { + if (!cdev->hwfns[i].iov_wq) + 
continue; + + if (schedule_first) { + qed_schedule_iov(&cdev->hwfns[i], + QED_IOV_WQ_STOP_WQ_FLAG); + cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); + } + + flush_workqueue(cdev->hwfns[i].iov_wq); + destroy_workqueue(cdev->hwfns[i].iov_wq); + } +} + +int qed_iov_wq_start(struct qed_dev *cdev) +{ + char name[NAME_SIZE]; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + /* PFs need a dedicated workqueue only if they support IOV. + * VFs always require one. + */ + if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) + continue; + + snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", + cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); + + p_hwfn->iov_wq = create_singlethread_workqueue(name); + if (!p_hwfn->iov_wq) { + DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); + return -ENOMEM; + } + + if (IS_PF(cdev)) + INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); + else + INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); + } + + return 0; +} + +const struct qed_iov_hv_ops qed_iov_ops_pass = { + .configure = &qed_sriov_configure, + .set_mac = &qed_sriov_pf_set_mac, + .set_vlan = &qed_sriov_pf_set_vlan, + .get_config = &qed_get_vf_config, + .set_link_state = &qed_set_vf_link_state, + .set_spoof = &qed_spoof_configure, + .set_rate = &qed_set_vf_rate, +}; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h new file mode 100644 index 000000000000..c8667c65e685 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -0,0 +1,386 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_SRIOV_H +#define _QED_SRIOV_H +#include <linux/types.h> +#include "qed_vf.h" +#define QED_VF_ARRAY_LENGTH (3) + +#define IS_VF(cdev) ((cdev)->b_is_vf) +#define IS_PF(cdev) (!((cdev)->b_is_vf)) +#ifdef CONFIG_QED_SRIOV +#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) +#else +#define IS_PF_SRIOV(p_hwfn) (0) +#endif +#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) + +#define QED_MAX_VF_CHAINS_PER_PF 16 +#define QED_ETH_VF_NUM_VLAN_FILTERS 2 + +#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \ + (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS) + +enum qed_iov_vport_update_flag { + QED_IOV_VP_UPDATE_ACTIVATE, + QED_IOV_VP_UPDATE_VLAN_STRIP, + QED_IOV_VP_UPDATE_TX_SWITCH, + QED_IOV_VP_UPDATE_MCAST, + QED_IOV_VP_UPDATE_ACCEPT_PARAM, + QED_IOV_VP_UPDATE_RSS, + QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN, + QED_IOV_VP_UPDATE_SGE_TPA, + QED_IOV_VP_UPDATE_MAX, +}; + +struct qed_public_vf_info { + /* These copies will later be reflected in the bulletin board, + * but this copy should be newer. + */ + u8 forced_mac[ETH_ALEN]; + u16 forced_vlan; + u8 mac[ETH_ALEN]; + + /* IFLA_VF_LINK_STATE_<X> */ + int link_state; + + /* Currently configured Tx rate in MB/sec. 0 if unconfigured */ + int tx_rate; +}; + +/* This struct is part of qed_dev and contains data relevant to all hwfns; + * Initialized only if SR-IOV capability is exposed in PCIe config space.
+ */ +struct qed_hw_sriov_info { + int pos; /* capability position */ + int nres; /* number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_vfs; /* total VFs associated with the PF */ + u16 num_vfs; /* number of vfs that have been started */ + u16 initial_vfs; /* initial VFs associated with the PF */ + u16 nr_virtfn; /* number of VFs available */ + u16 offset; /* first VF Routing ID offset */ + u16 stride; /* following VF stride */ + u16 vf_device_id; /* VF device id */ + u32 pgsz; /* page size for BAR alignment */ + u8 link; /* Function Dependency Link */ + + u32 first_vf_in_pf; +}; + +/* This mailbox is maintained per VF in its PF and contains all information + * required for sending / receiving a message. + */ +struct qed_iov_vf_mbx { + union vfpf_tlvs *req_virt; + dma_addr_t req_phys; + union pfvf_tlvs *reply_virt; + dma_addr_t reply_phys; + + /* Address in VF where a pending message is located */ + dma_addr_t pending_req; + + u8 *offset; + + /* saved VF request header */ + struct vfpf_first_tlv first_tlv; +}; + +struct qed_vf_q_info { + u16 fw_rx_qid; + u16 fw_tx_qid; + u8 fw_cid; + u8 rxq_active; + u8 txq_active; +}; + +enum vf_state { + VF_FREE = 0, /* VF ready to be acquired, holds no resc */ + VF_ACQUIRED, /* VF, acquired, but not initialized */ + VF_ENABLED, /* VF, Enabled */ + VF_RESET, /* VF, FLR'd, pending cleanup */ + VF_STOPPED /* VF, Stopped */ +}; + +struct qed_vf_vlan_shadow { + bool used; + u16 vid; +}; + +struct qed_vf_shadow_config { + /* Shadow copy of all guest vlans */ + struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1]; + + u8 inner_vlan_removal; +}; + +/* PFs maintain an array of this structure, per VF */ +struct qed_vf_info { + struct qed_iov_vf_mbx vf_mbx; + enum vf_state state; + bool b_init; + u8 to_disable; + + struct qed_bulletin bulletin; + dma_addr_t vf_bulletin; + + u32 concrete_fid; + u16 opaque_fid; + u16 mtu; + + u8 vport_id; + u8 relative_vf_id; + u8 abs_vf_id; +#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \ + (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \ + (p_vf)->abs_vf_id) + + u8 vport_instance; + u8 num_rxqs; + u8 num_txqs; + + u8 num_sbs; + + u8 num_mac_filters; + u8 num_vlan_filters; + struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF]; + u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF]; + u8 num_active_rxqs; + struct qed_public_vf_info p_vf_info; + bool spoof_chk; + bool req_spoofchk_val; + + /* Stores the configuration requested by VF */ + struct qed_vf_shadow_config shadow_config; + + /* A bitfield using bulletin's valid-map bits, used to indicate + * which of the bulletin board features have been configured. + */ + u64 configured_features; +#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \ + (1 << VLAN_ADDR_FORCED)) +}; + +/* This structure is part of qed_hwfn and used only for PFs that have sriov + * capability enabled.
+ */ +struct qed_pf_iov { + struct qed_vf_info vfs_array[MAX_NUM_VFS]; + u64 pending_events[QED_VF_ARRAY_LENGTH]; + u64 pending_flr[QED_VF_ARRAY_LENGTH]; + + /* Allocate message address contiguously and split to each VF */ + void *mbx_msg_virt_addr; + dma_addr_t mbx_msg_phys_addr; + u32 mbx_msg_size; + void *mbx_reply_virt_addr; + dma_addr_t mbx_reply_phys_addr; + u32 mbx_reply_size; + void *p_bulletins; + dma_addr_t bulletins_phys; + u32 bulletins_size; +}; + +enum qed_iov_wq_flag { + QED_IOV_WQ_MSG_FLAG, + QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, + QED_IOV_WQ_BULLETIN_UPDATE_FLAG, + QED_IOV_WQ_STOP_WQ_FLAG, + QED_IOV_WQ_FLR_FLAG, +}; + +#ifdef CONFIG_QED_SRIOV +/** + * @brief - Given a VF index, return index of next [including that] active VF. + * + * @param p_hwfn + * @param rel_vf_id + * + * @return MAX_NUM_VFS in case no further active VFs, otherwise index. + */ +u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id); + +/** + * @brief Read sriov related information and allocate resources; + * reads from configuration space, shmem, etc. + * + * @param p_hwfn + * + * @return int + */ +int qed_iov_hw_info(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset + * + * @param p_hwfn + * @param p_iov + * @param type + * @param length + * + * @return pointer to the newly placed tlv + */ +void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length); + +/** + * @brief list the types and lengths of the tlvs on the buffer + * + * @param p_hwfn + * @param tlvs_list + */ +void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list); + +/** + * @brief qed_iov_alloc - allocate sriov related resources + * + * @param p_hwfn + * + * @return int + */ +int qed_iov_alloc(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_iov_setup - setup sriov related resources + * + * @param p_hwfn + * @param p_ptt + */ +void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * @brief qed_iov_free - free sriov related resources + * + * @param p_hwfn + */ +void qed_iov_free(struct qed_hwfn *p_hwfn); + +/** + * @brief free sriov related memory that was allocated during hw_prepare + * + * @param cdev + */ +void qed_iov_free_hw_info(struct qed_dev *cdev); + +/** + * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe. + * + * @param p_hwfn + * @param opcode + * @param echo + * @param data + */ +int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, __le16 echo, union event_ring_data *data); + +/** + * @brief Mark structs of vfs that have been FLR-ed. + * + * @param p_hwfn + * @param disabled_vfs - bitmask of all VFs on path that were FLRed + * + * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise. + */ +int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs); + +/** + * @brief Search extended TLVs in request/reply buffer. + * + * @param p_hwfn + * @param p_tlvs_list - Pointer to tlvs list + * @param req_type - Type of TLV + * + * @return pointer to tlv type if found, otherwise returns NULL.
+ */ +void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, + void *p_tlvs_list, u16 req_type); + +void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first); +int qed_iov_wq_start(struct qed_dev *cdev); + +void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag); +void qed_vf_start_iov_wq(struct qed_dev *cdev); +int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled); +void qed_inform_vf_link_state(struct qed_hwfn *hwfn); +#else +static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, + u16 rel_vf_id) +{ + return MAX_NUM_VFS; +} + +static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn) +{ + return 0; +} + +static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn) +{ + return 0; +} + +static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ +} + +static inline void qed_iov_free(struct qed_hwfn *p_hwfn) +{ +} + +static inline void qed_iov_free_hw_info(struct qed_dev *cdev) +{ +} + +static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, + __le16 echo, union event_ring_data *data) +{ + return -EINVAL; +} + +static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, + u32 *disabled_vfs) +{ + return 0; +} + +static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) +{ +} + +static inline int qed_iov_wq_start(struct qed_dev *cdev) +{ + return 0; +} + +static inline void qed_schedule_iov(struct qed_hwfn *hwfn, + enum qed_iov_wq_flag flag) +{ +} + +static inline void qed_vf_start_iov_wq(struct qed_dev *cdev) +{ +} + +static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) +{ + return 0; +} + +static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn) +{ +} +#endif + +#define qed_for_each_vf(_p_hwfn, _i) \ + for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \ + _i < MAX_NUM_VFS; \ + _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1)) + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c new file mode 100644 index 000000000000..72e69c0ec10d --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -0,0 +1,1102 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#include <linux/crc32.h> +#include <linux/etherdevice.h> +#include "qed.h" +#include "qed_sriov.h" +#include "qed_vf.h" + +static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + void *p_tlv; + + /* This lock is released when we receive PF's response + * in qed_send_msg2pf(). + * So, qed_vf_pf_prep() and qed_send_msg2pf() + * must come in sequence. 
+ */ + mutex_lock(&(p_iov->mutex)); + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "preparing to send 0x%04x tlv over vf pf channel\n", + type); + + /* Reset request offset */ + p_iov->offset = (u8 *)p_iov->vf2pf_request; + + /* Clear mailbox - both request and reply */ + memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs)); + memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + + /* Init type and length */ + p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length); + + /* Init first tlv header */ + ((struct vfpf_first_tlv *)p_tlv)->reply_address = + (u64)p_iov->pf2vf_reply_phys; + + return p_tlv; +} + +static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size) +{ + union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; + struct ustorm_trigger_vf_zone trigger; + struct ustorm_vf_zone *zone_data; + int rc = 0, time = 100; + + zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; + + /* output tlvs list */ + qed_dp_tlv_list(p_hwfn, p_req); + + /* need to add the END TLV to the message size */ + resp_size += sizeof(struct channel_list_end_tlv); + + /* Send TLVs over HW channel */ + memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone)); + trigger.vf_pf_msg_valid = 1; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n", + GET_FIELD(p_hwfn->hw_info.concrete_fid, + PXP_CONCRETE_FID_PFID), + upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys), + lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys), + &zone_data->non_trigger.vf_pf_msg_addr, + *((u32 *)&trigger), &zone_data->trigger); + + REG_WR(p_hwfn, + (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo, + lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys)); + + REG_WR(p_hwfn, + (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi, + upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys)); + + /* The message data must be written first, to prevent trigger before + * data is written. + */ + wmb(); + + REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); + + /* When PF would be done with the response, it would write back to the + * `done' address. Poll until then.
+ */ + while ((!*done) && time) { + msleep(25); + time--; + } + + if (!*done) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF <-- PF Timeout [Type %d]\n", + p_req->first_tlv.tl.type); + rc = -EBUSY; + goto exit; + } else { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "PF response: %d [Type %d]\n", + *done, p_req->first_tlv.tl.type); + } + +exit: + mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); + + return rc; +} + +#define VF_ACQUIRE_THRESH 3 +#define VF_ACQUIRE_MAC_FILTERS 1 + +static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; + struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; + u8 rx_count = 1, tx_count = 1, num_sbs = 1; + u8 num_mac = VF_ACQUIRE_MAC_FILTERS; + bool resources_acquired = false; + struct vfpf_acquire_tlv *req; + int rc = 0, attempts = 0; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req)); + + /* start filling the request */ + req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid; + + req->resc_request.num_rxqs = rx_count; + req->resc_request.num_txqs = tx_count; + req->resc_request.num_sbs = num_sbs; + req->resc_request.num_mac_filters = num_mac; + req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; + + req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX; + req->vfdev_info.fw_major = FW_MAJOR_VERSION; + req->vfdev_info.fw_minor = FW_MINOR_VERSION; + req->vfdev_info.fw_revision = FW_REVISION_VERSION; + req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION; + + /* Fill capability field with any non-deprecated config we support */ + req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; + + /* pf 2 vf bulletin board address */ + req->bulletin_addr = p_iov->bulletin.phys; + req->bulletin_size = p_iov->bulletin.size; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + while (!resources_acquired) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, "attempting to acquire resources\n"); + + /* send acquire request */ + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + /* copy acquire response from buffer to p_hwfn */ + memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp)); + + attempts++; + + if (resp->hdr.status == PFVF_STATUS_SUCCESS) { + /* PF agrees to allocate our resources */ + if (!(resp->pfdev_info.capabilities & + PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) { + DP_INFO(p_hwfn, + "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n"); + return -EINVAL; + } + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n"); + resources_acquired = true; + } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE && + attempts < VF_ACQUIRE_THRESH) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "PF unwilling to fulfill resource request.
Try PF recommended amount\n"); + + /* humble our request */ + req->resc_request.num_txqs = resp->resc.num_txqs; + req->resc_request.num_rxqs = resp->resc.num_rxqs; + req->resc_request.num_sbs = resp->resc.num_sbs; + req->resc_request.num_mac_filters = + resp->resc.num_mac_filters; + req->resc_request.num_vlan_filters = + resp->resc.num_vlan_filters; + + /* Clear response buffer */ + memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + } else { + DP_ERR(p_hwfn, + "PF returned error %d to VF acquisition request\n", + resp->hdr.status); + return -EAGAIN; + } + } + + /* Update bulletin board size with response from PF */ + p_iov->bulletin.size = resp->bulletin_size; + + /* get HW info */ + p_hwfn->cdev->type = resp->pfdev_info.dev_type; + p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev; + + p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff; + + /* Learn of the possibility of CMT */ + if (IS_LEAD_HWFN(p_hwfn)) { + if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) { + DP_NOTICE(p_hwfn, "100g VF\n"); + p_hwfn->cdev->num_hwfns = 2; + } + } + + return 0; +} + +int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov; + u32 reg; + + /* Set number of hwfns - might be overridden once leading hwfn learns + * actual configuration from PF. + */ + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->cdev->num_hwfns = 1; + + /* Set the doorbell bar. Assumption: regview is set */ + p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview + + PXP_VF_BAR0_START_DQ; + + reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS; + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg); + + reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS; + p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg); + + /* Allocate vf sriov info */ + p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL); + if (!p_iov) { + DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_vf_iov'\n"); + return -ENOMEM; + } + + /* Allocate vf2pf msg */ + p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union vfpf_tlvs), + &p_iov->vf2pf_request_phys, + GFP_KERNEL); + if (!p_iov->vf2pf_request) { + DP_NOTICE(p_hwfn, + "Failed to allocate `vf2pf_request' DMA memory\n"); + goto free_p_iov; + } + + p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + &p_iov->pf2vf_reply_phys, + GFP_KERNEL); + if (!p_iov->pf2vf_reply) { + DP_NOTICE(p_hwfn, + "Failed to allocate `pf2vf_reply' DMA memory\n"); + goto free_vf2pf_request; + } + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n", + p_iov->vf2pf_request, + (u64) p_iov->vf2pf_request_phys, + p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys); + + /* Allocate Bulletin board */ + p_iov->bulletin.size = sizeof(struct qed_bulletin_content); + p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov->bulletin.size, + &p_iov->bulletin.phys, + GFP_KERNEL); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", + p_iov->bulletin.p_virt, + (u64)p_iov->bulletin.phys, p_iov->bulletin.size); + + mutex_init(&p_iov->mutex); + + p_hwfn->vf_iov_info = p_iov; + + p_hwfn->hw_info.personality = QED_PCI_ETH; + + return qed_vf_pf_acquire(p_hwfn); + +free_vf2pf_request: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union vfpf_tlvs), + p_iov->vf2pf_request, p_iov->vf2pf_request_phys); +free_p_iov: + kfree(p_iov); + + return -ENOMEM; +} + +int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, + u8 rx_qid, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t
bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_start_queue_resp_tlv *resp; + struct vfpf_start_rxq_tlv *req; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req)); + + req->rx_qid = rx_qid; + req->cqe_pbl_addr = cqe_pbl_addr; + req->cqe_pbl_size = cqe_pbl_size; + req->rxq_addr = bd_chain_phys_addr; + req->hw_sb = sb; + req->sb_index = sb_index; + req->bd_max_bytes = bd_max_bytes; + req->stat_id = -1; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->queue_start; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + /* Learn the address of the producer from the response */ + if (pp_prod) { + u64 init_prod_val = 0; + + *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n", + rx_qid, *pp_prod, resp->offset); + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + (u32 *)&init_prod_val); + } + + return rc; +} + +int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_stop_rxqs_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); + + req->rx_qid = rx_qid; + req->num_rxqs = 1; + req->cqe_completion = cqe_completion; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + return rc; +} + +int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + u16 tx_queue_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_start_txq_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req)); + + req->tx_qid = tx_queue_id; + + /* Tx */ + req->pbl_addr = pbl_addr; + req->pbl_size = pbl_size; + req->hw_sb = sb; + req->sb_index = sb_index; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + if (pp_doorbell) { + u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; + + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(cid, DQ_DEMS_LEGACY); + } + + return rc; +} + +int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_stop_txqs_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); + + req->tx_qid = tx_qid; + 
req->num_txqs = 1; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + return rc; +} + +int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, + u8 vport_id, + u16 mtu, + u8 inner_vlan_removal, + enum qed_tpa_mode tpa_mode, + u8 max_buffers_per_cqe, u8 only_untagged) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_vport_start_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc, i; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req)); + + req->mtu = mtu; + req->vport_id = vport_id; + req->inner_vlan_removal = inner_vlan_removal; + req->tpa_mode = tpa_mode; + req->max_buffers_per_cqe = max_buffers_per_cqe; + req->only_untagged = only_untagged; + + /* status blocks */ + for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) + if (p_hwfn->sbs_info[i]) + req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + return rc; +} + +int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN, + sizeof(struct vfpf_first_tlv)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + return rc; +} + +static bool +qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + u16 tlv) +{ + switch (tlv) { + case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE: + return !!(p_data->update_vport_active_rx_flg || + p_data->update_vport_active_tx_flg); + case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH: + return !!p_data->update_tx_switching_flg; + case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP: + return !!p_data->update_inner_vlan_removal_flg; + case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN: + return !!p_data->update_accept_any_vlan_flg; + case CHANNEL_TLV_VPORT_UPDATE_MCAST: + return !!p_data->update_approx_mcast_flg; + case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM: + return !!(p_data->accept_flags.update_rx_mode_config || + p_data->accept_flags.update_tx_mode_config); + case CHANNEL_TLV_VPORT_UPDATE_RSS: + return !!p_data->rss_params; + case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA: + return !!p_data->sge_tpa_params; + default: + DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", + tlv); + return false; + } +} + +static void +qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *p_resp; + u16 tlv; + + for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) { + if 
(!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv)) + continue; + + p_resp = (struct pfvf_def_resp_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, + tlv); + if (p_resp && p_resp->hdr.status) + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "TLV[%d] Configuration %s\n", + tlv, + (p_resp && p_resp->hdr.status) ? "succeeded" + : "failed"); + } +} + +int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_vport_update_tlv *req; + struct pfvf_def_resp_tlv *resp; + u8 update_rx, update_tx; + u32 resp_size = 0; + u16 size, tlv; + int rc; + + resp = &p_iov->pf2vf_reply->default_resp; + resp_size = sizeof(*resp); + + update_rx = p_params->update_vport_active_rx_flg; + update_tx = p_params->update_vport_active_tx_flg; + + /* clear mailbox and prep header tlv */ + qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req)); + + /* Prepare extended tlvs */ + if (update_rx || update_tx) { + struct vfpf_vport_update_activate_tlv *p_act_tlv; + + size = sizeof(struct vfpf_vport_update_activate_tlv); + p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (update_rx) { + p_act_tlv->update_rx = update_rx; + p_act_tlv->active_rx = p_params->vport_active_rx_flg; + } + + if (update_tx) { + p_act_tlv->update_tx = update_tx; + p_act_tlv->active_tx = p_params->vport_active_tx_flg; + } + } + + if (p_params->update_tx_switching_flg) { + struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; + + size = sizeof(struct vfpf_vport_update_tx_switch_tlv); + tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + tlv, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; + } + + if (p_params->update_approx_mcast_flg) { + struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; + + size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); + p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_MCAST, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + memcpy(p_mcast_tlv->bins, p_params->bins, + sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); + } + + update_rx = p_params->accept_flags.update_rx_mode_config; + update_tx = p_params->accept_flags.update_tx_mode_config; + + if (update_rx || update_tx) { + struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; + + tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + size = sizeof(struct vfpf_vport_update_accept_param_tlv); + p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (update_rx) { + p_accept_tlv->update_rx_mode = update_rx; + p_accept_tlv->rx_accept_filter = + p_params->accept_flags.rx_accept_filter; + } + + if (update_tx) { + p_accept_tlv->update_tx_mode = update_tx; + p_accept_tlv->tx_accept_filter = + p_params->accept_flags.tx_accept_filter; + } + } + + if (p_params->rss_params) { + struct qed_rss_params *rss_params = p_params->rss_params; + struct vfpf_vport_update_rss_tlv *p_rss_tlv; + + size = sizeof(struct vfpf_vport_update_rss_tlv); + p_rss_tlv = qed_add_tlv(p_hwfn, + &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_RSS, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (rss_params->update_rss_config) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_CONFIG_FLAG; + if (rss_params->update_rss_capabilities) + p_rss_tlv->update_rss_flags |= + 
VFPF_UPDATE_RSS_CAPS_FLAG; + if (rss_params->update_rss_ind_table) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_IND_TABLE_FLAG; + if (rss_params->update_rss_key) + p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG; + + p_rss_tlv->rss_enable = rss_params->rss_enable; + p_rss_tlv->rss_caps = rss_params->rss_caps; + p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log; + memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table, + sizeof(rss_params->rss_ind_table)); + memcpy(p_rss_tlv->rss_key, rss_params->rss_key, + sizeof(rss_params->rss_key)); + } + + if (p_params->update_accept_any_vlan_flg) { + struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv; + + size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv); + tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); + + resp_size += sizeof(struct pfvf_def_resp_tlv); + p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan; + p_any_vlan_tlv->update_accept_any_vlan_flg = + p_params->update_accept_any_vlan_flg; + } + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params); + + return rc; +} + +int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp; + struct vfpf_first_tlv *req; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EAGAIN; + + p_hwfn->b_int_enabled = 0; + + return 0; +} + +int qed_vf_pf_release(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp; + struct vfpf_first_tlv *req; + u32 size; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + + if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS) + rc = -EAGAIN; + + p_hwfn->b_int_enabled = 0; + + if (p_iov->vf2pf_request) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union vfpf_tlvs), + p_iov->vf2pf_request, + p_iov->vf2pf_request_phys); + if (p_iov->pf2vf_reply) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); + + if (p_iov->bulletin.p_virt) { + size = sizeof(struct qed_bulletin_content); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + size, + p_iov->bulletin.p_virt, p_iov->bulletin.phys); + } + + kfree(p_hwfn->vf_iov_info); + p_hwfn->vf_iov_info = NULL; + + return rc; +} + +void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + struct qed_filter_mcast *p_filter_cmd) +{ + struct qed_sp_vport_update_params sp_params; + int i; + + memset(&sp_params, 0, sizeof(sp_params)); + 
sp_params.update_approx_mcast_flg = 1; + + if (p_filter_cmd->opcode == QED_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { + u32 bit; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); + __set_bit(bit, sp_params.bins); + } + } + + qed_vf_pf_vport_update(p_hwfn, &sp_params); +} + +int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, + struct qed_filter_ucast *p_ucast) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_ucast_filter_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); + req->opcode = (u8) p_ucast->opcode; + req->type = (u8) p_ucast->type; + memcpy(req->mac, p_ucast->mac, ETH_ALEN); + req->vlan = p_ucast->vlan; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EAGAIN; + + return 0; +} + +int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP, + sizeof(struct vfpf_first_tlv)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return -EINVAL; + + return 0; +} + +u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + + if (!p_iov) { + DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n"); + return 0; + } + + return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; +} + +int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct qed_bulletin_content shadow; + u32 crc, crc_size; + + crc_size = sizeof(p_iov->bulletin.p_virt->crc); + *p_change = 0; + + /* Need to guarantee PF is not in the middle of writing it */ + memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size); + + /* If version did not update, no need to do anything */ + if (shadow.version == p_iov->bulletin_shadow.version) + return 0; + + /* Verify the bulletin we see is valid */ + crc = crc32(0, (u8 *)&shadow + crc_size, + p_iov->bulletin.size - crc_size); + if (crc != shadow.crc) + return -EAGAIN; + + /* Set the shadow bulletin and process it */ + memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size); + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Read a bulletin update %08x\n", shadow.version); + + *p_change = 1; + + return 0; +} + +void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *p_params, + struct qed_bulletin_content *p_bulletin) +{ + memset(p_params, 0, sizeof(*p_params)); + + p_params->speed.autoneg = p_bulletin->req_autoneg; + p_params->speed.advertised_speeds = p_bulletin->req_adv_speed; + p_params->speed.forced_speed = p_bulletin->req_forced_speed; + p_params->pause.autoneg = p_bulletin->req_autoneg_pause; + p_params->pause.forced_rx = p_bulletin->req_forced_rx; + p_params->pause.forced_tx = p_bulletin->req_forced_tx; + p_params->loopback_mode = p_bulletin->req_loopback; +} + 
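+/* Note: the __qed_vf_get_link_* helpers above fill the qed_mcp_* structs + * from an explicit bulletin copy; the wrappers that follow are thin + * conveniences that read from the VF's bulletin_shadow, which + * qed_vf_read_bulletin() keeps current. Illustrative VF-side usage + * (a sketch only, not part of this patch): + * + *	struct qed_mcp_link_state link; + * + *	qed_vf_get_link_state(p_hwfn, &link); + *	if (link.link_up) + *		report link.speed to the stack + */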
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *params) +{ + __qed_vf_get_link_params(p_hwfn, params, + &(p_hwfn->vf_iov_info->bulletin_shadow)); +} + +void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *p_link, + struct qed_bulletin_content *p_bulletin) +{ + memset(p_link, 0, sizeof(*p_link)); + + p_link->link_up = p_bulletin->link_up; + p_link->speed = p_bulletin->speed; + p_link->full_duplex = p_bulletin->full_duplex; + p_link->an = p_bulletin->autoneg; + p_link->an_complete = p_bulletin->autoneg_complete; + p_link->parallel_detection = p_bulletin->parallel_detection; + p_link->pfc_enabled = p_bulletin->pfc_enabled; + p_link->partner_adv_speed = p_bulletin->partner_adv_speed; + p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en; + p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en; + p_link->partner_adv_pause = p_bulletin->partner_adv_pause; + p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault; +} + +void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *link) +{ + __qed_vf_get_link_state(p_hwfn, link, + &(p_hwfn->vf_iov_info->bulletin_shadow)); +} + +void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps, + struct qed_bulletin_content *p_bulletin) +{ + memset(p_link_caps, 0, sizeof(*p_link_caps)); + p_link_caps->speed_capabilities = p_bulletin->capability_speed; +} + +void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps) +{ + __qed_vf_get_link_caps(p_hwfn, p_link_caps, + &(p_hwfn->vf_iov_info->bulletin_shadow)); +} + +void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) +{ + *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs; +} + +void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) +{ + memcpy(port_mac, + p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN); +} + +void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters) +{ + struct qed_vf_iov *p_vf; + + p_vf = p_hwfn->vf_iov_info; + *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; +} + +bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) +{ + struct qed_bulletin_content *bulletin; + + bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; + if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))) + return true; + + /* Forbid VF from changing a MAC enforced by PF */ + if (ether_addr_equal(bulletin->mac, mac)) + return false; + + return false; +} + +bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn, + u8 *dst_mac, u8 *p_is_forced) +{ + struct qed_bulletin_content *bulletin; + + bulletin = &hwfn->vf_iov_info->bulletin_shadow; + + if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) { + if (p_is_forced) + *p_is_forced = 1; + } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) { + if (p_is_forced) + *p_is_forced = 0; + } else { + return false; + } + + ether_addr_copy(dst_mac, bulletin->mac); + + return true; +} + +void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, + u16 *fw_rev, u16 *fw_eng) +{ + struct pf_vf_pfdev_info *info; + + info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info; + + *fw_major = info->fw_major; + *fw_minor = info->fw_minor; + *fw_rev = info->fw_rev; + *fw_eng = info->fw_eng; +} + +static void qed_handle_bulletin_change(struct qed_hwfn *hwfn) +{ + struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth; + u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced; + void *cookie = 
hwfn->cdev->ops_cookie; + + is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac, + &is_mac_forced); + if (is_mac_exist && is_mac_forced && cookie) + ops->force_mac(cookie, mac); + + /* Always update link configuration according to bulletin */ + qed_link_update(hwfn); +} + +void qed_iov_vf_task(struct work_struct *work) +{ + struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, + iov_task.work); + u8 change = 0; + + if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) + return; + + /* Handle bulletin board changes */ + qed_vf_read_bulletin(hwfn, &change); + if (change) + qed_handle_bulletin_change(hwfn); + + /* As VF is polling bulletin board, need to constantly re-schedule */ + queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h new file mode 100644 index 000000000000..b82fda964bbd --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -0,0 +1,990 @@ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. + */ + +#ifndef _QED_VF_H +#define _QED_VF_H + +#include "qed_l2.h" +#include "qed_mcp.h" + +#define T_ETH_INDIRECTION_TABLE_SIZE 128 +#define T_ETH_RSS_KEY_SIZE 10 + +struct vf_pf_resc_request { + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + u16 padding; +}; + +struct hw_sb_info { + u16 hw_sb_id; + u8 sb_qid; + u8 padding[5]; +}; + +#define TLV_BUFFER_SIZE 1024 + +enum { + PFVF_STATUS_WAITING, + PFVF_STATUS_SUCCESS, + PFVF_STATUS_FAILURE, + PFVF_STATUS_NOT_SUPPORTED, + PFVF_STATUS_NO_RESOURCE, + PFVF_STATUS_FORCED, +}; + +/* vf pf channel tlvs */ +/* general tlv header (used for both vf->pf request and pf->vf response) */ +struct channel_tlv { + u16 type; + u16 length; +}; + +/* header of first vf->pf tlv carries the offset used to calculate response + * buffer address + */ +struct vfpf_first_tlv { + struct channel_tlv tl; + u32 padding; + u64 reply_address; +}; + +/* header of pf->vf tlvs, carries the status of handling the request */ +struct pfvf_tlv { + struct channel_tlv tl; + u8 status; + u8 padding[3]; +}; + +/* response tlv used for most tlvs */ +struct pfvf_def_resp_tlv { + struct pfvf_tlv hdr; +}; + +/* used to terminate and pad a tlv list */ +struct channel_list_end_tlv { + struct channel_tlv tl; + u8 padding[4]; +}; + +#define VFPF_ACQUIRE_OS_LINUX (0) +#define VFPF_ACQUIRE_OS_WINDOWS (1) +#define VFPF_ACQUIRE_OS_ESX (2) +#define VFPF_ACQUIRE_OS_SOLARIS (3) +#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4) + +struct vfpf_acquire_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_vfdev_info { +#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0) +#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */ + u64 capabilities; + u8 fw_major; + u8 fw_minor; + u8 fw_revision; + u8 fw_engineering; + u32 driver_version; + u16 opaque_fid; /* ME register value */ + u8 os_type; /* VFPF_ACQUIRE_OS_* value */ + u8 padding[5]; + } vfdev_info; + + struct vf_pf_resc_request resc_request; + + u64 bulletin_addr; + u32 bulletin_size; + u32 padding; +}; + +/* receive side scaling tlv */ +struct vfpf_vport_update_rss_tlv { + struct channel_tlv tl; + + u8 update_rss_flags; +#define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0) +#define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1) +#define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2)
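+/* When set, rss_key[] below carries a new key (T_ETH_RSS_KEY_SIZE u32s) */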
+#define VFPF_UPDATE_RSS_KEY_FLAG BIT(3) + + u8 rss_enable; + u8 rss_caps; + u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */ + u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + u32 rss_key[T_ETH_RSS_KEY_SIZE]; +}; + +struct pfvf_storm_stats { + u32 address; + u32 len; +}; + +struct pfvf_stats_info { + struct pfvf_storm_stats mstats; + struct pfvf_storm_stats pstats; + struct pfvf_storm_stats tstats; + struct pfvf_storm_stats ustats; +}; + +struct pfvf_acquire_resp_tlv { + struct pfvf_tlv hdr; + + struct pf_vf_pfdev_info { + u32 chip_num; + u32 mfw_ver; + + u16 fw_major; + u16 fw_minor; + u16 fw_rev; + u16 fw_eng; + + u64 capabilities; +#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0) +#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */ +/* There are old PF versions where the PF might mistakenly override the sanity + * mechanism [version-based] and allow a VF that can't be supported to pass + * the acquisition phase. + * To overcome this, PFs now indicate that they're past that point and the new + * VFs would fail probe on the older PFs that fail to do so. + */ +#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2) + + u16 db_size; + u8 indices_per_sb; + u8 os_type; + + /* These should match the PF's qed_dev values */ + u16 chip_rev; + u8 dev_type; + + u8 padding; + + struct pfvf_stats_info stats_info; + + u8 port_mac[ETH_ALEN]; + u8 padding2[2]; + } pfdev_info; + + struct pf_vf_resc { +#define PFVF_MAX_QUEUES_PER_VF 16 +#define PFVF_MAX_SBS_PER_VF 16 + struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF]; + u8 hw_qid[PFVF_MAX_QUEUES_PER_VF]; + u8 cid[PFVF_MAX_QUEUES_PER_VF]; + + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + u8 padding[2]; + } resc; + + u32 bulletin_size; + u32 padding; +}; + +struct pfvf_start_queue_resp_tlv { + struct pfvf_tlv hdr; + u32 offset; /* offset to consumer/producer of queue */ + u8 padding[4]; +}; + +/* Setup Queue */ +struct vfpf_start_rxq_tlv { + struct vfpf_first_tlv first_tlv; + + /* physical addresses */ + u64 rxq_addr; + u64 deprecated_sge_addr; + u64 cqe_pbl_addr; + + u16 cqe_pbl_size; + u16 hw_sb; + u16 rx_qid; + u16 hc_rate; /* desired interrupts per sec. */ + + u16 bd_max_bytes; + u16 stat_id; + u8 sb_index; + u8 padding[3]; +}; + +struct vfpf_start_txq_tlv { + struct vfpf_first_tlv first_tlv; + + /* physical addresses */ + u64 pbl_addr; + u16 pbl_size; + u16 stat_id; + u16 tx_qid; + u16 hw_sb; + + u32 flags; /* VFPF_QUEUE_FLG_X flags */ + u16 hc_rate; /* desired interrupts per sec. 
*/ + u8 sb_index; + u8 padding[3]; +}; + +/* Stop RX Queue */ +struct vfpf_stop_rxqs_tlv { + struct vfpf_first_tlv first_tlv; + + u16 rx_qid; + u8 num_rxqs; + u8 cqe_completion; + u8 padding[4]; +}; + +/* Stop TX Queues */ +struct vfpf_stop_txqs_tlv { + struct vfpf_first_tlv first_tlv; + + u16 tx_qid; + u8 num_txqs; + u8 padding[5]; +}; + +struct vfpf_update_rxq_tlv { + struct vfpf_first_tlv first_tlv; + + u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF]; + + u16 rx_qid; + u8 num_rxqs; + u8 flags; +#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0) +#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1) +#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2) + + u8 padding[4]; +}; + +/* Set Queue Filters */ +struct vfpf_q_mac_vlan_filter { + u32 flags; +#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 +#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 +#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ + + u8 mac[ETH_ALEN]; + u16 vlan_tag; + + u8 padding[4]; +}; + +/* Start a vport */ +struct vfpf_vport_start_tlv { + struct vfpf_first_tlv first_tlv; + + u64 sb_addr[PFVF_MAX_SBS_PER_VF]; + + u32 tpa_mode; + u16 dep1; + u16 mtu; + + u8 vport_id; + u8 inner_vlan_removal; + + u8 only_untagged; + u8 max_buffers_per_cqe; + + u8 padding[4]; +}; + +/* Extended tlvs - need to add rss, mcast, accept mode tlvs */ +struct vfpf_vport_update_activate_tlv { + struct channel_tlv tl; + u8 update_rx; + u8 update_tx; + u8 active_rx; + u8 active_tx; +}; + +struct vfpf_vport_update_tx_switch_tlv { + struct channel_tlv tl; + u8 tx_switching; + u8 padding[3]; +}; + +struct vfpf_vport_update_vlan_strip_tlv { + struct channel_tlv tl; + u8 remove_vlan; + u8 padding[3]; +}; + +struct vfpf_vport_update_mcast_bin_tlv { + struct channel_tlv tl; + u8 padding[4]; + + u64 bins[8]; +}; + +struct vfpf_vport_update_accept_param_tlv { + struct channel_tlv tl; + u8 update_rx_mode; + u8 update_tx_mode; + u8 rx_accept_filter; + u8 tx_accept_filter; +}; + +struct vfpf_vport_update_accept_any_vlan_tlv { + struct channel_tlv tl; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + + u8 padding[2]; +}; + +struct vfpf_vport_update_sge_tpa_tlv { + struct channel_tlv tl; + + u16 sge_tpa_flags; +#define VFPF_TPA_IPV4_EN_FLAG BIT(0) +#define VFPF_TPA_IPV6_EN_FLAG BIT(1) +#define VFPF_TPA_PKT_SPLIT_FLAG BIT(2) +#define VFPF_TPA_HDR_DATA_SPLIT_FLAG BIT(3) +#define VFPF_TPA_GRO_CONSIST_FLAG BIT(4) + + u8 update_sge_tpa_flags; +#define VFPF_UPDATE_SGE_DEPRECATED_FLAG BIT(0) +#define VFPF_UPDATE_TPA_EN_FLAG BIT(1) +#define VFPF_UPDATE_TPA_PARAM_FLAG BIT(2) + + u8 max_buffers_per_cqe; + + u16 deprecated_sge_buff_size; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; + + u8 tpa_max_aggs_num; + u8 padding[7]; +}; + +/* Primary tlv as a header for various extended tlvs for + * various functionalities in vport update ramrod. 
+ */ +struct vfpf_vport_update_tlv { + struct vfpf_first_tlv first_tlv; +}; + +struct vfpf_ucast_filter_tlv { + struct vfpf_first_tlv first_tlv; + + u8 opcode; + u8 type; + + u8 mac[ETH_ALEN]; + + u16 vlan; + u16 padding[3]; +}; + +struct tlv_buffer_size { + u8 tlv_buffer[TLV_BUFFER_SIZE]; +}; + +union vfpf_tlvs { + struct vfpf_first_tlv first_tlv; + struct vfpf_acquire_tlv acquire; + struct vfpf_start_rxq_tlv start_rxq; + struct vfpf_start_txq_tlv start_txq; + struct vfpf_stop_rxqs_tlv stop_rxqs; + struct vfpf_stop_txqs_tlv stop_txqs; + struct vfpf_update_rxq_tlv update_rxq; + struct vfpf_vport_start_tlv start_vport; + struct vfpf_vport_update_tlv vport_update; + struct vfpf_ucast_filter_tlv ucast_filter; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +union pfvf_tlvs { + struct pfvf_def_resp_tlv default_resp; + struct pfvf_acquire_resp_tlv acquire_resp; + struct tlv_buffer_size tlv_buf_size; + struct pfvf_start_queue_resp_tlv queue_start; +}; + +enum qed_bulletin_bit { + /* Alert the VF that a forced MAC was set by the PF */ + MAC_ADDR_FORCED = 0, + /* Alert the VF that a forced VLAN was set by the PF */ + VLAN_ADDR_FORCED = 2, + + /* Indicate that `default_only_untagged' contains actual data */ + VFPF_BULLETIN_UNTAGGED_DEFAULT = 3, + VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4, + + /* Alert the VF that a suggested MAC was sent by the PF. + * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set. + */ + VFPF_BULLETIN_MAC_ADDR = 5 +}; + +struct qed_bulletin_content { + /* crc of structure to ensure it is not read mid-update */ + u32 crc; + + u32 version; + + /* bitmap indicating which fields hold valid values */ + u64 valid_bitmap; + + /* used for MAC_ADDR or MAC_ADDR_FORCED */ + u8 mac[ETH_ALEN]; + + /* If valid, 1 => only untagged Rx if no vlan is configured */ + u8 default_only_untagged; + u8 padding; + + /* The following is a 'copy' of qed_mcp_link_state, + * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's + * possible the structs will increase further along the road, we cannot + * have it here; instead we need to have all of its fields. + */ + u8 req_autoneg; + u8 req_autoneg_pause; + u8 req_forced_rx; + u8 req_forced_tx; + u8 padding2[4]; + + u32 req_adv_speed; + u32 req_forced_speed; + u32 req_loopback; + u32 padding3; + + u8 link_up; + u8 full_duplex; + u8 autoneg; + u8 autoneg_complete; + u8 parallel_detection; + u8 pfc_enabled; + u8 partner_tx_flow_ctrl_en; + u8 partner_rx_flow_ctrl_en; + u8 partner_adv_pause; + u8 sfp_tx_fault; + u8 padding4[6]; + + u32 speed; + u32 partner_adv_speed; + + u32 capability_speed; + + /* Forced vlan */ + u16 pvid; + u16 padding5; +}; + +struct qed_bulletin { + dma_addr_t phys; + struct qed_bulletin_content *p_virt; + u32 size; +}; + +enum { + CHANNEL_TLV_NONE, /* ends tlv sequence */ + CHANNEL_TLV_ACQUIRE, + CHANNEL_TLV_VPORT_START, + CHANNEL_TLV_VPORT_UPDATE, + CHANNEL_TLV_VPORT_TEARDOWN, + CHANNEL_TLV_START_RXQ, + CHANNEL_TLV_START_TXQ, + CHANNEL_TLV_STOP_RXQS, + CHANNEL_TLV_STOP_TXQS, + CHANNEL_TLV_UPDATE_RXQ, + CHANNEL_TLV_INT_CLEANUP, + CHANNEL_TLV_CLOSE, + CHANNEL_TLV_RELEASE, + CHANNEL_TLV_LIST_END, + CHANNEL_TLV_UCAST_FILTER, + CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH, + CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, + CHANNEL_TLV_VPORT_UPDATE_MCAST, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM, + CHANNEL_TLV_VPORT_UPDATE_RSS, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, + CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, + CHANNEL_TLV_MAX, + + /* Required for iterating over vport-update tlvs.
+ * Will break in case of non-sequential vport-update tlvs. + */ + CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1, +}; + +/* This data is held in the qed_hwfn structure for VFs only. */ +struct qed_vf_iov { + union vfpf_tlvs *vf2pf_request; + dma_addr_t vf2pf_request_phys; + union pfvf_tlvs *pf2vf_reply; + dma_addr_t pf2vf_reply_phys; + + /* Should be taken whenever the mailbox buffers are accessed */ + struct mutex mutex; + u8 *offset; + + /* Bulletin Board */ + struct qed_bulletin bulletin; + struct qed_bulletin_content bulletin_shadow; + + /* we set aside a copy of the acquire response */ + struct pfvf_acquire_resp_tlv acquire_resp; +}; + +#ifdef CONFIG_QED_SRIOV +/** + * @brief Read the VF bulletin and act on it if needed + * + * @param p_hwfn + * @param p_change - qed fills in 1 iff the bulletin board has changed, 0 otherwise. + * + * @return enum _qed_status + */ +int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change); + +/** + * @brief Get link parameters for VF from qed + * + * @param p_hwfn + * @param params - the link params structure to be filled for the VF + */ +void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *params); + +/** + * @brief Get link state for VF from qed + * + * @param p_hwfn + * @param link - the link state structure to be filled for the VF + */ +void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *link); + +/** + * @brief Get link capabilities for VF from qed + * + * @param p_hwfn + * @param p_link_caps - the link capabilities structure to be filled for the VF + */ +void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps); + +/** + * @brief Get number of Rx queues allocated for VF by qed + * + * @param p_hwfn + * @param num_rxqs - allocated RX queues + */ +void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs); + +/** + * @brief Get port mac address for VF + * + * @param p_hwfn + * @param port_mac - destination location for port mac + */ +void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac); + +/** + * @brief Get number of VLAN filters allocated for VF by qed + * + * @param p_hwfn + * @param num_vlan_filters - allocated VLAN filters + */ +void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, + u8 *num_vlan_filters); + +/** + * @brief Check if VF can set a MAC address + * + * @param p_hwfn + * @param mac + * + * @return bool + */ +bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac); + +/** + * @brief Set firmware version information in dev_info from the VF's acquire response tlv + * + * @param p_hwfn + * @param fw_major + * @param fw_minor + * @param fw_rev + * @param fw_eng + */ +void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, + u16 *fw_rev, u16 *fw_eng); + +/** + * @brief hw preparation for VF + * sends ACQUIRE message + * + * @param p_hwfn + * + * @return int + */ +int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); + +/** + * @brief VF - start the RX Queue by sending a message to the PF + * @param p_hwfn + * @param rx_queue_id - zero based within the VF + * @param sb - VF status block for this queue + * @param sb_index - Index within the status block + * @param bd_max_bytes - maximum number of bytes per bd + * @param bd_chain_phys_addr - physical address of bd chain + * @param cqe_pbl_addr - physical address of pbl + * @param cqe_pbl_size - pbl size + * @param pp_prod - pointer to the producer to be + * used in fastpath + * + * @return int + */ +int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, + u8 rx_queue_id, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod);
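For orientation, here is a minimal sketch (not part of the patch) of how a VF driver path might bring up an Rx queue through this mailbox API. qed_vf_pf_rxq_start() and qed_vf_get_num_rxqs() are the declarations above, while vf_setup_rxq() and all of its arguments are hypothetical glue:

static int vf_setup_rxq(struct qed_hwfn *p_hwfn, u8 qid, u16 sb, u8 sb_index,
			u16 bd_max_bytes, dma_addr_t bd_chain_phys,
			dma_addr_t cqe_pbl, u16 cqe_pbl_size)
{
	void __iomem *prod = NULL;
	u8 num_rxqs = 0;
	int rc;

	/* Queue ids are zero-based within the VF; stay within the
	 * resources granted by the PF in the ACQUIRE response.
	 */
	qed_vf_get_num_rxqs(p_hwfn, &num_rxqs);
	if (qid >= num_rxqs)
		return -EINVAL;

	/* Sends CHANNEL_TLV_START_RXQ over the vf2pf mailbox; on
	 * success 'prod' holds the producer address for the fastpath.
	 */
	rc = qed_vf_pf_rxq_start(p_hwfn, qid, sb, sb_index, bd_max_bytes,
				 bd_chain_phys, cqe_pbl, cqe_pbl_size, &prod);
	if (rc)
		return rc;

	/* ... program 'prod' into the Rx ring logic ... */
	return 0;
}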
+ +/** + * @brief VF - start the TX queue by sending a message to the + * PF. + * + * @param p_hwfn + * @param tx_queue_id - zero based within the VF + * @param sb - status block for this queue + * @param sb_index - index within the status block + * @param pbl_addr - physical address of the tx pbl + * @param pbl_size - pbl size + * @param pp_doorbell - pointer to address to which to + * write the doorbell. + * + * @return int + */ +int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + u16 tx_queue_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell); + +/** + * @brief VF - stop the RX queue by sending a message to the PF + * + * @param p_hwfn + * @param rx_qid + * @param cqe_completion + * + * @return int + */ +int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, + u16 rx_qid, bool cqe_completion); + +/** + * @brief VF - stop the TX queue by sending a message to the PF + * + * @param p_hwfn + * @param tx_qid + * + * @return int + */ +int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid); + +/** + * @brief VF - send a vport update command + * + * @param p_hwfn + * @param p_params + * + * @return int + */ +int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params); + +/** + * @brief VF - send a close message to PF + * + * @param p_hwfn + * + * @return enum _qed_status + */ +int qed_vf_pf_reset(struct qed_hwfn *p_hwfn); + +/** + * @brief VF - free the VF's memories + * + * @param p_hwfn + * + * @return enum _qed_status + */ +int qed_vf_pf_release(struct qed_hwfn *p_hwfn); + +/** + * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given + * sb_id. For VFs, IGU SBs don't have to be contiguous. + * + * @param p_hwfn + * @param sb_id + * + * @return INLINE u16 + */ +u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); + +/** + * @brief qed_vf_pf_vport_start - perform vport start for VF.
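+ * (sends a CHANNEL_TLV_VPORT_START message to the PF).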
+ * + * @param p_hwfn + * @param vport_id + * @param mtu + * @param inner_vlan_removal + * @param tpa_mode + * @param max_buffers_per_cqe, + * @param only_untagged - default behavior regarding vlan acceptance + * + * @return enum _qed_status + */ +int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, + u8 vport_id, + u16 mtu, + u8 inner_vlan_removal, + enum qed_tpa_mode tpa_mode, + u8 max_buffers_per_cqe, u8 only_untagged); + +/** + * @brief qed_vf_pf_vport_stop - stop the VF's vport + * + * @param p_hwfn + * + * @return enum _qed_status + */ +int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn); + +int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, + struct qed_filter_ucast *p_param); + +void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + struct qed_filter_mcast *p_filter_cmd); + +/** + * @brief qed_vf_pf_int_cleanup - clean the SB of the VF + * + * @param p_hwfn + * + * @return enum _qed_status + */ +int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn); + +/** + * @brief - return the link params in a given bulletin board + * + * @param p_hwfn + * @param p_params - pointer to a struct to fill with link params + * @param p_bulletin + */ +void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *p_params, + struct qed_bulletin_content *p_bulletin); + +/** + * @brief - return the link state in a given bulletin board + * + * @param p_hwfn + * @param p_link - pointer to a struct to fill with link state + * @param p_bulletin + */ +void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *p_link, + struct qed_bulletin_content *p_bulletin); + +/** + * @brief - return the link capabilities in a given bulletin board + * + * @param p_hwfn + * @param p_link - pointer to a struct to fill with link capabilities + * @param p_bulletin + */ +void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps, + struct qed_bulletin_content *p_bulletin); + +void qed_iov_vf_task(struct work_struct *work); +#else +static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *params) +{ +} + +static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *link) +{ +} + +static inline void +qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps) +{ +} + +static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) +{ +} + +static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) +{ +} + +static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, + u8 *num_vlan_filters) +{ +} + +static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) +{ + return false; +} + +static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, + u16 *fw_rev, u16 *fw_eng) +{ +} + +static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, + u8 rx_queue_id, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_adr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + u16 tx_queue_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, + u16 rx_qid, bool cqe_completion) +{ + return -EINVAL; +} + +static inline int 
qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) +{ + return -EINVAL; +} + +static inline int +qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) +{ + return 0; +} + +static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, + u8 vport_id, + u16 mtu, + u8 inner_vlan_removal, + enum qed_tpa_mode tpa_mode, + u8 max_buffers_per_cqe, + u8 only_untagged) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, + struct qed_filter_ucast *p_param) +{ + return -EINVAL; +} + +static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + struct qed_filter_mcast *p_filter_cmd) +{ +} + +static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params + *p_params, + struct qed_bulletin_content + *p_bulletin) +{ +} + +static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *p_link, + struct qed_bulletin_content + *p_bulletin) +{ +} + +static inline void +__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps, + struct qed_bulletin_content *p_bulletin) +{ +} + +static inline void qed_iov_vf_task(struct work_struct *work) +{ +} +#endif + +#endif diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index d023251544d9..47d6b22252f6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -25,15 +25,13 @@ #define QEDE_MAJOR_VERSION 8 #define QEDE_MINOR_VERSION 7 -#define QEDE_REVISION_VERSION 0 -#define QEDE_ENGINEERING_VERSION 0 +#define QEDE_REVISION_VERSION 1 +#define QEDE_ENGINEERING_VERSION 20 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ __stringify(QEDE_MINOR_VERSION) "." \ __stringify(QEDE_REVISION_VERSION) "." 
\ __stringify(QEDE_ENGINEERING_VERSION) -#define QEDE_ETH_INTERFACE_VERSION 300 - #define DRV_MODULE_SYM qede struct qede_stats { @@ -61,16 +59,16 @@ struct qede_stats { /* port */ u64 rx_64_byte_packets; - u64 rx_127_byte_packets; - u64 rx_255_byte_packets; - u64 rx_511_byte_packets; - u64 rx_1023_byte_packets; - u64 rx_1518_byte_packets; - u64 rx_1522_byte_packets; - u64 rx_2047_byte_packets; - u64 rx_4095_byte_packets; - u64 rx_9216_byte_packets; - u64 rx_16383_byte_packets; + u64 rx_65_to_127_byte_packets; + u64 rx_128_to_255_byte_packets; + u64 rx_256_to_511_byte_packets; + u64 rx_512_to_1023_byte_packets; + u64 rx_1024_to_1518_byte_packets; + u64 rx_1519_to_1522_byte_packets; + u64 rx_1519_to_2047_byte_packets; + u64 rx_2048_to_4095_byte_packets; + u64 rx_4096_to_9216_byte_packets; + u64 rx_9217_to_16383_byte_packets; u64 rx_crc_errors; u64 rx_mac_crtl_frames; u64 rx_pause_frames; @@ -114,6 +112,10 @@ struct qede_dev { u32 dp_module; u8 dp_level; + u32 flags; +#define QEDE_FLAG_IS_VF BIT(0) +#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF)) + const struct qed_eth_ops *ops; struct qed_dev_eth_info dev_info; @@ -156,6 +158,10 @@ struct qede_dev { SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) struct qede_stats stats; +#define QEDE_RSS_INDIR_INITED BIT(0) +#define QEDE_RSS_KEY_INITED BIT(1) +#define QEDE_RSS_CAPS_INITED BIT(2) + u32 rss_params_inited; /* bit-field to track initialized rss params */ struct qed_update_vport_rss_params rss_params; u16 q_num_rx_buffers; /* Must be a power of two */ u16 q_num_tx_buffers; /* Must be a power of two */ @@ -167,6 +173,8 @@ struct qede_dev { bool accept_any_vlan; struct delayed_work sp_task; unsigned long sp_flags; + u16 vxlan_dst_port; + u16 geneve_dst_port; }; enum QEDE_STATE { @@ -286,8 +294,11 @@ struct qede_fastpath { #define QEDE_CSUM_ERROR BIT(0) #define QEDE_CSUM_UNNECESSARY BIT(1) +#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2) -#define QEDE_SP_RX_MODE 1 +#define QEDE_SP_RX_MODE 1 +#define QEDE_SP_VXLAN_PORT_CONFIG 2 +#define QEDE_SP_GENEVE_PORT_CONFIG 3 union qede_reload_args { u16 mtu; @@ -301,6 +312,10 @@ void qede_reload(struct qede_dev *edev, union qede_reload_args *args); int qede_change_mtu(struct net_device *dev, int new_mtu); void qede_fill_by_demand_stats(struct qede_dev *edev); +bool qede_has_rx_work(struct qede_rx_queue *rxq); +int qede_txq_has_work(struct qede_tx_queue *txq); +void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, + u8 count); #define RX_RING_SIZE_POW 13 #define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW)) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index c49dc10ce151..1bc75358cbc4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -9,6 +9,7 @@ #include <linux/version.h> #include <linux/types.h> #include <linux/netdevice.h> +#include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/string.h> #include <linux/pci.h> @@ -27,6 +28,9 @@ #define QEDE_RQSTAT_STRING(stat_name) (#stat_name) #define QEDE_RQSTAT(stat_name) \ {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)} + +#define QEDE_SELFTEST_POLL_COUNT 100 + static const struct { u64 offset; char string[ETH_GSTRING_LEN]; @@ -59,16 +63,16 @@ static const struct { QEDE_STAT(tx_bcast_pkts), QEDE_PF_STAT(rx_64_byte_packets), - QEDE_PF_STAT(rx_127_byte_packets), - QEDE_PF_STAT(rx_255_byte_packets), - QEDE_PF_STAT(rx_511_byte_packets), - QEDE_PF_STAT(rx_1023_byte_packets), - 
QEDE_PF_STAT(rx_1518_byte_packets), - QEDE_PF_STAT(rx_1522_byte_packets), - QEDE_PF_STAT(rx_2047_byte_packets), - QEDE_PF_STAT(rx_4095_byte_packets), - QEDE_PF_STAT(rx_9216_byte_packets), - QEDE_PF_STAT(rx_16383_byte_packets), + QEDE_PF_STAT(rx_65_to_127_byte_packets), + QEDE_PF_STAT(rx_128_to_255_byte_packets), + QEDE_PF_STAT(rx_256_to_511_byte_packets), + QEDE_PF_STAT(rx_512_to_1023_byte_packets), + QEDE_PF_STAT(rx_1024_to_1518_byte_packets), + QEDE_PF_STAT(rx_1519_to_1522_byte_packets), + QEDE_PF_STAT(rx_1519_to_2047_byte_packets), + QEDE_PF_STAT(rx_2048_to_4095_byte_packets), + QEDE_PF_STAT(rx_4096_to_9216_byte_packets), + QEDE_PF_STAT(rx_9217_to_16383_byte_packets), QEDE_PF_STAT(tx_64_byte_packets), QEDE_PF_STAT(tx_65_to_127_byte_packets), QEDE_PF_STAT(tx_128_to_255_byte_packets), @@ -116,11 +120,39 @@ static const struct { #define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr) +enum { + QEDE_PRI_FLAG_CMT, + QEDE_PRI_FLAG_LEN, +}; + +static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = { + "Coupled-Function", +}; + +enum qede_ethtool_tests { + QEDE_ETHTOOL_INT_LOOPBACK, + QEDE_ETHTOOL_INTERRUPT_TEST, + QEDE_ETHTOOL_MEMORY_TEST, + QEDE_ETHTOOL_REGISTER_TEST, + QEDE_ETHTOOL_CLOCK_TEST, + QEDE_ETHTOOL_TEST_MAX +}; + +static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = { + "Internal loopback (offline)", + "Interrupt (online)\t", + "Memory (online)\t\t", + "Register (online)\t", + "Clock (online)\t\t", +}; + static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) { int i, j, k; for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) { + if (IS_VF(edev) && qede_stats_arr[i].pf_only) + continue; strcpy(buf + j * ETH_GSTRING_LEN, qede_stats_arr[i].string); j++; @@ -139,6 +171,14 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf) case ETH_SS_STATS: qede_get_strings_stats(edev, buf); break; + case ETH_SS_PRIV_FLAGS: + memcpy(buf, qede_private_arr, + ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN); + break; + case ETH_SS_TEST: + memcpy(buf, qede_tests_str_arr, + ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX); + break; default: DP_VERBOSE(edev, QED_MSG_DEBUG, "Unsupported stringset 0x%08x\n", stringset); @@ -156,8 +196,11 @@ static void qede_get_ethtool_stats(struct net_device *dev, mutex_lock(&edev->qede_lock); - for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) + for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) { + if (IS_VF(edev) && qede_stats_arr[sidx].pf_only) + continue; buf[cnt++] = QEDE_STATS_DATA(edev, sidx); + } for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) { buf[cnt] = 0; @@ -176,8 +219,18 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) switch (stringset) { case ETH_SS_STATS: - return num_stats + QEDE_NUM_RQSTATS; + if (IS_VF(edev)) { + int i; + for (i = 0; i < QEDE_NUM_STATS; i++) + if (qede_stats_arr[i].pf_only) + num_stats--; + } + return num_stats + QEDE_NUM_RQSTATS; + case ETH_SS_PRIV_FLAGS: + return QEDE_PRI_FLAG_LEN; + case ETH_SS_TEST: + return QEDE_ETHTOOL_TEST_MAX; default: DP_VERBOSE(edev, QED_MSG_DEBUG, "Unsupported stringset 0x%08x\n", stringset); @@ -185,6 +238,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset) } } +static u32 qede_get_priv_flags(struct net_device *dev) +{ + struct qede_dev *edev = netdev_priv(dev); + + return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT; +} + static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct qede_dev *edev = netdev_priv(dev); @@ -217,9 +277,9 @@ static int qede_set_settings(struct 
net_device *dev, struct ethtool_cmd *cmd) struct qed_link_params params; u32 speed; - if (!edev->dev_info.common.is_mf_default) { + if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, - "Link parameters can not be changed in non-default mode\n"); + "Link settings are not allowed to be changed\n"); return -EOPNOTSUPP; } @@ -328,6 +388,12 @@ static int qede_nway_reset(struct net_device *dev) struct qed_link_output current_link; struct qed_link_params link_params; + if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { + DP_INFO(edev, + "Link settings are not allowed to be changed\n"); + return -EOPNOTSUPP; + } + if (!netif_running(dev)) return 0; @@ -428,9 +494,9 @@ static int qede_set_pauseparam(struct net_device *dev, struct qed_link_params params; struct qed_link_output current_link; - if (!edev->dev_info.common.is_mf_default) { + if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) { DP_INFO(edev, - "Pause parameters can not be updated in non-default mode\n"); + "Pause settings are not allowed to be changed\n"); return -EOPNOTSUPP; } @@ -569,6 +635,497 @@ static int qede_set_phys_id(struct net_device *dev, return 0; } +static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + info->data = RXH_IP_SRC | RXH_IP_DST; + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V4_FLOW: + if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case UDP_V6_FLOW: + if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + break; + case IPV4_FLOW: + case IPV6_FLOW: + break; + default: + info->data = 0; + break; + } + + return 0; +} + +static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, + u32 *rules __always_unused) +{ + struct qede_dev *edev = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_GRXRINGS: + info->data = edev->num_rss; + return 0; + case ETHTOOL_GRXFH: + return qede_get_rss_flags(edev, info); + default: + DP_ERR(edev, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } +} + +static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + struct qed_update_vport_params vport_update_params; + u8 set_caps = 0, clr_caps = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Set rss flags command parameters: flow type = %d, data = %llu\n", + info->flow_type, info->data); + + switch (info->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + /* For TCP only 4-tuple hash is supported */ + if (info->data ^ (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + DP_INFO(edev, "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + case UDP_V4_FLOW: + /* For UDP either 2-tuple hash or 4-tuple hash is supported */ + if (info->data == (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + set_caps = QED_RSS_IPV4_UDP; + DP_VERBOSE(edev, QED_MSG_DEBUG, + "UDP 4-tuple enabled\n"); + } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) { + clr_caps = QED_RSS_IPV4_UDP; + DP_VERBOSE(edev, QED_MSG_DEBUG, + "UDP 4-tuple disabled\n"); + } else { + return -EINVAL; + } + break; + case UDP_V6_FLOW: + /* For UDP either 2-tuple hash or 4-tuple hash is supported */ + if (info->data == (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + set_caps = QED_RSS_IPV6_UDP; + DP_VERBOSE(edev, QED_MSG_DEBUG, + "UDP 4-tuple enabled\n"); + } else if (info->data == 
(RXH_IP_SRC | RXH_IP_DST)) { + clr_caps = QED_RSS_IPV6_UDP; + DP_VERBOSE(edev, QED_MSG_DEBUG, + "UDP 4-tuple disabled\n"); + } else { + return -EINVAL; + } + break; + case IPV4_FLOW: + case IPV6_FLOW: + /* For IP only 2-tuple hash is supported */ + if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) { + DP_INFO(edev, "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: + case ESP_V4_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: + case ESP_V6_FLOW: + case IP_USER_FLOW: + case ETHER_FLOW: + /* RSS is not supported for these protocols */ + if (info->data) { + DP_INFO(edev, "Command parameters not supported\n"); + return -EINVAL; + } + return 0; + default: + return -EINVAL; + } + + /* No action is needed if there is no change in the rss capability */ + if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps & + ~clr_caps) | set_caps)) + return 0; + + /* Update internal configuration */ + edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) | + set_caps; + edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; + + /* Re-configure if possible */ + if (netif_running(edev->ndev)) { + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.update_rss_flg = 1; + vport_update_params.vport_id = 0; + memcpy(&vport_update_params.rss_params, &edev->rss_params, + sizeof(vport_update_params.rss_params)); + return edev->ops->vport_update(edev->cdev, + &vport_update_params); + } + + return 0; +} + +static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) +{ + struct qede_dev *edev = netdev_priv(dev); + + switch (info->cmd) { + case ETHTOOL_SRXFH: + return qede_set_rss_flags(edev, info); + default: + DP_INFO(edev, "Command parameters not supported\n"); + return -EOPNOTSUPP; + } +} + +static u32 qede_get_rxfh_indir_size(struct net_device *dev) +{ + return QED_RSS_IND_TABLE_SIZE; +} + +static u32 qede_get_rxfh_key_size(struct net_device *dev) +{ + struct qede_dev *edev = netdev_priv(dev); + + return sizeof(edev->rss_params.rss_key); +} + +static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) +{ + struct qede_dev *edev = netdev_priv(dev); + int i; + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (!indir) + return 0; + + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) + indir[i] = edev->rss_params.rss_ind_table[i]; + + if (key) + memcpy(key, edev->rss_params.rss_key, + qede_get_rxfh_key_size(dev)); + + return 0; +} + +static int qede_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct qed_update_vport_params vport_update_params; + struct qede_dev *edev = netdev_priv(dev); + int i; + + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + + if (!indir && !key) + return 0; + + if (indir) { + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) + edev->rss_params.rss_ind_table[i] = indir[i]; + edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; + } + + if (key) { + memcpy(&edev->rss_params.rss_key, key, + qede_get_rxfh_key_size(dev)); + edev->rss_params_inited |= QEDE_RSS_KEY_INITED; + } + + if (netif_running(edev->ndev)) { + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.update_rss_flg = 1; + vport_update_params.vport_id = 0; + memcpy(&vport_update_params.rss_params, &edev->rss_params, + sizeof(vport_update_params.rss_params)); + return edev->ops->vport_update(edev->cdev, + &vport_update_params); + } + + return 0; +} + +/* 
This function enables the interrupt generation and the NAPI on the device */ +static void qede_netif_start(struct qede_dev *edev) +{ + int i; + + if (!netif_running(edev->ndev)) + return; + + for_each_rss(i) { + /* Update and reenable interrupts */ + qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1); + napi_enable(&edev->fp_array[i].napi); + } +} + +/* This function disables the NAPI and the interrupt generation on the device */ +static void qede_netif_stop(struct qede_dev *edev) +{ + int i; + + for_each_rss(i) { + napi_disable(&edev->fp_array[i].napi); + /* Disable interrupts */ + qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0); + } +} + +static int qede_selftest_transmit_traffic(struct qede_dev *edev, + struct sk_buff *skb) +{ + struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0]; + struct eth_tx_1st_bd *first_bd; + dma_addr_t mapping; + int i, idx, val; + + /* Fill the entry in the SW ring and the BDs in the FW ring */ + idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; + txq->sw_tx_ring[idx].skb = skb; + first_bd = qed_chain_produce(&txq->tx_pbl); + memset(first_bd, 0, sizeof(*first_bd)); + val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + first_bd->data.bd_flags.bitfields = val; + + /* Map skb linear data for DMA and set in the first BD */ + mapping = dma_map_single(&edev->pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { + DP_NOTICE(edev, "SKB mapping failed\n"); + return -ENOMEM; + } + BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); + + /* update the first BD with the actual num BDs */ + first_bd->data.nbds = 1; + txq->sw_tx_prod++; + /* 'next page' entries are counted in the producer value */ + val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); + txq->tx_db.data.bd_prod = val; + + /* wmb makes sure that the BDs' data is updated before updating the + * producer, otherwise FW may read old data from the BDs. + */ + wmb(); + barrier(); + writel(txq->tx_db.raw, txq->doorbell_addr); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives at the device before + * the queue lock is released and another start_xmit is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. + */ + mmiowb(); + + for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { + if (qede_txq_has_work(txq)) + break; + usleep_range(100, 200); + } + + if (!qede_txq_has_work(txq)) { + DP_NOTICE(edev, "Tx completion didn't happen\n"); + return -1; + } + + first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); + dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), + BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE); + txq->sw_tx_cons++; + txq->sw_tx_ring[idx].skb = NULL; + + return 0; +} + +static int qede_selftest_receive_traffic(struct qede_dev *edev) +{ + struct qede_rx_queue *rxq = edev->fp_array[0].rxq; + u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len; + struct eth_fast_path_rx_reg_cqe *fp_cqe; + struct sw_rx_data *sw_rx_data; + union eth_rx_cqe *cqe; + u8 *data_ptr; + int i; + + /* The packet is expected to be received on rx-queue 0 even though RSS is + * enabled. This is because queue 0 is configured as the default + * queue and the loopback traffic is not IP.
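+ * The transmit side filled the payload with the (i & 0xff) byte
+ * pattern, which is what the verification loop below compares against.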
+ */ + for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) { + if (qede_has_rx_work(rxq)) + break; + usleep_range(100, 200); + } + + if (!qede_has_rx_work(rxq)) { + DP_NOTICE(edev, "Failed to receive the traffic\n"); + return -1; + } + + hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); + sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); + + /* Memory barrier to prevent the CPU from doing speculative reads of CQE + * / BD before reading hw_comp_cons. If the CQE is read before it is + * written by FW, then FW writes CQE and SB, and then the CPU reads the + * hw_comp_cons, it will use an old CQE. + */ + rmb(); + + /* Get the CQE from the completion ring */ + cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); + + /* Get the data from the SW ring */ + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; + fp_cqe = &cqe->fast_path_regular; + len = le16_to_cpu(fp_cqe->len_on_first_bd); + data_ptr = (u8 *)(page_address(sw_rx_data->data) + + fp_cqe->placement_offset + sw_rx_data->page_offset); + for (i = ETH_HLEN; i < len; i++) + if (data_ptr[i] != (unsigned char)(i & 0xff)) { + DP_NOTICE(edev, "Loopback test failed\n"); + qede_recycle_rx_bd_ring(rxq, edev, 1); + return -1; + } + + qede_recycle_rx_bd_ring(rxq, edev, 1); + + return 0; +} + +static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode) +{ + struct qed_link_params link_params; + struct sk_buff *skb = NULL; + int rc = 0, i; + u32 pkt_size; + u8 *packet; + + if (!netif_running(edev->ndev)) { + DP_NOTICE(edev, "Interface is down\n"); + return -EINVAL; + } + + qede_netif_stop(edev); + + /* Bring up the link in Loopback mode */ + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = true; + link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE; + link_params.loopback_mode = loopback_mode; + edev->ops->common->set_link(edev->cdev, &link_params); + + /* Wait for loopback configuration to apply */ + msleep_interruptible(500); + + /* prepare the loopback packet */ + pkt_size = edev->ndev->mtu + ETH_HLEN; + + skb = netdev_alloc_skb(edev->ndev, pkt_size); + if (!skb) { + DP_INFO(edev, "Can't allocate skb\n"); + rc = -ENOMEM; + goto test_loopback_exit; + } + packet = skb_put(skb, pkt_size); + ether_addr_copy(packet, edev->ndev->dev_addr); + ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr); + memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN))); + for (i = ETH_HLEN; i < pkt_size; i++) + packet[i] = (unsigned char)(i & 0xff); + + rc = qede_selftest_transmit_traffic(edev, skb); + if (rc) + goto test_loopback_exit; + + rc = qede_selftest_receive_traffic(edev); + if (rc) + goto test_loopback_exit; + + DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n"); + +test_loopback_exit: + dev_kfree_skb(skb); + + /* Bring up the link in Normal mode */ + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = true; + link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE; + link_params.loopback_mode = QED_LINK_LOOPBACK_NONE; + edev->ops->common->set_link(edev->cdev, &link_params); + + /* Wait for loopback configuration to apply */ + msleep_interruptible(500); + + qede_netif_start(edev); + + return rc; +} + +static void qede_self_test(struct net_device *dev, + struct ethtool_test *etest, u64 *buf) +{ + struct qede_dev *edev = netdev_priv(dev); + + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Self-test command parameters: offline = %d, external_lb = %d\n", + (etest->flags & ETH_TEST_FL_OFFLINE), + (etest->flags & 
ETH_TEST_FL_EXTERNAL_LB) >> 2); + + memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX); + + if (etest->flags & ETH_TEST_FL_OFFLINE) { + if (qede_selftest_run_loopback(edev, + QED_LINK_LOOPBACK_INT_PHY)) { + buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + } + + if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) { + buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + if (edev->ops->common->selftest->selftest_memory(edev->cdev)) { + buf[QEDE_ETHTOOL_MEMORY_TEST] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + if (edev->ops->common->selftest->selftest_register(edev->cdev)) { + buf[QEDE_ETHTOOL_REGISTER_TEST] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } + + if (edev->ops->common->selftest->selftest_clock(edev->cdev)) { + buf[QEDE_ETHTOOL_CLOCK_TEST] = 1; + etest->flags |= ETH_TEST_FL_FAILED; + } +} + static const struct ethtool_ops qede_ethtool_ops = { .get_settings = qede_get_settings, .set_settings = qede_set_settings, @@ -584,13 +1141,47 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_strings = qede_get_strings, .set_phys_id = qede_set_phys_id, .get_ethtool_stats = qede_get_ethtool_stats, + .get_priv_flags = qede_get_priv_flags, .get_sset_count = qede_get_sset_count, + .get_rxnfc = qede_get_rxnfc, + .set_rxnfc = qede_set_rxnfc, + .get_rxfh_indir_size = qede_get_rxfh_indir_size, + .get_rxfh_key_size = qede_get_rxfh_key_size, + .get_rxfh = qede_get_rxfh, + .set_rxfh = qede_set_rxfh, + .get_channels = qede_get_channels, + .set_channels = qede_set_channels, + .self_test = qede_self_test, +}; +static const struct ethtool_ops qede_vf_ethtool_ops = { + .get_settings = qede_get_settings, + .get_drvinfo = qede_get_drvinfo, + .get_msglevel = qede_get_msglevel, + .set_msglevel = qede_set_msglevel, + .get_link = qede_get_link, + .get_ringparam = qede_get_ringparam, + .set_ringparam = qede_set_ringparam, + .get_strings = qede_get_strings, + .get_ethtool_stats = qede_get_ethtool_stats, + .get_priv_flags = qede_get_priv_flags, + .get_sset_count = qede_get_sset_count, + .get_rxnfc = qede_get_rxnfc, + .set_rxnfc = qede_set_rxnfc, + .get_rxfh_indir_size = qede_get_rxfh_indir_size, + .get_rxfh_key_size = qede_get_rxfh_key_size, + .get_rxfh = qede_get_rxfh, + .set_rxfh = qede_set_rxfh, .get_channels = qede_get_channels, .set_channels = qede_set_channels, }; void qede_set_ethtool_ops(struct net_device *dev) { - dev->ethtool_ops = &qede_ethtool_ops; + struct qede_dev *edev = netdev_priv(dev); + + if (IS_VF(edev)) + dev->ethtool_ops = &qede_vf_ethtool_ops; + else + dev->ethtool_ops = &qede_ethtool_ops; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 518af329502d..73dd525fbf08 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -24,7 +24,12 @@ #include <linux/netdev_features.h> #include <linux/udp.h> #include <linux/tcp.h> +#ifdef CONFIG_QEDE_VXLAN #include <net/vxlan.h> +#endif +#ifdef CONFIG_QEDE_GENEVE +#include <net/geneve.h> +#endif #include <linux/ip.h> #include <net/ipv6.h> #include <net/tcp.h> @@ -58,6 +63,7 @@ static const struct qed_eth_ops *qed_ops; #define CHIP_NUM_57980S_100 0x1644 #define CHIP_NUM_57980S_50 0x1654 #define CHIP_NUM_57980S_25 0x1656 +#define CHIP_NUM_57980S_IOV 0x1664 #ifndef PCI_DEVICE_ID_NX2_57980E #define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40 @@ -66,15 +72,22 @@ static const struct qed_eth_ops *qed_ops; #define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100 
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50 #define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25 +#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV #endif +enum qede_pci_private { + QEDE_PRIVATE_PF, + QEDE_PRIVATE_VF +}; + static const struct pci_device_id qede_pci_tbl[] = { - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 }, - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 }, - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 }, - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 }, - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 }, - { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 }, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, + {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, { 0 } }; @@ -89,17 +102,87 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, struct qede_rx_queue *rxq); static void qede_link_update(void *dev, struct qed_link_output *link); +#ifdef CONFIG_QED_SRIOV +static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos) +{ + struct qede_dev *edev = netdev_priv(ndev); + + if (vlan > 4095) { + DP_NOTICE(edev, "Illegal vlan value %d\n", vlan); + return -EINVAL; + } + + DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n", + vlan, vf); + + return edev->ops->iov->set_vlan(edev->cdev, vlan, vf); +} + +static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac) +{ + struct qede_dev *edev = netdev_priv(ndev); + + DP_VERBOSE(edev, QED_MSG_IOV, + "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n", + mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx); + + if (!is_valid_ether_addr(mac)) { + DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n"); + return -EINVAL; + } + + return edev->ops->iov->set_mac(edev->cdev, mac, vfidx); +} + +static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param) +{ + struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev)); + struct qed_dev_info *qed_info = &edev->dev_info.common; + int rc; + + DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param); + + rc = edev->ops->iov->configure(edev->cdev, num_vfs_param); + + /* Enable/Disable Tx switching for PF */ + if ((rc == num_vfs_param) && netif_running(edev->ndev) && + qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { + struct qed_update_vport_params params; + + memset(¶ms, 0, sizeof(params)); + params.vport_id = 0; + params.update_tx_switching_flg = 1; + params.tx_switching_flg = num_vfs_param ? 
1 : 0; + edev->ops->vport_update(edev->cdev, ¶ms); + } + + return rc; +} +#endif + static struct pci_driver qede_pci_driver = { .name = "qede", .id_table = qede_pci_tbl, .probe = qede_probe, .remove = qede_remove, +#ifdef CONFIG_QED_SRIOV + .sriov_configure = qede_sriov_configure, +#endif }; +static void qede_force_mac(void *dev, u8 *mac) +{ + struct qede_dev *edev = dev; + + ether_addr_copy(edev->ndev->dev_addr, mac); + ether_addr_copy(edev->primary_mac, mac); +} + static struct qed_eth_cb_ops qede_ll_ops = { { .link_update = qede_link_update, }, + .force_mac = qede_force_mac, }; static int qede_netdev_event(struct notifier_block *this, unsigned long event, @@ -141,19 +224,10 @@ static int __init qede_init(void) { int ret; - u32 qed_ver; pr_notice("qede_init: %s\n", version); - qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH); - if (qed_ver != QEDE_ETH_INTERFACE_VERSION) { - pr_notice("Version mismatch [%08x != %08x]\n", - qed_ver, - QEDE_ETH_INTERFACE_VERSION); - return -EINVAL; - } - - qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION); + qed_ops = qed_get_eth_ops(); if (!qed_ops) { pr_notice("Failed to get qed ethtool operations\n"); return -EINVAL; @@ -319,6 +393,9 @@ static u32 qede_xmit_type(struct qede_dev *edev, (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) *ipv6_ext = 1; + if (skb->encapsulation) + rc |= XMIT_ENC; + if (skb_is_gso(skb)) rc |= XMIT_LSO; @@ -380,6 +457,16 @@ static int map_frag_to_bd(struct qede_dev *edev, return 0; } +static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt) +{ + if (is_encap_pkt) + return (skb_inner_transport_header(skb) + + inner_tcp_hdrlen(skb) - skb->data); + else + return (skb_transport_header(skb) + + tcp_hdrlen(skb) - skb->data); +} + /* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb, @@ -390,8 +477,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb, if (xmit_type & XMIT_LSO) { int hlen; - hlen = skb_transport_header(skb) + - tcp_hdrlen(skb) - skb->data; + hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC); /* linear payload would require its own BD */ if (skb_headlen(skb) > hlen) @@ -421,7 +507,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, u8 xmit_type; u16 idx; u16 hlen; - bool data_split; + bool data_split = false; /* Get tx-queue context and netdev index */ txq_index = skb_get_queue_mapping(skb); @@ -499,7 +585,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; - first_bd->data.bitfields |= cpu_to_le16(temp); + if (xmit_type & XMIT_ENC) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + } else { + /* In cases when OS doesn't indicate for inner offloads + * when packet is tunnelled, we need to override the HW + * tunnel configuration so that packets are treated as + * regular non tunnelled packets and no inner offloads + * are done by the hardware. 
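+ * The override is therefore applied only on this non-encapsulated
+ * (!XMIT_ENC) path; encapsulated packets get IP_CSUM set in
+ * bd_flags above instead.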
+ */ + first_bd->data.bitfields |= cpu_to_le16(temp); + } /* If the packet is IPv6 with extension header, indicate that * to FW and pass few params, since the device cracker doesn't @@ -515,10 +612,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, third_bd->data.lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); - first_bd->data.bd_flags.bitfields |= - 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; - hlen = skb_transport_header(skb) + - tcp_hdrlen(skb) - skb->data; + if (unlikely(xmit_type & XMIT_ENC)) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT; + hlen = qede_get_skb_hlen(skb, true); + } else { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + hlen = qede_get_skb_hlen(skb, false); + } /* @@@TBD - if will not be removed need to check */ third_bd->data.bitfields |= @@ -644,7 +746,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } -static int qede_txq_has_work(struct qede_tx_queue *txq) +int qede_txq_has_work(struct qede_tx_queue *txq) { u16 hw_bd_cons; @@ -727,7 +829,7 @@ static int qede_tx_int(struct qede_dev *edev, return 0; } -static bool qede_has_rx_work(struct qede_rx_queue *rxq) +bool qede_has_rx_work(struct qede_rx_queue *rxq) { u16 hw_comp_cons, sw_comp_cons; @@ -750,6 +852,12 @@ static bool qede_has_tx_work(struct qede_fastpath *fp) return false; } +static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) +{ + qed_chain_consume(&rxq->rx_bd_ring); + rxq->sw_rx_cons++; +} + /* This function reuses the buffer(from an offset) from * consumer index to producer index in the bd ring */ @@ -773,6 +881,21 @@ static inline void qede_reuse_page(struct qede_dev *edev, curr_cons->data = NULL; } +/* In case of allocation failures reuse buffers + * from consumer index to produce buffers for firmware + */ +void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, + struct qede_dev *edev, u8 count) +{ + struct sw_rx_data *curr_cons; + + for (; count > 0; count--) { + curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; + qede_reuse_page(edev, rxq, curr_cons); + qede_rx_bd_ring_consume(rxq); + } +} + static inline int qede_realloc_rx_buffer(struct qede_dev *edev, struct qede_rx_queue *rxq, struct sw_rx_data *curr_cons) @@ -781,8 +904,14 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev, curr_cons->page_offset += rxq->rx_buf_seg_size; if (curr_cons->page_offset == PAGE_SIZE) { - if (unlikely(qede_alloc_rx_buffer(edev, rxq))) + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) { + /* Since we failed to allocate new buffer + * current buffer can be used again. + */ + curr_cons->page_offset -= rxq->rx_buf_seg_size; + return -ENOMEM; + } dma_unmap_page(&edev->pdev->dev, curr_cons->mapping, PAGE_SIZE, DMA_FROM_DEVICE); @@ -791,7 +920,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev, * network stack to take the ownership of the page * which can be recycled multiple times by the driver. 
*/ - atomic_inc(&curr_cons->data->_count); + page_ref_inc(curr_cons->data); qede_reuse_page(edev, rxq, curr_cons); } @@ -852,6 +981,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag) if (csum_flag & QEDE_CSUM_UNNECESSARY) skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) + skb->csum_level = 1; } static inline void qede_skb_receive(struct qede_dev *edev, @@ -901,7 +1033,10 @@ static int qede_fill_frag_skb(struct qede_dev *edev, len_on_bd); if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) { - tpa_info->agg_state = QEDE_AGG_STATE_ERROR; + /* Incr page ref count to reuse on allocation failure + * so that it doesn't get freed while freeing SKB. + */ + page_ref_inc(current_bd->data); goto out; } @@ -915,6 +1050,8 @@ static int qede_fill_frag_skb(struct qede_dev *edev, return 0; out: + tpa_info->agg_state = QEDE_AGG_STATE_ERROR; + qede_recycle_rx_bd_ring(rxq, edev, 1); return -ENOMEM; } @@ -966,8 +1103,9 @@ static void qede_tpa_start(struct qede_dev *edev, tpa_info->skb = netdev_alloc_skb(edev->ndev, le16_to_cpu(cqe->len_on_first_bd)); if (unlikely(!tpa_info->skb)) { + DP_NOTICE(edev, "Failed to allocate SKB for gro\n"); tpa_info->agg_state = QEDE_AGG_STATE_ERROR; - return; + goto cons_buf; } skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); @@ -990,6 +1128,7 @@ static void qede_tpa_start(struct qede_dev *edev, /* This is needed in order to enable forwarding support */ qede_set_gro_params(edev, tpa_info->skb, cqe); +cons_buf: /* We still need to handle bd_len_list to consume buffers */ if (likely(cqe->ext_bd_len_list[0])) qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, le16_to_cpu(cqe->ext_bd_len_list[0])); @@ -1007,7 +1146,6 @@ static void qede_gro_ip_csum(struct sk_buff *skb) const struct iphdr *iph = ip_hdr(skb); struct tcphdr *th; - skb_set_network_header(skb, 0); skb_set_transport_header(skb, sizeof(struct iphdr)); th = tcp_hdr(skb); @@ -1022,7 +1160,6 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb) struct ipv6hdr *iph = ipv6_hdr(skb); struct tcphdr *th; - skb_set_network_header(skb, 0); skb_set_transport_header(skb, sizeof(struct ipv6hdr)); th = tcp_hdr(skb); @@ -1037,8 +1174,21 @@ static void qede_gro_receive(struct qede_dev *edev, struct sk_buff *skb, u16 vlan_tag) { + /* FW can send a single MTU sized packet from gro flow + * due to aggregation timeout/last segment etc. which + * is not expected to be a gro packet. If a skb has zero + * frags then simply push it in the stack as non gso skb. 
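+ * Zeroing gso_type/gso_size below is what downgrades the
+ * aggregation to a plain non-GSO skb before it is passed up.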
+ */ + if (unlikely(!skb->data_len)) { + skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->gso_size = 0; + goto send_skb; + } + #ifdef CONFIG_INET if (skb_shinfo(skb)->gso_size) { + skb_set_network_header(skb, 0); + switch (skb->protocol) { case htons(ETH_P_IP): qede_gro_ip_csum(skb); @@ -1053,6 +1203,8 @@ static void qede_gro_receive(struct qede_dev *edev, } } #endif + +send_skb: skb_record_rx_queue(skb, fp->rss_id); qede_skb_receive(edev, fp, skb, vlan_tag); } @@ -1141,13 +1293,47 @@ err: tpa_info->skb = NULL; } -static u8 qede_check_csum(u16 flag) +static bool qede_tunn_exist(u16 flag) +{ + return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK << + PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT)); +} + +static u8 qede_check_tunn_csum(u16 flag) +{ + u16 csum_flag = 0; + u8 tcsum = 0; + + if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT)) + csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT; + + if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { + csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; + tcsum = QEDE_TUNN_CSUM_UNNECESSARY; + } + + csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | + PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; + + if (csum_flag & flag) + return QEDE_CSUM_ERROR; + + return QEDE_CSUM_UNNECESSARY | tcsum; +} + +static u8 qede_check_notunn_csum(u16 flag) { u16 csum_flag = 0; u8 csum = 0; - if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << - PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) { + if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) { csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; csum = QEDE_CSUM_UNNECESSARY; @@ -1162,6 +1348,14 @@ static u8 qede_check_csum(u16 flag) return csum; } +static u8 qede_check_csum(u16 flag) +{ + if (!qede_tunn_exist(flag)) + return qede_check_notunn_csum(flag); + else + return qede_check_tunn_csum(flag); +} + static int qede_rx_int(struct qede_fastpath *fp, int budget) { struct qede_dev *edev = fp->edev; @@ -1244,17 +1438,17 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", sw_comp_cons, parse_flag); rxq->rx_hw_errors++; - qede_reuse_page(edev, rxq, sw_rx_data); - goto next_rx; + qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num); + goto next_cqe; } skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); if (unlikely(!skb)) { DP_NOTICE(edev, "Build_skb failed, dropping incoming packet\n"); - qede_reuse_page(edev, rxq, sw_rx_data); + qede_recycle_rx_bd_ring(rxq, edev, fp_cqe->bd_num); rxq->rx_alloc_errors++; - goto next_rx; + goto next_cqe; } /* Copy data into SKB */ @@ -1288,11 +1482,22 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) if (unlikely(qede_realloc_rx_buffer(edev, rxq, sw_rx_data))) { DP_ERR(edev, "Failed to allocate rx buffer\n"); + /* Incr page ref count to reuse on allocation + * failure so that it doesn't get freed while + * freeing SKB. 
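+ * qede_recycle_rx_bd_ring() below then returns the buffer from the
+ * consumer to the producer side of the bd ring.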
+ */ + + page_ref_inc(sw_rx_data->data); rxq->rx_alloc_errors++; + qede_recycle_rx_bd_ring(rxq, edev, + fp_cqe->bd_num); + dev_kfree_skb_any(skb); goto next_cqe; } } + qede_rx_bd_ring_consume(rxq); + if (fp_cqe->bd_num != 1) { u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len); u8 num_frags; @@ -1303,18 +1508,27 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) num_frags--) { u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : pkt_len; + if (unlikely(!cur_size)) { + DP_ERR(edev, + "Still got %d BDs for mapping jumbo, but length became 0\n", + num_frags); + qede_recycle_rx_bd_ring(rxq, edev, + num_frags); + dev_kfree_skb_any(skb); + goto next_cqe; + } - WARN_ONCE(!cur_size, - "Still got %d BDs for mapping jumbo, but length became 0\n", - num_frags); - - if (unlikely(qede_alloc_rx_buffer(edev, rxq))) + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) { + qede_recycle_rx_bd_ring(rxq, edev, + num_frags); + dev_kfree_skb_any(skb); goto next_cqe; + } - rxq->sw_rx_cons++; sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; - qed_chain_consume(&rxq->rx_bd_ring); + qede_rx_bd_ring_consume(rxq); + dma_unmap_page(&edev->pdev->dev, sw_rx_data->mapping, PAGE_SIZE, DMA_FROM_DEVICE); @@ -1330,7 +1544,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) pkt_len -= cur_size; } - if (pkt_len) + if (unlikely(pkt_len)) DP_ERR(edev, "Mapped all BDs of jumbo, but still have %d bytes\n", pkt_len); @@ -1349,10 +1563,6 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) skb_record_rx_queue(skb, fp->rss_id); qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); - - qed_chain_consume(&rxq->rx_bd_ring); -next_rx: - rxq->sw_rx_cons++; next_rx_only: rx_pkt++; @@ -1506,16 +1716,25 @@ void qede_fill_by_demand_stats(struct qede_dev *edev) edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes; edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets; - edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets; - edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets; - edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets; - edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets; - edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets; - edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets; - edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets; - edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets; - edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets; - edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets; + edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets; + edev->stats.rx_128_to_255_byte_packets = + stats.rx_128_to_255_byte_packets; + edev->stats.rx_256_to_511_byte_packets = + stats.rx_256_to_511_byte_packets; + edev->stats.rx_512_to_1023_byte_packets = + stats.rx_512_to_1023_byte_packets; + edev->stats.rx_1024_to_1518_byte_packets = + stats.rx_1024_to_1518_byte_packets; + edev->stats.rx_1519_to_1522_byte_packets = + stats.rx_1519_to_1522_byte_packets; + edev->stats.rx_1519_to_2047_byte_packets = + stats.rx_1519_to_2047_byte_packets; + edev->stats.rx_2048_to_4095_byte_packets = + stats.rx_2048_to_4095_byte_packets; + edev->stats.rx_4096_to_9216_byte_packets = + stats.rx_4096_to_9216_byte_packets; + edev->stats.rx_9217_to_16383_byte_packets = + stats.rx_9217_to_16383_byte_packets; edev->stats.rx_crc_errors = stats.rx_crc_errors; edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames; edev->stats.rx_pause_frames = 
stats.rx_pause_frames; @@ -1589,6 +1808,49 @@ static struct rtnl_link_stats64 *qede_get_stats64( return stats; } +#ifdef CONFIG_QED_SRIOV +static int qede_get_vf_config(struct net_device *dev, int vfidx, + struct ifla_vf_info *ivi) +{ + struct qede_dev *edev = netdev_priv(dev); + + if (!edev->ops) + return -EINVAL; + + return edev->ops->iov->get_config(edev->cdev, vfidx, ivi); +} + +static int qede_set_vf_rate(struct net_device *dev, int vfidx, + int min_tx_rate, int max_tx_rate) +{ + struct qede_dev *edev = netdev_priv(dev); + + return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate, + max_tx_rate); +} + +static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val) +{ + struct qede_dev *edev = netdev_priv(dev); + + if (!edev->ops) + return -EINVAL; + + return edev->ops->iov->set_spoof(edev->cdev, vfidx, val); +} + +static int qede_set_vf_link_state(struct net_device *dev, int vfidx, + int link_state) +{ + struct qede_dev *edev = netdev_priv(dev); + + if (!edev->ops) + return -EINVAL; + + return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state); +} +#endif + static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) { struct qed_update_vport_params params; @@ -1830,6 +2092,76 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) edev->accept_any_vlan = false; } +#ifdef CONFIG_QEDE_VXLAN +static void qede_add_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(port); + + if (edev->vxlan_dst_port) + return; + + edev->vxlan_dst_port = t_port; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port); + + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); +} + +static void qede_del_vxlan_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(port); + + if (t_port != edev->vxlan_dst_port) + return; + + edev->vxlan_dst_port = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port); + + set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); +} +#endif + +#ifdef CONFIG_QEDE_GENEVE +static void qede_add_geneve_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(port); + + if (edev->geneve_dst_port) + return; + + edev->geneve_dst_port = t_port; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); +} + +static void qede_del_geneve_port(struct net_device *dev, + sa_family_t sa_family, __be16 port) +{ + struct qede_dev *edev = netdev_priv(dev); + u16 t_port = ntohs(port); + + if (t_port != edev->geneve_dst_port) + return; + + edev->geneve_dst_port = 0; + + DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port); + set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags); + schedule_delayed_work(&edev->sp_task, 0); +} +#endif + static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@ -1838,9 +2170,27 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, +#ifdef CONFIG_QED_SRIOV + .ndo_set_vf_mac = qede_set_vf_mac, + .ndo_set_vf_vlan = qede_set_vf_vlan, +#endif .ndo_vlan_rx_add_vid = 
qede_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, .ndo_get_stats64 = qede_get_stats64, +#ifdef CONFIG_QED_SRIOV + .ndo_set_vf_link_state = qede_set_vf_link_state, + .ndo_set_vf_spoofchk = qede_set_vf_spoofchk, + .ndo_get_vf_config = qede_get_vf_config, + .ndo_set_vf_rate = qede_set_vf_rate, +#endif +#ifdef CONFIG_QEDE_VXLAN + .ndo_add_vxlan_port = qede_add_vxlan_port, + .ndo_del_vxlan_port = qede_del_vxlan_port, +#endif +#ifdef CONFIG_QEDE_GENEVE + .ndo_add_geneve_port = qede_add_geneve_port, + .ndo_del_geneve_port = qede_del_geneve_port, +#endif }; /* ------------------------------------------------------------------------- @@ -1875,8 +2225,6 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, edev->q_num_rx_buffers = NUM_RX_BDS_DEF; edev->q_num_tx_buffers = NUM_TX_BDS_DEF; - DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n"); - SET_NETDEV_DEV(ndev, &pdev->dev); memset(&edev->stats, 0, sizeof(edev->stats)); @@ -1913,6 +2261,14 @@ static void qede_init_ndev(struct qede_dev *edev) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + /* Encap features*/ + hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_TSO_ECN; + ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN | + NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM; + ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HIGHDMA; ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | @@ -2013,6 +2369,8 @@ static void qede_sp_task(struct work_struct *work) { struct qede_dev *edev = container_of(work, struct qede_dev, sp_task.work); + struct qed_dev *cdev = edev->cdev; + mutex_lock(&edev->qede_lock); if (edev->state == QEDE_STATE_OPEN) { @@ -2020,6 +2378,24 @@ static void qede_sp_task(struct work_struct *work) qede_config_rx_mode(edev->ndev); } + if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) { + struct qed_tunn_params tunn_params; + + memset(&tunn_params, 0, sizeof(tunn_params)); + tunn_params.update_vxlan_port = 1; + tunn_params.vxlan_port = edev->vxlan_dst_port; + qed_ops->tunn_config(cdev, &tunn_params); + } + + if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) { + struct qed_tunn_params tunn_params; + + memset(&tunn_params, 0, sizeof(tunn_params)); + tunn_params.update_geneve_port = 1; + tunn_params.geneve_port = edev->geneve_dst_port; + qed_ops->tunn_config(cdev, &tunn_params); + } + mutex_unlock(&edev->qede_lock); } @@ -2027,9 +2403,9 @@ static void qede_update_pf_params(struct qed_dev *cdev) { struct qed_pf_params pf_params; - /* 16 rx + 16 tx */ + /* 64 rx + 64 tx */ memset(&pf_params, 0, sizeof(struct qed_pf_params)); - pf_params.eth_pf_params.num_cons = 32; + pf_params.eth_pf_params.num_cons = 128; qed_ops->common->update_pf_params(cdev, &pf_params); } @@ -2038,8 +2414,9 @@ enum qede_probe_mode { }; static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, - enum qede_probe_mode mode) + bool is_vf, enum qede_probe_mode mode) { + struct qed_probe_params probe_params; struct qed_slowpath_params params; struct qed_dev_eth_info dev_info; struct qede_dev *edev; @@ -2049,8 +2426,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, if (unlikely(dp_level & QED_LEVEL_INFO)) pr_notice("Starting qede probe\n"); - cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH, - dp_module, dp_level); + memset(&probe_params, 0, sizeof(probe_params)); + 
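
The qede tunnel hunks above follow a defer-to-workqueue pattern: the .ndo_add_vxlan_port/.ndo_add_geneve_port callbacks never program the NIC directly; they record the port, set a flag bit and kick the slow-path task, and qede_sp_task() later applies the change through qed_ops->tunn_config() under qede_lock. A minimal sketch of that pattern, assuming hypothetical stand-in names (my_dev, MY_SP_PORT_CFG, my_apply_port) rather than qede's:

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>

struct my_dev {
	unsigned long sp_flags;		/* deferred-work request bits */
	u16 tunn_port;			/* last requested UDP port */
	struct delayed_work sp_task;
};

#define MY_SP_PORT_CFG	0

/* Hypothetical stand-in for the qed tunn_config() call; may sleep. */
static void my_apply_port(u16 port)
{
}

/* ndo-style callback: may run in atomic context, so only record the
 * request and kick the worker - no device access here.
 */
static void my_add_port(struct my_dev *md, __be16 port)
{
	md->tunn_port = ntohs(port);
	set_bit(MY_SP_PORT_CFG, &md->sp_flags);
	schedule_delayed_work(&md->sp_task, 0);
}

/* Worker: sleepable context where the device can really be programmed. */
static void my_sp_task(struct work_struct *work)
{
	struct my_dev *md = container_of(work, struct my_dev, sp_task.work);

	if (test_and_clear_bit(MY_SP_PORT_CFG, &md->sp_flags))
		my_apply_port(md->tunn_port);
}

Note that test_and_clear_bit() makes the handoff idempotent: several add/del calls arriving before the worker runs collapse into a single device update.
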
probe_params.protocol = QED_PROTOCOL_ETH; + probe_params.dp_module = dp_module; + probe_params.dp_level = dp_level; + probe_params.is_vf = is_vf; + cdev = qed_ops->common->probe(pdev, &probe_params); if (!cdev) { rc = -ENODEV; goto err0; @@ -2084,6 +2465,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, goto err2; } + if (is_vf) + edev->flags |= QEDE_FLAG_IS_VF; + qede_init_ndev(edev); rc = register_netdev(edev->ndev); @@ -2115,12 +2499,24 @@ err0: static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + bool is_vf = false; u32 dp_module = 0; u8 dp_level = 0; + switch ((enum qede_pci_private)id->driver_data) { + case QEDE_PRIVATE_VF: + if (debug & QED_LOG_VERBOSE_MASK) + dev_err(&pdev->dev, "Probing a VF\n"); + is_vf = true; + break; + default: + if (debug & QED_LOG_VERBOSE_MASK) + dev_err(&pdev->dev, "Probing a PF\n"); + } + qede_config_debug(debug, &dp_module, &dp_level); - return __qede_probe(pdev, dp_module, dp_level, + return __qede_probe(pdev, dp_module, dp_level, is_vf, QEDE_PROBE_NORMAL); } @@ -2257,7 +2653,7 @@ static void qede_free_sge_mem(struct qede_dev *edev, struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; struct sw_rx_data *replace_buf = &tpa_info->replace_buf; - if (replace_buf) { + if (replace_buf->data) { dma_unmap_page(&edev->pdev->dev, dma_unmap_addr(replace_buf, mapping), PAGE_SIZE, DMA_FROM_DEVICE); @@ -2377,7 +2773,7 @@ err: static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { - int i, rc, size, num_allocated; + int i, rc, size; rxq->num_rx_buffers = edev->q_num_rx_buffers; @@ -2394,6 +2790,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); if (!rxq->sw_rx_ring) { DP_ERR(edev, "Rx buffers ring allocation failed\n"); + rc = -ENOMEM; goto err; } @@ -2421,26 +2818,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, /* Allocate buffers for the Rx ring */ for (i = 0; i < rxq->num_rx_buffers; i++) { rc = qede_alloc_rx_buffer(edev, rxq); - if (rc) - break; - } - num_allocated = i; - if (!num_allocated) { - DP_ERR(edev, "Rx buffers allocation failed\n"); - goto err; - } else if (num_allocated < rxq->num_rx_buffers) { - DP_NOTICE(edev, - "Allocated less buffers than desired (%d allocated)\n", - num_allocated); + if (rc) { + DP_ERR(edev, + "Rx buffers allocation failed at index %d\n", i); + goto err; + } } - qede_alloc_sge_mem(edev, rxq); - - return 0; - + rc = qede_alloc_sge_mem(edev, rxq); err: - qede_free_mem_rxq(edev, rxq); - return -ENOMEM; + return rc; } static void qede_free_mem_txq(struct qede_dev *edev, @@ -2523,10 +2910,8 @@ static int qede_alloc_mem_fp(struct qede_dev *edev, } return 0; - err: - qede_free_mem_fp(edev, fp); - return -ENOMEM; + return rc; } static void qede_free_mem_load(struct qede_dev *edev) @@ -2549,22 +2934,13 @@ static int qede_alloc_mem_load(struct qede_dev *edev) struct qede_fastpath *fp = &edev->fp_array[rss_id]; rc = qede_alloc_mem_fp(edev, fp); - if (rc) - break; - } - - if (rss_id != QEDE_RSS_CNT(edev)) { - /* Failed allocating memory for all the queues */ - if (!rss_id) { + if (rc) { DP_ERR(edev, - "Failed to allocate memory for the leading queue\n"); - rc = -ENOMEM; - } else { - DP_NOTICE(edev, - "Failed to allocate memory for all of RSS queues\n Desired: %d queues, allocated: %d queues\n", - QEDE_RSS_CNT(edev), rss_id); + "Failed to allocate memory for fastpath - rss id = %d\n", + rss_id); + qede_free_mem_load(edev); + return rc; } - edev->num_rss = rss_id; } return 0; @@ -2835,10 +3211,11 @@ 
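
A pattern note on the qede_alloc_mem_rxq()/qede_alloc_mem_fp()/qede_alloc_mem_load() hunks above: the old code tolerated partial RX-buffer allocation and quietly scaled the queue count down on failure; the new code fails hard at the first error and unwinds through a single path. A sketch of the resulting shape, with hypothetical my_* helpers standing in for the qede ones:

#include <linux/printk.h>

struct my_ring { int nfilled; };		/* hypothetical ring state */

static int my_alloc_one(struct my_ring *ring)	/* stand-in allocator */
{
	ring->nfilled++;
	return 0;
}

static void my_free_all(struct my_ring *ring)	/* stand-in teardown */
{
	ring->nfilled = 0;
}

static int my_alloc_ring(struct my_ring *ring, int nbufs)
{
	int i, rc = 0;

	for (i = 0; i < nbufs; i++) {
		rc = my_alloc_one(ring);
		if (rc) {
			/* Fail fast and say where, instead of limping on
			 * with a partially filled ring.
			 */
			pr_err("buffer allocation failed at index %d\n", i);
			goto err;
		}
	}
	return 0;
err:
	my_free_all(ring);	/* one unwind path tears down what was built */
	return rc;
}
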
static int qede_start_queues(struct qede_dev *edev) int rc, tc, i; int vlan_removal_en = 1; struct qed_dev *cdev = edev->cdev; - struct qed_update_vport_rss_params *rss_params = &edev->rss_params; struct qed_update_vport_params vport_update_params; struct qed_queue_start_common_params q_params; + struct qed_dev_info *qed_info = &edev->dev_info.common; struct qed_start_vport_params start = {0}; + bool reset_rss_indir = false; if (!edev->num_rss) { DP_ERR(edev, @@ -2930,19 +3307,59 @@ static int qede_start_queues(struct qede_dev *edev) vport_update_params.update_vport_active_flg = 1; vport_update_params.vport_active_flg = 1; + if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && + qed_info->tx_switching) { + vport_update_params.update_tx_switching_flg = 1; + vport_update_params.tx_switching_flg = 1; + } + /* Fill struct with RSS params */ if (QEDE_RSS_CNT(edev) > 1) { vport_update_params.update_rss_flg = 1; - for (i = 0; i < 128; i++) - rss_params->rss_ind_table[i] = - ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev)); - netdev_rss_key_fill(rss_params->rss_key, - sizeof(rss_params->rss_key)); + + /* Need to validate current RSS config uses valid entries */ + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + if (edev->rss_params.rss_ind_table[i] >= + edev->num_rss) { + reset_rss_indir = true; + break; + } + } + + if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || + reset_rss_indir) { + u16 val; + + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + u16 indir_val; + + val = QEDE_RSS_CNT(edev); + indir_val = ethtool_rxfh_indir_default(i, val); + edev->rss_params.rss_ind_table[i] = indir_val; + } + edev->rss_params_inited |= QEDE_RSS_INDIR_INITED; + } + + if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) { + netdev_rss_key_fill(edev->rss_params.rss_key, + sizeof(edev->rss_params.rss_key)); + edev->rss_params_inited |= QEDE_RSS_KEY_INITED; + } + + if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) { + edev->rss_params.rss_caps = QED_RSS_IPV4 | + QED_RSS_IPV6 | + QED_RSS_IPV4_TCP | + QED_RSS_IPV6_TCP; + edev->rss_params_inited |= QEDE_RSS_CAPS_INITED; + } + + memcpy(&vport_update_params.rss_params, &edev->rss_params, + sizeof(vport_update_params.rss_params)); } else { - memset(rss_params, 0, sizeof(*rss_params)); + memset(&vport_update_params.rss_params, 0, + sizeof(vport_update_params.rss_params)); } - memcpy(&vport_update_params.rss_params, rss_params, - sizeof(*rss_params)); rc = edev->ops->vport_update(cdev, &vport_update_params); if (rc) { @@ -3124,12 +3541,24 @@ void qede_reload(struct qede_dev *edev, static int qede_open(struct net_device *ndev) { struct qede_dev *edev = netdev_priv(ndev); + int rc; netif_carrier_off(ndev); edev->ops->common->set_power_state(edev->cdev, PCI_D0); - return qede_load(edev, QEDE_LOAD_NORMAL); + rc = qede_load(edev, QEDE_LOAD_NORMAL); + + if (rc) + return rc; + +#ifdef CONFIG_QEDE_VXLAN + vxlan_get_rx_port(ndev); +#endif +#ifdef CONFIG_QEDE_GENEVE + geneve_get_rx_port(ndev); +#endif + return 0; } static int qede_close(struct net_device *ndev) @@ -3180,6 +3609,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p) return -EFAULT; } + if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) { + DP_NOTICE(edev, "qed prevents setting MAC\n"); + return -EINVAL; + } + ether_addr_copy(ndev->dev_addr, addr->sa_data); if (!netif_running(ndev)) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 55007f1e6bbc..caf6ddb7ea76 100644 --- 
a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -37,8 +37,8 @@ #define _QLCNIC_LINUX_MAJOR 5 #define _QLCNIC_LINUX_MINOR 3 -#define _QLCNIC_LINUX_SUBVERSION 63 -#define QLCNIC_LINUX_VERSIONID "5.3.63" +#define _QLCNIC_LINUX_SUBVERSION 64 +#define QLCNIC_LINUX_VERSIONID "5.3.64" #define QLCNIC_DRV_IDC_VER 0x01 #define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 1205f6f9c941..1c29105b6c36 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -3952,8 +3952,14 @@ static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev, static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev) { - return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT : - PCI_ERS_RESULT_RECOVERED; + pci_ers_result_t res; + + rtnl_lock(); + res = qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT : + PCI_ERS_RESULT_RECOVERED; + rtnl_unlock(); + + return res; } static void qlcnic_82xx_io_resume(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index cda9e604a95f..0844b7c75767 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1417,6 +1417,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump; struct pci_dev *pdev = adapter->pdev; bool extended = false; + int ret; prev_version = adapter->fw_version; current_version = qlcnic_83xx_get_fw_version(adapter); @@ -1427,8 +1428,11 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter) if (qlcnic_83xx_md_check_extended_dump_capability(adapter)) extended = !qlcnic_83xx_extend_md_capab(adapter); - if (!qlcnic_fw_cmd_get_minidump_temp(adapter)) - dev_info(&pdev->dev, "Supports FW dump capability\n"); + ret = qlcnic_fw_cmd_get_minidump_temp(adapter); + if (ret) + return; + + dev_info(&pdev->dev, "Supports FW dump capability\n"); /* Once we have minidump template with extended iSCSI dump * capability, update the minidump capture mask to 0x1f as diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index b28e73ea2c25..83d72106471c 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4687,7 +4687,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, /* * Set up the operating parameters. 
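
The qlcnic_82xx_io_slot_reset() hunk above is a small but instructive fix: the PCI error-recovery callback now takes the RTNL around the re-attach, since the attach path manipulates netdev state that is normally protected by that lock. The shape of the fix, sketched with a hypothetical my_attach() standing in for qlcnic_attach_func():

#include <linux/pci.h>
#include <linux/rtnetlink.h>

static int my_attach(struct pci_dev *pdev)	/* stand-in re-attach */
{
	return 0;
}

static pci_ers_result_t my_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t res;

	rtnl_lock();	/* attach touches RTNL-protected netdev state */
	res = my_attach(pdev) ? PCI_ERS_RESULT_DISCONNECT
			      : PCI_ERS_RESULT_RECOVERED;
	rtnl_unlock();

	return res;
}
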
*/ - qdev->workqueue = create_singlethread_workqueue(ndev->name); + qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM); INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 1ef03939d25f..6e2add979471 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -719,7 +719,7 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev) qca->stats.ring_full++; } - dev->trans_start = jiffies; + netif_trans_update(dev); if (qca->spi_thread && qca->spi_thread->state != TASK_RUNNING) @@ -734,7 +734,7 @@ qcaspi_netdev_tx_timeout(struct net_device *dev) struct qcaspi *qca = netdev_priv(dev); netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", - jiffies, jiffies - dev->trans_start); + jiffies, jiffies - dev_trans_start(dev)); qca->net_dev->stats.tx_errors++; /* Trigger tx queue flush and QCA7000 reset */ qca->sync = QCASPI_SYNC_UNKNOWN; diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index d77d60ea8202..5cb96785fb63 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -544,7 +544,7 @@ static void tx_timeout(struct net_device *dev) dev->stats.tx_errors++; /* Try to restart the adapter. */ hardware_init(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); dev->stats.tx_errors++; } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 94f08f1e841c..0e62d74b09b3 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -345,7 +345,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); static int rx_buf_sz = 16383; -static int use_dac; +static int use_dac = -1; static struct { u32 msg_enable; } debug = { -1 }; @@ -8224,20 +8224,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_mwi_2; } - tp->cp_cmd = 0; - - if ((sizeof(dma_addr_t) > 4) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { - tp->cp_cmd |= PCIDAC; - dev->features |= NETIF_F_HIGHDMA; - } else { - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc < 0) { - netif_err(tp, probe, dev, "DMA configuration failed\n"); - goto err_out_free_res_3; - } - } - /* ioremap MMIO region */ ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); if (!ioaddr) { @@ -8253,6 +8239,25 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Identify chip attached to board */ rtl8169_get_mac_version(tp, dev, cfg->default_ver); + tp->cp_cmd = 0; + + if ((sizeof(dma_addr_t) > 4) && + (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) && + tp->mac_version >= RTL_GIGA_MAC_VER_18)) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + + /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */ + if (!pci_is_pcie(pdev)) + tp->cp_cmd |= PCIDAC; + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + netif_err(tp, probe, dev, "DMA configuration failed\n"); + goto err_out_unmap_4; + } + } + rtl_init_rxcfg(tp); rtl_irq_disable(tp); @@ -8412,12 +8417,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 
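
For the r8169 hunk above: use_dac gains an auto mode (-1) and the DMA-mask setup moves after rtl8169_get_mac_version(), so the 64-bit decision can consult pci_is_pcie() and the detected chip before committing (the error-label renumbering is fallout from that reordering). The usual try-64-bit-then-fall-back shape, sketched here with the current dma_set_mask_and_coherent() API rather than the pci_set_dma_mask() calls the hunk itself uses:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

static int my_setup_dma(struct pci_dev *pdev, struct net_device *dev,
			bool want_64bit)
{
	/* Only try 64-bit when dma_addr_t can hold such an address at all */
	if (sizeof(dma_addr_t) > 4 && want_64bit &&
	    !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev->features |= NETIF_F_HIGHDMA;
		return 0;
	}

	/* Fall back to 32 bits; if even that fails, abort the probe */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}
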
&tp->counters_phys_addr, GFP_KERNEL); if (!tp->counters) { rc = -ENOMEM; - goto err_out_msi_4; + goto err_out_msi_5; } rc = register_netdev(dev); if (rc < 0) - goto err_out_cnt_5; + goto err_out_cnt_6; pci_set_drvdata(pdev, dev); @@ -8451,12 +8456,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) out: return rc; -err_out_cnt_5: +err_out_cnt_6: dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters, tp->counters_phys_addr); -err_out_msi_4: +err_out_msi_5: netif_napi_del(&tp->napi); rtl_disable_msi(pdev, tp); +err_out_unmap_4: iounmap(ioaddr); err_out_free_res_3: pci_release_regions(pdev); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index b2160d1b9c71..4e5d5e953e15 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -157,6 +157,7 @@ enum ravb_reg { TIC = 0x0378, TIS = 0x037C, ISS = 0x0380, + CIE = 0x0384, /* R-Car Gen3 only */ GCCR = 0x0390, GMTT = 0x0394, GPTC = 0x0398, @@ -170,6 +171,15 @@ enum ravb_reg { GCT0 = 0x03B8, GCT1 = 0x03BC, GCT2 = 0x03C0, + GIE = 0x03CC, /* R-Car Gen3 only */ + GID = 0x03D0, /* R-Car Gen3 only */ + DIL = 0x0440, /* R-Car Gen3 only */ + RIE0 = 0x0460, /* R-Car Gen3 only */ + RID0 = 0x0464, /* R-Car Gen3 only */ + RIE2 = 0x0470, /* R-Car Gen3 only */ + RID2 = 0x0474, /* R-Car Gen3 only */ + TIE = 0x0478, /* R-Car Gen3 only */ + TID = 0x047c, /* R-Car Gen3 only */ /* E-MAC registers */ ECMR = 0x0500, @@ -556,6 +566,16 @@ enum ISS_BIT { ISS_DPS15 = 0x80000000, }; +/* CIE (R-Car Gen3 only) */ +enum CIE_BIT { + CIE_CRIE = 0x00000001, + CIE_CTIE = 0x00000100, + CIE_RQFM = 0x00010000, + CIE_CL0M = 0x00020000, + CIE_RFWL = 0x00040000, + CIE_RFFL = 0x00080000, +}; + /* GCCR */ enum GCCR_BIT { GCCR_TCR = 0x00000003, @@ -592,6 +612,188 @@ enum GIS_BIT { GIS_PTMF = 0x00000004, }; +/* GIE (R-Car Gen3 only) */ +enum GIE_BIT { + GIE_PTCS = 0x00000001, + GIE_PTOS = 0x00000002, + GIE_PTMS0 = 0x00000004, + GIE_PTMS1 = 0x00000008, + GIE_PTMS2 = 0x00000010, + GIE_PTMS3 = 0x00000020, + GIE_PTMS4 = 0x00000040, + GIE_PTMS5 = 0x00000080, + GIE_PTMS6 = 0x00000100, + GIE_PTMS7 = 0x00000200, + GIE_ATCS0 = 0x00010000, + GIE_ATCS1 = 0x00020000, + GIE_ATCS2 = 0x00040000, + GIE_ATCS3 = 0x00080000, + GIE_ATCS4 = 0x00100000, + GIE_ATCS5 = 0x00200000, + GIE_ATCS6 = 0x00400000, + GIE_ATCS7 = 0x00800000, + GIE_ATCS8 = 0x01000000, + GIE_ATCS9 = 0x02000000, + GIE_ATCS10 = 0x04000000, + GIE_ATCS11 = 0x08000000, + GIE_ATCS12 = 0x10000000, + GIE_ATCS13 = 0x20000000, + GIE_ATCS14 = 0x40000000, + GIE_ATCS15 = 0x80000000, +}; + +/* GID (R-Car Gen3 only) */ +enum GID_BIT { + GID_PTCD = 0x00000001, + GID_PTOD = 0x00000002, + GID_PTMD0 = 0x00000004, + GID_PTMD1 = 0x00000008, + GID_PTMD2 = 0x00000010, + GID_PTMD3 = 0x00000020, + GID_PTMD4 = 0x00000040, + GID_PTMD5 = 0x00000080, + GID_PTMD6 = 0x00000100, + GID_PTMD7 = 0x00000200, + GID_ATCD0 = 0x00010000, + GID_ATCD1 = 0x00020000, + GID_ATCD2 = 0x00040000, + GID_ATCD3 = 0x00080000, + GID_ATCD4 = 0x00100000, + GID_ATCD5 = 0x00200000, + GID_ATCD6 = 0x00400000, + GID_ATCD7 = 0x00800000, + GID_ATCD8 = 0x01000000, + GID_ATCD9 = 0x02000000, + GID_ATCD10 = 0x04000000, + GID_ATCD11 = 0x08000000, + GID_ATCD12 = 0x10000000, + GID_ATCD13 = 0x20000000, + GID_ATCD14 = 0x40000000, + GID_ATCD15 = 0x80000000, +}; + +/* RIE0 (R-Car Gen3 only) */ +enum RIE0_BIT { + RIE0_FRS0 = 0x00000001, + RIE0_FRS1 = 0x00000002, + RIE0_FRS2 = 0x00000004, + RIE0_FRS3 = 0x00000008, + RIE0_FRS4 = 0x00000010, + RIE0_FRS5 = 0x00000020, + RIE0_FRS6 = 0x00000040, + 
RIE0_FRS7 = 0x00000080, + RIE0_FRS8 = 0x00000100, + RIE0_FRS9 = 0x00000200, + RIE0_FRS10 = 0x00000400, + RIE0_FRS11 = 0x00000800, + RIE0_FRS12 = 0x00001000, + RIE0_FRS13 = 0x00002000, + RIE0_FRS14 = 0x00004000, + RIE0_FRS15 = 0x00008000, + RIE0_FRS16 = 0x00010000, + RIE0_FRS17 = 0x00020000, +}; + +/* RID0 (R-Car Gen3 only) */ +enum RID0_BIT { + RID0_FRD0 = 0x00000001, + RID0_FRD1 = 0x00000002, + RID0_FRD2 = 0x00000004, + RID0_FRD3 = 0x00000008, + RID0_FRD4 = 0x00000010, + RID0_FRD5 = 0x00000020, + RID0_FRD6 = 0x00000040, + RID0_FRD7 = 0x00000080, + RID0_FRD8 = 0x00000100, + RID0_FRD9 = 0x00000200, + RID0_FRD10 = 0x00000400, + RID0_FRD11 = 0x00000800, + RID0_FRD12 = 0x00001000, + RID0_FRD13 = 0x00002000, + RID0_FRD14 = 0x00004000, + RID0_FRD15 = 0x00008000, + RID0_FRD16 = 0x00010000, + RID0_FRD17 = 0x00020000, +}; + +/* RIE2 (R-Car Gen3 only) */ +enum RIE2_BIT { + RIE2_QFS0 = 0x00000001, + RIE2_QFS1 = 0x00000002, + RIE2_QFS2 = 0x00000004, + RIE2_QFS3 = 0x00000008, + RIE2_QFS4 = 0x00000010, + RIE2_QFS5 = 0x00000020, + RIE2_QFS6 = 0x00000040, + RIE2_QFS7 = 0x00000080, + RIE2_QFS8 = 0x00000100, + RIE2_QFS9 = 0x00000200, + RIE2_QFS10 = 0x00000400, + RIE2_QFS11 = 0x00000800, + RIE2_QFS12 = 0x00001000, + RIE2_QFS13 = 0x00002000, + RIE2_QFS14 = 0x00004000, + RIE2_QFS15 = 0x00008000, + RIE2_QFS16 = 0x00010000, + RIE2_QFS17 = 0x00020000, + RIE2_RFFS = 0x80000000, +}; + +/* RID2 (R-Car Gen3 only) */ +enum RID2_BIT { + RID2_QFD0 = 0x00000001, + RID2_QFD1 = 0x00000002, + RID2_QFD2 = 0x00000004, + RID2_QFD3 = 0x00000008, + RID2_QFD4 = 0x00000010, + RID2_QFD5 = 0x00000020, + RID2_QFD6 = 0x00000040, + RID2_QFD7 = 0x00000080, + RID2_QFD8 = 0x00000100, + RID2_QFD9 = 0x00000200, + RID2_QFD10 = 0x00000400, + RID2_QFD11 = 0x00000800, + RID2_QFD12 = 0x00001000, + RID2_QFD13 = 0x00002000, + RID2_QFD14 = 0x00004000, + RID2_QFD15 = 0x00008000, + RID2_QFD16 = 0x00010000, + RID2_QFD17 = 0x00020000, + RID2_RFFD = 0x80000000, +}; + +/* TIE (R-Car Gen3 only) */ +enum TIE_BIT { + TIE_FTS0 = 0x00000001, + TIE_FTS1 = 0x00000002, + TIE_FTS2 = 0x00000004, + TIE_FTS3 = 0x00000008, + TIE_TFUS = 0x00000100, + TIE_TFWS = 0x00000200, + TIE_MFUS = 0x00000400, + TIE_MFWS = 0x00000800, + TIE_TDPS0 = 0x00010000, + TIE_TDPS1 = 0x00020000, + TIE_TDPS2 = 0x00040000, + TIE_TDPS3 = 0x00080000, +}; + +/* TID (R-Car Gen3 only) */ +enum TID_BIT { + TID_FTD0 = 0x00000001, + TID_FTD1 = 0x00000002, + TID_FTD2 = 0x00000004, + TID_FTD3 = 0x00000008, + TID_TFUD = 0x00000100, + TID_TFWD = 0x00000200, + TID_MFUD = 0x00000400, + TID_MFWD = 0x00000800, + TID_TDPD0 = 0x00010000, + TID_TDPD1 = 0x00020000, + TID_TDPD2 = 0x00040000, + TID_TDPD3 = 0x00080000, +}; + /* ECMR */ enum ECMR_BIT { ECMR_PRM = 0x00000001, @@ -817,6 +1019,8 @@ struct ravb_private { int duplex; int emac_irq; enum ravb_chip_id chip_id; + int rx_irqs[NUM_RX_QUEUE]; + int tx_irqs[NUM_TX_QUEUE]; unsigned no_avb_link:1; unsigned avb_link_active_low:1; @@ -841,7 +1045,7 @@ void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, u32 set); int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value); -irqreturn_t ravb_ptp_interrupt(struct net_device *ndev); +void ravb_ptp_interrupt(struct net_device *ndev); void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev); void ravb_ptp_stop(struct net_device *ndev); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 087e14a3fba7..867caf6e7a5a 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ 
-42,6 +42,16 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) +static const char *ravb_rx_irqs[NUM_RX_QUEUE] = { + "ch0", /* RAVB_BE */ + "ch1", /* RAVB_NC */ +}; + +static const char *ravb_tx_irqs[NUM_TX_QUEUE] = { + "ch18", /* RAVB_BE */ + "ch19", /* RAVB_NC */ +}; + void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, u32 set) { @@ -236,10 +246,9 @@ static void ravb_ring_format(struct net_device *ndev, int q) for (i = 0; i < priv->num_rx_ring[q]; i++) { /* RX descriptor */ rx_desc = &priv->rx_ring[q][i]; - /* The size of the buffer should be on 16-byte boundary. */ - rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); + rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ); dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, - ALIGN(PKT_BUF_SZ, 16), + PKT_BUF_SZ, DMA_FROM_DEVICE); /* We just set the data size to 0 for a failed mapping which * should prevent DMA from happening... @@ -365,6 +374,7 @@ static void ravb_emac_init(struct net_device *ndev) /* Device init function for Ethernet AVB */ static int ravb_dmac_init(struct net_device *ndev) { + struct ravb_private *priv = netdev_priv(ndev); int error; /* Set CONFIG mode */ @@ -401,6 +411,12 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_write(ndev, TCCR_TFEN, TCCR); /* Interrupt init: */ + if (priv->chip_id == RCAR_GEN3) { + /* Clear DIL.DPLx */ + ravb_write(ndev, 0, DIL); + /* Set queue specific interrupt */ + ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE); + } /* Frame receive */ ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); /* Disable FIFO full warning */ @@ -541,7 +557,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) skb = priv->rx_skb[q][entry]; priv->rx_skb[q][entry] = NULL; dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), - ALIGN(PKT_BUF_SZ, 16), + PKT_BUF_SZ, DMA_FROM_DEVICE); get_ts &= (q == RAVB_NC) ? RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : @@ -571,8 +587,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; desc = &priv->rx_ring[q][entry]; - /* The size of the buffer should be on 16-byte boundary. 
*/ - desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); + desc->ds_cc = cpu_to_le16(PKT_BUF_SZ); if (!priv->rx_skb[q][entry]) { skb = netdev_alloc_skb(ndev, @@ -643,7 +658,7 @@ static int ravb_stop_dma(struct net_device *ndev) } /* E-MAC interrupt handler */ -static void ravb_emac_interrupt(struct net_device *ndev) +static void ravb_emac_interrupt_unlocked(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); u32 ecsr, psr; @@ -669,6 +684,18 @@ static void ravb_emac_interrupt(struct net_device *ndev) } } +static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + + spin_lock(&priv->lock); + ravb_emac_interrupt_unlocked(ndev); + mmiowb(); + spin_unlock(&priv->lock); + return IRQ_HANDLED; +} + /* Error interrupt handler */ static void ravb_error_interrupt(struct net_device *ndev) { @@ -695,6 +722,50 @@ static void ravb_error_interrupt(struct net_device *ndev) } } +static bool ravb_queue_interrupt(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 ris0 = ravb_read(ndev, RIS0); + u32 ric0 = ravb_read(ndev, RIC0); + u32 tis = ravb_read(ndev, TIS); + u32 tic = ravb_read(ndev, TIC); + + if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) { + if (napi_schedule_prep(&priv->napi[q])) { + /* Mask RX and TX interrupts */ + if (priv->chip_id == RCAR_GEN2) { + ravb_write(ndev, ric0 & ~BIT(q), RIC0); + ravb_write(ndev, tic & ~BIT(q), TIC); + } else { + ravb_write(ndev, BIT(q), RID0); + ravb_write(ndev, BIT(q), TID); + } + __napi_schedule(&priv->napi[q]); + } else { + netdev_warn(ndev, + "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n", + ris0, ric0); + netdev_warn(ndev, + " tx status 0x%08x, tx mask 0x%08x.\n", + tis, tic); + } + return true; + } + return false; +} + +static bool ravb_timestamp_interrupt(struct net_device *ndev) +{ + u32 tis = ravb_read(ndev, TIS); + + if (tis & TIS_TFUF) { + ravb_write(ndev, ~TIS_TFUF, TIS); + ravb_get_tx_tstamp(ndev); + return true; + } + return false; +} + static irqreturn_t ravb_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; @@ -708,46 +779,22 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) /* Received and transmitted interrupts */ if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) { - u32 ris0 = ravb_read(ndev, RIS0); - u32 ric0 = ravb_read(ndev, RIC0); - u32 tis = ravb_read(ndev, TIS); - u32 tic = ravb_read(ndev, TIC); int q; /* Timestamp updated */ - if (tis & TIS_TFUF) { - ravb_write(ndev, ~TIS_TFUF, TIS); - ravb_get_tx_tstamp(ndev); + if (ravb_timestamp_interrupt(ndev)) result = IRQ_HANDLED; - } /* Network control and best effort queue RX/TX */ for (q = RAVB_NC; q >= RAVB_BE; q--) { - if (((ris0 & ric0) & BIT(q)) || - ((tis & tic) & BIT(q))) { - if (napi_schedule_prep(&priv->napi[q])) { - /* Mask RX and TX interrupts */ - ric0 &= ~BIT(q); - tic &= ~BIT(q); - ravb_write(ndev, ric0, RIC0); - ravb_write(ndev, tic, TIC); - __napi_schedule(&priv->napi[q]); - } else { - netdev_warn(ndev, - "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n", - ris0, ric0); - netdev_warn(ndev, - " tx status 0x%08x, tx mask 0x%08x.\n", - tis, tic); - } + if (ravb_queue_interrupt(ndev, q)) result = IRQ_HANDLED; - } } } /* E-MAC status summary */ if (iss & ISS_MS) { - ravb_emac_interrupt(ndev); + ravb_emac_interrupt_unlocked(ndev); result = IRQ_HANDLED; } @@ -757,14 +804,77 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) result = IRQ_HANDLED; } - if ((iss & ISS_CGIS) && 
ravb_ptp_interrupt(ndev) == IRQ_HANDLED) + /* gPTP interrupt status summary */ + if (iss & ISS_CGIS) { + ravb_ptp_interrupt(ndev); result = IRQ_HANDLED; + } mmiowb(); spin_unlock(&priv->lock); return result; } +/* Timestamp/Error/gPTP interrupt handler */ +static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + irqreturn_t result = IRQ_NONE; + u32 iss; + + spin_lock(&priv->lock); + /* Get interrupt status */ + iss = ravb_read(ndev, ISS); + + /* Timestamp updated */ + if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev)) + result = IRQ_HANDLED; + + /* Error status summary */ + if (iss & ISS_ES) { + ravb_error_interrupt(ndev); + result = IRQ_HANDLED; + } + + /* gPTP interrupt status summary */ + if (iss & ISS_CGIS) { + ravb_ptp_interrupt(ndev); + result = IRQ_HANDLED; + } + + mmiowb(); + spin_unlock(&priv->lock); + return result; +} + +static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + irqreturn_t result = IRQ_NONE; + + spin_lock(&priv->lock); + + /* Network control/Best effort queue RX/TX */ + if (ravb_queue_interrupt(ndev, q)) + result = IRQ_HANDLED; + + mmiowb(); + spin_unlock(&priv->lock); + return result; +} + +static irqreturn_t ravb_be_interrupt(int irq, void *dev_id) +{ + return ravb_dma_interrupt(irq, dev_id, RAVB_BE); +} + +static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id) +{ + return ravb_dma_interrupt(irq, dev_id, RAVB_NC); +} + static int ravb_poll(struct napi_struct *napi, int budget) { struct net_device *ndev = napi->dev; @@ -804,8 +914,13 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Re-enable RX/TX interrupts */ spin_lock_irqsave(&priv->lock, flags); - ravb_modify(ndev, RIC0, mask, mask); - ravb_modify(ndev, TIC, mask, mask); + if (priv->chip_id == RCAR_GEN2) { + ravb_modify(ndev, RIC0, mask, mask); + ravb_modify(ndev, TIC, mask, mask); + } else { + ravb_write(ndev, mask, RIE0); + ravb_write(ndev, mask, TIE); + } mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -1208,35 +1323,72 @@ static const struct ethtool_ops ravb_ethtool_ops = { .get_ts_info = ravb_get_ts_info, }; +static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, + struct net_device *ndev, struct device *dev, + const char *ch) +{ + char *name; + int error; + + name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch); + if (!name) + return -ENOMEM; + error = request_irq(irq, handler, 0, name, ndev); + if (error) + netdev_err(ndev, "cannot request IRQ %s\n", name); + + return error; +} + /* Network device open function for Ethernet AVB */ static int ravb_open(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; int error; napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]); - error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name, - ndev); - if (error) { - netdev_err(ndev, "cannot request IRQ\n"); - goto out_napi_off; - } - - if (priv->chip_id == RCAR_GEN3) { - error = request_irq(priv->emac_irq, ravb_interrupt, - IRQF_SHARED, ndev->name, ndev); + if (priv->chip_id == RCAR_GEN2) { + error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, + ndev->name, ndev); if (error) { netdev_err(ndev, "cannot request IRQ\n"); - goto out_free_irq; + goto out_napi_off; } + } else { + error = ravb_hook_irq(ndev->irq, 
ravb_multi_interrupt, ndev, + dev, "ch22:multi"); + if (error) + goto out_napi_off; + error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev, + dev, "ch24:emac"); + if (error) + goto out_free_irq; + error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt, + ndev, dev, "ch0:rx_be"); + if (error) + goto out_free_irq_emac; + error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt, + ndev, dev, "ch18:tx_be"); + if (error) + goto out_free_irq_be_rx; + error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt, + ndev, dev, "ch1:rx_nc"); + if (error) + goto out_free_irq_be_tx; + error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt, + ndev, dev, "ch19:tx_nc"); + if (error) + goto out_free_irq_nc_rx; } /* Device init */ error = ravb_dmac_init(ndev); if (error) - goto out_free_irq2; + goto out_free_irq_nc_tx; ravb_emac_init(ndev); /* Initialise PTP Clock driver */ @@ -1256,9 +1408,18 @@ out_ptp_stop: /* Stop PTP Clock driver */ if (priv->chip_id == RCAR_GEN2) ravb_ptp_stop(ndev); -out_free_irq2: - if (priv->chip_id == RCAR_GEN3) - free_irq(priv->emac_irq, ndev); +out_free_irq_nc_tx: + if (priv->chip_id == RCAR_GEN2) + goto out_free_irq; + free_irq(priv->tx_irqs[RAVB_NC], ndev); +out_free_irq_nc_rx: + free_irq(priv->rx_irqs[RAVB_NC], ndev); +out_free_irq_be_tx: + free_irq(priv->tx_irqs[RAVB_BE], ndev); +out_free_irq_be_rx: + free_irq(priv->rx_irqs[RAVB_BE], ndev); +out_free_irq_emac: + free_irq(priv->emac_irq, ndev); out_free_irq: free_irq(ndev->irq, ndev); out_napi_off: @@ -1506,6 +1667,13 @@ static int ravb_close(struct net_device *ndev) priv->phydev = NULL; } + if (priv->chip_id != RCAR_GEN2) { + free_irq(priv->tx_irqs[RAVB_NC], ndev); + free_irq(priv->rx_irqs[RAVB_NC], ndev); + free_irq(priv->tx_irqs[RAVB_BE], ndev); + free_irq(priv->rx_irqs[RAVB_BE], ndev); + free_irq(priv->emac_irq, ndev); + } free_irq(ndev->irq, ndev); napi_disable(&priv->napi[RAVB_NC]); @@ -1691,6 +1859,9 @@ static int ravb_set_gti(struct net_device *ndev) rate = clk_get_rate(clk); clk_put(clk); + if (!rate) + return -EINVAL; + inc = 1000000000ULL << 20; do_div(inc, rate); @@ -1713,6 +1884,7 @@ static int ravb_probe(struct platform_device *pdev) struct net_device *ndev; int error, irq, q; struct resource *res; + int i; if (!np) { dev_err(&pdev->dev, @@ -1782,6 +1954,22 @@ static int ravb_probe(struct platform_device *pdev) goto out_release; } priv->emac_irq = irq; + for (i = 0; i < NUM_RX_QUEUE; i++) { + irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->rx_irqs[i] = irq; + } + for (i = 0; i < NUM_TX_QUEUE; i++) { + irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->tx_irqs[i] = irq; + } } priv->chip_id = chip_id; diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 57992ccc4657..eede70ec37f8 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -194,7 +194,12 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, priv->ptp.extts[req->index] = on; spin_lock_irqsave(&priv->lock, flags); - ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0); + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTCE, on ? 
GIC_PTCE : 0); + else if (on) + ravb_write(ndev, GIE_PTCS, GIE); + else + ravb_write(ndev, GID_PTCD, GID); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -241,7 +246,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, error = ravb_ptp_update_compare(priv, (u32)start_ns); if (!error) { /* Unmask interrupt */ - ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); + else + ravb_write(ndev, GIE_PTMS0, GIE); } } else { spin_lock_irqsave(&priv->lock, flags); @@ -250,7 +258,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, perout->period = 0; /* Mask interrupt */ - ravb_modify(ndev, GIC, GIC_PTME, 0); + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTME, 0); + else + ravb_write(ndev, GID_PTMD0, GID); } mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -285,7 +296,7 @@ static const struct ptp_clock_info ravb_ptp_info = { }; /* Caller must hold the lock */ -irqreturn_t ravb_ptp_interrupt(struct net_device *ndev) +void ravb_ptp_interrupt(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); u32 gis = ravb_read(ndev, GIS); @@ -308,12 +319,7 @@ irqreturn_t ravb_ptp_interrupt(struct net_device *ndev) } } - if (gis) { - ravb_write(ndev, ~gis, GIS); - return IRQ_HANDLED; - } - - return IRQ_NONE; + ravb_write(ndev, ~gis, GIS); } void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 004e2d7560fd..04cd39f66cc9 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -482,7 +482,7 @@ static void sh_eth_chip_reset(struct net_device *ndev) struct sh_eth_private *mdp = netdev_priv(ndev); /* reset device */ - sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); + sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR); mdelay(1); } @@ -537,11 +537,7 @@ static struct sh_eth_cpu_data r7s72100_data = { static void sh_eth_chip_reset_r8a7740(struct net_device *ndev) { - struct sh_eth_private *mdp = netdev_priv(ndev); - - /* reset device */ - sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR); - mdelay(1); + sh_eth_chip_reset(ndev); sh_eth_select_mii(ndev); } @@ -725,8 +721,8 @@ static struct sh_eth_cpu_data sh7757_data = { #define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0) static void sh_eth_chip_reset_giga(struct net_device *ndev) { - int i; u32 mahr[2], malr[2]; + int i; /* save MAHR and MALR */ for (i = 0; i < 2; i++) { @@ -734,9 +730,7 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev) mahr[i] = ioread32((void *)GIGA_MAHR(i)); } - /* reset device */ - iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800)); - mdelay(1); + sh_eth_chip_reset(ndev); /* restore MAHR and MALR */ for (i = 0; i < 2; i++) { @@ -899,7 +893,7 @@ static int sh_eth_check_reset(struct net_device *ndev) int cnt = 100; while (cnt > 0) { - if (!(sh_eth_read(ndev, EDMR) & 0x3)) + if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER)) break; mdelay(1); cnt--; @@ -1229,7 +1223,7 @@ ring_free: return -ENOMEM; } -static int sh_eth_dev_init(struct net_device *ndev, bool start) +static int sh_eth_dev_init(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); int ret; @@ -1279,10 +1273,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) RFLR); sh_eth_modify(ndev, EESR, 0, 0); - if (start) { - mdp->irq_enabled = true; - sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); - } + mdp->irq_enabled = true; + sh_eth_write(ndev, 
mdp->cd->eesipr_value, EESIPR); /* PAUSE Prohibition */ sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | @@ -1295,8 +1287,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); /* E-MAC Interrupt Enable register */ - if (start) - sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); + sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); /* Set MAC address */ update_mac_address(ndev); @@ -1309,10 +1300,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) if (mdp->cd->tpauser) sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); - if (start) { - /* Setting the Rx mode will start the Rx process. */ - sh_eth_write(ndev, EDRRR_R, EDRRR); - } + /* Setting the Rx mode will start the Rx process. */ + sh_eth_write(ndev, EDRRR_R, EDRRR); return ret; } @@ -2194,17 +2183,13 @@ static int sh_eth_set_ringparam(struct net_device *ndev, __func__); return ret; } - ret = sh_eth_dev_init(ndev, false); + ret = sh_eth_dev_init(ndev); if (ret < 0) { netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__); return ret; } - mdp->irq_enabled = true; - sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); - /* Setting the Rx mode will start the Rx process. */ - sh_eth_write(ndev, EDRRR_R, EDRRR); netif_device_attach(ndev); } @@ -2250,7 +2235,7 @@ static int sh_eth_open(struct net_device *ndev) goto out_free_irq; /* device init */ - ret = sh_eth_dev_init(ndev, true); + ret = sh_eth_dev_init(ndev); if (ret) goto out_free_irq; @@ -2303,7 +2288,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev) } /* device init */ - sh_eth_dev_init(ndev, true); + sh_eth_dev_init(ndev); netif_start_queue(ndev); } diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 8fa4ef3a7fdd..c62380e34a1d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -394,7 +394,7 @@ enum RPADIR_BIT { #define DEFAULT_FDR_INIT 0x00000707 /* ARSTR */ -enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, }; +enum ARSTR_BIT { ARSTR_ARST = 0x00000001, }; /* TSU_FWEN0 */ enum TSU_FWEN0_BIT { diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 0e758bcb26b0..1ca796316173 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2727,7 +2727,7 @@ static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port, return ofdpa_port_fib_ipv4(ofdpa_port, trans, htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, 0); + fib4->fi, fib4->tb_id, 0); } static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port, @@ -2737,7 +2737,7 @@ static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port, return ofdpa_port_fib_ipv4(ofdpa_port, NULL, htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, + fib4->fi, fib4->tb_id, OFDPA_OP_FLAG_REMOVE); } diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index ca7336605748..c2bd5378ffda 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -572,7 +572,7 @@ static inline int sgiseeq_reset(struct net_device *dev) if (err) return err; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); return 0; @@ -648,7 +648,7 @@ static void timeout(struct net_device *dev) printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name); sgiseeq_reset(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + 
netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 98d33d462c6c..1681084cc96f 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1920,6 +1920,10 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context, return 0; } + if (nic_data->datapath_caps & + 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN) + return -EOPNOTSUPP; + MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, nic_data->vport_id); MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type); @@ -2923,9 +2927,16 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, bool replacing) { struct efx_ef10_nic_data *nic_data = efx->nic_data; + u32 flags = spec->flags; memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN); + /* Remove RSS flag if we don't have an RSS context. */ + if (flags & EFX_FILTER_FLAG_RX_RSS && + spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT && + nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) + flags &= ~EFX_FILTER_FLAG_RX_RSS; + if (replacing) { MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, MC_CMD_FILTER_OP_IN_OP_REPLACE); @@ -2985,10 +2996,10 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx, spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ? 0 : spec->dmaq_id); MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE, - (spec->flags & EFX_FILTER_FLAG_RX_RSS) ? + (flags & EFX_FILTER_FLAG_RX_RSS) ? MC_CMD_FILTER_OP_IN_RX_MODE_RSS : MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE); - if (spec->flags & EFX_FILTER_FLAG_RX_RSS) + if (flags & EFX_FILTER_FLAG_RX_RSS) MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT, spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT ? diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 5eac523b4b0c..aaa80f13859b 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -708,7 +708,7 @@ static int meth_tx(struct sk_buff *skb, struct net_device *dev) mace->eth.dma_ctrl = priv->dma_ctrl; meth_add_to_tx_ring(priv, skb); - dev->trans_start = jiffies; /* save the timestamp */ + netif_trans_update(dev); /* save the timestamp */ /* If TX ring is full, tell the upper layer to stop sending packets */ if (meth_tx_full(dev)) { @@ -756,7 +756,7 @@ static void meth_tx_timeout(struct net_device *dev) /* Enable interrupt */ spin_unlock_irqrestore(&priv->meth_lock, flags); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index fd812d2e5e1c..95001ee408ab 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1575,7 +1575,7 @@ static void sis900_tx_timeout(struct net_device *net_dev) spin_unlock_irqrestore(&sis_priv->lock, flags); - net_dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(net_dev); /* prevent tx timeout */ /* load Transmit Descriptor Register */ sw32(txdp, sis_priv->tx_ring_dma); diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 443f1da9fc9e..7186b89269ad 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -889,7 +889,7 @@ static void epic_tx_timeout(struct net_device *dev) ew32(COMMAND, TxQueued); } - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; if (!ep->tx_full) netif_wake_queue(dev); 
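
The netif_trans_update() conversions running through this section (atp, sgiseeq, meth, sis900, epic100, qca_spi and the smsc parts) are the same mechanical change: writes of dev->trans_start become netif_trans_update(dev) and reads become dev_trans_start(dev), matching the move of the transmit timestamp into struct netdev_queue. A sketch of a timeout handler written against the helpers, with a hypothetical my_reset_hw():

#include <linux/jiffies.h>
#include <linux/netdevice.h>

static void my_reset_hw(struct net_device *dev)	/* stand-in reset */
{
}

static void my_tx_timeout(struct net_device *dev)
{
	netdev_info(dev, "transmit timed out, latency %ld jiffies\n",
		    jiffies - dev_trans_start(dev));	/* read the stamp */

	dev->stats.tx_errors++;
	my_reset_hw(dev);

	/* Refresh the stamp so the watchdog does not re-fire immediately */
	netif_trans_update(dev);
	netif_wake_queue(dev);
}
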
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index a733868a43aa..cb49c9654f0a 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -499,7 +499,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev) /* DMA complete IRQ will free buffer and set jiffies */ #else SMC_PUSH_DATA(lp, buf, len); - dev->trans_start = jiffies; + netif_trans_update(dev); dev_kfree_skb_irq(skb); #endif if (!lp->tx_throttle) { @@ -1189,7 +1189,7 @@ smc911x_tx_dma_irq(void *data) DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n"); BUG_ON(skb == NULL); dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); - dev->trans_start = jiffies; + netif_trans_update(dev); dev_kfree_skb_irq(skb); lp->current_tx_skb = NULL; if (lp->pending_tx_skb != NULL) @@ -1283,7 +1283,7 @@ static void smc911x_timeout(struct net_device *dev) schedule_work(&lp->phy_configure); /* We can accept TX packets again */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c index 664f596971b5..d496888b85d3 100644 --- a/drivers/net/ethernet/smsc/smc9194.c +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -663,7 +663,7 @@ static void smc_hardware_send_packet( struct net_device * dev ) lp->saved_skb = NULL; dev_kfree_skb_any (skb); - dev->trans_start = jiffies; + netif_trans_update(dev); /* we can send another packet */ netif_wake_queue(dev); @@ -1104,7 +1104,7 @@ static void smc_timeout(struct net_device *dev) /* "kick" the adaptor */ smc_reset( dev->base_addr ); smc_enable( dev->base_addr ); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ /* clear anything saved */ ((struct smc_local *)netdev_priv(dev))->saved_skb = NULL; netif_wake_queue(dev); diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 3449893aea8d..db3c696d7002 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -1172,7 +1172,7 @@ static void smc_hardware_send_packet(struct net_device * dev) smc->saved_skb = NULL; dev_kfree_skb_irq(skb); - dev->trans_start = jiffies; + netif_trans_update(dev); netif_start_queue(dev); } @@ -1187,7 +1187,7 @@ static void smc_tx_timeout(struct net_device *dev) inw(ioaddr)&0xff, inw(ioaddr + 2)); dev->stats.tx_errors++; smc_reset(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ smc->saved_skb = NULL; netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index c5ed27c54724..18ac52ded696 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -619,7 +619,7 @@ static void smc_hardware_send_pkt(unsigned long data) SMC_SET_MMU_CMD(lp, MC_ENQUEUE); smc_special_unlock(&lp->lock, flags); - dev->trans_start = jiffies; + netif_trans_update(dev); dev->stats.tx_packets++; dev->stats.tx_bytes += len; @@ -1364,7 +1364,7 @@ static void smc_timeout(struct net_device *dev) schedule_work(&lp->phy_configure); /* We can accept TX packets again */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 
b3901616f4f6..0fb362d5a722 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -2,7 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ - mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y) + mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \ + dwmac4_dma.o dwmac4_lib.o dwmac4_core.o $(stmmac-y) # Ordering matters. Generic driver must be last. obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index f96d257308b0..fc60368df2e7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -41,6 +41,8 @@ /* Synopsys Core versions */ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 +#define DWMAC_CORE_4_00 0x40 +#define STMMAC_CHAN0 0 /* Always supported and default for all chips */ #define DMA_TX_SIZE 512 #define DMA_RX_SIZE 512 @@ -167,6 +169,9 @@ struct stmmac_extra_stats { unsigned long mtl_rx_fifo_ctrl_active; unsigned long mac_rx_frame_ctrl_fifo; unsigned long mac_gmii_rx_proto_engine; + /* TSO */ + unsigned long tx_tso_frames; + unsigned long tx_tso_nfrags; }; /* CSR Frequency Access Defines*/ @@ -243,6 +248,7 @@ enum rx_frame_status { csum_none = 0x2, llc_snap = 0x4, dma_own = 0x8, + rx_not_ls = 0x10, }; /* Tx status */ @@ -269,6 +275,7 @@ enum dma_irq_status { #define CORE_PCS_ANE_COMPLETE (1 << 5) #define CORE_PCS_LINK_STATUS (1 << 6) #define CORE_RGMII_IRQ (1 << 7) +#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8) /* Physical Coding Sublayer */ struct rgmii_adv { @@ -300,8 +307,10 @@ struct dma_features { /* 802.3az - Energy-Efficient Ethernet (EEE) */ unsigned int eee; unsigned int av; + unsigned int tsoen; /* TX and RX csum */ unsigned int tx_coe; + unsigned int rx_coe; unsigned int rx_coe_type1; unsigned int rx_coe_type2; unsigned int rxfifo_over_2048; @@ -348,6 +357,10 @@ struct stmmac_desc_ops { void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, bool csum_flag, int mode, bool tx_own, bool ls); + void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1, + int len2, bool tx_own, bool ls, + unsigned int tcphdrlen, + unsigned int tcppayloadlen); /* Set/get the owner of the descriptor */ void (*set_tx_owner) (struct dma_desc *p); int (*get_tx_owner) (struct dma_desc *p); @@ -380,6 +393,10 @@ struct stmmac_desc_ops { u64(*get_timestamp) (void *desc, u32 ats); /* get rx timestamp status */ int (*get_rx_timestamp_status) (void *desc, u32 ats); + /* Display ring */ + void (*display_ring)(void *head, unsigned int size, bool rx); + /* set MSS via context descriptor */ + void (*set_mss)(struct dma_desc *p, unsigned int mss); }; extern const struct stmmac_desc_ops enh_desc_ops; @@ -412,9 +429,15 @@ struct stmmac_dma_ops { int (*dma_interrupt) (void __iomem *ioaddr, struct stmmac_extra_stats *x); /* If supported then get the optional core features */ - unsigned int (*get_hw_feature) (void __iomem *ioaddr); + void (*get_hw_feature)(void __iomem *ioaddr, + struct dma_features *dma_cap); /* Program the HW RX Watchdog */ void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt); + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len); + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len); + void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void 
(*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); }; struct mac_device_info; @@ -463,6 +486,7 @@ struct stmmac_hwtimestamp { }; extern const struct stmmac_hwtimestamp stmmac_ptp; +extern const struct stmmac_mode_ops dwmac4_ring_mode_ops; struct mac_link { int port; @@ -495,7 +519,6 @@ struct mac_device_info { const struct stmmac_hwtimestamp *ptp; struct mii_regs mii; /* MII register Addresses */ struct mac_link link; - unsigned int synopsys_uid; void __iomem *pcsr; /* vpointer to device CSRs */ int multicast_filter_bins; int unicast_filter_entries; @@ -504,18 +527,47 @@ struct mac_device_info { }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries); -struct mac_device_info *dwmac100_setup(void __iomem *ioaddr); + int perfect_uc_entries, + int *synopsys_id); +struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id); +struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins, + int perfect_uc_entries, int *synopsys_id); void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6], unsigned int high, unsigned int low); void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, unsigned int high, unsigned int low); - void stmmac_set_mac(void __iomem *ioaddr, bool enable); +void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6], + unsigned int high, unsigned int low); +void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, + unsigned int high, unsigned int low); +void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable); + void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_mode_ops ring_mode_ops; extern const struct stmmac_mode_ops chain_mode_ops; - +extern const struct stmmac_desc_ops dwmac4_desc_ops; + +/** + * stmmac_get_synopsys_id - return the SYINID. + * @priv: driver private structure + * Description: this simple function is to decode and return the SYINID + * starting from the HW core register. 
+ */ +static inline u32 stmmac_get_synopsys_id(u32 hwid) +{ + /* Check Synopsys Id (not available on old chips) */ + if (likely(hwid)) { + u32 uid = ((hwid & 0x0000ff00) >> 8); + u32 synid = (hwid & 0x000000ff); + + pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n", + uid, synid); + + return synid; + } + return 0; +} #endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index f0d797ab74d8..f13499fa1f58 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -34,6 +34,9 @@ #define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003 #define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010 +#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028 +#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004 + #define EMAC_SPLITTER_CTRL_REG 0x0 #define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3 #define EMAC_SPLITTER_CTRL_SPEED_10 0x2 @@ -89,15 +92,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * struct device_node *np_splitter; struct resource res_splitter; - dwmac->stmmac_rst = devm_reset_control_get(dev, - STMMAC_RESOURCE_NAME); - if (IS_ERR(dwmac->stmmac_rst)) { - dev_info(dev, "Could not get reset control!\n"); - if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER) - return -EPROBE_DEFER; - dwmac->stmmac_rst = NULL; - } - dwmac->interface = of_get_phy_mode(np); sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon"); @@ -142,13 +136,13 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device * return 0; } -static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) +static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac) { struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr; int phymode = dwmac->interface; u32 reg_offset = dwmac->reg_offset; u32 reg_shift = dwmac->reg_shift; - u32 ctrl, val; + u32 ctrl, val, module; switch (phymode) { case PHY_INTERFACE_MODE_RGMII: @@ -171,48 +165,26 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac) if (dwmac->splitter_base) val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII; + /* Assert reset to the enet controller before changing the phy mode */ + if (dwmac->stmmac_rst) + reset_control_assert(dwmac->stmmac_rst); + regmap_read(sys_mgr_base_addr, reg_offset, &ctrl); ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift); ctrl |= val << reg_shift; - if (dwmac->f2h_ptp_ref_clk) + if (dwmac->f2h_ptp_ref_clk) { ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2); - else + regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, + &module); + module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2)); + regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG, + module); + } else { ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2)); + } regmap_write(sys_mgr_base_addr, reg_offset, ctrl); - return 0; -} - -static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv) -{ - struct socfpga_dwmac *dwmac = priv; - - /* On socfpga platform exit, assert and hold reset to the - * enet controller - the default state after a hard reset. 
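
stmmac_get_synopsys_id(), added above, is the reason mac_device_info loses its synopsys_uid field: the setup functions now decode GMAC_VERSION once and hand back only the core ID. The decode itself is two byte extractions; as a worked example (the value 0x00001037 is illustrative, not from the patch), it splits into user ID 0x10 and Synopsys ID 0x37:

#include <linux/printk.h>
#include <linux/types.h>

static inline u32 my_synopsys_id(u32 hwid)
{
	u32 uid   = (hwid & 0x0000ff00) >> 8;	/* bits 15:8 - user ID */
	u32 synid = hwid & 0x000000ff;		/* bits 7:0  - core ID */

	pr_debug("user ID: 0x%x, Synopsys ID: 0x%x\n", uid, synid);

	/* e.g. 0x35 is DWMAC_CORE_3_50, 0x40 is DWMAC_CORE_4_00 */
	return synid;
}
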
- */
-	if (dwmac->stmmac_rst)
-		reset_control_assert(dwmac->stmmac_rst);
-}
-
-static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
-{
-	struct socfpga_dwmac *dwmac = priv;
-	struct net_device *ndev = platform_get_drvdata(pdev);
-	struct stmmac_priv *stpriv = NULL;
-	int ret = 0;
-
-	if (ndev)
-		stpriv = netdev_priv(ndev);
-
-	/* Assert reset to the enet controller before changing the phy mode */
-	if (dwmac->stmmac_rst)
-		reset_control_assert(dwmac->stmmac_rst);
-
-	/* Setup the phy mode in the system manager registers according to
-	 * devicetree configuration
-	 */
-	ret = socfpga_dwmac_setup(dwmac);
 
 	/* Deassert reset for the phy configuration to be sampled by
 	 * the enet controller, and operation to start in requested mode
@@ -220,25 +192,7 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
 	if (dwmac->stmmac_rst)
 		reset_control_deassert(dwmac->stmmac_rst);
 
-	/* Before the enet controller is suspended, the phy is suspended.
-	 * This causes the phy clock to be gated. The enet controller is
-	 * resumed before the phy, so the clock is still gated "off" when
-	 * the enet controller is resumed. This code makes sure the phy
-	 * is "resumed" before reinitializing the enet controller since
-	 * the enet controller depends on an active phy clock to complete
-	 * a DMA reset. A DMA reset will "time out" if executed
-	 * with no phy clock input on the Synopsys enet controller.
-	 * Verified through Synopsys Case #8000711656.
-	 *
-	 * Note that the phy clock is also gated when the phy is isolated.
-	 * Phy "suspend" and "isolate" controls are located in phy basic
-	 * control register 0, and can be modified by the phy driver
-	 * framework.
-	 */
-	if (stpriv && stpriv->phydev)
-		phy_resume(stpriv->phydev);
-
-	return ret;
+	return 0;
 }
 
 static int socfpga_dwmac_probe(struct platform_device *pdev)
@@ -267,23 +221,58 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	ret = socfpga_dwmac_setup(dwmac);
-	if (ret) {
-		dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
-		return ret;
-	}
-
 	plat_dat->bsp_priv = dwmac;
-	plat_dat->init = socfpga_dwmac_init;
-	plat_dat->exit = socfpga_dwmac_exit;
 	plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
 
-	ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
-	if (ret)
-		return ret;
+	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	if (!ret) {
+		struct net_device *ndev = platform_get_drvdata(pdev);
+		struct stmmac_priv *stpriv = netdev_priv(ndev);
+
+		/* The socfpga driver needs to control the stmmac reset to
+		 * set the phy mode. Create a copy of the core reset handle
+		 * so it can be used by the driver later.
+		 */
+		dwmac->stmmac_rst = stpriv->stmmac_rst;
+
+		ret = socfpga_dwmac_set_phy_mode(dwmac);
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int socfpga_dwmac_resume(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
+
+	socfpga_dwmac_set_phy_mode(priv->plat->bsp_priv);
 
-	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+	/* Before the enet controller is suspended, the phy is suspended.
+	 * This causes the phy clock to be gated. The enet controller is
+	 * resumed before the phy, so the clock is still gated "off" when
+	 * the enet controller is resumed. This code makes sure the phy
+	 * is "resumed" before reinitializing the enet controller since
+	 * the enet controller depends on an active phy clock to complete
+	 * a DMA reset.
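+	 * (Resume-order sketch, describing the code above: this handler
+	 * first re-programs the phy-mode bits, then calls phy_resume() to
+	 * ungate the phy clock, and only then lets stmmac_resume() issue
+	 * the DMA reset.)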
A DMA reset will "time out" if executed + * with no phy clock input on the Synopsys enet controller. + * Verified through Synopsys Case #8000711656. + * + * Note that the phy clock is also gated when the phy is isolated. + * Phy "suspend" and "isolate" controls are located in phy basic + * control register 0, and can be modified by the phy driver + * framework. + */ + if (priv->phydev) + phy_resume(priv->phydev); + + return stmmac_resume(dev); } +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend, + socfpga_dwmac_resume); static const struct of_device_id socfpga_dwmac_match[] = { { .compatible = "altr,socfpga-stmmac" }, @@ -296,7 +285,7 @@ static struct platform_driver socfpga_dwmac_driver = { .remove = stmmac_pltfr_remove, .driver = { .name = "socfpga-dwmac", - .pm = &stmmac_pltfr_pm_ops, + .pm = &socfpga_dwmac_pm_ops, .of_match_table = socfpga_dwmac_match, }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index c2941172f6d1..fb1eb578e34e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -491,7 +491,8 @@ static const struct stmmac_ops dwmac1000_ops = { }; struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, - int perfect_uc_entries) + int perfect_uc_entries, + int *synopsys_id) { struct mac_device_info *mac; u32 hwid = readl(ioaddr + GMAC_VERSION); @@ -516,7 +517,9 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, mac->link.speed = GMAC_CONTROL_FES; mac->mii.addr = GMAC_MII_ADDR; mac->mii.data = GMAC_MII_DATA; - mac->synopsys_uid = hwid; + + /* Get and dump the chip ID */ + *synopsys_id = stmmac_get_synopsys_id(hwid); return mac; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index da32d6037e3e..990746955216 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -215,9 +215,40 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr) } } -static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr) +static void dwmac1000_get_hw_feature(void __iomem *ioaddr, + struct dma_features *dma_cap) { - return readl(ioaddr + DMA_HW_FEATURE); + u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE); + + dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL); + dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; + dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2; + dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4; + dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5; + dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6; + dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8; + dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; + dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; + /* MMC */ + dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; + /* IEEE 1588-2002 */ + dma_cap->time_stamp = + (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12; + /* IEEE 1588-2008 */ + dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13; + /* 802.3az - Energy-Efficient Ethernet (EEE) */ + dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14; + dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15; + /* TX and RX csum */ + dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16; + dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17; + dma_cap->rx_coe_type2 = (hw_cap & 
DMA_HW_FEAT_RXTYP2COE) >> 18; + dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19; + /* TX and RX number of channels */ + dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20; + dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22; + /* Alternate (enhanced) DESC mode */ + dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; } static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index f8dd773f246c..6418b2e07619 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -173,7 +173,7 @@ static const struct stmmac_ops dwmac100_ops = { .get_umac_addr = dwmac100_get_umac_addr, }; -struct mac_device_info *dwmac100_setup(void __iomem *ioaddr) +struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id) { struct mac_device_info *mac; @@ -192,7 +192,8 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr) mac->link.speed = 0; mac->mii.addr = MAC_MII_ADDR; mac->mii.data = MAC_MII_DATA; - mac->synopsys_uid = 0; + /* Synopsys Id is not available on old chips */ + *synopsys_id = 0; return mac; } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h new file mode 100644 index 000000000000..bc50952a18e7 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -0,0 +1,255 @@ +/* + * DWMAC4 Header file. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * Author: Alexandre Torgue <alexandre.torgue@st.com> + */ + +#ifndef __DWMAC4_H__ +#define __DWMAC4_H__ + +#include "common.h" + +/* MAC registers */ +#define GMAC_CONFIG 0x00000000 +#define GMAC_PACKET_FILTER 0x00000008 +#define GMAC_HASH_TAB_0_31 0x00000010 +#define GMAC_HASH_TAB_32_63 0x00000014 +#define GMAC_RX_FLOW_CTRL 0x00000090 +#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4) +#define GMAC_INT_STATUS 0x000000b0 +#define GMAC_INT_EN 0x000000b4 +#define GMAC_AN_CTRL 0x000000e0 +#define GMAC_AN_STATUS 0x000000e4 +#define GMAC_AN_ADV 0x000000e8 +#define GMAC_AN_LPA 0x000000ec +#define GMAC_PMT 0x000000c0 +#define GMAC_VERSION 0x00000110 +#define GMAC_DEBUG 0x00000114 +#define GMAC_HW_FEATURE0 0x0000011c +#define GMAC_HW_FEATURE1 0x00000120 +#define GMAC_HW_FEATURE2 0x00000124 +#define GMAC_MDIO_ADDR 0x00000200 +#define GMAC_MDIO_DATA 0x00000204 +#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8) +#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8) + +/* MAC Packet Filtering */ +#define GMAC_PACKET_FILTER_PR BIT(0) +#define GMAC_PACKET_FILTER_HMC BIT(2) +#define GMAC_PACKET_FILTER_PM BIT(4) + +#define GMAC_MAX_PERFECT_ADDRESSES 128 + +/* MAC Flow Control RX */ +#define GMAC_RX_FLOW_CTRL_RFE BIT(0) + +/* MAC Flow Control TX */ +#define GMAC_TX_FLOW_CTRL_TFE BIT(1) +#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16 + +/* MAC Interrupt bitmap*/ +#define GMAC_INT_PMT_EN BIT(4) +#define GMAC_INT_LPI_EN BIT(5) + +enum dwmac4_irq_status { + time_stamp_irq = 0x00001000, + mmc_rx_csum_offload_irq = 0x00000800, + mmc_tx_irq = 0x00000400, + mmc_rx_irq = 0x00000200, + mmc_irq = 0x00000100, + pmt_irq = 0x00000010, + pcs_ane_irq = 0x00000004, + pcs_link_irq = 0x00000002, +}; + +/* MAC Auto-Neg bitmap*/ +#define GMAC_AN_CTRL_RAN BIT(9) +#define GMAC_AN_CTRL_ANE BIT(12) +#define GMAC_AN_CTRL_ELE BIT(14) +#define GMAC_AN_FD BIT(5) +#define GMAC_AN_HD BIT(6) +#define GMAC_AN_PSE_MASK GENMASK(8, 7) +#define GMAC_AN_PSE_SHIFT 7 + +/* MAC PMT bitmap */ +enum power_event { + pointer_reset = 0x80000000, + global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +/* MAC Debug bitmap */ +#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) +#define GMAC_DEBUG_TFCSTS_SHIFT 17 +#define GMAC_DEBUG_TFCSTS_IDLE 0 +#define GMAC_DEBUG_TFCSTS_WAIT 1 +#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2 +#define GMAC_DEBUG_TFCSTS_XFER 3 +#define GMAC_DEBUG_TPESTS BIT(16) +#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1) +#define GMAC_DEBUG_RFCFCSTS_SHIFT 1 +#define GMAC_DEBUG_RPESTS BIT(0) + +/* MAC config */ +#define GMAC_CONFIG_IPC BIT(27) +#define GMAC_CONFIG_2K BIT(22) +#define GMAC_CONFIG_ACS BIT(20) +#define GMAC_CONFIG_BE BIT(18) +#define GMAC_CONFIG_JD BIT(17) +#define GMAC_CONFIG_JE BIT(16) +#define GMAC_CONFIG_PS BIT(15) +#define GMAC_CONFIG_FES BIT(14) +#define GMAC_CONFIG_DM BIT(13) +#define GMAC_CONFIG_DCRS BIT(9) +#define GMAC_CONFIG_TE BIT(1) +#define GMAC_CONFIG_RE BIT(0) + +/* MAC HW features0 bitmap */ +#define GMAC_HW_FEAT_ADDMAC BIT(18) +#define GMAC_HW_FEAT_RXCOESEL BIT(16) +#define GMAC_HW_FEAT_TXCOSEL BIT(14) +#define GMAC_HW_FEAT_EEESEL BIT(13) +#define GMAC_HW_FEAT_TSSEL BIT(12) +#define GMAC_HW_FEAT_MMCSEL BIT(8) +#define GMAC_HW_FEAT_MGKSEL BIT(7) +#define GMAC_HW_FEAT_RWKSEL BIT(6) +#define GMAC_HW_FEAT_SMASEL BIT(5) +#define GMAC_HW_FEAT_VLHASH BIT(4) +#define GMAC_HW_FEAT_PCSSEL BIT(3) +#define GMAC_HW_FEAT_HDSEL BIT(2) +#define GMAC_HW_FEAT_GMIISEL BIT(1) +#define GMAC_HW_FEAT_MIISEL BIT(0) + +/* MAC HW 
features1 bitmap */
+#define GMAC_HW_FEAT_AVSEL		BIT(20)
+#define GMAC_HW_TSOEN			BIT(18)
+
+/* MAC HW features2 bitmap */
+#define GMAC_HW_FEAT_TXCHCNT		GENMASK(21, 18)
+#define GMAC_HW_FEAT_RXCHCNT		GENMASK(15, 12)
+
+/* MAC HW ADDR regs */
+#define GMAC_HI_DCS			GENMASK(18, 16)
+#define GMAC_HI_DCS_SHIFT		16
+#define GMAC_HI_REG_AE			BIT(31)
+
+/* MTL registers */
+#define MTL_INT_STATUS			0x00000c20
+#define MTL_INT_Q0			BIT(0)
+
+#define MTL_CHAN_BASE_ADDR		0x00000d00
+#define MTL_CHAN_BASE_OFFSET		0x40
+#define MTL_CHANX_BASE_ADDR(x)		(MTL_CHAN_BASE_ADDR + \
+					 (x * MTL_CHAN_BASE_OFFSET))
+
+#define MTL_CHAN_TX_OP_MODE(x)		MTL_CHANX_BASE_ADDR(x)
+#define MTL_CHAN_TX_DEBUG(x)		(MTL_CHANX_BASE_ADDR(x) + 0x8)
+#define MTL_CHAN_INT_CTRL(x)		(MTL_CHANX_BASE_ADDR(x) + 0x2c)
+#define MTL_CHAN_RX_OP_MODE(x)		(MTL_CHANX_BASE_ADDR(x) + 0x30)
+#define MTL_CHAN_RX_DEBUG(x)		(MTL_CHANX_BASE_ADDR(x) + 0x38)
+
+#define MTL_OP_MODE_RSF			BIT(5)
+#define MTL_OP_MODE_TSF			BIT(1)
+
+#define MTL_OP_MODE_TTC_MASK		0x70
+#define MTL_OP_MODE_TTC_SHIFT		4
+
+#define MTL_OP_MODE_TTC_32		0
+#define MTL_OP_MODE_TTC_64		(1 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_96		(2 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_128		(3 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_192		(4 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_256		(5 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_384		(6 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_512		(7 << MTL_OP_MODE_TTC_SHIFT)
+
+#define MTL_OP_MODE_RTC_MASK		0x18
+#define MTL_OP_MODE_RTC_SHIFT		3
+
+#define MTL_OP_MODE_RTC_32		(1 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_64		0
+#define MTL_OP_MODE_RTC_96		(2 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_128		(3 << MTL_OP_MODE_RTC_SHIFT)
+
+/* MTL interrupt */
+#define MTL_RX_OVERFLOW_INT_EN		BIT(24)
+#define MTL_RX_OVERFLOW_INT		BIT(16)
+
+/* Default operating mode of the MAC */
+#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
+			GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
+
+/* To dump the core regs excluding the Address Registers */
+#define	GMAC_REG_NUM	132
+
+/* MTL debug */
+#define MTL_DEBUG_TXSTSFSTS		BIT(5)
+#define MTL_DEBUG_TXFSTS		BIT(4)
+#define MTL_DEBUG_TWCSTS		BIT(3)
+
+/* MTL debug: Tx FIFO Read Controller Status */
+#define MTL_DEBUG_TRCSTS_MASK		GENMASK(2, 1)
+#define MTL_DEBUG_TRCSTS_SHIFT		1
+#define MTL_DEBUG_TRCSTS_IDLE		0
+#define MTL_DEBUG_TRCSTS_READ		1
+#define MTL_DEBUG_TRCSTS_TXW		2
+#define MTL_DEBUG_TRCSTS_WRITE		3
+#define MTL_DEBUG_TXPAUSED		BIT(0)
+
+/* MAC debug: GMII or MII Transmit Protocol Engine Status */
+#define MTL_DEBUG_RXFSTS_MASK		GENMASK(5, 4)
+#define MTL_DEBUG_RXFSTS_SHIFT 4 +#define MTL_DEBUG_RXFSTS_EMPTY 0 +#define MTL_DEBUG_RXFSTS_BT 1 +#define MTL_DEBUG_RXFSTS_AT 2 +#define MTL_DEBUG_RXFSTS_FULL 3 +#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1) +#define MTL_DEBUG_RRCSTS_SHIFT 1 +#define MTL_DEBUG_RRCSTS_IDLE 0 +#define MTL_DEBUG_RRCSTS_RDATA 1 +#define MTL_DEBUG_RRCSTS_RSTAT 2 +#define MTL_DEBUG_RRCSTS_FLUSH 3 +#define MTL_DEBUG_RWCSTS BIT(0) + +extern const struct stmmac_dma_ops dwmac4_dma_ops; +extern const struct stmmac_dma_ops dwmac410_dma_ops; +#endif /* __DWMAC4_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c new file mode 100644 index 000000000000..4f7283d05588 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -0,0 +1,407 @@ +/* + * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + * DWC Ether MAC version 4.00 has been used for developing this code. + * + * This only implements the mac core functions for this chip. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Author: Alexandre Torgue <alexandre.torgue@st.com> + */ + +#include <linux/crc32.h> +#include <linux/slab.h> +#include <linux/ethtool.h> +#include <linux/io.h> +#include "dwmac4.h" + +static void dwmac4_core_init(struct mac_device_info *hw, int mtu) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONFIG); + + value |= GMAC_CORE_INIT; + + if (mtu > 1500) + value |= GMAC_CONFIG_2K; + if (mtu > 2000) + value |= GMAC_CONFIG_JE; + + writel(value, ioaddr + GMAC_CONFIG); + + /* Mask GMAC interrupts */ + writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN); +} + +static void dwmac4_dump_regs(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + int i; + + pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr); + + for (i = 0; i < GMAC_REG_NUM; i++) { + int offset = i * 4; + + pr_debug("\tReg No. 
%d (offset 0x%x): 0x%08x\n", i, + offset, readl(ioaddr + offset)); + } +} + +static int dwmac4_rx_ipc_enable(struct mac_device_info *hw) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value = readl(ioaddr + GMAC_CONFIG); + + if (hw->rx_csum) + value |= GMAC_CONFIG_IPC; + else + value &= ~GMAC_CONFIG_IPC; + + writel(value, ioaddr + GMAC_CONFIG); + + value = readl(ioaddr + GMAC_CONFIG); + + return !!(value & GMAC_CONFIG_IPC); +} + +static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) +{ + void __iomem *ioaddr = hw->pcsr; + unsigned int pmt = 0; + + if (mode & WAKE_MAGIC) { + pr_debug("GMAC: WOL Magic frame\n"); + pmt |= power_down | magic_pkt_en; + } + if (mode & WAKE_UCAST) { + pr_debug("GMAC: WOL on global unicast\n"); + pmt |= global_unicast; + } + + writel(pmt, ioaddr + GMAC_PMT); +} + +static void dwmac4_set_umac_addr(struct mac_device_info *hw, + unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + + stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac4_get_umac_addr(struct mac_device_info *hw, + unsigned char *addr, unsigned int reg_n) +{ + void __iomem *ioaddr = hw->pcsr; + + stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void dwmac4_set_filter(struct mac_device_info *hw, + struct net_device *dev) +{ + void __iomem *ioaddr = (void __iomem *)dev->base_addr; + unsigned int value = 0; + + if (dev->flags & IFF_PROMISC) { + value = GMAC_PACKET_FILTER_PR; + } else if ((dev->flags & IFF_ALLMULTI) || + (netdev_mc_count(dev) > HASH_TABLE_SIZE)) { + /* Pass all multi */ + value = GMAC_PACKET_FILTER_PM; + /* Set the 64 bits of the HASH tab. To be updated if taller + * hash table is used + */ + writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31); + writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63); + } else if (!netdev_mc_empty(dev)) { + u32 mc_filter[2]; + struct netdev_hw_addr *ha; + + /* Hash filter for multicast */ + value = GMAC_PACKET_FILTER_HMC; + + memset(mc_filter, 0, sizeof(mc_filter)); + netdev_for_each_mc_addr(ha, dev) { + /* The upper 6 bits of the calculated CRC are used to + * index the content of the Hash Table Reg 0 and 1. 
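+			 * A worked example of the mapping (plain arithmetic,
+			 * not from the databook): bit_nr 0x2f selects
+			 * mc_filter[0x2f >> 5] = mc_filter[1] and bit
+			 * (0x2f & 0x1f) = 15, i.e. bit 15 of
+			 * GMAC_HASH_TAB_32_63.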
+			 */
+			int bit_nr =
+				(bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
+			/* The most significant bit determines the register
+			 * to use while the other 5 bits determine the bit
+			 * within the selected register
+			 */
+			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
+		}
+		writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
+		writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
+	}
+
+	/* Handle multiple unicast addresses */
+	if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
+		/* Switch to promiscuous mode if more than 128 addrs
+		 * are required
+		 */
+		value |= GMAC_PACKET_FILTER_PR;
+	} else if (!netdev_uc_empty(dev)) {
+		int reg = 1;
+		struct netdev_hw_addr *ha;
+
+		netdev_for_each_uc_addr(ha, dev) {
+			dwmac4_set_umac_addr(hw, ha->addr, reg);
+			reg++;
+		}
+	}
+
+	writel(value, ioaddr + GMAC_PACKET_FILTER);
+}
+
+static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+			     unsigned int fc, unsigned int pause_time)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 channel = STMMAC_CHAN0;	/* FIXME */
+	unsigned int flow = 0;
+
+	pr_debug("GMAC Flow-Control:\n");
+	if (fc & FLOW_RX) {
+		pr_debug("\tReceive Flow-Control ON\n");
+		flow |= GMAC_RX_FLOW_CTRL_RFE;
+		writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
+	}
+	if (fc & FLOW_TX) {
+		pr_debug("\tTransmit Flow-Control ON\n");
+		flow |= GMAC_TX_FLOW_CTRL_TFE;
+		writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+
+		if (duplex) {
+			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
+			flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+		}
+	}
+}
+
+static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart)
+{
+	void __iomem *ioaddr = hw->pcsr;
+
+	/* auto negotiation enable and External Loopback enable */
+	u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+
+	if (restart)
+		value |= GMAC_AN_CTRL_RAN;
+
+	writel(value, ioaddr + GMAC_AN_CTRL);
+}
+
+static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 value = readl(ioaddr + GMAC_AN_ADV);
+
+	if (value & GMAC_AN_FD)
+		adv->duplex = DUPLEX_FULL;
+	if (value & GMAC_AN_HD)
+		adv->duplex |= DUPLEX_HALF;
+
+	adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+
+	value = readl(ioaddr + GMAC_AN_LPA);
+
+	if (value & GMAC_AN_FD)
+		adv->lp_duplex = DUPLEX_FULL;
+	if (value & GMAC_AN_HD)
+		adv->lp_duplex = DUPLEX_HALF;
+
+	adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+}
+
+static int dwmac4_irq_status(struct mac_device_info *hw,
+			     struct stmmac_extra_stats *x)
+{
+	void __iomem *ioaddr = hw->pcsr;
+	u32 mtl_int_qx_status;
+	u32 intr_status;
+	int ret = 0;
+
+	intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+	/* Unused events (e.g. MMC interrupts) are not handled. */
+	if ((intr_status & mmc_tx_irq))
+		x->mmc_tx_irq_n++;
+	if (unlikely(intr_status & mmc_rx_irq))
+		x->mmc_rx_irq_n++;
+	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+		x->mmc_rx_csum_offload_irq_n++;
+	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
+	if (unlikely(intr_status & pmt_irq)) {
+		readl(ioaddr + GMAC_PMT);
+		x->irq_receive_pmt_irq_n++;
+	}
+
+	if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
+		readl(ioaddr + GMAC_AN_STATUS);
+		x->irq_pcs_ane_n++;
+	}
+
+	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
+	/* Check MTL Interrupt: Currently only one queue is used: Q0.
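+	 * A queue with a pending event sets its bit in MTL_INT_STATUS; the
+	 * per-queue cause is then read back from MTL_CHAN_INT_CTRL() and,
+	 * for an RX overflow, cleared by writing the status bit back.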
	 */
+	if (mtl_int_qx_status & MTL_INT_Q0) {
+		/* read Queue 0 Interrupt status */
+		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+
+		if (status & MTL_RX_OVERFLOW_INT) {
+			/* clear Interrupt */
+			writel(status | MTL_RX_OVERFLOW_INT,
+			       ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+			ret = CORE_IRQ_MTL_RX_OVERFLOW;
+		}
+	}
+
+	return ret;
+}
+
+static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+{
+	u32 value;
+
+	/* Currently only channel 0 is supported */
+	value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
+
+	if (value & MTL_DEBUG_TXSTSFSTS)
+		x->mtl_tx_status_fifo_full++;
+	if (value & MTL_DEBUG_TXFSTS)
+		x->mtl_tx_fifo_not_empty++;
+	if (value & MTL_DEBUG_TWCSTS)
+		x->mmtl_fifo_ctrl++;
+	if (value & MTL_DEBUG_TRCSTS_MASK) {
+		u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
+			     >> MTL_DEBUG_TRCSTS_SHIFT;
+		if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
+			x->mtl_tx_fifo_read_ctrl_write++;
+		else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
+			x->mtl_tx_fifo_read_ctrl_wait++;
+		else if (trcsts == MTL_DEBUG_TRCSTS_READ)
+			x->mtl_tx_fifo_read_ctrl_read++;
+		else
+			x->mtl_tx_fifo_read_ctrl_idle++;
+	}
+	if (value & MTL_DEBUG_TXPAUSED)
+		x->mac_tx_in_pause++;
+
+	value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
+
+	if (value & MTL_DEBUG_RXFSTS_MASK) {
+		u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
+			     >> MTL_DEBUG_RXFSTS_SHIFT;
+
+		if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
+			x->mtl_rx_fifo_fill_level_full++;
+		else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
+			x->mtl_rx_fifo_fill_above_thresh++;
+		else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
+			x->mtl_rx_fifo_fill_below_thresh++;
+		else
+			x->mtl_rx_fifo_fill_level_empty++;
+	}
+	if (value & MTL_DEBUG_RRCSTS_MASK) {
+		u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
+			     MTL_DEBUG_RRCSTS_SHIFT;
+
+		if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
+			x->mtl_rx_fifo_read_ctrl_flush++;
+		else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
+			x->mtl_rx_fifo_read_ctrl_status++;
+		else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
+			x->mtl_rx_fifo_read_ctrl_read_data++;
+		else
+			x->mtl_rx_fifo_read_ctrl_idle++;
+	}
+	if (value & MTL_DEBUG_RWCSTS)
+		x->mtl_rx_fifo_ctrl_active++;
+
+	/* GMAC debug */
+	value = readl(ioaddr + GMAC_DEBUG);
+
+	if (value & GMAC_DEBUG_TFCSTS_MASK) {
+		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
+			     >> GMAC_DEBUG_TFCSTS_SHIFT;
+
+		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
+			x->mac_tx_frame_ctrl_xfer++;
+		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
+			x->mac_tx_frame_ctrl_pause++;
+		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
+			x->mac_tx_frame_ctrl_wait++;
+		else
+			x->mac_tx_frame_ctrl_idle++;
+	}
+	if (value & GMAC_DEBUG_TPESTS)
+		x->mac_gmii_tx_proto_engine++;
+	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
+		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
+					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
+	if (value & GMAC_DEBUG_RPESTS)
+		x->mac_gmii_rx_proto_engine++;
+}
+
+static const struct stmmac_ops dwmac4_ops = {
+	.core_init = dwmac4_core_init,
+	.rx_ipc = dwmac4_rx_ipc_enable,
+	.dump_regs = dwmac4_dump_regs,
+	.host_irq_status = dwmac4_irq_status,
+	.flow_ctrl = dwmac4_flow_ctrl,
+	.pmt = dwmac4_pmt,
+	.set_umac_addr = dwmac4_set_umac_addr,
+	.get_umac_addr = dwmac4_get_umac_addr,
+	.ctrl_ane = dwmac4_ctrl_ane,
+	.get_adv = dwmac4_get_adv,
+	.debug = dwmac4_debug,
+	.set_filter = dwmac4_set_filter,
+};
+
+struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
+				     int perfect_uc_entries, int *synopsys_id)
+{
+	struct mac_device_info *mac;
+	u32 hwid = readl(ioaddr + GMAC_VERSION);
+
+	mac = kzalloc(sizeof(const struct mac_device_info),
GFP_KERNEL); + if (!mac) + return NULL; + + mac->pcsr = ioaddr; + mac->multicast_filter_bins = mcbins; + mac->unicast_filter_entries = perfect_uc_entries; + mac->mcast_bits_log2 = 0; + + if (mac->multicast_filter_bins) + mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins); + + mac->mac = &dwmac4_ops; + + mac->link.port = GMAC_CONFIG_PS; + mac->link.duplex = GMAC_CONFIG_DM; + mac->link.speed = GMAC_CONFIG_FES; + mac->mii.addr = GMAC_MDIO_ADDR; + mac->mii.data = GMAC_MDIO_DATA; + + /* Get and dump the chip ID */ + *synopsys_id = stmmac_get_synopsys_id(hwid); + + if (*synopsys_id > DWMAC_CORE_4_00) + mac->dma = &dwmac410_dma_ops; + else + mac->dma = &dwmac4_dma_ops; + + return mac; +} diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c new file mode 100644 index 000000000000..4ec7397e7fb3 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -0,0 +1,389 @@ +/* + * This contains the functions to handle the descriptors for DesignWare databook + * 4.xx. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Author: Alexandre Torgue <alexandre.torgue@st.com> + */ + +#include <linux/stmmac.h> +#include "common.h" +#include "dwmac4_descs.h" + +static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, + void __iomem *ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes3; + int ret = tx_done; + + tdes3 = p->des3; + + /* Get tx owner first */ + if (unlikely(tdes3 & TDES3_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. */ + if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR))) + return tx_not_ls; + + if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) { + if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT)) + x->tx_jabber++; + if (unlikely(tdes3 & TDES3_PACKET_FLUSHED)) + x->tx_frame_flushed++; + if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely(tdes3 & TDES3_NO_CARRIER)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely((tdes3 & TDES3_LATE_COLLISION) || + (tdes3 & TDES3_EXCESSIVE_COLLISION))) + stats->collisions += + (tdes3 & TDES3_COLLISION_COUNT_MASK) + >> TDES3_COLLISION_COUNT_SHIFT; + + if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL)) + x->tx_deferred++; + + if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) + x->tx_underflow++; + + if (unlikely(tdes3 & TDES3_IP_HDR_ERROR)) + x->tx_ip_header_error++; + + if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR)) + x->tx_payload_error++; + + ret = tx_err; + } + + if (unlikely(tdes3 & TDES3_DEFERRED)) + x->tx_deferred++; + + return ret; +} + +static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int rdes1 = p->des1; + unsigned int rdes2 = p->des2; + unsigned int rdes3 = p->des3; + int message_type; + int ret = good_frame; + + if (unlikely(rdes3 & RDES3_OWN)) + return dma_own; + + /* Verify rx error by looking at the last segment. 
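+	 * A frame can span several descriptors; the write-back status and
+	 * length fields are only valid in the descriptor that carries
+	 * RDES3_LAST_DESCRIPTOR, so earlier segments are not inspected here.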
*/ + if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR))) + return discard_frame; + + if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) { + if (unlikely(rdes3 & RDES3_GIANT_PACKET)) + stats->rx_length_errors++; + if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR)) + x->rx_gmac_overflow++; + + if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG)) + x->rx_watchdog++; + + if (unlikely(rdes3 & RDES3_RECEIVE_ERROR)) + x->rx_mii++; + + if (unlikely(rdes3 & RDES3_CRC_ERROR)) { + x->rx_crc++; + stats->rx_crc_errors++; + } + + if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR)) + x->dribbling_bit++; + + ret = discard_frame; + } + + message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8; + + if (rdes1 & RDES1_IP_HDR_ERROR) + x->ip_hdr_err++; + if (rdes1 & RDES1_IP_CSUM_BYPASSED) + x->ip_csum_bypassed++; + if (rdes1 & RDES1_IPV4_HEADER) + x->ipv4_pkt_rcvd++; + if (rdes1 & RDES1_IPV6_HEADER) + x->ipv6_pkt_rcvd++; + if (message_type == RDES_EXT_SYNC) + x->rx_msg_type_sync++; + else if (message_type == RDES_EXT_FOLLOW_UP) + x->rx_msg_type_follow_up++; + else if (message_type == RDES_EXT_DELAY_REQ) + x->rx_msg_type_delay_req++; + else if (message_type == RDES_EXT_DELAY_RESP) + x->rx_msg_type_delay_resp++; + else if (message_type == RDES_EXT_PDELAY_REQ) + x->rx_msg_type_pdelay_req++; + else if (message_type == RDES_EXT_PDELAY_RESP) + x->rx_msg_type_pdelay_resp++; + else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) + x->rx_msg_type_pdelay_follow_up++; + else + x->rx_msg_type_ext_no_ptp++; + + if (rdes1 & RDES1_PTP_PACKET_TYPE) + x->ptp_frame_type++; + if (rdes1 & RDES1_PTP_VER) + x->ptp_ver++; + if (rdes1 & RDES1_TIMESTAMP_DROPPED) + x->timestamp_dropped++; + + if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) { + x->sa_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) { + x->da_rx_filter_fail++; + ret = discard_frame; + } + + if (rdes2 & RDES2_L3_FILTER_MATCH) + x->l3_filter_match++; + if (rdes2 & RDES2_L4_FILTER_MATCH) + x->l4_filter_match++; + if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK) + >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT) + x->l3_l4_filter_no_match++; + + return ret; +} + +static int dwmac4_rd_get_tx_len(struct dma_desc *p) +{ + return (p->des2 & TDES2_BUFFER1_SIZE_MASK); +} + +static int dwmac4_get_tx_owner(struct dma_desc *p) +{ + return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT; +} + +static void dwmac4_set_tx_owner(struct dma_desc *p) +{ + p->des3 |= TDES3_OWN; +} + +static void dwmac4_set_rx_owner(struct dma_desc *p) +{ + p->des3 |= RDES3_OWN; +} + +static int dwmac4_get_tx_ls(struct dma_desc *p) +{ + return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT; +} + +static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe) +{ + return (p->des3 & RDES3_PACKET_SIZE_MASK); +} + +static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p) +{ + p->des2 |= TDES2_TIMESTAMP_ENABLE; +} + +static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) +{ + return (p->des3 & TDES3_TIMESTAMP_STATUS) + >> TDES3_TIMESTAMP_STATUS_SHIFT; +} + +/* NOTE: For RX CTX bit has to be checked before + * HAVE a specific function for TX and another one for RX + */ +static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + u64 ns; + + ns = p->des0; + /* convert high/sec time stamp value to nanosecond */ + ns += p->des1 * 1000000000ULL; + + return ns; +} + +static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats) +{ + struct dma_desc *p = (struct dma_desc *)desc; + + return (p->des1 & RDES1_TIMESTAMP_AVAILABLE) + >> 
RDES1_TIMESTAMP_AVAILABLE_SHIFT;
+}
+
+static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+				   int mode, int end)
+{
+	p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
+
+	if (!disable_rx_ic)
+		p->des3 |= RDES3_INT_ON_COMPLETION_EN;
+}
+
+static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+	p->des0 = 0;
+	p->des1 = 0;
+	p->des2 = 0;
+	p->des3 = 0;
+}
+
+static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+				      bool csum_flag, int mode, bool tx_own,
+				      bool ls)
+{
+	unsigned int tdes3 = p->des3;
+
+	p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
+
+	if (is_fs)
+		tdes3 |= TDES3_FIRST_DESCRIPTOR;
+	else
+		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+
+	if (likely(csum_flag))
+		tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+	else
+		tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+
+	if (ls)
+		tdes3 |= TDES3_LAST_DESCRIPTOR;
+	else
+		tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+	/* Finally set the OWN bit. Later the DMA will start! */
+	if (tx_own)
+		tdes3 |= TDES3_OWN;
+
+	if (is_fs && tx_own)
+		/* When the OWN bit has to be set for the first descriptor of
+		 * a frame, all other descriptors of that frame have to be
+		 * written first, to avoid a race condition.
+		 */
+		wmb();
+
+	p->des3 = tdes3;
+}
+
+static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
+					  int len1, int len2, bool tx_own,
+					  bool ls, unsigned int tcphdrlen,
+					  unsigned int tcppayloadlen)
+{
+	unsigned int tdes3 = p->des3;
+
+	if (len1)
+		p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
+
+	if (len2)
+		p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
+			    & TDES2_BUFFER2_SIZE_MASK;
+
+	if (is_fs) {
+		tdes3 |= TDES3_FIRST_DESCRIPTOR |
+			 TDES3_TCP_SEGMENTATION_ENABLE |
+			 ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
+			  TDES3_SLOT_NUMBER_MASK) |
+			 ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
+	} else {
+		tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+	}
+
+	if (ls)
+		tdes3 |= TDES3_LAST_DESCRIPTOR;
+	else
+		tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+	/* Finally set the OWN bit. Later the DMA will start! */
+	if (tx_own)
+		tdes3 |= TDES3_OWN;
+
+	if (is_fs && tx_own)
+		/* When the OWN bit has to be set for the first descriptor of
+		 * a frame, all other descriptors of that frame have to be
+		 * written first, to avoid a race condition.
+		 */
+		wmb();
+
+	p->des3 = tdes3;
+}
+
+static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
+{
+	p->des2 = 0;
+	p->des3 = 0;
+}
+
+static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
+{
+	p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
+}
+
+static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
+{
+	struct dma_desc *p = (struct dma_desc *)head;
+	int i;
+
+	pr_info("%s descriptor ring:\n", rx ?
"RX" : "TX"); + + for (i = 0; i < size; i++) { + if (p->des0) + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(p), + p->des0, p->des1, p->des2, p->des3); + p++; + } +} + +static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss) +{ + p->des0 = 0; + p->des1 = 0; + p->des2 = mss; + p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV; +} + +const struct stmmac_desc_ops dwmac4_desc_ops = { + .tx_status = dwmac4_wrback_get_tx_status, + .rx_status = dwmac4_wrback_get_rx_status, + .get_tx_len = dwmac4_rd_get_tx_len, + .get_tx_owner = dwmac4_get_tx_owner, + .set_tx_owner = dwmac4_set_tx_owner, + .set_rx_owner = dwmac4_set_rx_owner, + .get_tx_ls = dwmac4_get_tx_ls, + .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, + .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, + .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, + .get_timestamp = dwmac4_wrback_get_timestamp, + .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status, + .set_tx_ic = dwmac4_rd_set_tx_ic, + .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, + .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc, + .release_tx_desc = dwmac4_release_tx_desc, + .init_rx_desc = dwmac4_rd_init_rx_desc, + .init_tx_desc = dwmac4_rd_init_tx_desc, + .display_ring = dwmac4_display_ring, + .set_mss = dwmac4_set_mss_ctxt, +}; + +const struct stmmac_mode_ops dwmac4_ring_mode_ops = { }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h new file mode 100644 index 000000000000..0902a2edeaa9 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h @@ -0,0 +1,129 @@ +/* + * Header File to describe the DMA descriptors and related definitions specific + * for DesignWare databook 4.xx. + * + * Copyright (C) 2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * Author: Alexandre Torgue <alexandre.torgue@st.com> + */ + +#ifndef __DWMAC4_DESCS_H__ +#define __DWMAC4_DESCS_H__ + +#include <linux/bitops.h> + +/* Normal transmit descriptor defines (without split feature) */ + +/* TDES2 (read format) */ +#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0) +#define TDES2_VLAN_TAG_MASK GENMASK(15, 14) +#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16) +#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16 +#define TDES2_TIMESTAMP_ENABLE BIT(30) +#define TDES2_INTERRUPT_ON_COMPLETION BIT(31) + +/* TDES3 (read format) */ +#define TDES3_PACKET_SIZE_MASK GENMASK(14, 0) +#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16) +#define TDES3_CHECKSUM_INSERTION_SHIFT 16 +#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0) +#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18) +#define TDES3_HDR_LEN_SHIFT 19 +#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19) +#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23) +#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26) + +/* TDES3 (write back format) */ +#define TDES3_IP_HDR_ERROR BIT(0) +#define TDES3_DEFERRED BIT(1) +#define TDES3_UNDERFLOW_ERROR BIT(2) +#define TDES3_EXCESSIVE_DEFERRAL BIT(3) +#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4) +#define TDES3_COLLISION_COUNT_SHIFT 4 +#define TDES3_EXCESSIVE_COLLISION BIT(8) +#define TDES3_LATE_COLLISION BIT(9) +#define TDES3_NO_CARRIER BIT(10) +#define TDES3_LOSS_CARRIER BIT(11) +#define TDES3_PAYLOAD_ERROR BIT(12) +#define TDES3_PACKET_FLUSHED BIT(13) +#define TDES3_JABBER_TIMEOUT BIT(14) +#define TDES3_ERROR_SUMMARY BIT(15) +#define TDES3_TIMESTAMP_STATUS BIT(17) +#define TDES3_TIMESTAMP_STATUS_SHIFT 17 + +/* TDES3 context */ +#define TDES3_CTXT_TCMSSV BIT(26) + +/* TDES3 Common */ +#define TDES3_LAST_DESCRIPTOR BIT(28) +#define TDES3_LAST_DESCRIPTOR_SHIFT 28 +#define TDES3_FIRST_DESCRIPTOR BIT(29) +#define TDES3_CONTEXT_TYPE BIT(30) + +/* TDS3 use for both format (read and write back) */ +#define TDES3_OWN BIT(31) +#define TDES3_OWN_SHIFT 31 + +/* Normal receive descriptor defines (without split feature) */ + +/* RDES0 (write back format) */ +#define RDES0_VLAN_TAG_MASK GENMASK(15, 0) + +/* RDES1 (write back format) */ +#define RDES1_IP_PAYLOAD_TYPE_MASK GENMASK(2, 0) +#define RDES1_IP_HDR_ERROR BIT(3) +#define RDES1_IPV4_HEADER BIT(4) +#define RDES1_IPV6_HEADER BIT(5) +#define RDES1_IP_CSUM_BYPASSED BIT(6) +#define RDES1_IP_CSUM_ERROR BIT(7) +#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8) +#define RDES1_PTP_PACKET_TYPE BIT(12) +#define RDES1_PTP_VER BIT(13) +#define RDES1_TIMESTAMP_AVAILABLE BIT(14) +#define RDES1_TIMESTAMP_AVAILABLE_SHIFT 14 +#define RDES1_TIMESTAMP_DROPPED BIT(15) +#define RDES1_IP_TYPE1_CSUM_MASK GENMASK(31, 16) + +/* RDES2 (write back format) */ +#define RDES2_L3_L4_HEADER_SIZE_MASK GENMASK(9, 0) +#define RDES2_VLAN_FILTER_STATUS BIT(15) +#define RDES2_SA_FILTER_FAIL BIT(16) +#define RDES2_DA_FILTER_FAIL BIT(17) +#define RDES2_HASH_FILTER_STATUS BIT(18) +#define RDES2_MAC_ADDR_MATCH_MASK GENMASK(26, 19) +#define RDES2_HASH_VALUE_MATCH_MASK GENMASK(26, 19) +#define RDES2_L3_FILTER_MATCH BIT(27) +#define RDES2_L4_FILTER_MATCH BIT(28) +#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26) +#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26 + +/* RDES3 (write back format) */ +#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0) +#define RDES3_ERROR_SUMMARY BIT(15) +#define RDES3_PACKET_LEN_TYPE_MASK GENMASK(18, 16) +#define RDES3_DRIBBLE_ERROR BIT(19) +#define RDES3_RECEIVE_ERROR BIT(20) +#define RDES3_OVERFLOW_ERROR BIT(21) +#define RDES3_RECEIVE_WATCHDOG BIT(22) +#define RDES3_GIANT_PACKET 
BIT(23)
+#define RDES3_CRC_ERROR			BIT(24)
+#define RDES3_RDES0_VALID		BIT(25)
+#define RDES3_RDES1_VALID		BIT(26)
+#define RDES3_RDES2_VALID		BIT(27)
+#define RDES3_LAST_DESCRIPTOR		BIT(28)
+#define RDES3_FIRST_DESCRIPTOR		BIT(29)
+#define RDES3_CONTEXT_DESCRIPTOR	BIT(30)
+
+/* RDES3 (read format) */
+#define RDES3_BUFFER1_VALID_ADDR	BIT(24)
+#define RDES3_BUFFER2_VALID_ADDR	BIT(25)
+#define RDES3_INT_ON_COMPLETION_EN	BIT(30)
+
+/* RDES3 is used for both formats (read and write back) */
+#define RDES3_OWN			BIT(31)
+
+#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
new file mode 100644
index 000000000000..116151cd6a95
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -0,0 +1,354 @@
+/*
+ * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ * DWC Ether MAC version 4.xx has been used for developing this code.
+ *
+ * This contains the functions to handle the dma.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/io.h>
+#include "dwmac4.h"
+#include "dwmac4_dma.h"
+
+static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
+{
+	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+	int i;
+
+	pr_info("dwmac4: Master AXI performs %s burst length\n",
+		(value & DMA_SYS_BUS_FB) ? "fixed" : "any");
+
+	if (axi->axi_lpi_en)
+		value |= DMA_AXI_EN_LPI;
+	if (axi->axi_xit_frm)
+		value |= DMA_AXI_LPI_XIT_FRM;
+
+	value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
+		 DMA_AXI_WR_OSR_LMT_SHIFT;
+
+	value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
+		 DMA_AXI_RD_OSR_LMT_SHIFT;
+
+	/* Depending on the UNDEF bit the Master AXI will perform any burst
+	 * length according to the BLEN programmed (by default all BLEN are
+	 * set).
+	 */
+	for (i = 0; i < AXI_BLEN; i++) {
+		switch (axi->axi_blen[i]) {
+		case 256:
+			value |= DMA_AXI_BLEN256;
+			break;
+		case 128:
+			value |= DMA_AXI_BLEN128;
+			break;
+		case 64:
+			value |= DMA_AXI_BLEN64;
+			break;
+		case 32:
+			value |= DMA_AXI_BLEN32;
+			break;
+		case 16:
+			value |= DMA_AXI_BLEN16;
+			break;
+		case 8:
+			value |= DMA_AXI_BLEN8;
+			break;
+		case 4:
+			value |= DMA_AXI_BLEN4;
+			break;
+		}
+	}
+
+	writel(value, ioaddr + DMA_SYS_BUS_MODE);
+}
+
+static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
+				    u32 dma_tx_phy, u32 dma_rx_phy,
+				    u32 channel)
+{
+	u32 value;
+
+	/* set PBL for each channel.
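+	 * (Illustrative: pbl = 8 programs an 8-beat maximum burst in both
+	 * directions, since DMA_BUS_MODE_PBL_SHIFT and
+	 * DMA_BUS_MODE_RPBL_SHIFT both place the value at bit 16 of the
+	 * TX/RX channel control registers.)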
	 * Currently the same configuration is applied to each channel.
+	 */
+	value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
+	value = value | DMA_BUS_MODE_PBL;
+	writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
+
+	value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
+	value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
+	writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
+
+	value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
+	value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
+	writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
+
+	/* Program the default interrupt mask for the channel */
+	writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
+
+	writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
+	writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+}
+
+static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+			    int aal, u32 dma_tx, u32 dma_rx, int atds)
+{
+	u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+	int i;
+
+	/* Set the Fixed burst mode */
+	if (fb)
+		value |= DMA_SYS_BUS_FB;
+
+	/* Mixed Burst has no effect when fb is set */
+	if (mb)
+		value |= DMA_SYS_BUS_MB;
+
+	if (aal)
+		value |= DMA_SYS_BUS_AAL;
+
+	writel(value, ioaddr + DMA_SYS_BUS_MODE);
+
+	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+		dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
+}
+
+static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
+{
+	pr_debug(" Channel %d\n", channel);
+	pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0,
+		 readl(ioaddr + DMA_CHAN_CONTROL(channel)));
+	pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4,
+		 readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)));
+	pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8,
+		 readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)));
+	pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14,
+		 readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c,
+		 readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20,
+		 readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28,
+		 readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c,
+		 readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)));
+	pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30,
+		 readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)));
+	pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34,
+		 readl(ioaddr + DMA_CHAN_INTR_ENA(channel)));
+	pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38,
+		 readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)));
+	pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c,
+		 readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)));
+	pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44,
+		 readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)));
+	pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c,
+		 readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)));
+	pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54,
+		 readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c,
+		 readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)));
+	pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60,
+		 readl(ioaddr + DMA_CHAN_STATUS(channel)));
+}
+
+static void dwmac4_dump_dma_regs(void __iomem *ioaddr)
+{
+	int i;
+
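+	/* Illustrative address arithmetic: channel x's register block starts
+	 * at DMA_CHANX_BASE_ADDR(x) = 0x1100 + x * 0x80, so channel 1 would
+	 * sit at 0x1180; only channel 0 (DMA_CHANNEL_NB_MAX = 1) is walked
+	 * today.
+	 */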
	pr_debug(" GMAC4 DMA registers\n");
+
+	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+		_dwmac4_dump_dma_regs(ioaddr, i);
+}
+
+static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+{
+	int i;
+
+	for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+		writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
+}
+
+static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
+				    int rxmode, u32 channel)
+{
+	u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
+
+	/* The following is only done for channel 0; other channels are not
+	 * yet supported.
+	 */
+	mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+	if (txmode == SF_DMA_MODE) {
+		pr_debug("GMAC: enable TX store and forward mode\n");
+		/* Transmit COE type 2 cannot be done in cut-through mode. */
+		mtl_tx_op |= MTL_OP_MODE_TSF;
+	} else {
+		pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+		mtl_tx_op &= ~MTL_OP_MODE_TSF;
+		mtl_tx_op &= ~MTL_OP_MODE_TTC_MASK;
+		/* Set the transmit threshold */
+		if (txmode <= 32)
+			mtl_tx_op |= MTL_OP_MODE_TTC_32;
+		else if (txmode <= 64)
+			mtl_tx_op |= MTL_OP_MODE_TTC_64;
+		else if (txmode <= 96)
+			mtl_tx_op |= MTL_OP_MODE_TTC_96;
+		else if (txmode <= 128)
+			mtl_tx_op |= MTL_OP_MODE_TTC_128;
+		else if (txmode <= 192)
+			mtl_tx_op |= MTL_OP_MODE_TTC_192;
+		else if (txmode <= 256)
+			mtl_tx_op |= MTL_OP_MODE_TTC_256;
+		else if (txmode <= 384)
+			mtl_tx_op |= MTL_OP_MODE_TTC_384;
+		else
+			mtl_tx_op |= MTL_OP_MODE_TTC_512;
+	}
+
+	writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+	mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+	if (rxmode == SF_DMA_MODE) {
+		pr_debug("GMAC: enable RX store and forward mode\n");
+		mtl_rx_op |= MTL_OP_MODE_RSF;
+	} else {
+		pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
+		mtl_rx_op &= ~MTL_OP_MODE_RSF;
+		mtl_rx_op &= ~MTL_OP_MODE_RTC_MASK;
+		if (rxmode <= 32)
+			mtl_rx_op |= MTL_OP_MODE_RTC_32;
+		else if (rxmode <= 64)
+			mtl_rx_op |= MTL_OP_MODE_RTC_64;
+		else if (rxmode <= 96)
+			mtl_rx_op |= MTL_OP_MODE_RTC_96;
+		else
+			mtl_rx_op |= MTL_OP_MODE_RTC_128;
+	}
+
+	writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+	/* Enable the MTL RX overflow interrupt */
+	mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
+	writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
+	       ioaddr + MTL_CHAN_INT_CTRL(channel));
+}
+
+static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
+				      int rxmode, int rxfifosz)
+{
+	/* Only Channel 0 is actually configured and used */
+	dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
+}
+
+static void dwmac4_get_hw_feature(void __iomem *ioaddr,
+				  struct dma_features *dma_cap)
+{
+	u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
+
+	/* MAC HW feature0 */
+	dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
+	dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
+	dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
+	dma_cap->hash_filter = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
+	dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
+	dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
+	dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
+	dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
+	dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
+	/* MMC */
+	dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
+	/* IEEE 1588-2008 */
+	dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
+	/* 802.3az - Energy-Efficient Ethernet (EEE) */
+	dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
+	/* TX and RX csum */
+	dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
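+	/* Illustrative decode: each capability above is a single register
+	 * bit shifted down to 0 or 1; e.g. a FEATURE0 value of 0x10001
+	 * would set both mbps_10_100 (bit 0) and tx_coe (bit 16).
+	 */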
+	dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
+
+	/* MAC HW feature1 */
+	hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
+	dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
+	dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+	/* MAC HW feature2 */
+	hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
+	/* TX and RX number of channels */
+	dma_cap->number_rx_channel =
+		((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
+	dma_cap->number_tx_channel =
+		((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
+
+	/* IEEE 1588-2002 */
+	dma_cap->time_stamp = 0;
+}
+
+/* Enable/disable TSO feature for a DMA channel */
+static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+{
+	u32 value;
+
+	if (en) {
+		/* enable TSO */
+		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+		writel(value | DMA_CONTROL_TSE,
+		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
+	} else {
+		/* disable TSO */
+		value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+		writel(value & ~DMA_CONTROL_TSE,
+		       ioaddr + DMA_CHAN_TX_CONTROL(chan));
+	}
+}
+
+const struct stmmac_dma_ops dwmac4_dma_ops = {
+	.reset = dwmac4_dma_reset,
+	.init = dwmac4_dma_init,
+	.axi = dwmac4_dma_axi,
+	.dump_regs = dwmac4_dump_dma_regs,
+	.dma_mode = dwmac4_dma_operation_mode,
+	.enable_dma_irq = dwmac4_enable_dma_irq,
+	.disable_dma_irq = dwmac4_disable_dma_irq,
+	.start_tx = dwmac4_dma_start_tx,
+	.stop_tx = dwmac4_dma_stop_tx,
+	.start_rx = dwmac4_dma_start_rx,
+	.stop_rx = dwmac4_dma_stop_rx,
+	.dma_interrupt = dwmac4_dma_interrupt,
+	.get_hw_feature = dwmac4_get_hw_feature,
+	.rx_watchdog = dwmac4_rx_watchdog,
+	.set_rx_ring_len = dwmac4_set_rx_ring_len,
+	.set_tx_ring_len = dwmac4_set_tx_ring_len,
+	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+	.enable_tso = dwmac4_enable_tso,
+};
+
+const struct stmmac_dma_ops dwmac410_dma_ops = {
+	.reset = dwmac4_dma_reset,
+	.init = dwmac4_dma_init,
+	.axi = dwmac4_dma_axi,
+	.dump_regs = dwmac4_dump_dma_regs,
+	.dma_mode = dwmac4_dma_operation_mode,
+	.enable_dma_irq = dwmac410_enable_dma_irq,
+	.disable_dma_irq = dwmac4_disable_dma_irq,
+	.start_tx = dwmac4_dma_start_tx,
+	.stop_tx = dwmac4_dma_stop_tx,
+	.start_rx = dwmac4_dma_start_rx,
+	.stop_rx = dwmac4_dma_stop_rx,
+	.dma_interrupt = dwmac4_dma_interrupt,
+	.get_hw_feature = dwmac4_get_hw_feature,
+	.rx_watchdog = dwmac4_rx_watchdog,
+	.set_rx_ring_len = dwmac4_set_rx_ring_len,
+	.set_tx_ring_len = dwmac4_set_tx_ring_len,
+	.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+	.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+	.enable_tso = dwmac4_enable_tso,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
new file mode 100644
index 000000000000..1b06df749e2b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -0,0 +1,202 @@
+/*
+ * DWMAC4 DMA Header file.
+ *
+ *
+ * Copyright (C) 2007-2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_DMA_H__
+#define __DWMAC4_DMA_H__
+
+/* Define the max channel number used for tx (also rx).
+ * dwmac4 accepts up to 8 channels for TX (and 8 channels for RX).
+ */
+#define DMA_CHANNEL_NB_MAX		1
+
+#define DMA_BUS_MODE			0x00001000
+#define DMA_SYS_BUS_MODE		0x00001004
+#define DMA_STATUS			0x00001008
+#define DMA_DEBUG_STATUS_0		0x0000100c
+#define DMA_DEBUG_STATUS_1		0x00001010
+#define DMA_DEBUG_STATUS_2		0x00001014
+#define DMA_AXI_BUS_MODE		0x00001028
+
+/* DMA Bus Mode bitmap */
+#define DMA_BUS_MODE_SFT_RESET		BIT(0)
+
+/* DMA SYS Bus Mode bitmap */
+#define DMA_BUS_MODE_SPH		BIT(24)
+#define DMA_BUS_MODE_PBL		BIT(16)
+#define DMA_BUS_MODE_PBL_SHIFT		16
+#define DMA_BUS_MODE_RPBL_SHIFT		16
+#define DMA_BUS_MODE_MB			BIT(14)
+#define DMA_BUS_MODE_FB			BIT(0)
+
+/* DMA Interrupt top status */
+#define DMA_STATUS_MAC			BIT(17)
+#define DMA_STATUS_MTL			BIT(16)
+#define DMA_STATUS_CHAN7		BIT(7)
+#define DMA_STATUS_CHAN6		BIT(6)
+#define DMA_STATUS_CHAN5		BIT(5)
+#define DMA_STATUS_CHAN4		BIT(4)
+#define DMA_STATUS_CHAN3		BIT(3)
+#define DMA_STATUS_CHAN2		BIT(2)
+#define DMA_STATUS_CHAN1		BIT(1)
+#define DMA_STATUS_CHAN0		BIT(0)
+
+/* DMA debug status bitmap */
+#define DMA_DEBUG_STATUS_TS_MASK	0xf
+#define DMA_DEBUG_STATUS_RS_MASK	0xf
+
+/* DMA AXI bitmap */
+#define DMA_AXI_EN_LPI			BIT(31)
+#define DMA_AXI_LPI_XIT_FRM		BIT(30)
+#define DMA_AXI_WR_OSR_LMT		GENMASK(27, 24)
+#define DMA_AXI_WR_OSR_LMT_SHIFT	24
+#define DMA_AXI_RD_OSR_LMT		GENMASK(19, 16)
+#define DMA_AXI_RD_OSR_LMT_SHIFT	16
+
+#define DMA_AXI_OSR_MAX			0xf
+#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
+			       (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
+
+#define DMA_SYS_BUS_MB			BIT(14)
+#define DMA_AXI_1KBBE			BIT(13)
+#define DMA_SYS_BUS_AAL			BIT(12)
+#define DMA_AXI_BLEN256			BIT(7)
+#define DMA_AXI_BLEN128			BIT(6)
+#define DMA_AXI_BLEN64			BIT(5)
+#define DMA_AXI_BLEN32			BIT(4)
+#define DMA_AXI_BLEN16			BIT(3)
+#define DMA_AXI_BLEN8			BIT(2)
+#define DMA_AXI_BLEN4			BIT(1)
+#define DMA_SYS_BUS_FB			BIT(0)
+
+#define DMA_BURST_LEN_DEFAULT		(DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
+					 DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
+					 DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
+					 DMA_AXI_BLEN4)
+
+#define DMA_AXI_BURST_LEN_MASK		0x000000FE
+
+/* The following DMA defines are channel oriented */
+#define DMA_CHAN_BASE_ADDR		0x00001100
+#define DMA_CHAN_BASE_OFFSET		0x80
+#define DMA_CHANX_BASE_ADDR(x)		(DMA_CHAN_BASE_ADDR + \
+					 (x * DMA_CHAN_BASE_OFFSET))
+#define DMA_CHAN_REG_NUMBER		17
+
+#define DMA_CHAN_CONTROL(x)		DMA_CHANX_BASE_ADDR(x)
+#define DMA_CHAN_TX_CONTROL(x)		(DMA_CHANX_BASE_ADDR(x) + 0x4)
+#define DMA_CHAN_RX_CONTROL(x)		(DMA_CHANX_BASE_ADDR(x) + 0x8)
+#define DMA_CHAN_TX_BASE_ADDR(x)	(DMA_CHANX_BASE_ADDR(x) + 0x14)
+#define DMA_CHAN_RX_BASE_ADDR(x)	(DMA_CHANX_BASE_ADDR(x) + 0x1c)
+#define DMA_CHAN_TX_END_ADDR(x)		(DMA_CHANX_BASE_ADDR(x) + 0x20)
+#define DMA_CHAN_RX_END_ADDR(x)		(DMA_CHANX_BASE_ADDR(x) + 0x28)
+#define DMA_CHAN_TX_RING_LEN(x)		(DMA_CHANX_BASE_ADDR(x) + 0x2c)
+#define DMA_CHAN_RX_RING_LEN(x)		(DMA_CHANX_BASE_ADDR(x) + 0x30)
+#define DMA_CHAN_INTR_ENA(x)		(DMA_CHANX_BASE_ADDR(x) + 0x34)
+#define DMA_CHAN_RX_WATCHDOG(x)		(DMA_CHANX_BASE_ADDR(x) + 0x38)
+#define DMA_CHAN_SLOT_CTRL_STATUS(x)	(DMA_CHANX_BASE_ADDR(x) + 0x3c)
+#define DMA_CHAN_CUR_TX_DESC(x)		(DMA_CHANX_BASE_ADDR(x) + 0x44)
+#define DMA_CHAN_CUR_RX_DESC(x)		(DMA_CHANX_BASE_ADDR(x) + 0x4c)
+#define DMA_CHAN_CUR_TX_BUF_ADDR(x)	(DMA_CHANX_BASE_ADDR(x) + 0x54)
+#define DMA_CHAN_CUR_RX_BUF_ADDR(x)	(DMA_CHANX_BASE_ADDR(x) + 0x5c)
+#define DMA_CHAN_STATUS(x)		(DMA_CHANX_BASE_ADDR(x) + 0x60)
+
+/* DMA Control X */
+#define DMA_CONTROL_MSS_MASK		GENMASK(13, 0)
+
+/* DMA Tx Channel X Control
register defines */ +#define DMA_CONTROL_TSE BIT(12) +#define DMA_CONTROL_OSP BIT(4) +#define DMA_CONTROL_ST BIT(0) + +/* DMA Rx Channel X Control register defines */ +#define DMA_CONTROL_SR BIT(0) + +/* Interrupt status per channel */ +#define DMA_CHAN_STATUS_REB GENMASK(21, 19) +#define DMA_CHAN_STATUS_REB_SHIFT 19 +#define DMA_CHAN_STATUS_TEB GENMASK(18, 16) +#define DMA_CHAN_STATUS_TEB_SHIFT 16 +#define DMA_CHAN_STATUS_NIS BIT(15) +#define DMA_CHAN_STATUS_AIS BIT(14) +#define DMA_CHAN_STATUS_CDE BIT(13) +#define DMA_CHAN_STATUS_FBE BIT(12) +#define DMA_CHAN_STATUS_ERI BIT(11) +#define DMA_CHAN_STATUS_ETI BIT(10) +#define DMA_CHAN_STATUS_RWT BIT(9) +#define DMA_CHAN_STATUS_RPS BIT(8) +#define DMA_CHAN_STATUS_RBU BIT(7) +#define DMA_CHAN_STATUS_RI BIT(6) +#define DMA_CHAN_STATUS_TBU BIT(2) +#define DMA_CHAN_STATUS_TPS BIT(1) +#define DMA_CHAN_STATUS_TI BIT(0) + +/* Interrupt enable bits per channel */ +#define DMA_CHAN_INTR_ENA_NIE BIT(16) +#define DMA_CHAN_INTR_ENA_AIE BIT(15) +#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15) +#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14) +#define DMA_CHAN_INTR_ENA_CDE BIT(13) +#define DMA_CHAN_INTR_ENA_FBE BIT(12) +#define DMA_CHAN_INTR_ENA_ERE BIT(11) +#define DMA_CHAN_INTR_ENA_ETE BIT(10) +#define DMA_CHAN_INTR_ENA_RWE BIT(9) +#define DMA_CHAN_INTR_ENA_RSE BIT(8) +#define DMA_CHAN_INTR_ENA_RBUE BIT(7) +#define DMA_CHAN_INTR_ENA_RIE BIT(6) +#define DMA_CHAN_INTR_ENA_TBUE BIT(2) +#define DMA_CHAN_INTR_ENA_TSE BIT(1) +#define DMA_CHAN_INTR_ENA_TIE BIT(0) + +#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \ + DMA_CHAN_INTR_ENA_RIE | \ + DMA_CHAN_INTR_ENA_TIE) + +#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \ + DMA_CHAN_INTR_ENA_FBE) +/* DMA default interrupt mask for 4.00 */ +#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \ + DMA_CHAN_INTR_ABNORMAL) + +#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \ + DMA_CHAN_INTR_ENA_RIE | \ + DMA_CHAN_INTR_ENA_TIE) + +#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \ + DMA_CHAN_INTR_ENA_FBE) +/* DMA default interrupt mask for 4.10a */ +#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \ + DMA_CHAN_INTR_ABNORMAL_4_10) + +/* channel 0 specific fields */ +#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12) +#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12 +#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8) +#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8 + +int dwmac4_dma_reset(void __iomem *ioaddr); +void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr); +void dwmac4_enable_dma_irq(void __iomem *ioaddr); +void dwmac410_enable_dma_irq(void __iomem *ioaddr); +void dwmac4_disable_dma_irq(void __iomem *ioaddr); +void dwmac4_dma_start_tx(void __iomem *ioaddr); +void dwmac4_dma_stop_tx(void __iomem *ioaddr); +void dwmac4_dma_start_rx(void __iomem *ioaddr); +void dwmac4_dma_stop_rx(void __iomem *ioaddr); +int dwmac4_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x); +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len); +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len); +void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); +void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan); + +#endif /* __DWMAC4_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c new file mode 100644 index 000000000000..c7326d5b2f43 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -0,0 +1,225 @@ +/* + * Copyright (C) 
2007-2015 STMicroelectronics Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * Author: Alexandre Torgue <alexandre.torgue@st.com> + */ + +#include <linux/io.h> +#include <linux/delay.h> +#include "common.h" +#include "dwmac4_dma.h" +#include "dwmac4.h" + +int dwmac4_dma_reset(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + int limit; + + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + limit = 10; + while (limit--) { + if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) + break; + mdelay(10); + } + + if (limit < 0) + return -EBUSY; + + return 0; +} + +void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) +{ + writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0)); +} + +void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan) +{ + writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0)); +} + +void dwmac4_dma_start_tx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + + value |= DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + + value = readl(ioaddr + GMAC_CONFIG); + value |= GMAC_CONFIG_TE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_dma_stop_tx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + + value &= ~DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0)); + + value = readl(ioaddr + GMAC_CONFIG); + value &= ~GMAC_CONFIG_TE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_dma_start_rx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + + value |= DMA_CONTROL_SR; + + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + + value = readl(ioaddr + GMAC_CONFIG); + value |= GMAC_CONFIG_RE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_dma_stop_rx(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + + value &= ~DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0)); + + value = readl(ioaddr + GMAC_CONFIG); + value &= ~GMAC_CONFIG_RE; + writel(value, ioaddr + GMAC_CONFIG); +} + +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len) +{ + writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0)); +} + +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len) +{ + writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0)); +} + +void dwmac4_enable_dma_irq(void __iomem *ioaddr) +{ + writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); +} + +void dwmac410_enable_dma_irq(void __iomem *ioaddr) +{ + writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10, + ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); +} + +void dwmac4_disable_dma_irq(void __iomem *ioaddr) +{ + writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0)); +} + +int dwmac4_dma_interrupt(void __iomem *ioaddr, + struct stmmac_extra_stats *x) +{ + int ret = 0; + + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0)); + + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) { + if (unlikely(intr_status & DMA_CHAN_STATUS_RBU)) + x->rx_buf_unav_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_RPS)) + x->rx_process_stopped_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_RWT)) + x->rx_watchdog_irq++; + if (unlikely(intr_status & DMA_CHAN_STATUS_ETI)) + 
x->tx_early_irq++;
+		if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) {
+			x->tx_process_stopped_irq++;
+			ret = tx_hard_error;
+		}
+		if (unlikely(intr_status & DMA_CHAN_STATUS_FBE)) {
+			x->fatal_bus_error_irq++;
+			ret = tx_hard_error;
+		}
+	}
+	/* TX/RX NORMAL interrupts */
+	if (likely(intr_status & DMA_CHAN_STATUS_NIS)) {
+		x->normal_irq_n++;
+		if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
+			u32 value;
+
+			value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+			/* to schedule NAPI on real RIE event. */
+			if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
+				x->rx_normal_irq_n++;
+				ret |= handle_rx;
+			}
+		}
+		if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
+			x->tx_normal_irq_n++;
+			ret |= handle_tx;
+		}
+		if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
+			x->rx_early_irq++;
+	}
+
+	/* Clear the interrupt by writing a logic 1 to the chanX interrupt
+	 * status [21-0] except reserved bits [5-3]
+	 */
+	writel((intr_status & 0x3fffc7),
+	       ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
+
+	return ret;
+}
+
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+				unsigned int high, unsigned int low)
+{
+	unsigned long data;
+
+	data = (addr[5] << 8) | addr[4];
+	/* For MAC Addr registers we have to set the Address Enable (AE)
+	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+	 * is RO.
+	 */
+	data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT);
+	writel(data | GMAC_HI_REG_AE, ioaddr + high);
+	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+	writel(data, ioaddr + low);
+}
+
+/* Enable/disable MAC RX/TX */
+void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable)
+{
+	u32 value = readl(ioaddr + GMAC_CONFIG);
+
+	if (enable)
+		value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE;
+	else
+		value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE);
+
+	writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+				unsigned int high, unsigned int low)
+{
+	unsigned int hi_addr, lo_addr;
+
+	/* Read the MAC address from the hardware */
+	hi_addr = readl(ioaddr + high);
+	lo_addr = readl(ioaddr + low);
+
+	/* Extract the MAC address from the high and low words */
+	addr[0] = lo_addr & 0xff;
+	addr[1] = (lo_addr >> 8) & 0xff;
+	addr[2] = (lo_addr >> 16) & 0xff;
+	addr[3] = (lo_addr >> 24) & 0xff;
+	addr[4] = hi_addr & 0xff;
+	addr[5] = (hi_addr >> 8) & 0xff;
+}
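[Editor's note: the set/get MAC address helpers above pack a 6-byte MAC into a high word (bytes 5..4) and a low word (bytes 3..0). A self-contained user-space sketch of that packing and its inverse; the AE/DCS control bits written by the driver are deliberately omitted here:]

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 6-byte MAC into the two register words, as the helpers above do. */
    static void mac_to_regs(const uint8_t addr[6], uint32_t *hi, uint32_t *lo)
    {
        *hi = (addr[5] << 8) | addr[4];
        *lo = ((uint32_t)addr[3] << 24) | (addr[2] << 16) |
              (addr[1] << 8) | addr[0];
    }

    static void regs_to_mac(uint32_t hi, uint32_t lo, uint8_t addr[6])
    {
        int i;

        for (i = 0; i < 4; i++)
            addr[i] = (lo >> (8 * i)) & 0xff;
        addr[4] = hi & 0xff;
        addr[5] = (hi >> 8) & 0xff;
    }

    int main(void)
    {
        uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, out[6];
        uint32_t hi, lo;

        mac_to_regs(mac, &hi, &lo);
        regs_to_mac(hi, lo, out);
        printf("hi=0x%04x lo=0x%08x mac=%02x:%02x:%02x:%02x:%02x:%02x\n",
               (unsigned)hi, (unsigned)lo,
               out[0], out[1], out[2], out[3], out[4], out[5]);
        return 0;
    }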
"RX" : "TX"); + + for (i = 0; i < size; i++) { + u64 x; + + x = *(u64 *)ep; + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", + i, (unsigned int)virt_to_phys(ep), + (unsigned int)x, (unsigned int)(x >> 32), + ep->basic.des2, ep->basic.des3); + ep++; + } + pr_info("\n"); +} + const struct stmmac_desc_ops enh_desc_ops = { .tx_status = enh_desc_get_tx_status, .rx_status = enh_desc_get_rx_status, @@ -430,4 +450,5 @@ const struct stmmac_desc_ops enh_desc_ops = { .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status, .get_timestamp = enh_desc_get_timestamp, .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, + .display_ring = enh_desc_display_ring, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 192c2491330b..38a1a5603293 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -35,6 +35,10 @@ * current value.*/ #define MMC_CNTRL_PRESET 0x10 #define MMC_CNTRL_FULL_HALF_PRESET 0x20 + +#define MMC_GMAC4_OFFSET 0x700 +#define MMC_GMAC3_X_OFFSET 0x100 + struct stmmac_counters { unsigned int mmc_tx_octetcount_gb; unsigned int mmc_tx_framecount_gb; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 3f20bb1fe570..ce9aa792857b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -28,12 +28,12 @@ /* MAC Management Counters register offset */ -#define MMC_CNTRL 0x00000100 /* MMC Control */ -#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */ -#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ -#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ -#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ -#define MMC_DEFAULT_MASK 0xffffffff +#define MMC_CNTRL 0x00 /* MMC Control */ +#define MMC_RX_INTR 0x04 /* MMC RX Interrupt */ +#define MMC_TX_INTR 0x08 /* MMC TX Interrupt */ +#define MMC_RX_INTR_MASK 0x0c /* MMC Interrupt Mask */ +#define MMC_TX_INTR_MASK 0x10 /* MMC Interrupt Mask */ +#define MMC_DEFAULT_MASK 0xffffffff /* MMC TX counter registers */ @@ -41,115 +41,115 @@ * _GB register stands for good and bad frames * _G is for good only. 
*/ -#define MMC_TX_OCTETCOUNT_GB 0x00000114 -#define MMC_TX_FRAMECOUNT_GB 0x00000118 -#define MMC_TX_BROADCASTFRAME_G 0x0000011c -#define MMC_TX_MULTICASTFRAME_G 0x00000120 -#define MMC_TX_64_OCTETS_GB 0x00000124 -#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128 -#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c -#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130 -#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134 -#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138 -#define MMC_TX_UNICAST_GB 0x0000013c -#define MMC_TX_MULTICAST_GB 0x00000140 -#define MMC_TX_BROADCAST_GB 0x00000144 -#define MMC_TX_UNDERFLOW_ERROR 0x00000148 -#define MMC_TX_SINGLECOL_G 0x0000014c -#define MMC_TX_MULTICOL_G 0x00000150 -#define MMC_TX_DEFERRED 0x00000154 -#define MMC_TX_LATECOL 0x00000158 -#define MMC_TX_EXESSCOL 0x0000015c -#define MMC_TX_CARRIER_ERROR 0x00000160 -#define MMC_TX_OCTETCOUNT_G 0x00000164 -#define MMC_TX_FRAMECOUNT_G 0x00000168 -#define MMC_TX_EXCESSDEF 0x0000016c -#define MMC_TX_PAUSE_FRAME 0x00000170 -#define MMC_TX_VLAN_FRAME_G 0x00000174 +#define MMC_TX_OCTETCOUNT_GB 0x14 +#define MMC_TX_FRAMECOUNT_GB 0x18 +#define MMC_TX_BROADCASTFRAME_G 0x1c +#define MMC_TX_MULTICASTFRAME_G 0x20 +#define MMC_TX_64_OCTETS_GB 0x24 +#define MMC_TX_65_TO_127_OCTETS_GB 0x28 +#define MMC_TX_128_TO_255_OCTETS_GB 0x2c +#define MMC_TX_256_TO_511_OCTETS_GB 0x30 +#define MMC_TX_512_TO_1023_OCTETS_GB 0x34 +#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x38 +#define MMC_TX_UNICAST_GB 0x3c +#define MMC_TX_MULTICAST_GB 0x40 +#define MMC_TX_BROADCAST_GB 0x44 +#define MMC_TX_UNDERFLOW_ERROR 0x48 +#define MMC_TX_SINGLECOL_G 0x4c +#define MMC_TX_MULTICOL_G 0x50 +#define MMC_TX_DEFERRED 0x54 +#define MMC_TX_LATECOL 0x58 +#define MMC_TX_EXESSCOL 0x5c +#define MMC_TX_CARRIER_ERROR 0x60 +#define MMC_TX_OCTETCOUNT_G 0x64 +#define MMC_TX_FRAMECOUNT_G 0x68 +#define MMC_TX_EXCESSDEF 0x6c +#define MMC_TX_PAUSE_FRAME 0x70 +#define MMC_TX_VLAN_FRAME_G 0x74 /* MMC RX counter registers */ -#define MMC_RX_FRAMECOUNT_GB 0x00000180 -#define MMC_RX_OCTETCOUNT_GB 0x00000184 -#define MMC_RX_OCTETCOUNT_G 0x00000188 -#define MMC_RX_BROADCASTFRAME_G 0x0000018c -#define MMC_RX_MULTICASTFRAME_G 0x00000190 -#define MMC_RX_CRC_ERROR 0x00000194 -#define MMC_RX_ALIGN_ERROR 0x00000198 -#define MMC_RX_RUN_ERROR 0x0000019C -#define MMC_RX_JABBER_ERROR 0x000001A0 -#define MMC_RX_UNDERSIZE_G 0x000001A4 -#define MMC_RX_OVERSIZE_G 0x000001A8 -#define MMC_RX_64_OCTETS_GB 0x000001AC -#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0 -#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4 -#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8 -#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc -#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0 -#define MMC_RX_UNICAST_G 0x000001c4 -#define MMC_RX_LENGTH_ERROR 0x000001c8 -#define MMC_RX_AUTOFRANGETYPE 0x000001cc -#define MMC_RX_PAUSE_FRAMES 0x000001d0 -#define MMC_RX_FIFO_OVERFLOW 0x000001d4 -#define MMC_RX_VLAN_FRAMES_GB 0x000001d8 -#define MMC_RX_WATCHDOG_ERROR 0x000001dc +#define MMC_RX_FRAMECOUNT_GB 0x80 +#define MMC_RX_OCTETCOUNT_GB 0x84 +#define MMC_RX_OCTETCOUNT_G 0x88 +#define MMC_RX_BROADCASTFRAME_G 0x8c +#define MMC_RX_MULTICASTFRAME_G 0x90 +#define MMC_RX_CRC_ERROR 0x94 +#define MMC_RX_ALIGN_ERROR 0x98 +#define MMC_RX_RUN_ERROR 0x9C +#define MMC_RX_JABBER_ERROR 0xA0 +#define MMC_RX_UNDERSIZE_G 0xA4 +#define MMC_RX_OVERSIZE_G 0xA8 +#define MMC_RX_64_OCTETS_GB 0xAC +#define MMC_RX_65_TO_127_OCTETS_GB 0xb0 +#define MMC_RX_128_TO_255_OCTETS_GB 0xb4 +#define MMC_RX_256_TO_511_OCTETS_GB 0xb8 +#define MMC_RX_512_TO_1023_OCTETS_GB 0xbc +#define 
MMC_RX_1024_TO_MAX_OCTETS_GB 0xc0 +#define MMC_RX_UNICAST_G 0xc4 +#define MMC_RX_LENGTH_ERROR 0xc8 +#define MMC_RX_AUTOFRANGETYPE 0xcc +#define MMC_RX_PAUSE_FRAMES 0xd0 +#define MMC_RX_FIFO_OVERFLOW 0xd4 +#define MMC_RX_VLAN_FRAMES_GB 0xd8 +#define MMC_RX_WATCHDOG_ERROR 0xdc /* IPC*/ -#define MMC_RX_IPC_INTR_MASK 0x00000200 -#define MMC_RX_IPC_INTR 0x00000208 +#define MMC_RX_IPC_INTR_MASK 0x100 +#define MMC_RX_IPC_INTR 0x108 /* IPv4*/ -#define MMC_RX_IPV4_GD 0x00000210 -#define MMC_RX_IPV4_HDERR 0x00000214 -#define MMC_RX_IPV4_NOPAY 0x00000218 -#define MMC_RX_IPV4_FRAG 0x0000021C -#define MMC_RX_IPV4_UDSBL 0x00000220 +#define MMC_RX_IPV4_GD 0x110 +#define MMC_RX_IPV4_HDERR 0x114 +#define MMC_RX_IPV4_NOPAY 0x118 +#define MMC_RX_IPV4_FRAG 0x11C +#define MMC_RX_IPV4_UDSBL 0x120 -#define MMC_RX_IPV4_GD_OCTETS 0x00000250 -#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254 -#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258 -#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c -#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260 +#define MMC_RX_IPV4_GD_OCTETS 0x150 +#define MMC_RX_IPV4_HDERR_OCTETS 0x154 +#define MMC_RX_IPV4_NOPAY_OCTETS 0x158 +#define MMC_RX_IPV4_FRAG_OCTETS 0x15c +#define MMC_RX_IPV4_UDSBL_OCTETS 0x160 /* IPV6*/ -#define MMC_RX_IPV6_GD_OCTETS 0x00000264 -#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268 -#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c +#define MMC_RX_IPV6_GD_OCTETS 0x164 +#define MMC_RX_IPV6_HDERR_OCTETS 0x168 +#define MMC_RX_IPV6_NOPAY_OCTETS 0x16c -#define MMC_RX_IPV6_GD 0x00000224 -#define MMC_RX_IPV6_HDERR 0x00000228 -#define MMC_RX_IPV6_NOPAY 0x0000022c +#define MMC_RX_IPV6_GD 0x124 +#define MMC_RX_IPV6_HDERR 0x128 +#define MMC_RX_IPV6_NOPAY 0x12c /* Protocols*/ -#define MMC_RX_UDP_GD 0x00000230 -#define MMC_RX_UDP_ERR 0x00000234 -#define MMC_RX_TCP_GD 0x00000238 -#define MMC_RX_TCP_ERR 0x0000023c -#define MMC_RX_ICMP_GD 0x00000240 -#define MMC_RX_ICMP_ERR 0x00000244 +#define MMC_RX_UDP_GD 0x130 +#define MMC_RX_UDP_ERR 0x134 +#define MMC_RX_TCP_GD 0x138 +#define MMC_RX_TCP_ERR 0x13c +#define MMC_RX_ICMP_GD 0x140 +#define MMC_RX_ICMP_ERR 0x144 -#define MMC_RX_UDP_GD_OCTETS 0x00000270 -#define MMC_RX_UDP_ERR_OCTETS 0x00000274 -#define MMC_RX_TCP_GD_OCTETS 0x00000278 -#define MMC_RX_TCP_ERR_OCTETS 0x0000027c -#define MMC_RX_ICMP_GD_OCTETS 0x00000280 -#define MMC_RX_ICMP_ERR_OCTETS 0x00000284 +#define MMC_RX_UDP_GD_OCTETS 0x170 +#define MMC_RX_UDP_ERR_OCTETS 0x174 +#define MMC_RX_TCP_GD_OCTETS 0x178 +#define MMC_RX_TCP_ERR_OCTETS 0x17c +#define MMC_RX_ICMP_GD_OCTETS 0x180 +#define MMC_RX_ICMP_ERR_OCTETS 0x184 -void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) +void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) { - u32 value = readl(ioaddr + MMC_CNTRL); + u32 value = readl(mmcaddr + MMC_CNTRL); value |= (mode & 0x3F); - writel(value, ioaddr + MMC_CNTRL); + writel(value, mmcaddr + MMC_CNTRL); pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n", MMC_CNTRL, value); } /* To mask all all interrupts.*/ -void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) +void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr) { - writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); - writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); - writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK); } /* This reads the MAC core counters (if actaully supported). 
@@ -157,111 +157,116 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) * counter after a read. So all the field of the mmc struct * have to be incremented. */ -void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc) +void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc) { - mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB); - mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB); - mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G); - mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G); - mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB); + mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB); + mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB); + mmc->mmc_tx_broadcastframe_g += readl(mmcaddr + + MMC_TX_BROADCASTFRAME_G); + mmc->mmc_tx_multicastframe_g += readl(mmcaddr + + MMC_TX_MULTICASTFRAME_G); + mmc->mmc_tx_64_octets_gb += readl(mmcaddr + MMC_TX_64_OCTETS_GB); mmc->mmc_tx_65_to_127_octets_gb += - readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB); + readl(mmcaddr + MMC_TX_65_TO_127_OCTETS_GB); mmc->mmc_tx_128_to_255_octets_gb += - readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB); + readl(mmcaddr + MMC_TX_128_TO_255_OCTETS_GB); mmc->mmc_tx_256_to_511_octets_gb += - readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB); + readl(mmcaddr + MMC_TX_256_TO_511_OCTETS_GB); mmc->mmc_tx_512_to_1023_octets_gb += - readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB); + readl(mmcaddr + MMC_TX_512_TO_1023_OCTETS_GB); mmc->mmc_tx_1024_to_max_octets_gb += - readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB); - mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB); - mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB); - mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB); - mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR); - mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G); - mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G); - mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED); - mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL); - mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL); - mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR); - mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G); - mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G); - mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF); - mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME); - mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G); + readl(mmcaddr + MMC_TX_1024_TO_MAX_OCTETS_GB); + mmc->mmc_tx_unicast_gb += readl(mmcaddr + MMC_TX_UNICAST_GB); + mmc->mmc_tx_multicast_gb += readl(mmcaddr + MMC_TX_MULTICAST_GB); + mmc->mmc_tx_broadcast_gb += readl(mmcaddr + MMC_TX_BROADCAST_GB); + mmc->mmc_tx_underflow_error += readl(mmcaddr + MMC_TX_UNDERFLOW_ERROR); + mmc->mmc_tx_singlecol_g += readl(mmcaddr + MMC_TX_SINGLECOL_G); + mmc->mmc_tx_multicol_g += readl(mmcaddr + MMC_TX_MULTICOL_G); + mmc->mmc_tx_deferred += readl(mmcaddr + MMC_TX_DEFERRED); + mmc->mmc_tx_latecol += readl(mmcaddr + MMC_TX_LATECOL); + mmc->mmc_tx_exesscol += readl(mmcaddr + MMC_TX_EXESSCOL); + mmc->mmc_tx_carrier_error += readl(mmcaddr + MMC_TX_CARRIER_ERROR); + mmc->mmc_tx_octetcount_g += readl(mmcaddr + MMC_TX_OCTETCOUNT_G); + mmc->mmc_tx_framecount_g += readl(mmcaddr + MMC_TX_FRAMECOUNT_G); + mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF); + mmc->mmc_tx_pause_frame += readl(mmcaddr + 
MMC_TX_PAUSE_FRAME); + mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G); /* MMC RX counter registers */ - mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB); - mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB); - mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); - mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); - mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); - mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR); - mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); - mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); - mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); - mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G); - mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G); - mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB); + mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB); + mmc->mmc_rx_octetcount_gb += readl(mmcaddr + MMC_RX_OCTETCOUNT_GB); + mmc->mmc_rx_octetcount_g += readl(mmcaddr + MMC_RX_OCTETCOUNT_G); + mmc->mmc_rx_broadcastframe_g += readl(mmcaddr + + MMC_RX_BROADCASTFRAME_G); + mmc->mmc_rx_multicastframe_g += readl(mmcaddr + + MMC_RX_MULTICASTFRAME_G); + mmc->mmc_rx_crc_error += readl(mmcaddr + MMC_RX_CRC_ERROR); + mmc->mmc_rx_align_error += readl(mmcaddr + MMC_RX_ALIGN_ERROR); + mmc->mmc_rx_run_error += readl(mmcaddr + MMC_RX_RUN_ERROR); + mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_RX_JABBER_ERROR); + mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_RX_UNDERSIZE_G); + mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_RX_OVERSIZE_G); + mmc->mmc_rx_64_octets_gb += readl(mmcaddr + MMC_RX_64_OCTETS_GB); mmc->mmc_rx_65_to_127_octets_gb += - readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB); + readl(mmcaddr + MMC_RX_65_TO_127_OCTETS_GB); mmc->mmc_rx_128_to_255_octets_gb += - readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB); + readl(mmcaddr + MMC_RX_128_TO_255_OCTETS_GB); mmc->mmc_rx_256_to_511_octets_gb += - readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB); + readl(mmcaddr + MMC_RX_256_TO_511_OCTETS_GB); mmc->mmc_rx_512_to_1023_octets_gb += - readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB); + readl(mmcaddr + MMC_RX_512_TO_1023_OCTETS_GB); mmc->mmc_rx_1024_to_max_octets_gb += - readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB); - mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G); - mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR); - mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE); - mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES); - mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW); - mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB); - mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR); + readl(mmcaddr + MMC_RX_1024_TO_MAX_OCTETS_GB); + mmc->mmc_rx_unicast_g += readl(mmcaddr + MMC_RX_UNICAST_G); + mmc->mmc_rx_length_error += readl(mmcaddr + MMC_RX_LENGTH_ERROR); + mmc->mmc_rx_autofrangetype += readl(mmcaddr + MMC_RX_AUTOFRANGETYPE); + mmc->mmc_rx_pause_frames += readl(mmcaddr + MMC_RX_PAUSE_FRAMES); + mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW); + mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB); + mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR); /* IPC */ - mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK); - mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR); + mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + 
MMC_RX_IPC_INTR_MASK); + mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR); /* IPv4 */ - mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD); - mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR); - mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY); - mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG); - mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL); + mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD); + mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR); + mmc->mmc_rx_ipv4_nopay += readl(mmcaddr + MMC_RX_IPV4_NOPAY); + mmc->mmc_rx_ipv4_frag += readl(mmcaddr + MMC_RX_IPV4_FRAG); + mmc->mmc_rx_ipv4_udsbl += readl(mmcaddr + MMC_RX_IPV4_UDSBL); - mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS); + mmc->mmc_rx_ipv4_gd_octets += readl(mmcaddr + MMC_RX_IPV4_GD_OCTETS); mmc->mmc_rx_ipv4_hderr_octets += - readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS); + readl(mmcaddr + MMC_RX_IPV4_HDERR_OCTETS); mmc->mmc_rx_ipv4_nopay_octets += - readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS); - mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS); + readl(mmcaddr + MMC_RX_IPV4_NOPAY_OCTETS); + mmc->mmc_rx_ipv4_frag_octets += readl(mmcaddr + + MMC_RX_IPV4_FRAG_OCTETS); mmc->mmc_rx_ipv4_udsbl_octets += - readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS); + readl(mmcaddr + MMC_RX_IPV4_UDSBL_OCTETS); /* IPV6 */ - mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS); + mmc->mmc_rx_ipv6_gd_octets += readl(mmcaddr + MMC_RX_IPV6_GD_OCTETS); mmc->mmc_rx_ipv6_hderr_octets += - readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS); + readl(mmcaddr + MMC_RX_IPV6_HDERR_OCTETS); mmc->mmc_rx_ipv6_nopay_octets += - readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS); + readl(mmcaddr + MMC_RX_IPV6_NOPAY_OCTETS); - mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD); - mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR); - mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY); + mmc->mmc_rx_ipv6_gd += readl(mmcaddr + MMC_RX_IPV6_GD); + mmc->mmc_rx_ipv6_hderr += readl(mmcaddr + MMC_RX_IPV6_HDERR); + mmc->mmc_rx_ipv6_nopay += readl(mmcaddr + MMC_RX_IPV6_NOPAY); /* Protocols */ - mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD); - mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR); - mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD); - mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR); - mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD); - mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR); + mmc->mmc_rx_udp_gd += readl(mmcaddr + MMC_RX_UDP_GD); + mmc->mmc_rx_udp_err += readl(mmcaddr + MMC_RX_UDP_ERR); + mmc->mmc_rx_tcp_gd += readl(mmcaddr + MMC_RX_TCP_GD); + mmc->mmc_rx_tcp_err += readl(mmcaddr + MMC_RX_TCP_ERR); + mmc->mmc_rx_icmp_gd += readl(mmcaddr + MMC_RX_ICMP_GD); + mmc->mmc_rx_icmp_err += readl(mmcaddr + MMC_RX_ICMP_ERR); - mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS); - mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS); - mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS); - mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS); - mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS); - mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS); + mmc->mmc_rx_udp_gd_octets += readl(mmcaddr + MMC_RX_UDP_GD_OCTETS); + mmc->mmc_rx_udp_err_octets += readl(mmcaddr + MMC_RX_UDP_ERR_OCTETS); + mmc->mmc_rx_tcp_gd_octets += readl(mmcaddr + MMC_RX_TCP_GD_OCTETS); + mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS); + 
mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS); + mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS); } diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 011386f6f24d..2beacd0d3043 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -279,6 +279,26 @@ static int ndesc_get_rx_timestamp_status(void *desc, u32 ats) return 1; } +static void ndesc_display_ring(void *head, unsigned int size, bool rx) +{ + struct dma_desc *p = (struct dma_desc *)head; + int i; + + pr_info("%s descriptor ring:\n", rx ? "RX" : "TX"); + + for (i = 0; i < size; i++) { + u64 x; + + x = *(u64 *)p; + pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x", + i, (unsigned int)virt_to_phys(p), + (unsigned int)x, (unsigned int)(x >> 32), + p->des2, p->des3); + p++; + } + pr_info("\n"); +} + const struct stmmac_desc_ops ndesc_ops = { .tx_status = ndesc_get_tx_status, .rx_status = ndesc_get_rx_status, @@ -297,4 +317,5 @@ const struct stmmac_desc_ops ndesc_ops = { .get_tx_timestamp_status = ndesc_get_tx_timestamp_status, .get_timestamp = ndesc_get_timestamp, .get_rx_timestamp_status = ndesc_get_rx_timestamp_status, + .display_ring = ndesc_display_ring, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 8bbab97895fe..59ae6088cd22 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -24,7 +24,7 @@ #define __STMMAC_H__ #define STMMAC_RESOURCE_NAME "stmmaceth" -#define DRV_MODULE_VERSION "Oct_2015" +#define DRV_MODULE_VERSION "Jan_2016" #include <linux/clk.h> #include <linux/stmmac.h> @@ -67,6 +67,7 @@ struct stmmac_priv { spinlock_t tx_lock; bool tx_path_in_lpi_mode; struct timer_list txtimer; + bool tso; struct dma_desc *dma_rx ____cacheline_aligned_in_smp; struct dma_extended_desc *dma_erx; @@ -128,6 +129,10 @@ struct stmmac_priv { int use_riwt; int irq_wake; spinlock_t ptp_lock; + void __iomem *mmcaddr; + u32 rx_tail_addr; + u32 tx_tail_addr; + u32 mss; #ifdef CONFIG_DEBUG_FS struct dentry *dbgfs_dir; @@ -143,9 +148,9 @@ void stmmac_set_ethtool_ops(struct net_device *netdev); int stmmac_ptp_register(struct stmmac_priv *priv); void stmmac_ptp_unregister(struct stmmac_priv *priv); -int stmmac_resume(struct net_device *ndev); -int stmmac_suspend(struct net_device *ndev); -int stmmac_dvr_remove(struct net_device *ndev); +int stmmac_resume(struct device *dev); +int stmmac_suspend(struct device *dev); +int stmmac_dvr_remove(struct device *dev); int stmmac_dvr_probe(struct device *device, struct plat_stmmacenet_data *plat_dat, struct stmmac_resources *res); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 3c7928edfebb..e2b98b01647e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -161,6 +161,9 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(mtl_rx_fifo_ctrl_active), STMMAC_STAT(mac_rx_frame_ctrl_fifo), STMMAC_STAT(mac_gmii_rx_proto_engine), + /* TSO */ + STMMAC_STAT(tx_tso_frames), + STMMAC_STAT(tx_tso_nfrags), }; #define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) @@ -499,14 +502,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, int i, j = 0; /* Update the DMA HW counters for dwmac10/100 */ - if (!priv->plat->has_gmac) + if 
(priv->hw->dma->dma_diagnostic_fr) priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats, priv->ioaddr); else { /* If supported, for new GMAC chips expose the MMC counters */ if (priv->dma_cap.rmon) { - dwmac_mmc_read(priv->ioaddr, &priv->mmc); + dwmac_mmc_read(priv->mmcaddr, &priv->mmc); for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) { char *p; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 78464fa7fe1f..eac45d0c75e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -56,6 +56,7 @@ #include "dwmac1000.h" #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) +#define TSO_MAX_BUFF_SIZE (SZ_16K - 1) /* Module parameters */ #define TX_TIMEO 5000 @@ -288,10 +289,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv) (priv->pcs == STMMAC_PCS_RTBI)) goto out; - /* Never init EEE in case of a switch is attached */ - if (priv->phydev->is_pseudo_fixed_link) - goto out; - /* MAC core supports the EEE feature. */ if (priv->dma_cap.eee) { int tx_lpi_timer = priv->tx_lpi_timer; @@ -725,13 +722,15 @@ static void stmmac_adjust_link(struct net_device *dev) new_state = 1; switch (phydev->speed) { case 1000: - if (likely(priv->plat->has_gmac)) + if (likely((priv->plat->has_gmac) || + (priv->plat->has_gmac4))) ctrl &= ~priv->hw->link.port; stmmac_hw_fix_mac_speed(priv); break; case 100: case 10: - if (priv->plat->has_gmac) { + if (likely((priv->plat->has_gmac) || + (priv->plat->has_gmac4))) { ctrl |= priv->hw->link.port; if (phydev->speed == SPEED_100) { ctrl |= priv->hw->link.speed; @@ -771,10 +770,16 @@ static void stmmac_adjust_link(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); - /* At this stage, it could be needed to setup the EEE or adjust some - * MAC related HW registers. - */ - priv->eee_enabled = stmmac_eee_init(priv); + if (phydev->is_pseudo_fixed_link) + /* Stop PHY layer to call the hook to adjust the link in case + * of a switch is attached to the stmmac driver. + */ + phydev->irq = PHY_IGNORE_INTERRUPT; + else + /* At this stage, init the EEE if supported. + * Never called in case of fixed_link. + */ + priv->eee_enabled = stmmac_eee_init(priv); } /** @@ -865,10 +870,6 @@ static int stmmac_init_phy(struct net_device *dev) return -ENODEV; } - /* If attached to a switch, there is no reason to poll phy handler */ - if (phydev->is_pseudo_fixed_link) - phydev->irq = PHY_IGNORE_INTERRUPT; - pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" " Link = %d\n", dev->name, phydev->phy_id, phydev->link); @@ -877,53 +878,22 @@ static int stmmac_init_phy(struct net_device *dev) return 0; } -/** - * stmmac_display_ring - display ring - * @head: pointer to the head of the ring passed. - * @size: size of the ring. - * @extend_desc: to verify if extended descriptors are used. - * Description: display the control/status and buffer descriptors. 
- */ -static void stmmac_display_ring(void *head, int size, int extend_desc) -{ - int i; - struct dma_extended_desc *ep = (struct dma_extended_desc *)head; - struct dma_desc *p = (struct dma_desc *)head; - - for (i = 0; i < size; i++) { - u64 x; - if (extend_desc) { - x = *(u64 *) ep; - pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", - i, (unsigned int)virt_to_phys(ep), - (unsigned int)x, (unsigned int)(x >> 32), - ep->basic.des2, ep->basic.des3); - ep++; - } else { - x = *(u64 *) p; - pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x", - i, (unsigned int)virt_to_phys(p), - (unsigned int)x, (unsigned int)(x >> 32), - p->des2, p->des3); - p++; - } - pr_info("\n"); - } -} - static void stmmac_display_rings(struct stmmac_priv *priv) { + void *head_rx, *head_tx; + if (priv->extend_desc) { - pr_info("Extended RX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1); - pr_info("Extended TX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1); + head_rx = (void *)priv->dma_erx; + head_tx = (void *)priv->dma_etx; } else { - pr_info("RX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0); - pr_info("TX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0); + head_rx = (void *)priv->dma_rx; + head_tx = (void *)priv->dma_tx; } + + /* Display Rx ring */ + priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); + /* Display Tx ring */ + priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); } static int stmmac_set_bfsize(int mtu, int bufsize) @@ -1002,7 +972,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, return -EINVAL; } - p->des2 = priv->rx_skbuff_dma[i]; + if (priv->synopsys_id >= DWMAC_CORE_4_00) + p->des0 = priv->rx_skbuff_dma[i]; + else + p->des2 = priv->rx_skbuff_dma[i]; if ((priv->hw->mode->init_desc3) && (priv->dma_buf_sz == BUF_SIZE_16KiB)) @@ -1093,7 +1066,16 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) p = &((priv->dma_etx + i)->basic); else p = priv->dma_tx + i; - p->des2 = 0; + + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + p->des0 = 0; + p->des1 = 0; + p->des2 = 0; + p->des3 = 0; + } else { + p->des2 = 0; + } + priv->tx_skbuff_dma[i].buf = 0; priv->tx_skbuff_dma[i].map_as_page = false; priv->tx_skbuff_dma[i].len = 0; @@ -1356,9 +1338,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) priv->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry].buf = 0; + priv->tx_skbuff_dma[entry].len = 0; priv->tx_skbuff_dma[entry].map_as_page = false; } - priv->hw->mode->clean_desc3(priv, p); + + if (priv->hw->mode->clean_desc3) + priv->hw->mode->clean_desc3(priv, p); + priv->tx_skbuff_dma[entry].last_segment = false; priv->tx_skbuff_dma[entry].is_jumbo = false; @@ -1481,41 +1467,23 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv) static void stmmac_mmc_setup(struct stmmac_priv *priv) { unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | - MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; + MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; - dwmac_mmc_intr_all_mask(priv->ioaddr); + if (priv->synopsys_id >= DWMAC_CORE_4_00) + priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; + else + priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; + + dwmac_mmc_intr_all_mask(priv->mmcaddr); if (priv->dma_cap.rmon) { - dwmac_mmc_ctrl(priv->ioaddr, mode); + dwmac_mmc_ctrl(priv->mmcaddr, mode); memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); } else pr_info(" No MAC Management Counters 
available\n"); } /** - * stmmac_get_synopsys_id - return the SYINID. - * @priv: driver private structure - * Description: this simple function is to decode and return the SYINID - * starting from the HW core register. - */ -static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv) -{ - u32 hwid = priv->hw->synopsys_uid; - - /* Check Synopsys Id (not available on old chips) */ - if (likely(hwid)) { - u32 uid = ((hwid & 0x0000ff00) >> 8); - u32 synid = (hwid & 0x000000ff); - - pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n", - uid, synid); - - return synid; - } - return 0; -} - -/** * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors * @priv: driver private structure * Description: select the Enhanced/Alternate or Normal descriptors. @@ -1552,51 +1520,15 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv) */ static int stmmac_get_hw_features(struct stmmac_priv *priv) { - u32 hw_cap = 0; + u32 ret = 0; if (priv->hw->dma->get_hw_feature) { - hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr); - - priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL); - priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1; - priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2; - priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4; - priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5; - priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6; - priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8; - priv->dma_cap.pmt_remote_wake_up = - (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9; - priv->dma_cap.pmt_magic_frame = - (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10; - /* MMC */ - priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11; - /* IEEE 1588-2002 */ - priv->dma_cap.time_stamp = - (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12; - /* IEEE 1588-2008 */ - priv->dma_cap.atime_stamp = - (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13; - /* 802.3az - Energy-Efficient Ethernet (EEE) */ - priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14; - priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15; - /* TX and RX csum */ - priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16; - priv->dma_cap.rx_coe_type1 = - (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17; - priv->dma_cap.rx_coe_type2 = - (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18; - priv->dma_cap.rxfifo_over_2048 = - (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19; - /* TX and RX number of channels */ - priv->dma_cap.number_rx_channel = - (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20; - priv->dma_cap.number_tx_channel = - (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22; - /* Alternate (enhanced) DESC mode */ - priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24; - } - - return hw_cap; + priv->hw->dma->get_hw_feature(priv->ioaddr, + &priv->dma_cap); + ret = 1; + } + + return ret; } /** @@ -1652,8 +1584,19 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, aal, priv->dma_tx_phy, priv->dma_rx_phy, atds); - if ((priv->synopsys_id >= DWMAC_CORE_3_50) && - (priv->plat->axi && priv->hw->dma->axi)) + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + priv->rx_tail_addr = priv->dma_rx_phy + + (DMA_RX_SIZE * sizeof(struct dma_desc)); + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr, + STMMAC_CHAN0); + + priv->tx_tail_addr = priv->dma_tx_phy + + (DMA_TX_SIZE * sizeof(struct dma_desc)); + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr, + STMMAC_CHAN0); + } + + if (priv->plat->axi && priv->hw->dma->axi) 
 		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
 
 	return ret;
 }
@@ -1733,7 +1676,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 	}
 
 	/* Enable the MAC Rx/Tx */
-	stmmac_set_mac(priv->ioaddr, true);
+	if (priv->synopsys_id >= DWMAC_CORE_4_00)
+		stmmac_dwmac4_set_mac(priv->ioaddr, true);
+	else
+		stmmac_set_mac(priv->ioaddr, true);
 
 	/* Set the HW DMA mode and the COE */
 	stmmac_dma_operation_mode(priv);
@@ -1771,6 +1717,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
 	if (priv->pcs && priv->hw->mac->ctrl_ane)
 		priv->hw->mac->ctrl_ane(priv->hw, 0);
 
+	/* set TX ring length */
+	if (priv->hw->dma->set_tx_ring_len)
+		priv->hw->dma->set_tx_ring_len(priv->ioaddr,
+					       (DMA_TX_SIZE - 1));
+	/* set RX ring length */
+	if (priv->hw->dma->set_rx_ring_len)
+		priv->hw->dma->set_rx_ring_len(priv->ioaddr,
+					       (DMA_RX_SIZE - 1));
+	/* Enable TSO */
+	if (priv->tso)
+		priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
+
 	return 0;
 }
 
@@ -1936,6 +1894,239 @@ static int stmmac_release(struct net_device *dev)
 }
 
 /**
+ * stmmac_tso_allocator - fill TX descriptors with the TSO payload
+ * @priv: driver private structure
+ * @des: buffer start address
+ * @total_len: total length to fill in descriptors
+ * @last_segment: condition for the last descriptor
+ * Description:
+ * This function fills the descriptors, and requests new descriptors,
+ * according to the buffer length to fill.
+ */
+static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+				 int total_len, bool last_segment)
+{
+	struct dma_desc *desc;
+	int tmp_len;
+	u32 buff_size;
+
+	tmp_len = total_len;
+
+	while (tmp_len > 0) {
+		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+		desc = priv->dma_tx + priv->cur_tx;
+
+		desc->des0 = des + (total_len - tmp_len);
+		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+			    TSO_MAX_BUFF_SIZE : tmp_len;
+
+		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
+			0, 1,
+			(last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+			0, 0);
+
+		tmp_len -= TSO_MAX_BUFF_SIZE;
+	}
+}
+
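[Editor's note: the splitting loop in stmmac_tso_allocator() walks the payload in chunks of at most TSO_MAX_BUFF_SIZE (SZ_16K - 1 in this patch), emitting one descriptor per chunk. A user-space mock of just that address/length arithmetic; names and the printed output are illustrative only:]

    #include <stdio.h>

    #define TSO_MAX_BUFF_SIZE ((16 * 1024) - 1)	/* SZ_16K - 1, as in the patch */

    /* Mirror of the splitting loop: emit one (address, length) pair
     * per descriptor until the payload is consumed. */
    static int split_payload(unsigned int des, int total_len)
    {
        int tmp_len = total_len, n = 0;

        while (tmp_len > 0) {
            unsigned int buf = des + (total_len - tmp_len);
            int buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
                            TSO_MAX_BUFF_SIZE : tmp_len;

            printf("desc %d: addr=0x%x len=%d\n", n++, buf, buff_size);
            tmp_len -= TSO_MAX_BUFF_SIZE;
        }
        return n;
    }

    int main(void)
    {
        /* e.g. a 40000-byte payload needs three descriptors */
        printf("used %d descriptors\n", split_payload(0x1000, 40000));
        return 0;
    }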
+/**
+ * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description: this is the transmit function that is called on TSO frames
+ * (support available on GMAC4 and newer chips).
+ * The diagram below shows the ring programming in case of TSO frames:
+ *
+ * First Descriptor
+ *  --------
+ *  | DES0 |---> buffer1 = L2/L3/L4 header
+ *  | DES1 |---> TCP Payload (can continue on next descr...)
+ *  | DES2 |---> buffer 1 and 2 len
+ *  | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
+ *  --------
+ *	|
+ *     ...
+ *	|
+ *  --------
+ *  | DES0 | --| Split TCP Payload on Buffers 1 and 2
+ *  | DES1 | --|
+ *  | DES2 | --> buffer 1 and 2 len
+ *  | DES3 |
+ *  --------
+ *
+ * The MSS is fixed while TSO is enabled, so there is no need to reprogram
+ * the TDES3 ctx field for each frame.
+ */
+static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	u32 pay_len, mss;
+	int tmp_pay_len = 0;
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int nfrags = skb_shinfo(skb)->nr_frags;
+	unsigned int first_entry, des;
+	struct dma_desc *desc, *first, *mss_desc = NULL;
+	u8 proto_hdr_len;
+	int i;
+
+	spin_lock(&priv->tx_lock);
+
+	/* Compute header lengths */
+	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+	/* Desc availability based on threshold should be safe enough */
+	if (unlikely(stmmac_tx_avail(priv) <
+		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
+		if (!netif_queue_stopped(dev)) {
+			netif_stop_queue(dev);
+			/* This is a hard error, log it. */
+			pr_err("%s: Tx Ring full when queue awake\n", __func__);
+		}
+		spin_unlock(&priv->tx_lock);
+		return NETDEV_TX_BUSY;
+	}
+
+	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
+
+	mss = skb_shinfo(skb)->gso_size;
+
+	/* set new MSS value if needed */
+	if (mss != priv->mss) {
+		mss_desc = priv->dma_tx + priv->cur_tx;
+		priv->hw->desc->set_mss(mss_desc, mss);
+		priv->mss = mss;
+		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+	}
+
+	if (netif_msg_tx_queued(priv)) {
+		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
+		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
+			skb->data_len);
+	}
+
+	first_entry = priv->cur_tx;
+
+	desc = priv->dma_tx + first_entry;
+	first = desc;
+
+	/* first descriptor: fill Headers on Buf1 */
+	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
+			     DMA_TO_DEVICE);
+	if (dma_mapping_error(priv->device, des))
+		goto dma_map_err;
+
+	priv->tx_skbuff_dma[first_entry].buf = des;
+	priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+	priv->tx_skbuff[first_entry] = skb;
+
+	first->des0 = des;
+
+	/* Fill start of payload in buff2 of first descriptor */
+	if (pay_len)
+		first->des1 = des + proto_hdr_len;
+
+	/* If needed take extra descriptors to fill the remaining payload */
+	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+
+	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
+
+	/* Prepare fragments */
+	for (i = 0; i < nfrags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		des = skb_frag_dma_map(priv->device, frag, 0,
+				       skb_frag_size(frag),
+				       DMA_TO_DEVICE);
+
+		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
+				     (i == nfrags - 1));
+
+		priv->tx_skbuff_dma[priv->cur_tx].buf = des;
+		priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
+		priv->tx_skbuff[priv->cur_tx] = NULL;
+		priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
+	}
+
+	priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
+
+	priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+
+	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+		if (netif_msg_hw(priv))
+			pr_debug("%s: stop transmitted packets\n", __func__);
+		netif_stop_queue(dev);
+	}
+
+	dev->stats.tx_bytes += skb->len;
+	priv->xstats.tx_tso_frames++;
+	priv->xstats.tx_tso_nfrags += nfrags;
+
+	/* Manage tx mitigation */
+	priv->tx_count_frames += nfrags + 1;
+	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+		mod_timer(&priv->txtimer,
+			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
+	} else {
+		priv->tx_count_frames = 0;
+		priv->hw->desc->set_tx_ic(desc);
+		priv->xstats.tx_set_ic_bit++;
+	}
+
+	if (!priv->hwts_tx_en)
+		skb_tx_timestamp(skb);
+
+	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+		     priv->hwts_tx_en)) {
+		/* declare that device is doing timestamping */
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+		priv->hw->desc->enable_tx_timestamp(first);
+	}
+
+	/* Complete the first descriptor before granting the DMA */
+	priv->hw->desc->prepare_tso_tx_desc(first, 1,
+			proto_hdr_len,
+			pay_len,
+			1, priv->tx_skbuff_dma[first_entry].last_segment,
+			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+
+	/* If context desc is used to change MSS */
+	if (mss_desc)
+		priv->hw->desc->set_tx_owner(mss_desc);
+
+	/* The own bit must be the latest setting done when preparing the
+	 * descriptor, and then a barrier is needed to make sure that
+	 * all is coherent before granting the DMA engine.
+	 */
+	smp_wmb();
+
+	if (netif_msg_pktdata(priv)) {
+		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
+			__func__, priv->cur_tx, priv->dirty_tx, first_entry,
+			priv->cur_tx, first, nfrags);
+
+		priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
+					     0);
+
+		pr_info(">>> frame to be transmitted: ");
+		print_pkt(skb->data, skb_headlen(skb));
+	}
+
+	netdev_sent_queue(dev, skb->len);
+
+	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+				       STMMAC_CHAN0);
+
+	spin_unlock(&priv->tx_lock);
+	return NETDEV_TX_OK;
+
+dma_map_err:
+	spin_unlock(&priv->tx_lock);
+	dev_err(priv->device, "Tx dma map failed\n");
+	dev_kfree_skb(skb);
+	priv->dev->stats.tx_dropped++;
+	return NETDEV_TX_OK;
+}
+
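[Editor's note: the ring-full guard at the top of stmmac_tso_xmit() sizes its check from a worst-case descriptor estimate: one descriptor per TSO_MAX_BUFF_SIZE chunk of payload, plus one for the header descriptor. A tiny sketch of that arithmetic, with hypothetical input numbers:]

    #include <stdio.h>

    #define TSO_MAX_BUFF_SIZE ((16 * 1024) - 1)

    /* Worst-case descriptor estimate used by the ring-full guard in
     * stmmac_tso_xmit(). */
    static int tso_desc_estimate(int skb_len, int proto_hdr_len)
    {
        return (skb_len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1;
    }

    int main(void)
    {
        /* e.g. a 64 KB GSO skb with a 54-byte L2/L3/L4 header */
        printf("need >= %d free descriptors\n", tso_desc_estimate(65536, 54));
        return 0;
    }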
+/**
  * stmmac_xmit - Tx entry point of the driver
  * @skb : the socket buffer
  * @dev : device pointer
@@ -1952,6 +2143,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int entry, first_entry;
 	struct dma_desc *desc, *first;
 	unsigned int enh_desc;
+	unsigned int des;
+
+	/* Manage oversized TCP frames for GMAC4 device */
+	if (skb_is_gso(skb) && priv->tso) {
+		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+			return stmmac_tso_xmit(skb, dev);
+	}
 
 	spin_lock(&priv->tx_lock);
 
@@ -1987,7 +2185,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (enh_desc)
 		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
 
-	if (unlikely(is_jumbo)) {
+	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
+					 DWMAC_CORE_4_00)) {
 		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
 		if (unlikely(entry < 0))
 			goto dma_map_err;
@@ -2005,13 +2204,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		else
 			desc = priv->dma_tx + entry;
 
-		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
-					      DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, desc->des2))
+		des = skb_frag_dma_map(priv->device, frag, 0, len,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->device, des))
 			goto dma_map_err; /* should reuse desc w/o issues */
 
 		priv->tx_skbuff[entry] = NULL;
-		priv->tx_skbuff_dma[entry].buf = desc->des2;
+
+		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+			desc->des0 = des;
+			priv->tx_skbuff_dma[entry].buf = desc->des0;
+		} else {
+			desc->des2 = des;
+			priv->tx_skbuff_dma[entry].buf = desc->des2;
+		}
+
 		priv->tx_skbuff_dma[entry].map_as_page = true;
 		priv->tx_skbuff_dma[entry].len = len;
 		priv->tx_skbuff_dma[entry].last_segment = last_segment;
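[Editor's note: as the hunk above shows, GMAC4 descriptors carry the buffer address in des0 (and des1), while older cores put it in des2. A standalone sketch of that version dispatch; the struct layout is simplified and the DWMAC_CORE_4_00 value is an assumed encoding for illustration only:]

    #include <stdint.h>
    #include <stdio.h>

    #define DWMAC_CORE_4_00 0x40	/* assumed numeric encoding */

    /* Simplified view of a 4-word DMA descriptor */
    struct mock_dma_desc {
        uint32_t des0, des1, des2, des3;
    };

    /* GMAC4 keeps buffer addresses in des0/des1; older cores use des2.
     * Mirrors the dispatch done in stmmac_xmit() above. */
    static void set_buf_addr(struct mock_dma_desc *p, uint32_t synopsys_id,
                             uint32_t dma_addr)
    {
        if (synopsys_id >= DWMAC_CORE_4_00)
            p->des0 = dma_addr;
        else
            p->des2 = dma_addr;
    }

    int main(void)
    {
        struct mock_dma_desc d = { 0 };

        set_buf_addr(&d, 0x41, 0xdeadb000);
        printf("des0=0x%08x des2=0x%08x\n", (unsigned)d.des0, (unsigned)d.des2);
        return 0;
    }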
@@ -2026,16 +2233,18 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	priv->cur_tx = entry;
 
 	if (netif_msg_pktdata(priv)) {
+		void *tx_head;
+
 		pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
 			 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
 			 entry, first, nfrags);
 
 		if (priv->extend_desc)
-			stmmac_display_ring((void *)priv->dma_etx,
-					    DMA_TX_SIZE, 1);
+			tx_head = (void *)priv->dma_etx;
 		else
-			stmmac_display_ring((void *)priv->dma_tx,
-					    DMA_TX_SIZE, 0);
+			tx_head = (void *)priv->dma_tx;
+
+		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
 
 		pr_debug(">>> frame to be transmitted: ");
 		print_pkt(skb->data, skb->len);
@@ -2074,12 +2283,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(!is_jumbo)) {
 		bool last_segment = (nfrags == 0);
 
-		first->des2 = dma_map_single(priv->device, skb->data,
-					     nopaged_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->device, first->des2))
+		des = dma_map_single(priv->device, skb->data,
+				     nopaged_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->device, des))
 			goto dma_map_err;
 
-		priv->tx_skbuff_dma[first_entry].buf = first->des2;
+		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+			first->des0 = des;
+			priv->tx_skbuff_dma[first_entry].buf = first->des0;
+		} else {
+			first->des2 = des;
+			priv->tx_skbuff_dma[first_entry].buf = first->des2;
+		}
+
 		priv->tx_skbuff_dma[first_entry].len = nopaged_len;
 		priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
 
@@ -2103,7 +2319,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	netdev_sent_queue(dev, skb->len);
-	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+
+	if (priv->synopsys_id < DWMAC_CORE_4_00)
+		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+	else
+		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+					       STMMAC_CHAN0);
 
 	spin_unlock(&priv->tx_lock);
 	return NETDEV_TX_OK;
@@ -2185,9 +2406,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 				dev_kfree_skb(skb);
 				break;
 			}
-			p->des2 = priv->rx_skbuff_dma[entry];
 
-			priv->hw->mode->refill_desc3(priv, p);
+			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+				p->des0 = priv->rx_skbuff_dma[entry];
+				p->des1 = 0;
+			} else {
+				p->des2 = priv->rx_skbuff_dma[entry];
+			}
+
+			if (priv->hw->mode->refill_desc3)
+				priv->hw->mode->refill_desc3(priv, p);
 
 			if (priv->rx_zeroc_thresh > 0)
 				priv->rx_zeroc_thresh--;
@@ -2195,9 +2422,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 		if (netif_msg_rx_status(priv))
 			pr_debug("\trefill entry #%d\n", entry);
 		}
-		wmb();
-		priv->hw->desc->set_rx_owner(p);
+
+		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
+		else
+			priv->hw->desc->set_rx_owner(p);
+		wmb();
 
 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
@@ -2220,13 +2451,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 	int coe = priv->hw->rx_csum;
 
 	if (netif_msg_rx_status(priv)) {
+		void *rx_head;
+
 		pr_debug("%s: descriptor ring:\n", __func__);
 		if (priv->extend_desc)
-			stmmac_display_ring((void *)priv->dma_erx,
-					    DMA_RX_SIZE, 1);
+			rx_head = (void *)priv->dma_erx;
 		else
-			stmmac_display_ring((void *)priv->dma_rx,
-					    DMA_RX_SIZE, 0);
+			rx_head = (void *)priv->dma_rx;
+
+		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
 	}
 	while (count < limit) {
 		int status;
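[Editor's note: the refill path above fills the descriptor fields first and hands ownership to the hardware last, ordered by a write barrier. A rough user-space analogue of that fill-then-publish discipline, using C11 release semantics as a stand-in for wmb(); the OWN bit position and names are illustrative only:]

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DESC_OWN (1U << 31)	/* illustrative OWN bit position */

    struct fake_desc {
        uint32_t buf_addr;
        _Atomic uint32_t status;
    };

    /* Publish a refilled descriptor: fill fields first, flip ownership
     * last, with release ordering standing in for the barrier. */
    static void refill_publish(struct fake_desc *p, uint32_t dma_addr)
    {
        p->buf_addr = dma_addr;
        atomic_store_explicit(&p->status, DESC_OWN, memory_order_release);
    }

    int main(void)
    {
        struct fake_desc d = { 0 };

        refill_publish(&d, 0x1000);
        printf("own=%d addr=0x%x\n",
               !!(atomic_load(&d.status) & DESC_OWN), (unsigned)d.buf_addr);
        return 0;
    }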
frame_len, + priv->dma_buf_sz); priv->dev->stats.rx_length_errors++; break; } @@ -2293,14 +2538,19 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) if (netif_msg_rx_status(priv)) { pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", - p, entry, p->des2); + p, entry, des); if (frame_len > ETH_FRAME_LEN) pr_debug("\tframe size %d, COE: %d\n", frame_len, status); } - if (unlikely((frame_len < priv->rx_copybreak) || - stmmac_rx_threshold_count(priv))) { + /* The zero-copy is always used for all the sizes + * in case of GMAC4 because it needs + * to refill the used descriptors, always. + */ + if (unlikely(!priv->plat->has_gmac4 && + ((frame_len < priv->rx_copybreak) || + stmmac_rx_threshold_count(priv)))) { skb = netdev_alloc_skb_ip_align(priv->dev, frame_len); if (unlikely(!skb)) { @@ -2452,7 +2702,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) return -EBUSY; } - if (priv->plat->enh_desc) + if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) max_mtu = JUMBO_LEN; else max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); @@ -2466,6 +2716,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu) } dev->mtu = new_mtu; + netdev_update_features(dev); return 0; @@ -2490,6 +2741,14 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) features &= ~NETIF_F_CSUM_MASK; + /* Disable tso if asked by ethtool */ + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { + if (features & NETIF_F_TSO) + priv->tso = true; + else + priv->tso = false; + } + return features; } @@ -2536,7 +2795,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) } /* To handle GMAC own interrupts */ - if (priv->plat->has_gmac) { + if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { int status = priv->hw->mac->host_irq_status(priv->hw, &priv->xstats); if (unlikely(status)) { @@ -2545,6 +2804,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id) priv->tx_path_in_lpi_mode = true; if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) priv->tx_path_in_lpi_mode = false; + if (status & CORE_IRQ_MTL_RX_OVERFLOW) + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, + priv->rx_tail_addr, + STMMAC_CHAN0); } } @@ -2617,15 +2880,14 @@ static void sysfs_display_ring(void *head, int size, int extend_desc, x = *(u64 *) ep; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), - (unsigned int)x, (unsigned int)(x >> 32), + ep->basic.des0, ep->basic.des1, ep->basic.des2, ep->basic.des3); ep++; } else { x = *(u64 *) p; seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", i, (unsigned int)virt_to_phys(ep), - (unsigned int)x, (unsigned int)(x >> 32), - p->des2, p->des3); + p->des0, p->des1, p->des2, p->des3); p++; } seq_printf(seq, "\n"); @@ -2708,10 +2970,15 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); seq_printf(seq, "\tChecksum Offload in TX: %s\n", (priv->dma_cap.tx_coe) ? "Y" : "N"); - seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", - (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); - seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", - (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", + (priv->dma_cap.rx_coe) ? "Y" : "N"); + } else { + seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", + (priv->dma_cap.rx_coe_type1) ? 
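The RX copybreak condition above gains a GMAC4 escape hatch. Condensed into a single predicate (a hypothetical helper; the names mirror the driver), the decision reads:

#include <stdbool.h>

/* GMAC4 must always take the zero-copy path so that every used
 * descriptor is refilled; only older cores may copy small frames into a
 * fresh skb and recycle the original receive buffer. */
static bool rx_should_copy(bool has_gmac4, int frame_len, int rx_copybreak,
			   bool threshold_hit)
{
	if (has_gmac4)
		return false;
	return frame_len < rx_copybreak || threshold_hit;
}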
"Y" : "N"); + seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", + (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); + } seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); seq_printf(seq, "\tNumber of Additional RX channel: %d\n", @@ -2820,27 +3087,35 @@ static int stmmac_hw_init(struct stmmac_priv *priv) priv->dev->priv_flags |= IFF_UNICAST_FLT; mac = dwmac1000_setup(priv->ioaddr, priv->plat->multicast_filter_bins, - priv->plat->unicast_filter_entries); + priv->plat->unicast_filter_entries, + &priv->synopsys_id); + } else if (priv->plat->has_gmac4) { + priv->dev->priv_flags |= IFF_UNICAST_FLT; + mac = dwmac4_setup(priv->ioaddr, + priv->plat->multicast_filter_bins, + priv->plat->unicast_filter_entries, + &priv->synopsys_id); } else { - mac = dwmac100_setup(priv->ioaddr); + mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id); } if (!mac) return -ENOMEM; priv->hw = mac; - /* Get and dump the chip ID */ - priv->synopsys_id = stmmac_get_synopsys_id(priv); - /* To use the chained or ring mode */ - if (chain_mode) { - priv->hw->mode = &chain_mode_ops; - pr_info(" Chain mode enabled\n"); - priv->mode = STMMAC_CHAIN_MODE; + if (priv->synopsys_id >= DWMAC_CORE_4_00) { + priv->hw->mode = &dwmac4_ring_mode_ops; } else { - priv->hw->mode = &ring_mode_ops; - pr_info(" Ring mode enabled\n"); - priv->mode = STMMAC_RING_MODE; + if (chain_mode) { + priv->hw->mode = &chain_mode_ops; + pr_info(" Chain mode enabled\n"); + priv->mode = STMMAC_CHAIN_MODE; + } else { + priv->hw->mode = &ring_mode_ops; + pr_info(" Ring mode enabled\n"); + priv->mode = STMMAC_RING_MODE; + } } /* Get the HW capability (new GMAC newer than 3.50a) */ @@ -2862,6 +3137,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) else priv->plat->tx_coe = priv->dma_cap.tx_coe; + /* In case of GMAC4 rx_coe is from HW cap register. 
*/ + priv->plat->rx_coe = priv->dma_cap.rx_coe; + if (priv->dma_cap.rx_coe_type2) priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; else if (priv->dma_cap.rx_coe_type1) @@ -2870,13 +3148,17 @@ static int stmmac_hw_init(struct stmmac_priv *priv) } else pr_info(" No HW DMA feature register supported"); - /* To use alternate (extended) or normal descriptor structures */ - stmmac_selec_desc_mode(priv); + /* To use alternate (extended), normal or GMAC4 descriptor structures */ + if (priv->synopsys_id >= DWMAC_CORE_4_00) + priv->hw->desc = &dwmac4_desc_ops; + else + stmmac_selec_desc_mode(priv); if (priv->plat->rx_coe) { priv->hw->rx_csum = priv->plat->rx_coe; - pr_info(" RX Checksum Offload Engine supported (type %d)\n", - priv->plat->rx_coe); + pr_info(" RX Checksum Offload Engine supported\n"); + if (priv->synopsys_id < DWMAC_CORE_4_00) + pr_info("\tCOE Type %d\n", priv->hw->rx_csum); } if (priv->plat->tx_coe) pr_info(" TX Checksum insertion supported\n"); @@ -2886,6 +3168,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv) device_set_wakeup_capable(priv->device, 1); } + if (priv->dma_cap.tsoen) + pr_info(" TSO supported\n"); + return 0; } @@ -2989,6 +3274,12 @@ int stmmac_dvr_probe(struct device *device, ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; + + if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { + ndev->hw_features |= NETIF_F_TSO; + priv->tso = true; + pr_info(" TSO feature enabled\n"); + } ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; ndev->watchdog_timeo = msecs_to_jiffies(watchdog); #ifdef STMMAC_VLAN_TAG_USED @@ -3064,12 +3355,13 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_probe); /** * stmmac_dvr_remove - * @ndev: net device pointer + * @dev: device pointer * Description: this function resets the TX/RX processes, disables the MAC RX/TX * changes the link status, releases the DMA descriptor rings. */ -int stmmac_dvr_remove(struct net_device *ndev) +int stmmac_dvr_remove(struct device *dev) { + struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); pr_info("%s:\n\tremoving driver", __func__); @@ -3095,13 +3387,14 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_remove); /** * stmmac_suspend - suspend callback - * @ndev: net device pointer + * @dev: device pointer * Description: this is the function to suspend the device and it is called * by the platform driver to stop the network queue, release the resources, * program the PMT register (for WoL), clean and release driver resources. */ -int stmmac_suspend(struct net_device *ndev) +int stmmac_suspend(struct device *dev) { + struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); unsigned long flags; @@ -3144,12 +3437,13 @@ EXPORT_SYMBOL_GPL(stmmac_suspend); /** * stmmac_resume - resume callback - * @ndev: net device pointer + * @dev: device pointer * Description: when resume this function is invoked to setup the DMA and CORE * in a usable state. */ -int stmmac_resume(struct net_device *ndev) +int stmmac_resume(struct device *dev) { + struct net_device *ndev = dev_get_drvdata(dev); struct stmmac_priv *priv = netdev_priv(ndev); unsigned long flags; @@ -3183,6 +3477,11 @@ int stmmac_resume(struct net_device *ndev) priv->dirty_rx = 0; priv->dirty_tx = 0; priv->cur_tx = 0; + /* reset private mss value to force mss context settings at + * next tso xmit (only used for gmac4). 
+ */ + priv->mss = 0; + stmmac_clear_descriptors(priv); stmmac_hw_setup(ndev, false); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index 06704ca6f9ca..3f83c369f56c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -37,6 +37,18 @@ #define MII_BUSY 0x00000001 #define MII_WRITE 0x00000002 +/* GMAC4 defines */ +#define MII_GMAC4_GOC_SHIFT 2 +#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT) +#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT) + +#define MII_PHY_ADDR_GMAC4_SHIFT 21 +#define MII_PHY_ADDR_GMAC4_MASK GENMASK(25, 21) +#define MII_PHY_REG_GMAC4_SHIFT 16 +#define MII_PHY_REG_GMAC4_MASK GENMASK(20, 16) +#define MII_CSR_CLK_GMAC4_SHIFT 8 +#define MII_CSR_CLK_GMAC4_MASK GENMASK(11, 8) + static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr) { unsigned long curr; @@ -124,6 +136,80 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, } /** + * stmmac_mdio_read_gmac4 + * @bus: points to the mii_bus structure + * @phyaddr: MII addr reg bits 25-21 + * @phyreg: MII addr reg bits 20-16 + * Description: it reads data from the MII register of GMAC4 from within + * the phy device. + */ +static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + int data; + u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) & + (MII_PHY_ADDR_GMAC4_MASK)) | + ((phyreg << MII_PHY_REG_GMAC4_SHIFT) & + (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ; + + value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK) + << MII_CSR_CLK_GMAC4_SHIFT); + + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; + + writel(value, priv->ioaddr + mii_address); + + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; + + /* Read the data from the MII data register */ + data = (int)readl(priv->ioaddr + mii_data); + + return data; +} + +/** + * stmmac_mdio_write_gmac4 + * @bus: points to the mii_bus structure + * @phyaddr: MII addr reg bits 25-21 + * @phyreg: MII addr reg bits 20-16 + * @phydata: phy data + * Description: it writes the data into the MII register of GMAC4 from within + * the device. 
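The GMAC4 MDIO defines above pack the PHY address into bits 25:21 and the register number into bits 20:16 of the MII address word, with the GOC field selecting read or write. A self-contained encoder built from that layout; the CSR clock handling is simplified here to a plain 4-bit field at bit 8, so treat it as a sketch rather than the driver's exact arithmetic:

#include <stdint.h>
#include <stdio.h>

#define MII_BUSY_BIT		0x1u
#define GOC_SHIFT		2
#define GOC_WRITE		(1u << GOC_SHIFT)
#define GOC_READ		(3u << GOC_SHIFT)
#define PHY_ADDR_SHIFT		21
#define PHY_ADDR_MASK		(0x1fu << PHY_ADDR_SHIFT)	/* GENMASK(25, 21) */
#define PHY_REG_SHIFT		16
#define PHY_REG_MASK		(0x1fu << PHY_REG_SHIFT)	/* GENMASK(20, 16) */
#define CSR_CLK_SHIFT		8

static uint32_t gmac4_mii_cmd(unsigned int phyaddr, unsigned int phyreg,
			      unsigned int clk_csr, int write)
{
	uint32_t v = ((phyaddr << PHY_ADDR_SHIFT) & PHY_ADDR_MASK) |
		     ((phyreg << PHY_REG_SHIFT) & PHY_REG_MASK) |
		     (write ? GOC_WRITE : GOC_READ);

	return v | MII_BUSY_BIT | ((clk_csr & 0xfu) << CSR_CLK_SHIFT);
}

int main(void)
{
	printf("read cmd: 0x%08x\n", gmac4_mii_cmd(1, 2, 4, 0));
	return 0;
}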
+ */ +static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg, + u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned int mii_address = priv->hw->mii.addr; + unsigned int mii_data = priv->hw->mii.data; + + u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) & + (MII_PHY_ADDR_GMAC4_MASK)) | + ((phyreg << MII_PHY_REG_GMAC4_SHIFT) & + (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE; + + value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK) + << MII_CSR_CLK_GMAC4_SHIFT); + + /* Wait until any existing MII operation is complete */ + if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) + return -EBUSY; + + /* Set the MII address register to write */ + writel(phydata, priv->ioaddr + mii_data); + writel(value, priv->ioaddr + mii_address); + + /* Wait until any existing MII operation is complete */ + return stmmac_mdio_busy_wait(priv->ioaddr, mii_address); +} + +/** * stmmac_mdio_reset * @bus: points to the mii_bus structure * Description: reset the MII bus @@ -180,9 +266,11 @@ int stmmac_mdio_reset(struct mii_bus *bus) /* This is a workaround for problems with the STE101P PHY. * It doesn't complete its reset until at least one clock cycle - * on MDC, so perform a dummy mdio read. + * on MDC, so perform a dummy mdio read. To be updated for GMAC4 + * if needed. */ - writel(0, priv->ioaddr + mii_address); + if (!priv->plat->has_gmac4) + writel(0, priv->ioaddr + mii_address); #endif return 0; } @@ -217,8 +305,14 @@ int stmmac_mdio_register(struct net_device *ndev) #endif new_bus->name = "stmmac"; - new_bus->read = &stmmac_mdio_read; - new_bus->write = &stmmac_mdio_write; + if (priv->plat->has_gmac4) { + new_bus->read = &stmmac_mdio_read_gmac4; + new_bus->write = &stmmac_mdio_write_gmac4; + } else { + new_bus->read = &stmmac_mdio_read; + new_bus->write = &stmmac_mdio_write; + } + new_bus->reset = &stmmac_mdio_reset; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name, priv->plat->bus_id); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index ae4388735b7f..56c8a2342c14 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -231,30 +231,10 @@ static int stmmac_pci_probe(struct pci_dev *pdev, */ static void stmmac_pci_remove(struct pci_dev *pdev) { - struct net_device *ndev = pci_get_drvdata(pdev); - - stmmac_dvr_remove(ndev); -} - -#ifdef CONFIG_PM_SLEEP -static int stmmac_pci_suspend(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *ndev = pci_get_drvdata(pdev); - - return stmmac_suspend(ndev); -} - -static int stmmac_pci_resume(struct device *dev) -{ - struct pci_dev *pdev = to_pci_dev(dev); - struct net_device *ndev = pci_get_drvdata(pdev); - - return stmmac_resume(ndev); + stmmac_dvr_remove(&pdev->dev); } -#endif -static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume); +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume); #define STMMAC_VENDOR_ID 0x700 #define STMMAC_QUARK_ID 0x0937 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index cf37ea558ecc..409db913b117 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -284,6 +284,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) plat->pmt = 1; } + if
(of_device_is_compatible(np, "snps,dwmac-4.00") || + of_device_is_compatible(np, "snps,dwmac-4.10a")) { + plat->has_gmac4 = 1; + plat->pmt = 1; + plat->tso_en = of_property_read_bool(np, "snps,tso"); + } + if (of_device_is_compatible(np, "snps,dwmac-3.610") || of_device_is_compatible(np, "snps,dwmac-3.710")) { plat->enh_desc = 1; @@ -379,7 +386,7 @@ int stmmac_pltfr_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); - int ret = stmmac_dvr_remove(ndev); + int ret = stmmac_dvr_remove(&pdev->dev); if (priv->plat->exit) priv->plat->exit(pdev, priv->plat->bsp_priv); @@ -403,7 +410,7 @@ static int stmmac_pltfr_suspend(struct device *dev) struct stmmac_priv *priv = netdev_priv(ndev); struct platform_device *pdev = to_platform_device(dev); - ret = stmmac_suspend(ndev); + ret = stmmac_suspend(dev); if (priv->plat->exit) priv->plat->exit(pdev, priv->plat->bsp_priv); @@ -426,7 +433,7 @@ static int stmmac_pltfr_resume(struct device *dev) if (priv->plat->init) priv->plat->init(pdev, priv->plat->bsp_priv); - return stmmac_resume(ndev); + return stmmac_resume(dev); } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 9cc45649f477..a2371aa14a49 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6431,7 +6431,7 @@ static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void niu_netif_stop(struct niu *np) { - np->dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(np->dev); /* prevent tx timeout */ niu_disable_napi(np); diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 2437227712dc..d6ad0fbd054e 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -226,7 +226,7 @@ static void gem_put_cell(struct gem *gp) static inline void gem_netif_stop(struct gem *gp) { - gp->dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(gp->dev); /* prevent tx timeout */ napi_disable(&gp->napi); netif_tx_disable(gp->dev); } diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index af11ed1e0bcc..158213cd6cdd 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -949,7 +949,7 @@ static void dwceqos_adjust_link(struct net_device *ndev) if (status_change) { if (phydev->link) { - lp->ndev->trans_start = jiffies; + netif_trans_update(lp->ndev); dwceqos_link_up(lp); } else { dwceqos_link_down(lp); @@ -2203,7 +2203,7 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev) netdev_sent_queue(ndev, skb->len); spin_unlock_bh(&lp->tx_lock); - ndev->trans_start = jiffies; + netif_trans_update(ndev); return 0; tx_error: diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 14c9d1baa85c..7452b5f9d024 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -1610,7 +1610,6 @@ static inline int bdx_tx_space(struct bdx_priv *priv) * o NETDEV_TX_BUSY Cannot transmit packet, try later * Usually a bug, means queue start/stop flow control is broken in * the driver. Note: the driver must NOT put the skb in its DMA ring. - * o NETDEV_TX_LOCKED Locking failed, please retry quickly. 
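The dev->trans_start writes converted throughout this series all become netif_trans_update() calls. For reference, the helper behaves roughly like the sketch below (simplified from its 4.7-era definition; the real one lives in <linux/netdevice.h>): it timestamps TX queue 0 rather than the legacy net_device field that the same series removes.

#include <linux/netdevice.h>
#include <linux/jiffies.h>

/* Approximation of netif_trans_update(); the conditional avoids a
 * redundant store when the timestamp is already current. */
static inline void netif_trans_update_sketch(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}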
*/ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev) @@ -1630,12 +1629,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, ENTER; local_irq_save(flags); - if (!spin_trylock(&priv->tx_lock)) { - local_irq_restore(flags); - DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", - BDX_DRV_NAME, ndev->name); - return NETDEV_TX_LOCKED; - } + spin_lock(&priv->tx_lock); /* build tx descriptor */ BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ @@ -1707,7 +1701,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, #endif #ifdef BDX_LLTX - ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ + netif_trans_update(ndev); /* NETIF_F_LLTX driver :( */ #endif ndev->stats.tx_packets++; ndev->stats.tx_bytes += skb->len; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 42fdfd4d9d4f..4b08a2f52b3e 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -367,7 +367,6 @@ struct cpsw_priv { spinlock_t lock; struct platform_device *pdev; struct net_device *ndev; - struct device_node *phy_node; struct napi_struct napi_rx; struct napi_struct napi_tx; struct device *dev; @@ -381,7 +380,6 @@ struct cpsw_priv { u32 coal_intvl; u32 bus_freq_mhz; int rx_packet_max; - int host_port; struct clk *clk; u8 mac_addr[ETH_ALEN]; struct cpsw_slave *slaves; @@ -531,21 +529,18 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = { int slave_port = cpsw_get_slave_port(priv, \ slave->slave_num); \ cpsw_ale_add_mcast(priv->ale, addr, \ - 1 << slave_port | 1 << priv->host_port, \ + 1 << slave_port | ALE_PORT_HOST, \ ALE_VLAN, slave->port_vlan, 0); \ } else { \ cpsw_ale_add_mcast(priv->ale, addr, \ - ALE_ALL_PORTS << priv->host_port, \ + ALE_ALL_PORTS, \ 0, 0, 0); \ } \ } while (0) static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num) { - if (priv->host_port == 0) - return slave_num + 1; - else - return slave_num; + return slave_num + 1; } static void cpsw_set_promiscious(struct net_device *ndev, bool enable) @@ -602,8 +597,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1); /* Clear all mcast from ALE */ - cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << - priv->host_port, -1); + cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1); /* Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); @@ -648,8 +642,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); /* Clear all mcast from ALE */ - cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port, - vid); + cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS, vid); if (!netdev_mc_empty(ndev)) { struct netdev_hw_addr *ha; @@ -1092,7 +1085,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries( struct cpsw_priv *priv, struct cpsw_slave *slave, u32 slave_port) { - u32 port_mask = 1 << slave_port | 1 << priv->host_port; + u32 port_mask = 1 << slave_port | ALE_PORT_HOST; if (priv->version == CPSW_VERSION_1) slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN); @@ -1103,7 +1096,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries( cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, port_mask, ALE_VLAN, slave->port_vlan, 0); cpsw_ale_add_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan); + HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan); } static void soft_reset_slave(struct cpsw_slave 
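With NETDEV_TX_LOCKED gone, bdx_tx_transmit() above simply takes its TX lock instead of trying and bailing out. The resulting shape of such an ndo_start_xmit, as a schematic rather than the tehuti code:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct sketch_priv {
	spinlock_t tx_lock;
};

static netdev_tx_t sketch_start_xmit(struct sk_buff *skb,
				     struct net_device *ndev)
{
	struct sketch_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	local_irq_save(flags);
	spin_lock(&priv->tx_lock);	/* wait here instead of NETDEV_TX_LOCKED */

	/* ... post the skb to the hardware ring ... */

	spin_unlock_irqrestore(&priv->tx_lock, flags);
	return NETDEV_TX_OK;
}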
*slave) @@ -1148,31 +1141,39 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1 << slave_port, 0, 0, ALE_MCAST_FWD_2); - if (priv->phy_node) - slave->phy = of_phy_connect(priv->ndev, priv->phy_node, + if (slave->data->phy_node) { + slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node, &cpsw_adjust_link, 0, slave->data->phy_if); - else + if (!slave->phy) { + dev_err(priv->dev, "phy \"%s\" not found on slave %d\n", + slave->data->phy_node->full_name, + slave->slave_num); + return; + } + } else { slave->phy = phy_connect(priv->ndev, slave->data->phy_id, &cpsw_adjust_link, slave->data->phy_if); - if (IS_ERR(slave->phy)) { - dev_err(priv->dev, "phy %s not found on slave %d\n", - slave->data->phy_id, slave->slave_num); - slave->phy = NULL; - } else { - phy_attached_info(slave->phy); + if (IS_ERR(slave->phy)) { + dev_err(priv->dev, + "phy \"%s\" not found on slave %d, err %ld\n", + slave->data->phy_id, slave->slave_num, + PTR_ERR(slave->phy)); + slave->phy = NULL; + return; + } + } - phy_start(slave->phy); + phy_attached_info(slave->phy); - /* Configure GMII_SEL register */ - cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, - slave->slave_num); - } + phy_start(slave->phy); + + /* Configure GMII_SEL register */ + cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num); } static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) { const int vlan = priv->data.default_vlan; - const int port = priv->host_port; u32 reg; int i; int unreg_mcast_mask; @@ -1190,9 +1191,9 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv) else unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; - cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, - ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, - unreg_mcast_mask << port); + cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS, + ALE_ALL_PORTS, ALE_ALL_PORTS, + unreg_mcast_mask); } static void cpsw_init_host_port(struct cpsw_priv *priv) @@ -1205,7 +1206,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) cpsw_ale_start(priv->ale); /* switch to vlan unaware mode */ - cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE, + cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, CPSW_ALE_VLAN_AWARE); control_reg = readl(&priv->regs->control); control_reg |= CPSW_VLAN_AWARE; @@ -1219,14 +1220,14 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) &priv->host_port_regs->cpdma_tx_pri_map); __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map); - cpsw_ale_control_set(priv->ale, priv->host_port, + cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD); if (!priv->data.dual_emac) { - cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, + cpsw_ale_add_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM, 0, 0); cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2); + ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2); } } @@ -1251,12 +1252,12 @@ static int cpsw_ndo_open(struct net_device *ndev) int i, ret; u32 reg; + pm_runtime_get_sync(&priv->pdev->dev); + if (!cpsw_common_res_usage_state(priv)) cpsw_intr_disable(priv); netif_carrier_off(ndev); - pm_runtime_get_sync(&priv->pdev->dev); - reg = priv->version; dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n", @@ -1273,8 +1274,7 @@ static int cpsw_ndo_open(struct net_device *ndev) cpsw_add_default_vlan(priv); else cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan, - ALE_ALL_PORTS << 
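The rewritten cpsw_slave_open() has to handle two different failure conventions, both visible in the hunk above: of_phy_connect() reports failure with NULL, while phy_connect() returns an ERR_PTR(). The distinction in isolation, as a hypothetical helper:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static int sketch_attach_phy(struct net_device *ndev,
			     struct device_node *phy_node, const char *phy_id,
			     phy_interface_t iface,
			     void (*adjust_link)(struct net_device *))
{
	struct phy_device *phy;

	if (phy_node) {
		phy = of_phy_connect(ndev, phy_node, adjust_link, 0, iface);
		if (!phy)			/* NULL on failure */
			return -ENODEV;
	} else {
		phy = phy_connect(ndev, phy_id, adjust_link, iface);
		if (IS_ERR(phy))		/* ERR_PTR() on failure */
			return PTR_ERR(phy);
	}

	phy_start(phy);
	return 0;
}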
priv->host_port, - ALE_ALL_PORTS << priv->host_port, 0, 0); + ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0); if (!cpsw_common_res_usage_state(priv)) { struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0); @@ -1389,7 +1389,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, struct cpsw_priv *priv = netdev_priv(ndev); int ret; - ndev->trans_start = jiffies; + netif_trans_update(ndev); if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { cpsw_err(priv, tx_err, "packet pad failed\n"); @@ -1620,9 +1620,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) flags = ALE_VLAN; } - cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port, + cpsw_ale_del_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM, flags, vid); - cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port, + cpsw_ale_add_ucast(priv->ale, addr->sa_data, HOST_PORT_NUM, flags, vid); memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); @@ -1666,12 +1666,12 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, } ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask, - unreg_mcast_mask << priv->host_port); + unreg_mcast_mask); if (ret != 0) return ret; ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, vid); + HOST_PORT_NUM, ALE_VLAN, vid); if (ret != 0) goto clean_vid; @@ -1683,7 +1683,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, clean_vlan_ucast: cpsw_ale_del_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, vid); + HOST_PORT_NUM, ALE_VLAN, vid); clean_vid: cpsw_ale_del_vlan(priv->ale, vid, 0); return ret; @@ -1738,7 +1738,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, return ret; ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, vid); + HOST_PORT_NUM, ALE_VLAN, vid); if (ret != 0) return ret; @@ -1940,12 +1940,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, slave->port_vlan = data->dual_emac_res_vlan; } -static int cpsw_probe_dt(struct cpsw_priv *priv, +static int cpsw_probe_dt(struct cpsw_platform_data *data, struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; struct device_node *slave_node; - struct cpsw_platform_data *data = &priv->data; int i = 0, ret; u32 prop; @@ -2033,25 +2032,21 @@ static int cpsw_probe_dt(struct cpsw_priv *priv, if (strcmp(slave_node->name, "slave")) continue; - priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0); + slave_data->phy_node = of_parse_phandle(slave_node, + "phy-handle", 0); parp = of_get_property(slave_node, "phy_id", &lenp); - if (of_phy_is_fixed_link(slave_node)) { - struct device_node *phy_node; - struct phy_device *phy_dev; - + if (slave_data->phy_node) { + dev_dbg(&pdev->dev, + "slave[%d] using phy-handle=\"%s\"\n", + i, slave_data->phy_node->full_name); + } else if (of_phy_is_fixed_link(slave_node)) { /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. 
*/ ret = of_phy_register_fixed_link(slave_node); if (ret) return ret; - phy_node = of_node_get(slave_node); - phy_dev = of_phy_find_device(phy_node); - if (!phy_dev) - return -ENODEV; - snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), - PHY_ID_FMT, phy_dev->mdio.bus->id, - phy_dev->mdio.addr); + slave_data->phy_node = of_node_get(slave_node); } else if (parp) { u32 phyid; struct device_node *mdio_node; @@ -2072,7 +2067,9 @@ static int cpsw_probe_dt(struct cpsw_priv *priv, snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), PHY_ID_FMT, mdio->name, phyid); } else { - dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i); + dev_err(&pdev->dev, + "No slave[%d] phy_id, phy-handle, or fixed-link property\n", + i); goto no_phy_slave; } slave_data->phy_if = of_get_phy_mode(slave_node); @@ -2152,7 +2149,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev, priv_sl2->bus_freq_mhz = priv->bus_freq_mhz; priv_sl2->regs = priv->regs; - priv_sl2->host_port = priv->host_port; priv_sl2->host_port_regs = priv->host_port_regs; priv_sl2->wr_regs = priv->wr_regs; priv_sl2->hw_stats = priv->hw_stats; @@ -2275,7 +2271,7 @@ static int cpsw_probe(struct platform_device *pdev) /* Select default pin state */ pinctrl_pm_select_default_state(&pdev->dev); - if (cpsw_probe_dt(priv, pdev)) { + if (cpsw_probe_dt(&priv->data, pdev)) { dev_err(&pdev->dev, "cpsw: platform data missing\n"); ret = -ENODEV; goto clean_runtime_disable_ret; @@ -2321,7 +2317,6 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_runtime_disable_ret; } priv->regs = ss_regs; - priv->host_port = HOST_PORT_NUM; /* Need to enable clocks with runtime PM api to access module * registers diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h index 442a7038e660..e50afd1b2eda 100644 --- a/drivers/net/ethernet/ti/cpsw.h +++ b/drivers/net/ethernet/ti/cpsw.h @@ -18,6 +18,7 @@ #include <linux/phy.h> struct cpsw_slave_data { + struct device_node *phy_node; char phy_id[MII_BUS_ID_SIZE]; int phy_if; u8 mac_addr[ETH_ALEN]; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 5d9abedd6b75..f56d66e6ec15 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1512,7 +1512,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) /* TODO: Add phy read and write and private statistics get feature */ - return phy_mii_ioctl(priv->phydev, ifrq, cmd); + if (priv->phydev) + return phy_mii_ioctl(priv->phydev, ifrq, cmd); + else + return -EOPNOTSUPP; } static int match_first_device(struct device *dev, void *data) @@ -1878,8 +1881,6 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) pdata->hw_ram_addr = auxdata->hw_ram_addr; } - pdev->dev.platform_data = pdata; - return pdata; } @@ -2101,6 +2102,7 @@ static int davinci_emac_remove(struct platform_device *pdev) cpdma_ctlr_destroy(priv->dma); unregister_netdev(ndev); + pm_runtime_disable(&pdev->dev); free_netdev(ndev); return 0; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1d0942c53120..32516661f180 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1272,7 +1272,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (ret) goto drop; - ndev->trans_start = jiffies; + netif_trans_update(ndev); /* Check Tx pool count & stop subqueue if needed */ desc_count = 
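The fixed-link branch above no longer synthesizes a phy_id string: it registers the fixed link and then treats the MAC's own DT node as the PHY node, so the normal phy-handle connect path can be reused. The same steps as a standalone sketch:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_mdio.h>

static int sketch_fixed_link(struct device_node *slave_node,
			     struct device_node **phy_node)
{
	int ret;

	if (!of_phy_is_fixed_link(slave_node))
		return -ENODEV;

	ret = of_phy_register_fixed_link(slave_node);
	if (ret)
		return ret;

	/* For a fixed PHY, the DT node of the PHY is the MAC node itself */
	*phy_node = of_node_get(slave_node);
	return 0;
}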
knav_pool_count(netcp->tx_pool); @@ -1788,7 +1788,7 @@ static void netcp_ndo_tx_timeout(struct net_device *ndev) dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs); netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size); - ndev->trans_start = jiffies; + netif_trans_update(ndev); netif_tx_wake_all_queues(ndev); } diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index a274cd49afe9..561703317312 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -1007,7 +1007,7 @@ static void tlan_tx_timeout(struct net_device *dev) tlan_reset_lists(dev); tlan_read_and_clear_stats(dev, TLAN_IGNORE); tlan_reset_adapter(dev); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 298e059d0498..922a443e3415 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -1883,7 +1883,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) /* Save the timestamp. */ - dev->trans_start = jiffies; + netif_trans_update(dev); #ifdef TILE_NET_PARANOIA @@ -2026,7 +2026,7 @@ static void tile_net_tx_timeout(struct net_device *dev) { PDEBUG("tile_net_tx_timeout()\n"); PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies, - jiffies - dev->trans_start); + jiffies - dev_trans_start(dev)); /* XXX: ISSUE: This doesn't seem useful for us. */ netif_wake_queue(dev); diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 13214a6492ac..743b18266a7c 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c @@ -1622,7 +1622,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl) continue; /* copy hw scan info */ - memcpy(target->hwinfo, scan_info, scan_info->size); + memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size)); target->essid_len = strnlen(scan_info->essid, sizeof(scan_info->essid)); target->rate_len = 0; diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 67610270d171..36a6e8b54d94 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -705,7 +705,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, wmb(); descr->prev->hwdescr->next_descr_addr = descr->bus_addr; - card->netdev->trans_start = jiffies; /* set netdev watchdog timer */ + netif_trans_update(card->netdev); /* set netdev watchdog timer */ return 0; } diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 520cf50a3d5a..01a77145a0fa 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1314,7 +1314,8 @@ static int tsi108_open(struct net_device *dev) data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma, GFP_KERNEL); if (!data->txring) { - pci_free_consistent(0, rxring_size, data->rxring, data->rxdma); + pci_free_consistent(NULL, rxring_size, data->rxring, + data->rxdma); return -ENOMEM; } diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 2b7550c43f78..9d14731cdcb1 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -1758,7 +1758,7 @@ static void rhine_reset_task(struct work_struct *work) 
spin_unlock_bh(&rp->lock); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ dev->stats.tx_errors++; netif_wake_queue(dev); diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig index f98b91d21f33..1981e88c18dc 100644 --- a/drivers/net/ethernet/wiznet/Kconfig +++ b/drivers/net/ethernet/wiznet/Kconfig @@ -69,4 +69,18 @@ config WIZNET_BUS_ANY Performance may decrease compared to explicitly selected bus mode. endchoice +config WIZNET_W5100_SPI + tristate "WIZnet W5100/W5200/W5500 Ethernet support for SPI mode" + depends on WIZNET_BUS_ANY && WIZNET_W5100 + depends on SPI + ---help--- + In SPI mode the host system accesses registers using the SPI + protocol (mode 0) on the SPI bus. + + Performance decreases compared to the other bus interface modes. + In W5100 SPI mode, burst READ/WRITE processing is not provided. + + To compile this driver as a module, choose M here: the module + will be called w5100-spi. + endif # NET_VENDOR_WIZNET diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile index c614535227e8..1e05e1a84208 100644 --- a/drivers/net/ethernet/wiznet/Makefile +++ b/drivers/net/ethernet/wiznet/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_WIZNET_W5100) += w5100.o +obj-$(CONFIG_WIZNET_W5100_SPI) += w5100-spi.o obj-$(CONFIG_WIZNET_W5300) += w5300.o diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c new file mode 100644 index 000000000000..93a2d3c07303 --- /dev/null +++ b/drivers/net/ethernet/wiznet/w5100-spi.c @@ -0,0 +1,466 @@ +/* + * Ethernet driver for the WIZnet W5100/W5200/W5500 chip. + * + * Copyright (C) 2016 Akinobu Mita <akinobu.mita@gmail.com> + * + * Licensed under the GPL-2 or later. + * + * Datasheet: + * http://www.wiznet.co.kr/wp-content/uploads/wiznethome/Chip/W5100/Document/W5100_Datasheet_v1.2.6.pdf + * http://wiznethome.cafe24.com/wp-content/uploads/wiznethome/Chip/W5200/Documents/W5200_DS_V140E.pdf + * http://wizwiki.net/wiki/lib/exe/fetch.php?media=products:w5500:w5500_ds_v106e_141230.pdf + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/of_net.h> +#include <linux/spi/spi.h> + +#include "w5100.h" + +#define W5100_SPI_WRITE_OPCODE 0xf0 +#define W5100_SPI_READ_OPCODE 0x0f + +static int w5100_spi_read(struct net_device *ndev, u32 addr) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[3] = { W5100_SPI_READ_OPCODE, addr >> 8, addr & 0xff }; + u8 data; + int ret; + + ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1); + + return ret ? ret : data; +} + +static int w5100_spi_write(struct net_device *ndev, u32 addr, u8 data) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[4] = { W5100_SPI_WRITE_OPCODE, addr >> 8, addr & 0xff, data}; + + return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); +} + +static int w5100_spi_read16(struct net_device *ndev, u32 addr) +{ + u16 data; + int ret; + + ret = w5100_spi_read(ndev, addr); + if (ret < 0) + return ret; + data = ret << 8; + ret = w5100_spi_read(ndev, addr + 1); + + return ret < 0 ?
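w5100_spi_read()/w5100_spi_write() above show the W5100 SPI framing: a one-byte opcode, a big-endian 16-bit address, and exactly one data byte per transfer (the chip has no burst mode, hence the Kconfig note). A standalone encoder for the same frame:

#include <stdint.h>
#include <stdio.h>

#define W5100_SPI_WRITE_OPCODE	0xf0
#define W5100_SPI_READ_OPCODE	0x0f

/* Returns the number of command bytes to clock out; a read then clocks
 * in exactly one data byte afterwards. */
static int w5100_encode(uint8_t cmd[4], int write, uint16_t addr, uint8_t data)
{
	cmd[0] = write ? W5100_SPI_WRITE_OPCODE : W5100_SPI_READ_OPCODE;
	cmd[1] = addr >> 8;
	cmd[2] = addr & 0xff;
	if (!write)
		return 3;
	cmd[3] = data;
	return 4;
}

int main(void)
{
	uint8_t cmd[4];
	int n = w5100_encode(cmd, 1, 0x0015, 0xff);	/* write to the IR register */

	printf("%d bytes: %02x %02x %02x %02x\n", n, cmd[0], cmd[1], cmd[2], cmd[3]);
	return 0;
}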
ret : data | ret; +} + +static int w5100_spi_write16(struct net_device *ndev, u32 addr, u16 data) +{ + int ret; + + ret = w5100_spi_write(ndev, addr, data >> 8); + if (ret) + return ret; + + return w5100_spi_write(ndev, addr + 1, data & 0xff); +} + +static int w5100_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf, + int len) +{ + int i; + + for (i = 0; i < len; i++) { + int ret = w5100_spi_read(ndev, addr + i); + + if (ret < 0) + return ret; + buf[i] = ret; + } + + return 0; +} + +static int w5100_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf, + int len) +{ + int i; + + for (i = 0; i < len; i++) { + int ret = w5100_spi_write(ndev, addr + i, buf[i]); + + if (ret) + return ret; + } + + return 0; +} + +static const struct w5100_ops w5100_spi_ops = { + .may_sleep = true, + .chip_id = W5100, + .read = w5100_spi_read, + .write = w5100_spi_write, + .read16 = w5100_spi_read16, + .write16 = w5100_spi_write16, + .readbulk = w5100_spi_readbulk, + .writebulk = w5100_spi_writebulk, +}; + +#define W5200_SPI_WRITE_OPCODE 0x80 + +struct w5200_spi_priv { + /* Serialize access to cmd_buf */ + struct mutex cmd_lock; + + /* DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + u8 cmd_buf[4] ____cacheline_aligned; +}; + +static struct w5200_spi_priv *w5200_spi_priv(struct net_device *ndev) +{ + return w5100_ops_priv(ndev); +} + +static int w5200_spi_init(struct net_device *ndev) +{ + struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev); + + mutex_init(&spi_priv->cmd_lock); + + return 0; +} + +static int w5200_spi_read(struct net_device *ndev, u32 addr) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 1 }; + u8 data; + int ret; + + ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1); + + return ret ? ret : data; +} + +static int w5200_spi_write(struct net_device *ndev, u32 addr, u8 data) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[5] = { addr >> 8, addr & 0xff, W5200_SPI_WRITE_OPCODE, 1, data }; + + return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); +} + +static int w5200_spi_read16(struct net_device *ndev, u32 addr) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 2 }; + __be16 data; + int ret; + + ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data)); + + return ret ? 
ret : be16_to_cpu(data); +} + +static int w5200_spi_write16(struct net_device *ndev, u32 addr, u16 data) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[6] = { + addr >> 8, addr & 0xff, + W5200_SPI_WRITE_OPCODE, 2, + data >> 8, data & 0xff + }; + + return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); +} + +static int w5200_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf, + int len) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev); + struct spi_transfer xfer[] = { + { + .tx_buf = spi_priv->cmd_buf, + .len = sizeof(spi_priv->cmd_buf), + }, + { + .rx_buf = buf, + .len = len, + }, + }; + int ret; + + mutex_lock(&spi_priv->cmd_lock); + + spi_priv->cmd_buf[0] = addr >> 8; + spi_priv->cmd_buf[1] = addr; + spi_priv->cmd_buf[2] = len >> 8; + spi_priv->cmd_buf[3] = len; + ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); + + mutex_unlock(&spi_priv->cmd_lock); + + return ret; +} + +static int w5200_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf, + int len) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev); + struct spi_transfer xfer[] = { + { + .tx_buf = spi_priv->cmd_buf, + .len = sizeof(spi_priv->cmd_buf), + }, + { + .tx_buf = buf, + .len = len, + }, + }; + int ret; + + mutex_lock(&spi_priv->cmd_lock); + + spi_priv->cmd_buf[0] = addr >> 8; + spi_priv->cmd_buf[1] = addr; + spi_priv->cmd_buf[2] = W5200_SPI_WRITE_OPCODE | (len >> 8); + spi_priv->cmd_buf[3] = len; + ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); + + mutex_unlock(&spi_priv->cmd_lock); + + return ret; +} + +static const struct w5100_ops w5200_ops = { + .may_sleep = true, + .chip_id = W5200, + .read = w5200_spi_read, + .write = w5200_spi_write, + .read16 = w5200_spi_read16, + .write16 = w5200_spi_write16, + .readbulk = w5200_spi_readbulk, + .writebulk = w5200_spi_writebulk, + .init = w5200_spi_init, +}; + +#define W5500_SPI_BLOCK_SELECT(addr) (((addr) >> 16) & 0x1f) +#define W5500_SPI_READ_CONTROL(addr) (W5500_SPI_BLOCK_SELECT(addr) << 3) +#define W5500_SPI_WRITE_CONTROL(addr) \ + ((W5500_SPI_BLOCK_SELECT(addr) << 3) | BIT(2)) + +struct w5500_spi_priv { + /* Serialize access to cmd_buf */ + struct mutex cmd_lock; + + /* DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + u8 cmd_buf[3] ____cacheline_aligned; +}; + +static struct w5500_spi_priv *w5500_spi_priv(struct net_device *ndev) +{ + return w5100_ops_priv(ndev); +} + +static int w5500_spi_init(struct net_device *ndev) +{ + struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev); + + mutex_init(&spi_priv->cmd_lock); + + return 0; +} + +static int w5500_spi_read(struct net_device *ndev, u32 addr) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[3] = { + addr >> 8, + addr, + W5500_SPI_READ_CONTROL(addr) + }; + u8 data; + int ret; + + ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1); + + return ret ? 
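The W5500 macros above fold a 5-bit block select into bits 20:16 of the driver's 32-bit addresses; on the wire, the control byte carries that block in bits 7:3 plus the read/write bit (BIT(2)). Decoding one of the encoded addresses makes the scheme concrete:

#include <stdint.h>
#include <stdio.h>

#define W5500_BLOCK(addr)	(((addr) >> 16) & 0x1f)

static void w5500_frame(uint32_t addr, int write, uint8_t cmd[3])
{
	cmd[0] = addr >> 8;	/* 16-bit offset, big-endian */
	cmd[1] = addr;
	cmd[2] = (W5500_BLOCK(addr) << 3) | (write ? (1u << 2) : 0);
}

int main(void)
{
	uint8_t cmd[3];

	/* 0x1001e = W5500_Sn_RXMEM_SIZE(0): offset 0x001e in block 1 */
	w5500_frame(0x1001e, 0, cmd);
	printf("%02x %02x %02x\n", cmd[0], cmd[1], cmd[2]);
	return 0;
}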
ret : data; +} + +static int w5500_spi_write(struct net_device *ndev, u32 addr, u8 data) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[4] = { + addr >> 8, + addr, + W5500_SPI_WRITE_CONTROL(addr), + data + }; + + return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); +} + +static int w5500_spi_read16(struct net_device *ndev, u32 addr) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[3] = { + addr >> 8, + addr, + W5500_SPI_READ_CONTROL(addr) + }; + __be16 data; + int ret; + + ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data)); + + return ret ? ret : be16_to_cpu(data); +} + +static int w5500_spi_write16(struct net_device *ndev, u32 addr, u16 data) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + u8 cmd[5] = { + addr >> 8, + addr, + W5500_SPI_WRITE_CONTROL(addr), + data >> 8, + data + }; + + return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0); +} + +static int w5500_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf, + int len) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev); + struct spi_transfer xfer[] = { + { + .tx_buf = spi_priv->cmd_buf, + .len = sizeof(spi_priv->cmd_buf), + }, + { + .rx_buf = buf, + .len = len, + }, + }; + int ret; + + mutex_lock(&spi_priv->cmd_lock); + + spi_priv->cmd_buf[0] = addr >> 8; + spi_priv->cmd_buf[1] = addr; + spi_priv->cmd_buf[2] = W5500_SPI_READ_CONTROL(addr); + ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); + + mutex_unlock(&spi_priv->cmd_lock); + + return ret; +} + +static int w5500_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf, + int len) +{ + struct spi_device *spi = to_spi_device(ndev->dev.parent); + struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev); + struct spi_transfer xfer[] = { + { + .tx_buf = spi_priv->cmd_buf, + .len = sizeof(spi_priv->cmd_buf), + }, + { + .tx_buf = buf, + .len = len, + }, + }; + int ret; + + mutex_lock(&spi_priv->cmd_lock); + + spi_priv->cmd_buf[0] = addr >> 8; + spi_priv->cmd_buf[1] = addr; + spi_priv->cmd_buf[2] = W5500_SPI_WRITE_CONTROL(addr); + ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); + + mutex_unlock(&spi_priv->cmd_lock); + + return ret; +} + +static const struct w5100_ops w5500_ops = { + .may_sleep = true, + .chip_id = W5500, + .read = w5500_spi_read, + .write = w5500_spi_write, + .read16 = w5500_spi_read16, + .write16 = w5500_spi_write16, + .readbulk = w5500_spi_readbulk, + .writebulk = w5500_spi_writebulk, + .init = w5500_spi_init, +}; + +static int w5100_spi_probe(struct spi_device *spi) +{ + const struct spi_device_id *id = spi_get_device_id(spi); + const struct w5100_ops *ops; + int priv_size; + const void *mac = of_get_mac_address(spi->dev.of_node); + + switch (id->driver_data) { + case W5100: + ops = &w5100_spi_ops; + priv_size = 0; + break; + case W5200: + ops = &w5200_ops; + priv_size = sizeof(struct w5200_spi_priv); + break; + case W5500: + ops = &w5500_ops; + priv_size = sizeof(struct w5500_spi_priv); + break; + default: + return -EINVAL; + } + + return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL); +} + +static int w5100_spi_remove(struct spi_device *spi) +{ + return w5100_remove(&spi->dev); +} + +static const struct spi_device_id w5100_spi_ids[] = { + { "w5100", W5100 }, + { "w5200", W5200 }, + { "w5500", W5500 }, + {} +}; +MODULE_DEVICE_TABLE(spi, w5100_spi_ids); + +static struct spi_driver w5100_spi_driver = { + .driver = { + .name = "w5100", + .pm = &w5100_pm_ops, + }, + .probe = 
w5100_spi_probe, + .remove = w5100_spi_remove, + .id_table = w5100_spi_ids, +}; +module_spi_driver(w5100_spi_driver); + +MODULE_DESCRIPTION("WIZnet W5100/W5200/W5500 Ethernet driver for SPI mode"); +MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 8b282d0b169c..4f6255cf62ce 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -27,6 +27,8 @@ #include <linux/irq.h> #include <linux/gpio.h> +#include "w5100.h" + #define DRV_NAME "w5100" #define DRV_VERSION "2012-04-04" @@ -36,7 +38,7 @@ MODULE_ALIAS("platform:"DRV_NAME); MODULE_LICENSE("GPL"); /* - * Registers + * W5100/W5200/W5500 common registers */ #define W5100_COMMON_REGS 0x0000 #define W5100_MR 0x0000 /* Mode Register */ @@ -46,55 +48,115 @@ MODULE_LICENSE("GPL"); #define MR_IND 0x01 /* Indirect mode */ #define W5100_SHAR 0x0009 /* Source MAC address */ #define W5100_IR 0x0015 /* Interrupt Register */ -#define W5100_IMR 0x0016 /* Interrupt Mask Register */ -#define IR_S0 0x01 /* S0 interrupt */ -#define W5100_RTR 0x0017 /* Retry Time-value Register */ -#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */ -#define W5100_RMSR 0x001a /* Receive Memory Size */ -#define W5100_TMSR 0x001b /* Transmit Memory Size */ #define W5100_COMMON_REGS_LEN 0x0040 -#define W5100_S0_REGS 0x0400 -#define W5100_S0_MR 0x0400 /* S0 Mode Register */ -#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscuous) */ -#define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */ -#define W5100_S0_CR 0x0401 /* S0 Command Register */ +#define W5100_Sn_MR 0x0000 /* Sn Mode Register */ +#define W5100_Sn_CR 0x0001 /* Sn Command Register */ +#define W5100_Sn_IR 0x0002 /* Sn Interrupt Register */ +#define W5100_Sn_SR 0x0003 /* Sn Status Register */ +#define W5100_Sn_TX_FSR 0x0020 /* Sn Transmit free memory size */ +#define W5100_Sn_TX_RD 0x0022 /* Sn Transmit memory read pointer */ +#define W5100_Sn_TX_WR 0x0024 /* Sn Transmit memory write pointer */ +#define W5100_Sn_RX_RSR 0x0026 /* Sn Receive free memory size */ +#define W5100_Sn_RX_RD 0x0028 /* Sn Receive memory read pointer */ + +#define S0_REGS(priv) ((priv)->s0_regs) + +#define W5100_S0_MR(priv) (S0_REGS(priv) + W5100_Sn_MR) +#define S0_MR_MACRAW 0x04 /* MAC RAW mode */ +#define S0_MR_MF 0x40 /* MAC Filter for W5100 and W5200 */ +#define W5500_S0_MR_MF 0x80 /* MAC Filter for W5500 */ +#define W5100_S0_CR(priv) (S0_REGS(priv) + W5100_Sn_CR) #define S0_CR_OPEN 0x01 /* OPEN command */ #define S0_CR_CLOSE 0x10 /* CLOSE command */ #define S0_CR_SEND 0x20 /* SEND command */ #define S0_CR_RECV 0x40 /* RECV command */ -#define W5100_S0_IR 0x0402 /* S0 Interrupt Register */ +#define W5100_S0_IR(priv) (S0_REGS(priv) + W5100_Sn_IR) #define S0_IR_SENDOK 0x10 /* complete sending */ #define S0_IR_RECV 0x04 /* receiving data */ -#define W5100_S0_SR 0x0403 /* S0 Status Register */ +#define W5100_S0_SR(priv) (S0_REGS(priv) + W5100_Sn_SR) #define S0_SR_MACRAW 0x42 /* mac raw mode */ -#define W5100_S0_TX_FSR 0x0420 /* S0 Transmit free memory size */ -#define W5100_S0_TX_RD 0x0422 /* S0 Transmit memory read pointer */ -#define W5100_S0_TX_WR 0x0424 /* S0 Transmit memory write pointer */ -#define W5100_S0_RX_RSR 0x0426 /* S0 Receive free memory size */ -#define W5100_S0_RX_RD 0x0428 /* S0 Receive memory read pointer */ +#define W5100_S0_TX_FSR(priv) (S0_REGS(priv) + W5100_Sn_TX_FSR) +#define W5100_S0_TX_RD(priv) (S0_REGS(priv) + W5100_Sn_TX_RD) +#define W5100_S0_TX_WR(priv) (S0_REGS(priv) 
+ W5100_Sn_TX_WR) +#define W5100_S0_RX_RSR(priv) (S0_REGS(priv) + W5100_Sn_RX_RSR) +#define W5100_S0_RX_RD(priv) (S0_REGS(priv) + W5100_Sn_RX_RD) + #define W5100_S0_REGS_LEN 0x0040 +/* + * W5100 and W5200 common registers + */ +#define W5100_IMR 0x0016 /* Interrupt Mask Register */ +#define IR_S0 0x01 /* S0 interrupt */ +#define W5100_RTR 0x0017 /* Retry Time-value Register */ +#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */ + +/* + * W5100 specific register and memory + */ +#define W5100_RMSR 0x001a /* Receive Memory Size */ +#define W5100_TMSR 0x001b /* Transmit Memory Size */ + +#define W5100_S0_REGS 0x0400 + #define W5100_TX_MEM_START 0x4000 -#define W5100_TX_MEM_END 0x5fff -#define W5100_TX_MEM_MASK 0x1fff +#define W5100_TX_MEM_SIZE 0x2000 #define W5100_RX_MEM_START 0x6000 -#define W5100_RX_MEM_END 0x7fff -#define W5100_RX_MEM_MASK 0x1fff +#define W5100_RX_MEM_SIZE 0x2000 + +/* + * W5200 specific register and memory + */ +#define W5200_S0_REGS 0x4000 + +#define W5200_Sn_RXMEM_SIZE(n) (0x401e + (n) * 0x0100) /* Sn RX Memory Size */ +#define W5200_Sn_TXMEM_SIZE(n) (0x401f + (n) * 0x0100) /* Sn TX Memory Size */ + +#define W5200_TX_MEM_START 0x8000 +#define W5200_TX_MEM_SIZE 0x4000 +#define W5200_RX_MEM_START 0xc000 +#define W5200_RX_MEM_SIZE 0x4000 + +/* + * W5500 specific register and memory + * + * W5500 register and memory are organized by multiple blocks. Each one is + * selected by 16bits offset address and 5bits block select bits. So we + * encode it into 32bits address. (lower 16bits is offset address and + * upper 16bits is block select bits) + */ +#define W5500_SIMR 0x0018 /* Socket Interrupt Mask Register */ +#define W5500_RTR 0x0019 /* Retry Time-value Register */ + +#define W5500_S0_REGS 0x10000 + +#define W5500_Sn_RXMEM_SIZE(n) \ + (0x1001e + (n) * 0x40000) /* Sn RX Memory Size */ +#define W5500_Sn_TXMEM_SIZE(n) \ + (0x1001f + (n) * 0x40000) /* Sn TX Memory Size */ + +#define W5500_TX_MEM_START 0x20000 +#define W5500_TX_MEM_SIZE 0x04000 +#define W5500_RX_MEM_START 0x30000 +#define W5500_RX_MEM_SIZE 0x04000 /* * Device driver private data structure */ + struct w5100_priv { - void __iomem *base; - spinlock_t reg_lock; - bool indirect; - u8 (*read)(struct w5100_priv *priv, u16 addr); - void (*write)(struct w5100_priv *priv, u16 addr, u8 data); - u16 (*read16)(struct w5100_priv *priv, u16 addr); - void (*write16)(struct w5100_priv *priv, u16 addr, u16 data); - void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len); - void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len); + const struct w5100_ops *ops; + + /* Socket 0 register offset address */ + u32 s0_regs; + /* Socket 0 TX buffer offset address and size */ + u32 s0_tx_buf; + u16 s0_tx_buf_size; + /* Socket 0 RX buffer offset address and size */ + u32 s0_rx_buf; + u16 s0_rx_buf_size; + int irq; int link_irq; int link_gpio; @@ -103,6 +165,13 @@ struct w5100_priv { struct net_device *ndev; bool promisc; u32 msg_enable; + + struct workqueue_struct *xfer_wq; + struct work_struct rx_work; + struct sk_buff *tx_skb; + struct work_struct tx_work; + struct work_struct setrx_work; + struct work_struct restart_work; }; /************************************************************************ @@ -111,63 +180,122 @@ struct w5100_priv { * ***********************************************************************/ +struct w5100_mmio_priv { + void __iomem *base; + /* Serialize access in indirect address mode */ + spinlock_t reg_lock; +}; + +static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev) 
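The per-chip values that the reworked w5100_priv fields (s0_regs, s0_tx_buf, s0_rx_buf, and their sizes) get programmed with come straight from the defines above. Collected into one table for comparison; the struct is a hypothetical condensation, the numbers are the defined ones:

#include <stdint.h>

struct chip_map_sketch {
	const char *name;
	uint32_t s0_regs;
	uint32_t tx_start, tx_size;
	uint32_t rx_start, rx_size;
};

static const struct chip_map_sketch chip_maps[] = {
	{ "w5100", 0x00400, 0x04000, 0x2000, 0x06000, 0x2000 },
	{ "w5200", 0x04000, 0x08000, 0x4000, 0x0c000, 0x4000 },
	{ "w5500", 0x10000, 0x20000, 0x4000, 0x30000, 0x4000 },
};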
+{ + return w5100_ops_priv(dev); +} + +static inline void __iomem *w5100_mmio(struct net_device *ndev) +{ + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); + + return mmio_priv->base; +} + /* * In direct address mode host system can directly access W5100 registers * after mapping to Memory-Mapped I/O space. * * 0x8000 bytes are required for memory space. */ -static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr) +static inline int w5100_read_direct(struct net_device *ndev, u32 addr) { - return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); + return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT)); } -static inline void w5100_write_direct(struct w5100_priv *priv, - u16 addr, u8 data) +static inline int __w5100_write_direct(struct net_device *ndev, u32 addr, + u8 data) { - iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT)); + iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT)); + + return 0; } -static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr) +static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data) +{ + __w5100_write_direct(ndev, addr, data); + mmiowb(); + + return 0; +} + +static int w5100_read16_direct(struct net_device *ndev, u32 addr) { u16 data; - data = w5100_read_direct(priv, addr) << 8; - data |= w5100_read_direct(priv, addr + 1); + data = w5100_read_direct(ndev, addr) << 8; + data |= w5100_read_direct(ndev, addr + 1); return data; } -static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data) +static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data) { - w5100_write_direct(priv, addr, data >> 8); - w5100_write_direct(priv, addr + 1, data); + __w5100_write_direct(ndev, addr, data >> 8); + __w5100_write_direct(ndev, addr + 1, data); + mmiowb(); + + return 0; } -static void w5100_readbuf_direct(struct w5100_priv *priv, - u16 offset, u8 *buf, int len) +static int w5100_readbulk_direct(struct net_device *ndev, u32 addr, u8 *buf, + int len) { - u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK); int i; - for (i = 0; i < len; i++, addr++) { - if (unlikely(addr > W5100_RX_MEM_END)) - addr = W5100_RX_MEM_START; - *buf++ = w5100_read_direct(priv, addr); - } + for (i = 0; i < len; i++, addr++) + *buf++ = w5100_read_direct(ndev, addr); + + return 0; } -static void w5100_writebuf_direct(struct w5100_priv *priv, - u16 offset, u8 *buf, int len) +static int w5100_writebulk_direct(struct net_device *ndev, u32 addr, + const u8 *buf, int len) { - u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK); int i; - for (i = 0; i < len; i++, addr++) { - if (unlikely(addr > W5100_TX_MEM_END)) - addr = W5100_TX_MEM_START; - w5100_write_direct(priv, addr, *buf++); - } + for (i = 0; i < len; i++, addr++) + __w5100_write_direct(ndev, addr, *buf++); + + mmiowb(); + + return 0; } +static int w5100_mmio_init(struct net_device *ndev) +{ + struct platform_device *pdev = to_platform_device(ndev->dev.parent); + struct w5100_priv *priv = netdev_priv(ndev); + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); + struct resource *mem; + + spin_lock_init(&mmio_priv->reg_lock); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(mmio_priv->base)) + return PTR_ERR(mmio_priv->base); + + netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq); + + return 0; +} + +static const struct w5100_ops w5100_mmio_direct_ops = { + .chip_id = W5100, + .read = 
w5100_read_direct, + .write = w5100_write_direct, + .read16 = w5100_read16_direct, + .write16 = w5100_write16_direct, + .readbulk = w5100_readbulk_direct, + .writebulk = w5100_writebulk_direct, + .init = w5100_mmio_init, +}; + /* * In indirect address mode host system indirectly accesses registers by * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data @@ -179,139 +307,290 @@ static void w5100_writebuf_direct(struct w5100_priv *priv, #define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */ #define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */ -static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr) +static int w5100_read_indirect(struct net_device *ndev, u32 addr) { + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; u8 data; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); - data = w5100_read_direct(priv, W5100_IDM_DR); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + data = w5100_read_direct(ndev, W5100_IDM_DR); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); return data; } -static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data) +static int w5100_write_indirect(struct net_device *ndev, u32 addr, u8 data) { + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); - w5100_write_direct(priv, W5100_IDM_DR, data); - mmiowb(); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + w5100_write_direct(ndev, W5100_IDM_DR, data); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); + + return 0; } -static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr) +static int w5100_read16_indirect(struct net_device *ndev, u32 addr) { + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; u16 data; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); - data = w5100_read_direct(priv, W5100_IDM_DR) << 8; - data |= w5100_read_direct(priv, W5100_IDM_DR); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + data = w5100_read_direct(ndev, W5100_IDM_DR) << 8; + data |= w5100_read_direct(ndev, W5100_IDM_DR); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); return data; } -static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data) +static int w5100_write16_indirect(struct net_device *ndev, u32 addr, u16 data) { + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); - w5100_write_direct(priv, W5100_IDM_DR, data >> 8); - w5100_write_direct(priv, W5100_IDM_DR, data); - mmiowb(); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + __w5100_write_direct(ndev, W5100_IDM_DR, data >> 8); + w5100_write_direct(ndev, W5100_IDM_DR, data); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); + + return 0; } -static void w5100_readbuf_indirect(struct w5100_priv *priv, - u16 offset, u8 *buf, int len) 
+static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf, + int len) { - u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK); + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; int i; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); + spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + + for (i = 0; i < len; i++) + *buf++ = w5100_read_direct(ndev, W5100_IDM_DR); - for (i = 0; i < len; i++, addr++) { - if (unlikely(addr > W5100_RX_MEM_END)) { - addr = W5100_RX_MEM_START; - w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); - } - *buf++ = w5100_read_direct(priv, W5100_IDM_DR); - } mmiowb(); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); + + return 0; } -static void w5100_writebuf_indirect(struct w5100_priv *priv, - u16 offset, u8 *buf, int len) +static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr, + const u8 *buf, int len) { - u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK); + struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev); unsigned long flags; int i; - spin_lock_irqsave(&priv->reg_lock, flags); - w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); + spin_lock_irqsave(&mmio_priv->reg_lock, flags); + w5100_write16_direct(ndev, W5100_IDM_AR, addr); + + for (i = 0; i < len; i++) + __w5100_write_direct(ndev, W5100_IDM_DR, *buf++); - for (i = 0; i < len; i++, addr++) { - if (unlikely(addr > W5100_TX_MEM_END)) { - addr = W5100_TX_MEM_START; - w5100_write16_direct(priv, W5100_IDM_AR, addr); - mmiowb(); - } - w5100_write_direct(priv, W5100_IDM_DR, *buf++); - } mmiowb(); - spin_unlock_irqrestore(&priv->reg_lock, flags); + spin_unlock_irqrestore(&mmio_priv->reg_lock, flags); + + return 0; } +static int w5100_reset_indirect(struct net_device *ndev) +{ + w5100_write_direct(ndev, W5100_MR, MR_RST); + mdelay(5); + w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND); + + return 0; +} + +static const struct w5100_ops w5100_mmio_indirect_ops = { + .chip_id = W5100, + .read = w5100_read_indirect, + .write = w5100_write_indirect, + .read16 = w5100_read16_indirect, + .write16 = w5100_write16_indirect, + .readbulk = w5100_readbulk_indirect, + .writebulk = w5100_writebulk_indirect, + .init = w5100_mmio_init, + .reset = w5100_reset_indirect, +}; + #if defined(CONFIG_WIZNET_BUS_DIRECT) -#define w5100_read w5100_read_direct -#define w5100_write w5100_write_direct -#define w5100_read16 w5100_read16_direct -#define w5100_write16 w5100_write16_direct -#define w5100_readbuf w5100_readbuf_direct -#define w5100_writebuf w5100_writebuf_direct + +static int w5100_read(struct w5100_priv *priv, u32 addr) +{ + return w5100_read_direct(priv->ndev, addr); +} + +static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data) +{ + return w5100_write_direct(priv->ndev, addr, data); +} + +static int w5100_read16(struct w5100_priv *priv, u32 addr) +{ + return w5100_read16_direct(priv->ndev, addr); +} + +static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data) +{ + return w5100_write16_direct(priv->ndev, addr, data); +} + +static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len) +{ + return w5100_readbulk_direct(priv->ndev, addr, buf, len); +} + +static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf, + int len) +{ + return w5100_writebulk_direct(priv->ndev, addr, buf, len); +} #elif 
defined(CONFIG_WIZNET_BUS_INDIRECT) -#define w5100_read w5100_read_indirect -#define w5100_write w5100_write_indirect -#define w5100_read16 w5100_read16_indirect -#define w5100_write16 w5100_write16_indirect -#define w5100_readbuf w5100_readbuf_indirect -#define w5100_writebuf w5100_writebuf_indirect + +static int w5100_read(struct w5100_priv *priv, u32 addr) +{ + return w5100_read_indirect(priv->ndev, addr); +} + +static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data) +{ + return w5100_write_indirect(priv->ndev, addr, data); +} + +static int w5100_read16(struct w5100_priv *priv, u32 addr) +{ + return w5100_read16_indirect(priv->ndev, addr); +} + +static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data) +{ + return w5100_write16_indirect(priv->ndev, addr, data); +} + +static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len) +{ + return w5100_readbulk_indirect(priv->ndev, addr, buf, len); +} + +static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf, + int len) +{ + return w5100_writebulk_indirect(priv->ndev, addr, buf, len); +} #else /* CONFIG_WIZNET_BUS_ANY */ -#define w5100_read priv->read -#define w5100_write priv->write -#define w5100_read16 priv->read16 -#define w5100_write16 priv->write16 -#define w5100_readbuf priv->readbuf -#define w5100_writebuf priv->writebuf + +static int w5100_read(struct w5100_priv *priv, u32 addr) +{ + return priv->ops->read(priv->ndev, addr); +} + +static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data) +{ + return priv->ops->write(priv->ndev, addr, data); +} + +static int w5100_read16(struct w5100_priv *priv, u32 addr) +{ + return priv->ops->read16(priv->ndev, addr); +} + +static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data) +{ + return priv->ops->write16(priv->ndev, addr, data); +} + +static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len) +{ + return priv->ops->readbulk(priv->ndev, addr, buf, len); +} + +static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf, + int len) +{ + return priv->ops->writebulk(priv->ndev, addr, buf, len); +} + #endif +static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len) +{ + u32 addr; + int remain = 0; + int ret; + const u32 mem_start = priv->s0_rx_buf; + const u16 mem_size = priv->s0_rx_buf_size; + + offset %= mem_size; + addr = mem_start + offset; + + if (offset + len > mem_size) { + remain = (offset + len) % mem_size; + len = mem_size - offset; + } + + ret = w5100_readbulk(priv, addr, buf, len); + if (ret || !remain) + return ret; + + return w5100_readbulk(priv, mem_start, buf + len, remain); +} + +static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf, + int len) +{ + u32 addr; + int ret; + int remain = 0; + const u32 mem_start = priv->s0_tx_buf; + const u16 mem_size = priv->s0_tx_buf_size; + + offset %= mem_size; + addr = mem_start + offset; + + if (offset + len > mem_size) { + remain = (offset + len) % mem_size; + len = mem_size - offset; + } + + ret = w5100_writebulk(priv, addr, buf, len); + if (ret || !remain) + return ret; + + return w5100_writebulk(priv, mem_start, buf + len, remain); +} + +static int w5100_reset(struct w5100_priv *priv) +{ + if (priv->ops->reset) + return priv->ops->reset(priv->ndev); + + w5100_write(priv, W5100_MR, MR_RST); + mdelay(5); + w5100_write(priv, W5100_MR, MR_PB); + + return 0; +} + static int w5100_command(struct w5100_priv *priv, u16 cmd) { - unsigned long timeout = jiffies + msecs_to_jiffies(100); + 
unsigned long timeout; - w5100_write(priv, W5100_S0_CR, cmd); - mmiowb(); + w5100_write(priv, W5100_S0_CR(priv), cmd); + + timeout = jiffies + msecs_to_jiffies(100); - while (w5100_read(priv, W5100_S0_CR) != 0) { + while (w5100_read(priv, W5100_S0_CR(priv)) != 0) { if (time_after(jiffies, timeout)) return -EIO; cpu_relax(); @@ -323,47 +602,124 @@ static int w5100_command(struct w5100_priv *priv, u16 cmd) static void w5100_write_macaddr(struct w5100_priv *priv) { struct net_device *ndev = priv->ndev; - int i; - for (i = 0; i < ETH_ALEN; i++) - w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]); - mmiowb(); + w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN); } -static void w5100_hw_reset(struct w5100_priv *priv) +static void w5100_socket_intr_mask(struct w5100_priv *priv, u8 mask) { - w5100_write_direct(priv, W5100_MR, MR_RST); - mmiowb(); - mdelay(5); - w5100_write_direct(priv, W5100_MR, priv->indirect ? - MR_PB | MR_AI | MR_IND : - MR_PB); - mmiowb(); - w5100_write(priv, W5100_IMR, 0); - w5100_write_macaddr(priv); + u32 imr; + + if (priv->ops->chip_id == W5500) + imr = W5500_SIMR; + else + imr = W5100_IMR; + + w5100_write(priv, imr, mask); +} +static void w5100_enable_intr(struct w5100_priv *priv) +{ + w5100_socket_intr_mask(priv, IR_S0); +} + +static void w5100_disable_intr(struct w5100_priv *priv) +{ + w5100_socket_intr_mask(priv, 0); +} + +static void w5100_memory_configure(struct w5100_priv *priv) +{ /* Configure 16K of internal memory * as 8K RX buffer and 8K TX buffer */ w5100_write(priv, W5100_RMSR, 0x03); w5100_write(priv, W5100_TMSR, 0x03); - mmiowb(); +} + +static void w5200_memory_configure(struct w5100_priv *priv) +{ + int i; + + /* Configure internal RX memory as 16K RX buffer and + * internal TX memory as 16K TX buffer + */ + w5100_write(priv, W5200_Sn_RXMEM_SIZE(0), 0x10); + w5100_write(priv, W5200_Sn_TXMEM_SIZE(0), 0x10); + + for (i = 1; i < 8; i++) { + w5100_write(priv, W5200_Sn_RXMEM_SIZE(i), 0); + w5100_write(priv, W5200_Sn_TXMEM_SIZE(i), 0); + } +} + +static void w5500_memory_configure(struct w5100_priv *priv) +{ + int i; + + /* Configure internal RX memory as 16K RX buffer and + * internal TX memory as 16K TX buffer + */ + w5100_write(priv, W5500_Sn_RXMEM_SIZE(0), 0x10); + w5100_write(priv, W5500_Sn_TXMEM_SIZE(0), 0x10); + + for (i = 1; i < 8; i++) { + w5100_write(priv, W5500_Sn_RXMEM_SIZE(i), 0); + w5100_write(priv, W5500_Sn_TXMEM_SIZE(i), 0); + } +} + +static int w5100_hw_reset(struct w5100_priv *priv) +{ + u32 rtr; + + w5100_reset(priv); + + w5100_disable_intr(priv); + w5100_write_macaddr(priv); + + switch (priv->ops->chip_id) { + case W5100: + w5100_memory_configure(priv); + rtr = W5100_RTR; + break; + case W5200: + w5200_memory_configure(priv); + rtr = W5100_RTR; + break; + case W5500: + w5500_memory_configure(priv); + rtr = W5500_RTR; + break; + default: + return -EINVAL; + } + + if (w5100_read16(priv, rtr) != RTR_DEFAULT) + return -ENODEV; + + return 0; } static void w5100_hw_start(struct w5100_priv *priv) { - w5100_write(priv, W5100_S0_MR, priv->promisc ? 
- S0_MR_MACRAW : S0_MR_MACRAW_MF); - mmiowb(); + u8 mode = S0_MR_MACRAW; + + if (!priv->promisc) { + if (priv->ops->chip_id == W5500) + mode |= W5500_S0_MR_MF; + else + mode |= S0_MR_MF; + } + + w5100_write(priv, W5100_S0_MR(priv), mode); w5100_command(priv, S0_CR_OPEN); - w5100_write(priv, W5100_IMR, IR_S0); - mmiowb(); + w5100_enable_intr(priv); } static void w5100_hw_close(struct w5100_priv *priv) { - w5100_write(priv, W5100_IMR, 0); - mmiowb(); + w5100_disable_intr(priv); w5100_command(priv, S0_CR_CLOSE); } @@ -412,20 +768,17 @@ static int w5100_get_regs_len(struct net_device *ndev) } static void w5100_get_regs(struct net_device *ndev, - struct ethtool_regs *regs, void *_buf) + struct ethtool_regs *regs, void *buf) { struct w5100_priv *priv = netdev_priv(ndev); - u8 *buf = _buf; - u16 i; regs->version = 1; - for (i = 0; i < W5100_COMMON_REGS_LEN; i++) - *buf++ = w5100_read(priv, W5100_COMMON_REGS + i); - for (i = 0; i < W5100_S0_REGS_LEN; i++) - *buf++ = w5100_read(priv, W5100_S0_REGS + i); + w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN); + buf += W5100_COMMON_REGS_LEN; + w5100_readbulk(priv, S0_REGS(priv), buf, W5100_S0_REGS_LEN); } -static void w5100_tx_timeout(struct net_device *ndev) +static void w5100_restart(struct net_device *ndev) { struct w5100_priv *priv = netdev_priv(ndev); @@ -433,74 +786,138 @@ static void w5100_tx_timeout(struct net_device *ndev) w5100_hw_reset(priv); w5100_hw_start(priv); ndev->stats.tx_errors++; - ndev->trans_start = jiffies; + netif_trans_update(ndev); netif_wake_queue(ndev); } -static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) +static void w5100_restart_work(struct work_struct *work) +{ + struct w5100_priv *priv = container_of(work, struct w5100_priv, + restart_work); + + w5100_restart(priv->ndev); +} + +static void w5100_tx_timeout(struct net_device *ndev) { struct w5100_priv *priv = netdev_priv(ndev); - u16 offset; - netif_stop_queue(ndev); + if (priv->ops->may_sleep) + schedule_work(&priv->restart_work); + else + w5100_restart(ndev); +} - offset = w5100_read16(priv, W5100_S0_TX_WR); +static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb) +{ + struct w5100_priv *priv = netdev_priv(ndev); + u16 offset; + + offset = w5100_read16(priv, W5100_S0_TX_WR(priv)); w5100_writebuf(priv, offset, skb->data, skb->len); - w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len); - mmiowb(); + w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len); ndev->stats.tx_bytes += skb->len; ndev->stats.tx_packets++; dev_kfree_skb(skb); w5100_command(priv, S0_CR_SEND); +} + +static void w5100_tx_work(struct work_struct *work) +{ + struct w5100_priv *priv = container_of(work, struct w5100_priv, + tx_work); + struct sk_buff *skb = priv->tx_skb; + + priv->tx_skb = NULL; + + if (WARN_ON(!skb)) + return; + w5100_tx_skb(priv->ndev, skb); +} + +static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev) +{ + struct w5100_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + + if (priv->ops->may_sleep) { + WARN_ON(priv->tx_skb); + priv->tx_skb = skb; + queue_work(priv->xfer_wq, &priv->tx_work); + } else { + w5100_tx_skb(ndev, skb); + } return NETDEV_TX_OK; } -static int w5100_napi_poll(struct napi_struct *napi, int budget) +static struct sk_buff *w5100_rx_skb(struct net_device *ndev) { - struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi); - struct net_device *ndev = priv->ndev; + struct w5100_priv *priv = netdev_priv(ndev); struct sk_buff *skb; - int rx_count; u16 rx_len; u16 
offset; u8 header[2]; + u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR(priv)); - for (rx_count = 0; rx_count < budget; rx_count++) { - u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR); - if (rx_buf_len == 0) - break; + if (rx_buf_len == 0) + return NULL; - offset = w5100_read16(priv, W5100_S0_RX_RD); - w5100_readbuf(priv, offset, header, 2); - rx_len = get_unaligned_be16(header) - 2; - - skb = netdev_alloc_skb_ip_align(ndev, rx_len); - if (unlikely(!skb)) { - w5100_write16(priv, W5100_S0_RX_RD, - offset + rx_buf_len); - w5100_command(priv, S0_CR_RECV); - ndev->stats.rx_dropped++; - return -ENOMEM; - } + offset = w5100_read16(priv, W5100_S0_RX_RD(priv)); + w5100_readbuf(priv, offset, header, 2); + rx_len = get_unaligned_be16(header) - 2; - skb_put(skb, rx_len); - w5100_readbuf(priv, offset + 2, skb->data, rx_len); - w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len); - mmiowb(); + skb = netdev_alloc_skb_ip_align(ndev, rx_len); + if (unlikely(!skb)) { + w5100_write16(priv, W5100_S0_RX_RD(priv), offset + rx_buf_len); w5100_command(priv, S0_CR_RECV); - skb->protocol = eth_type_trans(skb, ndev); + ndev->stats.rx_dropped++; + return NULL; + } + + skb_put(skb, rx_len); + w5100_readbuf(priv, offset + 2, skb->data, rx_len); + w5100_write16(priv, W5100_S0_RX_RD(priv), offset + 2 + rx_len); + w5100_command(priv, S0_CR_RECV); + skb->protocol = eth_type_trans(skb, ndev); + + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += rx_len; + + return skb; +} + +static void w5100_rx_work(struct work_struct *work) +{ + struct w5100_priv *priv = container_of(work, struct w5100_priv, + rx_work); + struct sk_buff *skb; + + while ((skb = w5100_rx_skb(priv->ndev))) + netif_rx_ni(skb); + + w5100_enable_intr(priv); +} + +static int w5100_napi_poll(struct napi_struct *napi, int budget) +{ + struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi); + int rx_count; + + for (rx_count = 0; rx_count < budget; rx_count++) { + struct sk_buff *skb = w5100_rx_skb(priv->ndev); - netif_receive_skb(skb); - ndev->stats.rx_packets++; - ndev->stats.rx_bytes += rx_len; + if (skb) + netif_receive_skb(skb); + else + break; } if (rx_count < budget) { napi_complete(napi); - w5100_write(priv, W5100_IMR, IR_S0); - mmiowb(); + w5100_enable_intr(priv); } return rx_count; @@ -511,11 +928,10 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance) struct net_device *ndev = ndev_instance; struct w5100_priv *priv = netdev_priv(ndev); - int ir = w5100_read(priv, W5100_S0_IR); + int ir = w5100_read(priv, W5100_S0_IR(priv)); if (!ir) return IRQ_NONE; - w5100_write(priv, W5100_S0_IR, ir); - mmiowb(); + w5100_write(priv, W5100_S0_IR(priv), ir); if (ir & S0_IR_SENDOK) { netif_dbg(priv, tx_done, ndev, "tx done\n"); @@ -523,11 +939,12 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance) } if (ir & S0_IR_RECV) { - if (napi_schedule_prep(&priv->napi)) { - w5100_write(priv, W5100_IMR, 0); - mmiowb(); + w5100_disable_intr(priv); + + if (priv->ops->may_sleep) + queue_work(priv->xfer_wq, &priv->rx_work); + else if (napi_schedule_prep(&priv->napi)) __napi_schedule(&priv->napi); - } } return IRQ_HANDLED; @@ -551,6 +968,14 @@ static irqreturn_t w5100_detect_link(int irq, void *ndev_instance) return IRQ_HANDLED; } +static void w5100_setrx_work(struct work_struct *work) +{ + struct w5100_priv *priv = container_of(work, struct w5100_priv, + setrx_work); + + w5100_hw_start(priv); +} + static void w5100_set_rx_mode(struct net_device *ndev) { struct w5100_priv *priv = netdev_priv(ndev); @@ -558,7 +983,11 @@ static 
void w5100_set_rx_mode(struct net_device *ndev) if (priv->promisc != set_promisc) { priv->promisc = set_promisc; - w5100_hw_start(priv); + + if (priv->ops->may_sleep) + schedule_work(&priv->setrx_work); + else + w5100_hw_start(priv); } } @@ -620,95 +1049,100 @@ static const struct net_device_ops w5100_netdev_ops = { .ndo_change_mtu = eth_change_mtu, }; -static int w5100_hw_probe(struct platform_device *pdev) +static int w5100_mmio_probe(struct platform_device *pdev) { struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev); - struct net_device *ndev = platform_get_drvdata(pdev); - struct w5100_priv *priv = netdev_priv(ndev); - const char *name = netdev_name(ndev); + const void *mac_addr = NULL; struct resource *mem; - int mem_size; + const struct w5100_ops *ops; int irq; - int ret; - if (data && is_valid_ether_addr(data->mac_addr)) { - memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); - } else { - eth_hw_addr_random(ndev); - } + if (data && is_valid_ether_addr(data->mac_addr)) + mac_addr = data->mac_addr; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - priv->base = devm_ioremap_resource(&pdev->dev, mem); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); - - mem_size = resource_size(mem); - - spin_lock_init(&priv->reg_lock); - priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE; - if (priv->indirect) { - priv->read = w5100_read_indirect; - priv->write = w5100_write_indirect; - priv->read16 = w5100_read16_indirect; - priv->write16 = w5100_write16_indirect; - priv->readbuf = w5100_readbuf_indirect; - priv->writebuf = w5100_writebuf_indirect; - } else { - priv->read = w5100_read_direct; - priv->write = w5100_write_direct; - priv->read16 = w5100_read16_direct; - priv->write16 = w5100_write16_direct; - priv->readbuf = w5100_readbuf_direct; - priv->writebuf = w5100_writebuf_direct; - } - - w5100_hw_reset(priv); - if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT) - return -ENODEV; + if (resource_size(mem) < W5100_BUS_DIRECT_SIZE) + ops = &w5100_mmio_indirect_ops; + else + ops = &w5100_mmio_direct_ops; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; - ret = request_irq(irq, w5100_interrupt, - IRQ_TYPE_LEVEL_LOW, name, ndev); - if (ret < 0) - return ret; - priv->irq = irq; - priv->link_gpio = data ? data->link_gpio : -EINVAL; - if (gpio_is_valid(priv->link_gpio)) { - char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL); - if (!link_name) - return -ENOMEM; - snprintf(link_name, 16, "%s-link", name); - priv->link_irq = gpio_to_irq(priv->link_gpio); - if (request_any_context_irq(priv->link_irq, w5100_detect_link, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - link_name, priv->ndev) < 0) - priv->link_gpio = -EINVAL; - } + return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv), + mac_addr, irq, data ? 
data->link_gpio : -EINVAL); +} - netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq); - return 0; +static int w5100_mmio_remove(struct platform_device *pdev) +{ + return w5100_remove(&pdev->dev); +} + +void *w5100_ops_priv(const struct net_device *ndev) +{ + return netdev_priv(ndev) + + ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN); } +EXPORT_SYMBOL_GPL(w5100_ops_priv); -static int w5100_probe(struct platform_device *pdev) +int w5100_probe(struct device *dev, const struct w5100_ops *ops, + int sizeof_ops_priv, const void *mac_addr, int irq, + int link_gpio) { struct w5100_priv *priv; struct net_device *ndev; int err; + size_t alloc_size; - ndev = alloc_etherdev(sizeof(*priv)); + alloc_size = sizeof(*priv); + if (sizeof_ops_priv) { + alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); + alloc_size += sizeof_ops_priv; + } + alloc_size += NETDEV_ALIGN - 1; + + ndev = alloc_etherdev(alloc_size); if (!ndev) return -ENOMEM; - SET_NETDEV_DEV(ndev, &pdev->dev); - platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + dev_set_drvdata(dev, ndev); priv = netdev_priv(ndev); + + switch (ops->chip_id) { + case W5100: + priv->s0_regs = W5100_S0_REGS; + priv->s0_tx_buf = W5100_TX_MEM_START; + priv->s0_tx_buf_size = W5100_TX_MEM_SIZE; + priv->s0_rx_buf = W5100_RX_MEM_START; + priv->s0_rx_buf_size = W5100_RX_MEM_SIZE; + break; + case W5200: + priv->s0_regs = W5200_S0_REGS; + priv->s0_tx_buf = W5200_TX_MEM_START; + priv->s0_tx_buf_size = W5200_TX_MEM_SIZE; + priv->s0_rx_buf = W5200_RX_MEM_START; + priv->s0_rx_buf_size = W5200_RX_MEM_SIZE; + break; + case W5500: + priv->s0_regs = W5500_S0_REGS; + priv->s0_tx_buf = W5500_TX_MEM_START; + priv->s0_tx_buf_size = W5500_TX_MEM_SIZE; + priv->s0_rx_buf = W5500_RX_MEM_START; + priv->s0_rx_buf_size = W5500_RX_MEM_SIZE; + break; + default: + err = -EINVAL; + goto err_register; + } + priv->ndev = ndev; + priv->ops = ops; + priv->irq = irq; + priv->link_gpio = link_gpio; ndev->netdev_ops = &w5100_netdev_ops; ndev->ethtool_ops = &w5100_ethtool_ops; - ndev->watchdog_timeo = HZ; netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16); /* This chip doesn't support VLAN packets with normal MTU, @@ -720,22 +1154,76 @@ static int w5100_probe(struct platform_device *pdev) if (err < 0) goto err_register; - err = w5100_hw_probe(pdev); - if (err < 0) - goto err_hw_probe; + priv->xfer_wq = create_workqueue(netdev_name(ndev)); + if (!priv->xfer_wq) { + err = -ENOMEM; + goto err_wq; + } + + INIT_WORK(&priv->rx_work, w5100_rx_work); + INIT_WORK(&priv->tx_work, w5100_tx_work); + INIT_WORK(&priv->setrx_work, w5100_setrx_work); + INIT_WORK(&priv->restart_work, w5100_restart_work); + + if (mac_addr) + memcpy(ndev->dev_addr, mac_addr, ETH_ALEN); + else + eth_hw_addr_random(ndev); + + if (priv->ops->init) { + err = priv->ops->init(priv->ndev); + if (err) + goto err_hw; + } + + err = w5100_hw_reset(priv); + if (err) + goto err_hw; + + if (ops->may_sleep) { + err = request_threaded_irq(priv->irq, NULL, w5100_interrupt, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + netdev_name(ndev), ndev); + } else { + err = request_irq(priv->irq, w5100_interrupt, + IRQF_TRIGGER_LOW, netdev_name(ndev), ndev); + } + if (err) + goto err_hw; + + if (gpio_is_valid(priv->link_gpio)) { + char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL); + + if (!link_name) { + err = -ENOMEM; + goto err_gpio; + } + snprintf(link_name, 16, "%s-link", netdev_name(ndev)); + priv->link_irq = gpio_to_irq(priv->link_gpio); + if (request_any_context_irq(priv->link_irq, w5100_detect_link, + IRQF_TRIGGER_RISING | + 
IRQF_TRIGGER_FALLING, + link_name, priv->ndev) < 0) + priv->link_gpio = -EINVAL; + } return 0; -err_hw_probe: +err_gpio: + free_irq(priv->irq, ndev); +err_hw: + destroy_workqueue(priv->xfer_wq); +err_wq: unregister_netdev(ndev); err_register: free_netdev(ndev); return err; } +EXPORT_SYMBOL_GPL(w5100_probe); -static int w5100_remove(struct platform_device *pdev) +int w5100_remove(struct device *dev) { - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct w5100_priv *priv = netdev_priv(ndev); w5100_hw_reset(priv); @@ -743,16 +1231,21 @@ static int w5100_remove(struct platform_device *pdev) if (gpio_is_valid(priv->link_gpio)) free_irq(priv->link_irq, ndev); + flush_work(&priv->setrx_work); + flush_work(&priv->restart_work); + flush_workqueue(priv->xfer_wq); + destroy_workqueue(priv->xfer_wq); + unregister_netdev(ndev); free_netdev(ndev); return 0; } +EXPORT_SYMBOL_GPL(w5100_remove); #ifdef CONFIG_PM_SLEEP static int w5100_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct w5100_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) { @@ -766,8 +1259,7 @@ static int w5100_suspend(struct device *dev) static int w5100_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct net_device *ndev = platform_get_drvdata(pdev); + struct net_device *ndev = dev_get_drvdata(dev); struct w5100_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) { @@ -783,15 +1275,15 @@ static int w5100_resume(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ -static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume); +SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume); +EXPORT_SYMBOL_GPL(w5100_pm_ops); -static struct platform_driver w5100_driver = { +static struct platform_driver w5100_mmio_driver = { .driver = { .name = DRV_NAME, .pm = &w5100_pm_ops, }, - .probe = w5100_probe, - .remove = w5100_remove, + .probe = w5100_mmio_probe, + .remove = w5100_mmio_remove, }; - -module_platform_driver(w5100_driver); +module_platform_driver(w5100_mmio_driver); diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h new file mode 100644 index 000000000000..17983a3b8d6c --- /dev/null +++ b/drivers/net/ethernet/wiznet/w5100.h @@ -0,0 +1,37 @@ +/* + * Ethernet driver for the WIZnet W5100 chip. + * + * Copyright (C) 2006-2008 WIZnet Co.,Ltd. + * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru> + * + * Licensed under the GPL-2 or later. 
+ */ + +enum { + W5100, + W5200, + W5500, +}; + +struct w5100_ops { + bool may_sleep; + int chip_id; + int (*read)(struct net_device *ndev, u32 addr); + int (*write)(struct net_device *ndev, u32 addr, u8 data); + int (*read16)(struct net_device *ndev, u32 addr); + int (*write16)(struct net_device *ndev, u32 addr, u16 data); + int (*readbulk)(struct net_device *ndev, u32 addr, u8 *buf, int len); + int (*writebulk)(struct net_device *ndev, u32 addr, const u8 *buf, + int len); + int (*reset)(struct net_device *ndev); + int (*init)(struct net_device *ndev); +}; + +void *w5100_ops_priv(const struct net_device *ndev); + +int w5100_probe(struct device *dev, const struct w5100_ops *ops, + int sizeof_ops_priv, const void *mac_addr, int irq, + int link_gpio); +int w5100_remove(struct device *dev); + +extern const struct dev_pm_ops w5100_pm_ops; diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 8da7b930ff59..0b37ce9f28f1 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -362,7 +362,7 @@ static void w5300_tx_timeout(struct net_device *ndev) w5300_hw_reset(priv); w5300_hw_start(priv); ndev->stats.tx_errors++; - ndev->trans_start = jiffies; + netif_trans_update(ndev); netif_wake_queue(ndev); } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 5a1068df7038..739708712022 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -584,7 +584,7 @@ static void temac_device_reset(struct net_device *ndev) dev_err(&ndev->dev, "Error setting TEMAC options\n"); /* Init Driver variable */ - ndev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(ndev); /* prevent tx timeout */ } static void temac_adjust_link(struct net_device *ndev) diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 4684644703cc..8c7f5be51e62 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -508,7 +508,7 @@ static void axienet_device_reset(struct net_device *ndev) axienet_set_multicast_list(ndev); axienet_setoptions(ndev, lp->options); - ndev->trans_start = jiffies; + netif_trans_update(ndev); } /** diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index e324b3092380..3cee84a24815 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -531,7 +531,7 @@ static void xemaclite_tx_timeout(struct net_device *dev) } /* To exclude tx timeout */ - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ /* We're all ready to go. 
Start the queue */ netif_wake_queue(dev); @@ -563,7 +563,7 @@ static void xemaclite_tx_handler(struct net_device *dev) dev->stats.tx_bytes += lp->deferred_skb->len; dev_kfree_skb_irq(lp->deferred_skb); lp->deferred_skb = NULL; - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); } } diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index d56f8693202b..7b44968e02e6 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1199,7 +1199,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work) struct net_device *dev = local->dev; /* reset the card */ do_reset(dev,1); - dev->trans_start = jiffies; /* prevent tx timeout */ + netif_trans_update(dev); /* prevent tx timeout */ netif_wake_queue(dev); }
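
A standalone sketch of the W5500 address encoding described in the comment added to w5100.c above: the driver packs the 16-bit offset address and the 5 block select bits into one u32, so a bus backend only needs two masks to recover the frame fields. The helper names below are hypothetical, chosen for illustration, and are not functions from the patch.

#include <stdint.h>
#include <stdio.h>

#define W5500_S0_REGS 0x10000u           /* block 1: socket 0 registers */

/* lower 16 bits carry the offset address */
static uint16_t w5500_offset(uint32_t addr)
{
	return addr & 0xffff;
}

/* upper bits carry the 5 block select bits */
static unsigned int w5500_block(uint32_t addr)
{
	return (addr >> 16) & 0x1f;
}

int main(void)
{
	/* hypothetical socket 0 register at offset 0x0e within its block */
	uint32_t addr = W5500_S0_REGS + 0x0e;

	printf("offset 0x%04x, block %u\n",
	       (unsigned int)w5500_offset(addr), w5500_block(addr));
	return 0;
}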
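
The new w5100_readbuf()/w5100_writebuf() helpers treat the socket buffer as a circular window of mem_size bytes starting at mem_start and split any access that crosses the end into at most two bulk transfers. A minimal user-space sketch of that wrap-around logic, assuming a fake memory array in place of the chip and a stub read_bulk() standing in for priv->ops->readbulk():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MEM_START 0x6000u   /* cf. W5100_RX_MEM_START */
#define MEM_SIZE  0x2000u   /* cf. W5100_RX_MEM_SIZE */

static uint8_t chip_mem[0x10000];   /* fake device address space */

/* stand-in for the bus-specific bulk read callback */
static int read_bulk(uint32_t addr, uint8_t *buf, int len)
{
	memcpy(buf, &chip_mem[addr], len);
	return 0;
}

static int read_circular(uint16_t offset, uint8_t *buf, int len)
{
	int remain = 0;
	int ret;

	offset %= MEM_SIZE;
	if (offset + len > MEM_SIZE) {
		/* access crosses the end: split into two transfers */
		remain = (offset + len) % MEM_SIZE;
		len = MEM_SIZE - offset;
	}

	ret = read_bulk(MEM_START + offset, buf, len);
	if (ret || !remain)
		return ret;

	/* wrapped: fetch the tail from the start of the window */
	return read_bulk(MEM_START, buf + len, remain);
}

int main(void)
{
	uint8_t buf[8];

	/* a read starting 4 bytes before the end wraps to the start */
	read_circular(MEM_SIZE - 4, buf, sizeof(buf));
	printf("read %zu bytes across the wrap boundary\n", sizeof(buf));
	return 0;
}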
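
The other central idea in the w5100 rework is dispatching on ops->may_sleep: a bus whose accessors can sleep gets its TX, RX, rx-mode, and restart paths deferred to the driver's workqueue, while MMIO buses keep the inline NAPI/IRQ path. A simplified sketch of that dispatch shape; the types and names are stand-ins rather than the kernel structures, and the SPI mention is an assumption since no sleeping bus appears in this diff:

#include <stdbool.h>
#include <stdio.h>

struct fake_priv {
	bool may_sleep;   /* mirrors priv->ops->may_sleep (true for, say, SPI) */
};

static void tx_now(void)
{
	puts("tx done inline (atomic-safe bus)");
}

static void queue_tx_work(void)
{
	puts("tx deferred to workqueue (sleeping bus)");
}

/* mirrors the shape of w5100_start_tx() in the patch */
static void start_tx(struct fake_priv *priv)
{
	if (priv->may_sleep)
		queue_tx_work();   /* queue_work(priv->xfer_wq, &priv->tx_work) */
	else
		tx_now();          /* w5100_tx_skb(ndev, skb) */
}

int main(void)
{
	struct fake_priv mmio = { .may_sleep = false };
	struct fake_priv spi  = { .may_sleep = true };

	start_tx(&mmio);
	start_tx(&spi);
	return 0;
}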