Diffstat (limited to 'drivers/net/ethernet/broadcom')
27 files changed, 1314 insertions, 475 deletions
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f15a8fc6dfc9..f33b25fbca63 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -574,6 +574,34 @@ static int bcm_sysport_set_wol(struct net_device *dev, return 0; } +static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv, + u32 usecs, u32 pkts) +{ + u32 reg; + + reg = rdma_readl(priv, RDMA_MBDONE_INTR); + reg &= ~(RDMA_INTR_THRESH_MASK | + RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT); + reg |= pkts; + reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT; + rdma_writel(priv, reg, RDMA_MBDONE_INTR); +} + +static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring, + struct ethtool_coalesce *ec) +{ + struct bcm_sysport_priv *priv = ring->priv; + u32 reg; + + reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index)); + reg &= ~(RING_INTR_THRESH_MASK | + RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT); + reg |= ec->tx_max_coalesced_frames; + reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) << + RING_TIMEOUT_SHIFT; + tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index)); +} + static int bcm_sysport_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { @@ -589,6 +617,7 @@ static int bcm_sysport_get_coalesce(struct net_device *dev, ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000; ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK; + ec->use_adaptive_rx_coalesce = priv->dim.use_dim; return 0; } @@ -597,8 +626,9 @@ static int bcm_sysport_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct bcm_sysport_priv *priv = netdev_priv(dev); + struct net_dim_cq_moder moder; + u32 usecs, pkts; unsigned int i; - u32 reg; /* Base system clock is 125Mhz, DMA timeout is this reference clock * divided by 1024, which yield roughly 8.192 us, our maximum value has @@ -611,26 +641,28 @@ static int bcm_sysport_set_coalesce(struct net_device *dev, return -EINVAL; if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) || - (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)) + (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) || + ec->use_adaptive_tx_coalesce) return -EINVAL; - for (i = 0; i < dev->num_tx_queues; i++) { - reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i)); - reg &= ~(RING_INTR_THRESH_MASK | - RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT); - reg |= ec->tx_max_coalesced_frames; - reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) << - RING_TIMEOUT_SHIFT; - tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i)); + for (i = 0; i < dev->num_tx_queues; i++) + bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec); + + priv->rx_coalesce_usecs = ec->rx_coalesce_usecs; + priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + usecs = priv->rx_coalesce_usecs; + pkts = priv->rx_max_coalesced_frames; + + if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) { + moder = net_dim_get_def_profile(priv->dim.dim.mode); + usecs = moder.usec; + pkts = moder.pkts; } - reg = rdma_readl(priv, RDMA_MBDONE_INTR); - reg &= ~(RDMA_INTR_THRESH_MASK | - RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT); - reg |= ec->rx_max_coalesced_frames; - reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) << - RDMA_TIMEOUT_SHIFT; - rdma_writel(priv, reg, RDMA_MBDONE_INTR); + priv->dim.use_dim = ec->use_adaptive_rx_coalesce; + + /* Apply desired coalescing parameters */ + bcm_sysport_set_rx_coalesce(priv, usecs, pkts); return 0; } 
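The bcmsysport.c coalescing hunks above convert the ethtool microsecond setting into hardware timeout ticks: the 125 MHz system clock divided by 1024 gives a tick of roughly 8.192 us, so the driver computes DIV_ROUND_UP(usecs * 1000, 8192) and programs the result into the RDMA/ring timeout field. A minimal standalone sketch of that conversion, for illustration only (user-space C, not driver code; DIV_ROUND_UP mirrors the kernel macro, and the driver additionally checks that the result fits RDMA_TIMEOUT_MASK):

#include <stdint.h>
#include <stdio.h>

/* Same rounding helper as the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * One timeout tick = 1024 / 125 MHz = 8.192 us.  Working in
 * nanoseconds (usecs * 1000, divided by 8192) keeps the math integral
 * while rounding the requested delay up to the next whole tick.
 */
static uint32_t rx_usecs_to_ticks(uint32_t usecs)
{
	return DIV_ROUND_UP(usecs * 1000u, 8192u);
}

int main(void)
{
	/* e.g. rx-usecs 100 -> 13 ticks, i.e. ~106.5 us effective timeout */
	printf("100 us -> %u ticks\n", rx_usecs_to_ticks(100));
	return 0;
}

Rounding up rather than down means a nonzero request never collapses to a zero timeout, which matches the driver's requirement that usecs and max_coalesced_frames are not both zero.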
@@ -709,6 +741,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, struct bcm_sysport_stats64 *stats64 = &priv->stats64; struct net_device *ndev = priv->netdev; unsigned int processed = 0, to_process; + unsigned int processed_bytes = 0; struct bcm_sysport_cb *cb; struct sk_buff *skb; unsigned int p_index; @@ -800,6 +833,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, */ skb_pull(skb, sizeof(*rsb) + 2); len -= (sizeof(*rsb) + 2); + processed_bytes += len; /* UniMAC may forward CRC */ if (priv->crc_fwd) { @@ -824,6 +858,9 @@ next: priv->rx_read_ptr = 0; } + priv->dim.packets = processed; + priv->dim.bytes = processed_bytes; + return processed; } @@ -855,10 +892,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { - unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; struct net_device *ndev = priv->netdev; + unsigned int txbds_processed = 0; struct bcm_sysport_cb *cb; + unsigned int txbds_ready; + unsigned int c_index; u32 hw_ind; /* Clear status before servicing to reduce spurious interrupts */ @@ -871,29 +910,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, /* Compute how many descriptors have been processed since last call */ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; - ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); - - last_c_index = ring->c_index; - num_tx_cbs = ring->size; - - c_index &= (num_tx_cbs - 1); - - if (c_index >= last_c_index) - last_tx_cn = c_index - last_c_index; - else - last_tx_cn = num_tx_cbs - last_c_index + c_index; + txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK; netif_dbg(priv, tx_done, ndev, - "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", - ring->index, c_index, last_tx_cn, last_c_index); + "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + ring->index, ring->c_index, c_index, txbds_ready); - while (last_tx_cn-- > 0) { - cb = ring->cbs + last_c_index; + while (txbds_processed < txbds_ready) { + cb = &ring->cbs[ring->clean_index]; bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); ring->desc_count++; - last_c_index++; - last_c_index &= (num_tx_cbs - 1); + txbds_processed++; + + if (likely(ring->clean_index < ring->size - 1)) + ring->clean_index++; + else + ring->clean_index = 0; } u64_stats_update_begin(&priv->syncp); @@ -976,6 +1009,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) { struct bcm_sysport_priv *priv = container_of(napi, struct bcm_sysport_priv, napi); + struct net_dim_sample dim_sample; unsigned int work_done = 0; work_done = bcm_sysport_desc_rx(priv, budget); @@ -998,6 +1032,12 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget) intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE); } + if (priv->dim.use_dim) { + net_dim_sample(priv->dim.event_ctr, priv->dim.packets, + priv->dim.bytes, &dim_sample); + net_dim(&priv->dim.dim, dim_sample); + } + return work_done; } @@ -1016,6 +1056,20 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n"); } +static void bcm_sysport_dim_work(struct work_struct *work) +{ + struct net_dim *dim = container_of(work, struct net_dim, work); + struct bcm_sysport_net_dim *ndim = + container_of(dim, struct 
bcm_sysport_net_dim, dim); + struct bcm_sysport_priv *priv = + container_of(ndim, struct bcm_sysport_priv, dim); + struct net_dim_cq_moder cur_profile = + net_dim_get_profile(dim->mode, dim->profile_ix); + + bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts); + dim->state = NET_DIM_START_MEASURE; +} + /* RX and misc interrupt routine */ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) { @@ -1034,6 +1088,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id) } if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) { + priv->dim.event_ctr++; if (likely(napi_schedule_prep(&priv->napi))) { /* disable RX interrupts */ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE); @@ -1137,7 +1192,7 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, u32 csum_info; u8 ip_proto; u16 csum_start; - u16 ip_ver; + __be16 ip_ver; /* Re-allocate SKB if needed */ if (unlikely(skb_headroom(skb) < sizeof(*tsb))) { @@ -1156,12 +1211,12 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, memset(tsb, 0, sizeof(*tsb)); if (skb->ip_summed == CHECKSUM_PARTIAL) { - ip_ver = htons(skb->protocol); + ip_ver = skb->protocol; switch (ip_ver) { - case ETH_P_IP: + case htons(ETH_P_IP): ip_proto = ip_hdr(skb)->protocol; break; - case ETH_P_IPV6: + case htons(ETH_P_IPV6): ip_proto = ipv6_hdr(skb)->nexthdr; break; default: @@ -1175,7 +1230,8 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb, if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { csum_info |= L4_LENGTH_VALID; - if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) + if (ip_proto == IPPROTO_UDP && + ip_ver == htons(ETH_P_IP)) csum_info |= L4_UDP; } else { csum_info = 0; @@ -1358,6 +1414,37 @@ out: phy_print_status(phydev); } +static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv, + void (*cb)(struct work_struct *work)) +{ + struct bcm_sysport_net_dim *dim = &priv->dim; + + INIT_WORK(&dim->dim.work, cb); + dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; + dim->event_ctr = 0; + dim->packets = 0; + dim->bytes = 0; +} + +static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv) +{ + struct bcm_sysport_net_dim *dim = &priv->dim; + struct net_dim_cq_moder moder; + u32 usecs, pkts; + + usecs = priv->rx_coalesce_usecs; + pkts = priv->rx_max_coalesced_frames; + + /* If DIM was enabled, re-apply default parameters */ + if (dim->use_dim) { + moder = net_dim_get_def_profile(dim->dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + bcm_sysport_set_rx_coalesce(priv, usecs, pkts); +} + static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, unsigned int index) { @@ -1394,6 +1481,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); ring->index = index; ring->size = size; + ring->clean_index = 0; ring->alloc_size = ring->size; ring->desc_cpu = p; ring->desc_count = ring->size; @@ -1597,8 +1685,6 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv) rdma_writel(priv, 0, RDMA_END_ADDR_HI); rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO); - rdma_writel(priv, 1, RDMA_MBDONE_INTR); - netif_dbg(priv, hw, priv->netdev, "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n", priv->num_rx_bds, priv->rx_bds); @@ -1766,6 +1852,8 @@ static void bcm_sysport_netif_start(struct net_device *dev) struct bcm_sysport_priv *priv = netdev_priv(dev); /* Enable NAPI */ + bcm_sysport_init_dim(priv, bcm_sysport_dim_work); + bcm_sysport_init_rx_coalesce(priv); 
napi_enable(&priv->napi); /* Enable RX interrupt and TX ring full interrupt */ @@ -1951,6 +2039,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev) /* stop all software from updating hardware */ netif_tx_stop_all_queues(dev); napi_disable(&priv->napi); + cancel_work_sync(&priv->dim.dim.work); phy_stop(dev->phydev); /* mask all interrupts */ @@ -2055,14 +2144,21 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { .ndo_select_queue = bcm_sysport_select_queue, }; -static int bcm_sysport_map_queues(struct net_device *dev, +static int bcm_sysport_map_queues(struct notifier_block *nb, struct dsa_notifier_register_info *info) { - struct bcm_sysport_priv *priv = netdev_priv(dev); struct bcm_sysport_tx_ring *ring; + struct bcm_sysport_priv *priv; struct net_device *slave_dev; unsigned int num_tx_queues; unsigned int q, start, port; + struct net_device *dev; + + priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); + if (priv->netdev != info->master) + return 0; + + dev = info->master; /* We can't be setting up queue inspection for non directly attached * switches @@ -2085,11 +2181,12 @@ static int bcm_sysport_map_queues(struct net_device *dev, if (priv->is_lite) netif_set_real_num_tx_queues(slave_dev, slave_dev->num_tx_queues / 2); + num_tx_queues = slave_dev->real_num_tx_queues; if (priv->per_port_num_tx_queues && priv->per_port_num_tx_queues != num_tx_queues) - netdev_warn(slave_dev, "asymetric number of per-port queues\n"); + netdev_warn(slave_dev, "asymmetric number of per-port queues\n"); priv->per_port_num_tx_queues = num_tx_queues; @@ -2112,7 +2209,7 @@ static int bcm_sysport_map_queues(struct net_device *dev, return 0; } -static int bcm_sysport_dsa_notifier(struct notifier_block *unused, +static int bcm_sysport_dsa_notifier(struct notifier_block *nb, unsigned long event, void *ptr) { struct dsa_notifier_register_info *info; @@ -2122,7 +2219,7 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused, info = ptr; - return notifier_from_errno(bcm_sysport_map_queues(info->master, info)); + return notifier_from_errno(bcm_sysport_map_queues(nb, info)); } #define REV_FMT "v%2x.%02x" @@ -2270,6 +2367,7 @@ static int bcm_sysport_probe(struct platform_device *pdev) /* libphy will adjust the link state accordingly */ netif_carrier_off(dev); + priv->rx_max_coalesced_frames = 1; u64_stats_init(&priv->syncp); priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index f5a984c1c986..d6e5d0cbf3a3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -12,6 +12,7 @@ #define __BCM_SYSPORT_H #include <linux/if_vlan.h> +#include <linux/net_dim.h> /* Receive/transmit descriptor format */ #define DESC_ADDR_HI_STATUS_LEN 0x00 @@ -695,6 +696,14 @@ struct bcm_sysport_hw_params { unsigned int num_rx_desc_words; }; +struct bcm_sysport_net_dim { + u16 use_dim; + u16 event_ctr; + unsigned long packets; + unsigned long bytes; + struct net_dim dim; +}; + /* Software view of the TX ring */ struct bcm_sysport_tx_ring { spinlock_t lock; /* Ring lock for tx reclaim/xmit */ @@ -706,7 +715,7 @@ struct bcm_sysport_tx_ring { unsigned int desc_count; /* Number of descriptors */ unsigned int curr_desc; /* Current descriptor */ unsigned int c_index; /* Last consumer index */ - unsigned int p_index; /* Current producer index */ + unsigned int clean_index; /* Current clean index */ struct bcm_sysport_cb *cbs; /* Transmit control 
blocks */ struct dma_desc *desc_cpu; /* CPU view of the descriptor */ struct bcm_sysport_priv *priv; /* private context backpointer */ @@ -743,6 +752,10 @@ struct bcm_sysport_priv { unsigned int rx_read_ptr; unsigned int rx_c_index; + struct bcm_sysport_net_dim dim; + u32 rx_max_coalesced_frames; + u32 rx_coalesce_usecs; + /* PHY device */ struct device_node *phy_dn; phy_interface_t phy_interface; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 8eef9fb6b1fe..e6ea8e61f96d 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -533,7 +533,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac, int i; for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) { - int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN; + u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1); + unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN; slot = &ring->slots[i]; dev_kfree_skb(slot->skb); @@ -1190,7 +1191,7 @@ static int bgmac_open(struct net_device *net_dev) bgmac_chip_init(bgmac); err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED, - KBUILD_MODNAME, net_dev); + net_dev->name, net_dev); if (err < 0) { dev_err(bgmac->dev, "IRQ request error: %d!\n", err); bgmac_dma_cleanup(bgmac); @@ -1492,6 +1493,8 @@ int bgmac_enet_probe(struct bgmac *bgmac) struct net_device *net_dev = bgmac->net_dev; int err; + bgmac_chip_intrs_off(bgmac); + net_dev->irq = bgmac->irq; SET_NETDEV_DEV(net_dev, bgmac->dev); dev_set_drvdata(bgmac->dev, bgmac); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 4040d846da8e..40d02fec2747 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -479,9 +479,9 @@ struct bgmac_rx_header { struct bgmac { union { struct { - void *base; - void *idm_base; - void *nicpm_base; + void __iomem *base; + void __iomem *idm_base; + void __iomem *nicpm_base; } plat; struct { struct bcma_device *core; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 5e34b34f7740..9ffc4a8c5fc7 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -87,7 +87,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax); static int disable_msi = 0; -module_param(disable_msi, int, S_IRUGO); +module_param(disable_msi, int, 0444); MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); typedef enum { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 352beff796ae..d847e1b9c37b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -166,6 +166,12 @@ do { \ #define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset)) #define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset)) +#define REG_WR_RELAXED(bp, offset, val) \ + writel_relaxed((u32)val, REG_ADDR(bp, offset)) + +#define REG_WR16_RELAXED(bp, offset, val) \ + writew_relaxed((u16)val, REG_ADDR(bp, offset)) + #define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset)) #define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset)) #define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset)) @@ -758,10 +764,8 @@ struct bnx2x_fastpath { #if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT) #error "Min DB doorbell stride is 8" #endif -#define DOORBELL(bp, cid, val) \ - do { \ - writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \ - } while (0) +#define DOORBELL_RELAXED(bp, cid, val) \ + writel_relaxed((u32)(val), 
(bp)->doorbells + ((bp)->db_size * (cid))) /* TX CSUM helpers */ #define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index d7c98e807ca8..95871576ab92 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4153,9 +4153,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) wmb(); txdata->tx_db.data.prod += nbd; - barrier(); + /* make sure descriptor update is observed by HW */ + wmb(); - DOORBELL(bp, txdata->cid, txdata->tx_db.raw); + DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); mmiowb(); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index a5265e1344f1..a8ce5c55bbb0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -522,8 +522,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp, wmb(); for (i = 0; i < sizeof(rx_prods)/4; i++) - REG_WR(bp, fp->ustorm_rx_prods_offset + i*4, - ((u32 *)&rx_prods)[i]); + REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4, + ((u32 *)&rx_prods)[i]); mmiowb(); /* keep prod updates ordered */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 1e33abde4a3e..da18aa239acb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2591,8 +2591,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) wmb(); txdata->tx_db.data.prod += 2; - barrier(); - DOORBELL(bp, txdata->cid, txdata->tx_db.raw); + /* make sure descriptor update is observed by the HW */ + wmb(); + DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw); mmiowb(); barrier(); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 74fc9af4aadb..c766ae23bc74 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -97,29 +97,29 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1H); MODULE_FIRMWARE(FW_FILE_NAME_E2); int bnx2x_num_queues; -module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO); +module_param_named(num_queues, bnx2x_num_queues, int, 0444); MODULE_PARM_DESC(num_queues, " Set number of queues (default is as a number of CPUs)"); static int disable_tpa; -module_param(disable_tpa, int, S_IRUGO); +module_param(disable_tpa, int, 0444); MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); static int int_mode; -module_param(int_mode, int, S_IRUGO); +module_param(int_mode, int, 0444); MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " "(1 INT#x; 2 MSI)"); static int dropless_fc; -module_param(dropless_fc, int, S_IRUGO); +module_param(dropless_fc, int, 0444); MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring"); static int mrrs = -1; -module_param(mrrs, int, S_IRUGO); +module_param(mrrs, int, 0444); MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)"); static int debug; -module_param(debug, int, S_IRUGO); +module_param(debug, int, 0444); MODULE_PARM_DESC(debug, " Default debug msglevel"); static struct workqueue_struct *bnx2x_wq; @@ -3817,8 +3817,8 @@ static void bnx2x_sp_prod_update(struct bnx2x *bp) */ mb(); - REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), - bp->spq_prod_idx); + REG_WR16_RELAXED(bp, 
BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func), + bp->spq_prod_idx); mmiowb(); } @@ -13913,7 +13913,7 @@ static void bnx2x_register_phc(struct bnx2x *bp) bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); if (IS_ERR(bp->ptp_clock)) { bp->ptp_clock = NULL; - BNX2X_ERR("PTP clock registeration failed\n"); + BNX2X_ERR("PTP clock registration failed\n"); } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 76a4668c50fe..8e0a317b31f7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -170,7 +170,9 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping) wmb(); /* Trigger the PF FW */ - writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid); + writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid); + + mmiowb(); /* Wait for PF to complete */ while ((tout >= 0) && (!*done)) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1500243b9886..f83769d8047b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { u16 vlan_proto = tpa_info->metadata >> RX_CMP_FLAGS2_METADATA_TPID_SFT; - u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; + u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); } @@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); - u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; + u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); @@ -1922,7 +1922,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) /* Sync BD data before updating doorbell */ wmb(); - bnxt_db_write(bp, db, DB_KEY_TX | prod); + bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod); } cpr->cp_raw_cons = raw_cons; @@ -2317,6 +2317,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp) if (rc) return rc; + ring->grp_idx = i; rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; mem_size = rxr->rx_agg_bmap_size / 8; rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); @@ -2389,6 +2390,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) if (rc) return rc; + ring->grp_idx = txr->bnapi->index; if (bp->tx_push_size) { dma_addr_t mapping; @@ -2442,8 +2444,10 @@ static void bnxt_free_cp_rings(struct bnxt *bp) static int bnxt_alloc_cp_rings(struct bnxt *bp) { - int i, rc; + int i, rc, ulp_base_vec, ulp_msix; + ulp_msix = bnxt_get_ulp_msix_num(bp); + ulp_base_vec = bnxt_get_ulp_msix_base(bp); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr; @@ -2458,6 +2462,11 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) rc = bnxt_alloc_ring(bp, ring); if (rc) return rc; + + if (ulp_msix && i >= ulp_base_vec) + ring->map_idx = i + ulp_msix; + else + ring->map_idx = i; } return 0; } @@ -3059,12 +3068,21 @@ static void bnxt_free_stats(struct bnxt *bp) u32 size, i; struct pci_dev *pdev = bp->pdev; + bp->flags &= ~BNXT_FLAG_PORT_STATS; + 
bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT; + if (bp->hw_rx_port_stats) { dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, bp->hw_rx_port_stats, bp->hw_rx_port_stats_map); bp->hw_rx_port_stats = NULL; - bp->flags &= ~BNXT_FLAG_PORT_STATS; + } + + if (bp->hw_rx_port_stats_ext) { + dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), + bp->hw_rx_port_stats_ext, + bp->hw_rx_port_stats_ext_map); + bp->hw_rx_port_stats_ext = NULL; } if (!bp->bnapi) @@ -3120,6 +3138,21 @@ static int bnxt_alloc_stats(struct bnxt *bp) bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + sizeof(struct rx_port_stats) + 512; bp->flags |= BNXT_FLAG_PORT_STATS; + + /* Display extended statistics only if FW supports it */ + if (bp->hwrm_spec_code < 0x10804 || + bp->hwrm_spec_code == 0x10900) + return 0; + + bp->hw_rx_port_stats_ext = + dma_zalloc_coherent(&pdev->dev, + sizeof(struct rx_port_stats_ext), + &bp->hw_rx_port_stats_ext_map, + GFP_KERNEL); + if (!bp->hw_rx_port_stats_ext) + return 0; + + bp->flags |= BNXT_FLAG_PORT_STATS_EXT; } return 0; } @@ -3357,6 +3390,15 @@ static void bnxt_disable_int(struct bnxt *bp) } } +static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n) +{ + struct bnxt_napi *bnapi = bp->bnapi[n]; + struct bnxt_cp_ring_info *cpr; + + cpr = &bnapi->cp_ring; + return cpr->cp_ring_struct.map_idx; +} + static void bnxt_disable_int_sync(struct bnxt *bp) { int i; @@ -3364,8 +3406,11 @@ static void bnxt_disable_int_sync(struct bnxt *bp) atomic_inc(&bp->intr_sem); bnxt_disable_int(bp); - for (i = 0; i < bp->cp_nr_rings; i++) - synchronize_irq(bp->irq_tbl[i].vector); + for (i = 0; i < bp->cp_nr_rings; i++) { + int map_idx = bnxt_cp_num_to_irq_num(bp, i); + + synchronize_irq(bp->irq_tbl[map_idx].vector); + } } static void bnxt_enable_int(struct bnxt *bp) @@ -3398,7 +3443,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, int i, intr_process, rc, tmo_count; struct input *req = msg; u32 *data = msg; - __le32 *resp_len, *valid; + __le32 *resp_len; + u8 *valid; u16 cp_ring_id, len = 0; struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; @@ -3450,6 +3496,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, i = 0; tmo_count = timeout * 40; + resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; if (intr_process) { /* Wait until hwrm response cmpl interrupt is processed */ while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && @@ -3462,9 +3509,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, le16_to_cpu(req->req_type)); return -1; } + len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> + HWRM_RESP_LEN_SFT; + valid = bp->hwrm_cmd_resp_addr + len - 1; } else { /* Check if response len is updated */ - resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; for (i = 0; i < tmo_count; i++) { len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> HWRM_RESP_LEN_SFT; @@ -3480,10 +3529,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, return -1; } - /* Last word of resp contains valid bit */ - valid = bp->hwrm_cmd_resp_addr + len - 4; + /* Last byte of resp contains valid bit */ + valid = bp->hwrm_cmd_resp_addr + len - 1; for (i = 0; i < 5; i++) { - if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK) + /* make sure we read from updated DMA memory */ + dma_rmb(); + if (*valid) break; udelay(1); } @@ -3496,6 +3547,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } } + /* Zero valid bit for compatibility. 
Valid bit in an older spec + * may become a new field in a newer spec. We must make sure that + * a new field not implemented by old spec will read zero. + */ + *valid = 0; rc = le16_to_cpu(resp->error_code); if (rc && !silent) netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", @@ -3577,9 +3633,13 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) FUNC_DRV_RGTR_REQ_ENABLES_VER); req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); - req.ver_maj = DRV_VER_MAJ; - req.ver_min = DRV_VER_MIN; - req.ver_upd = DRV_VER_UPD; + req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE); + req.ver_maj_8b = DRV_VER_MAJ; + req.ver_min_8b = DRV_VER_MIN; + req.ver_upd_8b = DRV_VER_UPD; + req.ver_maj = cpu_to_le16(DRV_VER_MAJ); + req.ver_min = cpu_to_le16(DRV_VER_MIN); + req.ver_upd = cpu_to_le16(DRV_VER_UPD); if (BNXT_PF(bp)) { u32 data[8]; @@ -3847,6 +3907,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; struct hwrm_vnic_tpa_cfg_input req = {0}; + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return 0; + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); if (tpa_flags) { @@ -3995,6 +4058,13 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) return rc; } +static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) +{ + if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP) + return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE; + return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE; +} + int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) { unsigned int ring = 0, grp_idx; @@ -4050,8 +4120,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) - req.flags |= - cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE); + req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } @@ -4132,9 +4201,13 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { - if (resp->flags & - cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)) + u32 flags = le32_to_cpu(resp->flags); + + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP) bp->flags |= BNXT_FLAG_NEW_RSS_CAP; + if (flags & + VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP) + bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP; } mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -4201,12 +4274,12 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp) static int hwrm_ring_alloc_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, - u32 ring_type, u32 map_index, - u32 stats_ctx_id) + u32 ring_type, u32 map_index) { int rc = 0, err = 0; struct hwrm_ring_alloc_input req = {0}; struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_ring_grp_info *grp_info; u16 ring_id; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); @@ -4228,10 +4301,10 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, case HWRM_RING_ALLOC_TX: req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; /* Association of transmit ring with completion ring */ - req.cmpl_ring_id = - cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id); + grp_info = &bp->grp_info[ring->grp_idx]; + req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); req.length = cpu_to_le32(bp->tx_ring_mask + 1); - req.stat_ctx_id = 
cpu_to_le32(stats_ctx_id); + req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); req.queue_id = cpu_to_le16(ring->queue_id); break; case HWRM_RING_ALLOC_RX: @@ -4318,10 +4391,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; + u32 map_idx = ring->map_idx; - cpr->cp_doorbell = bp->bar1 + i * 0x80; - rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i, - INVALID_STATS_CTX_ID); + cpr->cp_doorbell = bp->bar1 + map_idx * 0x80; + rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, + map_idx); if (rc) goto err_out; BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons); @@ -4337,11 +4411,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) for (i = 0; i < bp->tx_nr_rings; i++) { struct bnxt_tx_ring_info *txr = &bp->tx_ring[i]; struct bnxt_ring_struct *ring = &txr->tx_ring_struct; - u32 map_idx = txr->bnapi->index; - u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx; + u32 map_idx = i; rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, - map_idx, fw_stats_ctx); + map_idx); if (rc) goto err_out; txr->tx_doorbell = bp->bar1 + map_idx * 0x80; @@ -4353,7 +4426,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) u32 map_idx = rxr->bnapi->index; rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, - map_idx, INVALID_STATS_CTX_ID); + map_idx); if (rc) goto err_out; rxr->rx_doorbell = bp->bar1 + map_idx * 0x80; @@ -4366,13 +4439,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; - u32 grp_idx = rxr->bnapi->index; + u32 grp_idx = ring->grp_idx; u32 map_idx = grp_idx + bp->rx_nr_rings; rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_AGG, - map_idx, - INVALID_STATS_CTX_ID); + map_idx); if (rc) goto err_out; @@ -4558,18 +4630,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) return rc; } -static int -bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) +static void +__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, + int tx_rings, int rx_rings, int ring_grps, + int cp_rings, int vnics) { - struct hwrm_func_cfg_input req = {0}; u32 enables = 0; - int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); + bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); + req->fid = cpu_to_le16(0xffff); enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; - req.num_tx_rings = cpu_to_le16(tx_rings); + req->num_tx_rings = cpu_to_le16(tx_rings); if (bp->flags & BNXT_FLAG_NEW_RM) { enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | @@ -4578,16 +4649,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; - req.num_rx_rings = cpu_to_le16(rx_rings); - req.num_hw_ring_grps = cpu_to_le16(ring_grps); - req.num_cmpl_rings = cpu_to_le16(cp_rings); - req.num_stat_ctxs = req.num_cmpl_rings; - req.num_vnics = cpu_to_le16(vnics); + req->num_rx_rings = cpu_to_le16(rx_rings); + req->num_hw_ring_grps = cpu_to_le16(ring_grps); + req->num_cmpl_rings = cpu_to_le16(cp_rings); + req->num_stat_ctxs = req->num_cmpl_rings; + req->num_vnics = cpu_to_le16(vnics); } - if (!enables) + req->enables = cpu_to_le32(enables); +} + +static void +__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, + struct hwrm_func_vf_cfg_input *req, int tx_rings, + int rx_rings, int ring_grps, int cp_rings, + int vnics) +{ + u32 enables = 0; + + bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); + enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; + enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; + enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; + enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; + enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; + + req->num_tx_rings = cpu_to_le16(tx_rings); + req->num_rx_rings = cpu_to_le16(rx_rings); + req->num_hw_ring_grps = cpu_to_le16(ring_grps); + req->num_cmpl_rings = cpu_to_le16(cp_rings); + req->num_stat_ctxs = req->num_cmpl_rings; + req->num_vnics = cpu_to_le16(vnics); + + req->enables = cpu_to_le32(enables); +} + +static int +bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int vnics) +{ + struct hwrm_func_cfg_input req = {0}; + int rc; + + __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, + cp_rings, vnics); + if (!req.enables) return 0; - req.enables = cpu_to_le32(enables); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -4604,7 +4712,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int vnics) { struct hwrm_func_vf_cfg_input req = {0}; - u32 enables = 0; int rc; if (!(bp->flags & BNXT_FLAG_NEW_RM)) { @@ -4612,22 +4719,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return 0; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); - enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; - enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; - enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0; - enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; - enables |= vnics ? 
FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; - - req.num_tx_rings = cpu_to_le16(tx_rings); - req.num_rx_rings = cpu_to_le16(rx_rings); - req.num_hw_ring_grps = cpu_to_le16(ring_grps); - req.num_cmpl_rings = cpu_to_le16(cp_rings); - req.num_stat_ctxs = req.num_cmpl_rings; - req.num_vnics = cpu_to_le16(vnics); - - req.enables = cpu_to_le32(enables); + __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, + cp_rings, vnics); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -4645,20 +4738,59 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic); } +static int bnxt_cp_rings_in_use(struct bnxt *bp) +{ + int cp = bp->cp_nr_rings; + int ulp_msix, ulp_base; + + ulp_msix = bnxt_get_ulp_msix_num(bp); + if (ulp_msix) { + ulp_base = bnxt_get_ulp_msix_base(bp); + cp += ulp_msix; + if ((ulp_base + ulp_msix) > cp) + cp = ulp_base + ulp_msix; + } + return cp; +} + +static bool bnxt_need_reserve_rings(struct bnxt *bp) +{ + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int cp = bnxt_cp_rings_in_use(bp); + int rx = bp->rx_nr_rings; + int vnic = 1, grp = rx; + + if (bp->hwrm_spec_code < 0x10601) + return false; + + if (hw_resc->resv_tx_rings != bp->tx_nr_rings) + return true; + + if (bp->flags & BNXT_FLAG_RFS) + vnic = rx + 1; + if (bp->flags & BNXT_FLAG_AGG_RINGS) + rx <<= 1; + if ((bp->flags & BNXT_FLAG_NEW_RM) && + (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || + hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic)) + return true; + return false; +} + static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, bool shared); static int __bnxt_reserve_rings(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + int cp = bnxt_cp_rings_in_use(bp); int tx = bp->tx_nr_rings; int rx = bp->rx_nr_rings; - int cp = bp->cp_nr_rings; int grp, rx_rings, rc; bool sh = false; int vnic = 1; - if (bp->hwrm_spec_code < 0x10601) + if (!bnxt_need_reserve_rings(bp)) return 0; if (bp->flags & BNXT_FLAG_SHARED_RINGS) @@ -4667,14 +4799,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp) vnic = rx + 1; if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; - grp = bp->rx_nr_rings; - if (tx == hw_resc->resv_tx_rings && - (!(bp->flags & BNXT_FLAG_NEW_RM) || - (rx == hw_resc->resv_rx_rings && - grp == hw_resc->resv_hw_ring_grps && - cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics))) - return 0; rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic); if (rc) @@ -4718,64 +4843,26 @@ static int __bnxt_reserve_rings(struct bnxt *bp) return rc; } -static bool bnxt_need_reserve_rings(struct bnxt *bp) -{ - struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - int rx = bp->rx_nr_rings; - int vnic = 1; - - if (bp->hwrm_spec_code < 0x10601) - return false; - - if (hw_resc->resv_tx_rings != bp->tx_nr_rings) - return true; - - if (bp->flags & BNXT_FLAG_RFS) - vnic = rx + 1; - if (bp->flags & BNXT_FLAG_AGG_RINGS) - rx <<= 1; - if ((bp->flags & BNXT_FLAG_NEW_RM) && - (hw_resc->resv_rx_rings != rx || - hw_resc->resv_cp_rings != bp->cp_nr_rings || - hw_resc->resv_vnics != vnic)) - return true; - return false; -} - static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings) + int ring_grps, int cp_rings, int vnics) { struct hwrm_func_vf_cfg_input req = {0}; - u32 flags, enables; + u32 flags; int rc; if (!(bp->flags & BNXT_FLAG_NEW_RM)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); + 
__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, + cp_rings, vnics); flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; - enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | - FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS | - FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS; req.flags = cpu_to_le32(flags); - req.enables = cpu_to_le32(enables); - req.num_tx_rings = cpu_to_le16(tx_rings); - req.num_rx_rings = cpu_to_le16(rx_rings); - req.num_cmpl_rings = cpu_to_le16(cp_rings); - req.num_hw_ring_grps = cpu_to_le16(ring_grps); - req.num_stat_ctxs = cpu_to_le16(cp_rings); - req.num_vnics = cpu_to_le16(1); - if (bp->flags & BNXT_FLAG_RFS) - req.num_vnics = cpu_to_le16(rx_rings + 1); rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -4783,38 +4870,23 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings) + int ring_grps, int cp_rings, int vnics) { struct hwrm_func_cfg_input req = {0}; - u32 flags, enables; + u32 flags; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); + __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, + cp_rings, vnics); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; - enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS; - req.num_tx_rings = cpu_to_le16(tx_rings); - if (bp->flags & BNXT_FLAG_NEW_RM) { + if (bp->flags & BNXT_FLAG_NEW_RM) flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; - enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_VNICS; - req.num_rx_rings = cpu_to_le16(rx_rings); - req.num_cmpl_rings = cpu_to_le16(cp_rings); - req.num_hw_ring_grps = cpu_to_le16(ring_grps); - req.num_stat_ctxs = cpu_to_le16(cp_rings); - req.num_vnics = cpu_to_le16(1); - if (bp->flags & BNXT_FLAG_RFS) - req.num_vnics = cpu_to_le16(rx_rings + 1); - } + req.flags = cpu_to_le32(flags); - req.enables = cpu_to_le32(enables); rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -4822,17 +4894,17 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings) + int ring_grps, int cp_rings, int vnics) { if (bp->hwrm_spec_code < 0x10801) return 0; if (BNXT_PF(bp)) return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, - ring_grps, cp_rings); + ring_grps, cp_rings, vnics); return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings); + cp_rings, vnics); } static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, @@ -5060,7 +5132,7 @@ func_qcfg_exit: return rc; } -static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) { struct hwrm_func_resource_qcaps_output *resp = 
bp->hwrm_cmd_resp_addr; struct hwrm_func_resource_qcaps_input req = {0}; @@ -5077,6 +5149,10 @@ static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) goto hwrm_func_resc_qcaps_exit; } + hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs); + if (!all) + goto hwrm_func_resc_qcaps_exit; + hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx); hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx); hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings); @@ -5183,7 +5259,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) if (rc) return rc; if (bp->hwrm_spec_code >= 0x10803) { - rc = bnxt_hwrm_func_resc_qcaps(bp); + rc = bnxt_hwrm_func_resc_qcaps(bp, true); if (!rc) bp->flags |= BNXT_FLAG_NEW_RM; } @@ -5331,6 +5407,21 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp) return rc; } +static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) +{ + struct hwrm_port_qstats_ext_input req = {0}; + struct bnxt_pf_info *pf = &bp->pf; + + if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); + req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map); + return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); +} + static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) { if (bp->vxlan_port_cnt) { @@ -5423,10 +5514,9 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); req.fid = cpu_to_le16(0xffff); req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); - req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64; + req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; if (size == 128) - req.cache_linesize = - FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128; + req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) @@ -5745,6 +5835,7 @@ static void bnxt_setup_msix(struct bnxt *bp) } for (i = 0; i < bp->cp_nr_rings; i++) { + int map_idx = bnxt_cp_num_to_irq_num(bp, i); char *attr; if (bp->flags & BNXT_FLAG_SHARED_RINGS) @@ -5754,9 +5845,9 @@ static void bnxt_setup_msix(struct bnxt *bp) else attr = "tx"; - snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr, - i); - bp->irq_tbl[i].handler = bnxt_msix; + snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name, + attr, i); + bp->irq_tbl[map_idx].handler = bnxt_msix; } } @@ -5817,7 +5908,7 @@ void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max) bp->hw_resc.max_cp_rings = max; } -static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) +unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; @@ -5829,12 +5920,44 @@ void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) bp->hw_resc.max_irqs = max_irqs; } +int bnxt_get_avail_msix(struct bnxt *bp, int num) +{ + int max_cp = bnxt_get_max_func_cp_rings(bp); + int max_irq = bnxt_get_max_func_irqs(bp); + int total_req = bp->cp_nr_rings + num; + int max_idx, avail_msix; + + max_idx = min_t(int, bp->total_irqs, max_cp); + avail_msix = max_idx - bp->cp_nr_rings; + if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num) + return avail_msix; + + if (max_irq < total_req) { + num = max_irq - bp->cp_nr_rings; + if (num <= 0) + return 0; + } + return num; +} + +static int bnxt_get_num_msix(struct bnxt *bp) +{ + if (!(bp->flags & 
BNXT_FLAG_NEW_RM)) + return bnxt_get_max_func_irqs(bp); + + return bnxt_cp_rings_in_use(bp); +} + static int bnxt_init_msix(struct bnxt *bp) { - int i, total_vecs, rc = 0, min = 1; + int i, total_vecs, max, rc = 0, min = 1, ulp_msix; struct msix_entry *msix_ent; - total_vecs = bnxt_get_max_func_irqs(bp); + total_vecs = bnxt_get_num_msix(bp); + max = bnxt_get_max_func_irqs(bp); + if (total_vecs > max) + total_vecs = max; + msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL); if (!msix_ent) return -ENOMEM; @@ -5848,7 +5971,8 @@ static int bnxt_init_msix(struct bnxt *bp) min = 2; total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs); - if (total_vecs < 0) { + ulp_msix = bnxt_get_ulp_msix_num(bp); + if (total_vecs < 0 || total_vecs < ulp_msix) { rc = -ENODEV; goto msix_setup_exit; } @@ -5861,11 +5985,10 @@ static int bnxt_init_msix(struct bnxt *bp) bp->total_irqs = total_vecs; /* Trim rings based upon num of vectors allocated */ rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings, - total_vecs, min == 1); + total_vecs - ulp_msix, min == 1); if (rc) goto msix_setup_exit; - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; bp->cp_nr_rings = (min == 1) ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; @@ -5897,7 +6020,6 @@ static int bnxt_init_inta(struct bnxt *bp) bp->rx_nr_rings = 1; bp->tx_nr_rings = 1; bp->cp_nr_rings = 1; - bp->tx_nr_rings_per_tc = bp->tx_nr_rings; bp->flags |= BNXT_FLAG_SHARED_RINGS; bp->irq_tbl[0].vector = bp->pdev->irq; return 0; @@ -5927,9 +6049,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp) bp->flags &= ~BNXT_FLAG_USING_MSIX; } -static int bnxt_reserve_rings(struct bnxt *bp) +int bnxt_reserve_rings(struct bnxt *bp) { - int orig_cp = bp->hw_resc.resv_cp_rings; int tcs = netdev_get_num_tc(bp->dev); int rc; @@ -5941,9 +6062,12 @@ static int bnxt_reserve_rings(struct bnxt *bp) netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc); return rc; } - if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) { + if ((bp->flags & BNXT_FLAG_NEW_RM) && + (bnxt_get_num_msix(bp) != bp->total_irqs)) { + bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); rc = bnxt_init_int_mode(bp); + bnxt_ulp_irq_restart(bp, rc); if (rc) return rc; } @@ -5966,11 +6090,13 @@ static void bnxt_free_irq(struct bnxt *bp) free_irq_cpu_rmap(bp->dev->rx_cpu_rmap); bp->dev->rx_cpu_rmap = NULL; #endif - if (!bp->irq_tbl) + if (!bp->irq_tbl || !bp->bnapi) return; for (i = 0; i < bp->cp_nr_rings; i++) { - irq = &bp->irq_tbl[i]; + int map_idx = bnxt_cp_num_to_irq_num(bp, i); + + irq = &bp->irq_tbl[map_idx]; if (irq->requested) { if (irq->have_cpumask) { irq_set_affinity_hint(irq->vector, NULL); @@ -5989,14 +6115,25 @@ static int bnxt_request_irq(struct bnxt *bp) int i, j, rc = 0; unsigned long flags = 0; #ifdef CONFIG_RFS_ACCEL - struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap; + struct cpu_rmap *rmap; #endif + rc = bnxt_setup_int_mode(bp); + if (rc) { + netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", + rc); + return rc; + } +#ifdef CONFIG_RFS_ACCEL + rmap = bp->dev->rx_cpu_rmap; +#endif if (!(bp->flags & BNXT_FLAG_USING_MSIX)) flags = IRQF_SHARED; for (i = 0, j = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_irq *irq = &bp->irq_tbl[i]; + int map_idx = bnxt_cp_num_to_irq_num(bp, i); + struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; + #ifdef CONFIG_RFS_ACCEL if (rmap && bp->bnapi[i]->rx_ring) { rc = irq_cpu_rmap_add(rmap, irq->vector); @@ -6716,13 +6853,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool 
link_re_init) rc = bnxt_reserve_rings(bp); if (rc) return rc; - - rc = bnxt_setup_int_mode(bp); - if (rc) { - netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n", - rc); - return rc; - } } if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_USING_MSIX)) { @@ -7485,8 +7615,10 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE); } - if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) { bnxt_hwrm_port_qstats(bp); + bnxt_hwrm_port_qstats_ext(bp); + } if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { int rc; @@ -7531,7 +7663,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int max_rx, max_tx, tx_sets = 1; int tx_rings_needed; int rx_rings = rx; - int cp, rc; + int cp, vnics, rc; if (tcs) tx_sets = tcs; @@ -7547,10 +7679,17 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, if (max_tx < tx_rings_needed) return -ENOMEM; + vnics = 1; + if (bp->flags & BNXT_FLAG_RFS) + vnics += rx_rings; + if (bp->flags & BNXT_FLAG_AGG_RINGS) rx_rings <<= 1; cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; - return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp); + if (bp->flags & BNXT_FLAG_NEW_RM) + cp += bnxt_get_ulp_msix_num(bp); + return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, + vnics); } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -8195,6 +8334,7 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_set_vf_rate = bnxt_set_vf_bw, .ndo_set_vf_link_state = bnxt_set_vf_link_state, .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, + .ndo_set_vf_trust = bnxt_set_vf_trust, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bnxt_poll_controller, @@ -8392,9 +8532,15 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) if (sh) bp->flags |= BNXT_FLAG_SHARED_RINGS; dflt_rings = netif_get_num_default_rss_queues(); - /* Reduce default rings to reduce memory usage on multi-port cards */ - if (bp->port_count > 1) - dflt_rings = min_t(int, dflt_rings, 4); + /* Reduce default rings on multi-port cards so that total default + * rings do not exceed CPU count. + */ + if (bp->port_count > 1) { + int max_rings = + max_t(int, num_online_cpus() / bp->port_count, 1); + + dflt_rings = min_t(int, dflt_rings, max_rings); + } rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); if (rc) return rc; @@ -8433,17 +8579,23 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp) int rc; ASSERT_RTNL(); - if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) - return 0; - bnxt_hwrm_func_qcaps(bp); - __bnxt_close_nic(bp, true, false); + + if (netif_running(bp->dev)) + __bnxt_close_nic(bp, true, false); + + bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); rc = bnxt_init_int_mode(bp); - if (rc) - dev_close(bp->dev); - else - rc = bnxt_open_nic(bp, true, false); + bnxt_ulp_irq_restart(bp, rc); + + if (netif_running(bp->dev)) { + if (rc) + dev_close(bp->dev); + else + rc = bnxt_open_nic(bp, true, false); + } + return rc; } @@ -8664,6 +8816,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; + /* No TC has been set yet and rings may have been trimmed due to + * limited MSIX, so we re-initialize the TX rings per TC. 
+ */ + bp->tx_nr_rings_per_tc = bp->tx_nr_rings; + bnxt_get_wol_settings(bp); if (bp->flags & BNXT_FLAG_WOL_CAP) device_set_wakeup_enable(&pdev->dev, bp->wol); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 1989c470172c..3d55d3b56865 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -12,11 +12,11 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.9.0" +#define DRV_MODULE_VERSION "1.9.1" #define DRV_VER_MAJ 1 #define DRV_VER_MIN 9 -#define DRV_VER_UPD 0 +#define DRV_VER_UPD 1 #include <linux/interrupt.h> #include <linux/rhashtable.h> @@ -189,6 +189,7 @@ struct rx_cmp_ext { #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4) __le32 rx_cmp_meta_data; + #define RX_CMP_FLAGS2_METADATA_TCI_MASK 0xffff #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000 #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16 @@ -572,6 +573,10 @@ struct bnxt_ring_struct { void **vmem; u16 fw_ring_id; /* Ring id filled by Chimp FW */ + union { + u16 grp_idx; + u16 map_idx; /* Used by cmpl rings */ + }; u8 queue_id; }; @@ -785,6 +790,7 @@ struct bnxt_hw_resc { u16 min_tx_rings; u16 max_tx_rings; u16 resv_tx_rings; + u16 max_tx_sch_inputs; u16 min_rx_rings; u16 max_rx_rings; u16 resv_rx_rings; @@ -814,6 +820,7 @@ struct bnxt_vf_info { #define BNXT_VF_SPOOFCHK 0x2 #define BNXT_VF_LINK_FORCED 0x4 #define BNXT_VF_LINK_UP 0x8 +#define BNXT_VF_TRUST 0x10 u32 func_flags; /* func cfg flags */ u32 min_tx_rate; u32 max_tx_rate; @@ -1150,7 +1157,9 @@ struct bnxt { #define BNXT_FLAG_FW_DCBX_AGENT 0x800000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_DIM 0x2000000 + #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000 #define BNXT_FLAG_NEW_RM 0x8000000 + #define BNXT_FLAG_PORT_STATS_EXT 0x10000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -1270,8 +1279,10 @@ struct bnxt { struct rx_port_stats *hw_rx_port_stats; struct tx_port_stats *hw_tx_port_stats; + struct rx_port_stats_ext *hw_rx_port_stats_ext; dma_addr_t hw_rx_port_stats_map; dma_addr_t hw_tx_port_stats_map; + dma_addr_t hw_rx_port_stats_ext_map; int hw_port_stats_size; u16 hwrm_max_req_len; @@ -1382,6 +1393,9 @@ struct bnxt { ((offsetof(struct tx_port_stats, counter) + \ sizeof(struct rx_port_stats) + 512) / 8) +#define BNXT_RX_STATS_EXT_OFFSET(counter) \ + (offsetof(struct rx_port_stats_ext, counter) / 8) + #define I2C_DEV_ADDR_A0 0xa0 #define I2C_DEV_ADDR_A2 0xa2 #define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e @@ -1401,6 +1415,15 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); } +/* For TX and RX ring doorbells with no ordering guarantee*/ +static inline void bnxt_db_write_relaxed(struct bnxt *bp, void __iomem *db, + u32 val) +{ + writel_relaxed(val, db); + if (bp->flags & BNXT_FLAG_DOUBLE_DB) + writel_relaxed(val, db); +} + /* For TX and RX ring doorbells */ static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val) { @@ -1431,13 +1454,17 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); +unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max); +int 
bnxt_get_avail_msix(struct bnxt *bp, int num); +int bnxt_reserve_rings(struct bnxt *bp); void bnxt_tx_disable(struct bnxt *bp); void bnxt_tx_enable(struct bnxt *bp); int bnxt_hwrm_set_pause(struct bnxt *); int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool); int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp); int bnxt_hwrm_free_wol_fltr(struct bnxt *bp); +int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); int bnxt_hwrm_fw_set_time(struct bnxt *); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h index d2e0af960bf5..69efde785f23 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -34,7 +34,8 @@ struct bnxt_cos2bw_cfg { }; #define BNXT_LLQ(q_profile) \ - ((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS) + ((q_profile) == \ + QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE) #define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 1801582076be..8ba14ae00e8f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -137,6 +137,9 @@ reset_coalesce: #define BNXT_TX_STATS_ENTRY(counter) \ { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) } +#define BNXT_RX_STATS_EXT_ENTRY(counter) \ + { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) } + static const struct { long offset; char string[ETH_GSTRING_LEN]; @@ -181,6 +184,8 @@ static const struct { BNXT_RX_STATS_ENTRY(rx_bytes), BNXT_RX_STATS_ENTRY(rx_runt_bytes), BNXT_RX_STATS_ENTRY(rx_runt_frames), + BNXT_RX_STATS_ENTRY(rx_stat_discard), + BNXT_RX_STATS_ENTRY(rx_stat_err), BNXT_TX_STATS_ENTRY(tx_64b_frames), BNXT_TX_STATS_ENTRY(tx_65b_127b_frames), @@ -216,9 +221,24 @@ static const struct { BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration), BNXT_TX_STATS_ENTRY(tx_total_collisions), BNXT_TX_STATS_ENTRY(tx_bytes), + BNXT_TX_STATS_ENTRY(tx_xthol_frames), + BNXT_TX_STATS_ENTRY(tx_stat_discard), + BNXT_TX_STATS_ENTRY(tx_stat_error), +}; + +static const struct { + long offset; + char string[ETH_GSTRING_LEN]; +} bnxt_port_stats_ext_arr[] = { + BNXT_RX_STATS_EXT_ENTRY(link_down_events), + BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events), + BNXT_RX_STATS_EXT_ENTRY(resume_pause_events), + BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events), + BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events), }; #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) +#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr) static int bnxt_get_num_stats(struct bnxt *bp) { @@ -227,6 +247,9 @@ static int bnxt_get_num_stats(struct bnxt *bp) if (bp->flags & BNXT_FLAG_PORT_STATS) num_stats += BNXT_NUM_PORT_STATS; + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) + num_stats += BNXT_NUM_PORT_STATS_EXT; + return num_stats; } @@ -274,6 +297,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, bnxt_port_stats_arr[i].offset)); } } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + __le64 *port_stats_ext = 
(__le64 *)bp->hw_rx_port_stats_ext; + + for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++, j++) { + buf[j] = le64_to_cpu(*(port_stats_ext + + bnxt_port_stats_ext_arr[i].offset)); + } + } } static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@ -334,6 +365,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) buf += ETH_GSTRING_LEN; } } + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { + for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++) { + strcpy(buf, bnxt_port_stats_ext_arr[i].string); + buf += ETH_GSTRING_LEN; + } + } break; case ETH_SS_TEST: if (bp->num_tests) @@ -388,15 +425,26 @@ static void bnxt_get_channels(struct net_device *dev, struct ethtool_channels *channel) { struct bnxt *bp = netdev_priv(dev); + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int max_rx_rings, max_tx_rings, tcs; + int max_tx_sch_inputs; + + /* Get the most up-to-date max_tx_sch_inputs. */ + if (bp->flags & BNXT_FLAG_NEW_RM) + bnxt_hwrm_func_resc_qcaps(bp, false); + max_tx_sch_inputs = hw_resc->max_tx_sch_inputs; bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true); + if (max_tx_sch_inputs) + max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); channel->max_combined = min_t(int, max_rx_rings, max_tx_rings); if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) { max_rx_rings = 0; max_tx_rings = 0; } + if (max_tx_sch_inputs) + max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs); tcs = netdev_get_num_tc(dev); if (tcs > 1) @@ -822,17 +870,22 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) { struct bnxt *bp = netdev_priv(dev); - struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct bnxt_vnic_info *vnic; int i = 0; if (hfunc) *hfunc = ETH_RSS_HASH_TOP; - if (indir) + if (!bp->vnic_info) + return 0; + + vnic = &bp->vnic_info[0]; + if (indir && vnic->rss_table) { for (i = 0; i < HW_HASH_INDEX_SIZE; i++) indir[i] = le16_to_cpu(vnic->rss_table[i]); + } - if (key) + if (key && vnic->rss_hash_key) memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE); return 0; @@ -1874,22 +1927,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) return retval; } -static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen) +static void bnxt_get_pkgver(struct net_device *dev) { + struct bnxt *bp = netdev_priv(dev); u16 index = 0; - u32 datalen; + char *pkgver; + u32 pkglen; + u8 *pkgbuf; + int len; if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, - &index, NULL, &datalen) != 0) - return NULL; + &index, NULL, &pkglen) != 0) + return; - memset(buf, 0, buflen); - if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0) - return NULL; + pkgbuf = kzalloc(pkglen, GFP_KERNEL); + if (!pkgbuf) { + dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", + pkglen); + return; + } + + if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf)) + goto err; - return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf, - datalen); + pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, + pkglen); + if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { + len = strlen(bp->fw_ver_str); + snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, + "/pkg %s", pkgver); + } +err: + kfree(pkgbuf); } static int bnxt_get_eeprom(struct net_device *dev, @@ -2535,16 +2605,20 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) return -EOPNOTSUPP; rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP); - if (!rc) + if (!rc) { 
netdev_info(dev, "Reset request successful. Reload driver to complete reset\n"); + *flags = 0; + } } else if (*flags == ETH_RESET_AP) { /* This feature is not supported in older firmware versions */ if (bp->hwrm_spec_code < 0x10803) return -EOPNOTSUPP; rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP); - if (!rc) + if (!rc) { netdev_info(dev, "Reset Application Processor request successful.\n"); + *flags = 0; + } } else { rc = -EINVAL; } @@ -2558,22 +2632,10 @@ void bnxt_ethtool_init(struct bnxt *bp) struct hwrm_selftest_qlist_input req = {0}; struct bnxt_test_info *test_info; struct net_device *dev = bp->dev; - char *pkglog; int i, rc; - pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); - if (pkglog) { - char *pkgver; - int len; + bnxt_get_pkgver(dev); - pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); - if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { - len = strlen(bp->fw_ver_str); - snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, - "/pkg %s", pkgver); - } - kfree(pkglog); - } if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) return; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 82d17f8cc0db..0fe0ea8dce6c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -188,6 +188,7 @@ struct cmd_nums { #define HWRM_STAT_CTX_FREE 0xb1UL #define HWRM_STAT_CTX_QUERY 0xb2UL #define HWRM_STAT_CTX_CLR_STATS 0xb3UL + #define HWRM_PORT_QSTATS_EXT 0xb4UL #define HWRM_FW_RESET 0xc0UL #define HWRM_FW_QSTATUS 0xc1UL #define HWRM_FW_SET_TIME 0xc8UL @@ -199,6 +200,7 @@ struct cmd_nums { #define HWRM_REJECT_FWD_RESP 0xd1UL #define HWRM_FWD_RESP 0xd2UL #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL + #define HWRM_OEM_CMD 0xd4UL #define HWRM_TEMP_MONITOR_QUERY 0xe0UL #define HWRM_WOL_FILTER_ALLOC 0xf0UL #define HWRM_WOL_FILTER_FREE 0xf1UL @@ -271,6 +273,7 @@ struct cmd_nums { #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL + #define HWRM_PCIE_QSTATS 0x204UL #define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_INDIRECT 0xff11UL #define HWRM_DBG_WRITE_DIRECT 0xff12UL @@ -341,9 +344,9 @@ struct hwrm_err_output { #define HWRM_RESP_VALID_KEY 1 #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 9 -#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 0 -#define HWRM_VERSION_STR "1.9.0.0" +#define HWRM_VERSION_UPDATE 1 +#define HWRM_VERSION_RSVD 15 +#define HWRM_VERSION_STR "1.9.1.15" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -616,30 +619,6 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change { #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL }; -/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */ -struct hwrm_async_event_cmpl_pf_drvr_unload { - __le16 type; - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0 - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT - __le16 event_id; - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD - __le32 event_data2; - u8 opaque_v; - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL - #define 
ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1 - u8 timestamp_lo; - __le16 timestamp_hi; - __le32 event_data1; - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0 - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL - #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16 -}; - /* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ struct hwrm_async_event_cmpl_vf_cfg_change { __le16 type; @@ -854,6 +833,7 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL + #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -966,10 +946,14 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA - u8 cache_linesize; - #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL - #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL - #define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 + u8 options; + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xfcUL + #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 2 __le16 alloc_vfs; __le32 alloc_mcast_filters; __le32 alloc_hw_ring_grps; @@ -1124,10 +1108,14 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA - u8 cache_linesize; - #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL - #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL - #define FUNC_CFG_REQ_CACHE_LINESIZE_LAST FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 + u8 options; + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0 + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL + #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 + #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xfcUL + #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 2 __le16 num_mcast_filters; }; @@ -1248,7 +1236,7 @@ struct hwrm_func_vf_vnic_ids_query_output { u8 valid; }; -/* hwrm_func_drv_rgtr_input (size:832b/104B) */ +/* hwrm_func_drv_rgtr_input (size:896b/112B) */ struct hwrm_func_drv_rgtr_input { __le16 req_type; __le16 cmpl_ring; @@ -1256,8 +1244,9 @@ struct hwrm_func_drv_rgtr_input { __le16 target_id; __le64 resp_addr; __le32 flags; - #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL - #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL + #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL + #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -1277,14 +1266,18 @@ struct hwrm_func_drv_rgtr_input { #define 
FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI - u8 ver_maj; - u8 ver_min; - u8 ver_upd; + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; u8 unused_0[3]; __le32 timestamp; u8 unused_1[4]; __le32 vf_req_fwd[8]; __le32 async_event_fwd[8]; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; }; /* hwrm_func_drv_rgtr_output (size:128b/16B) */ @@ -1379,7 +1372,7 @@ struct hwrm_func_drv_qver_input { u8 unused_0[2]; }; -/* hwrm_func_drv_qver_output (size:128b/16B) */ +/* hwrm_func_drv_qver_output (size:192b/24B) */ struct hwrm_func_drv_qver_output { __le16 error_code; __le16 req_type; @@ -1398,11 +1391,15 @@ struct hwrm_func_drv_qver_output { #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI - u8 ver_maj; - u8 ver_min; - u8 ver_upd; + u8 ver_maj_8b; + u8 ver_min_8b; + u8 ver_upd_8b; u8 unused_0[2]; u8 valid; + __le16 ver_maj; + __le16 ver_min; + __le16 ver_upd; + __le16 ver_patch; }; /* hwrm_func_resource_qcaps_input (size:192b/24B) */ @@ -1416,7 +1413,7 @@ struct hwrm_func_resource_qcaps_input { u8 unused_0[6]; }; -/* hwrm_func_resource_qcaps_output (size:384b/48B) */ +/* hwrm_func_resource_qcaps_output (size:448b/56B) */ struct hwrm_func_resource_qcaps_output { __le16 error_code; __le16 req_type; @@ -1425,9 +1422,10 @@ struct hwrm_func_resource_qcaps_output { __le16 max_vfs; __le16 max_msix; __le16 vf_reservation_strategy; - #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL - #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL - #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL + #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC __le16 min_rsscos_ctx; __le16 max_rsscos_ctx; __le16 min_cmpl_rings; @@ -1444,7 +1442,8 @@ struct hwrm_func_resource_qcaps_output { __le16 max_stat_ctx; __le16 min_hw_ring_grps; __le16 max_hw_ring_grps; - u8 unused_0; + __le16 max_tx_scheduler_inputs; + u8 unused_0[7]; u8 valid; }; @@ -1627,6 +1626,16 @@ struct hwrm_port_phy_cfg_output { u8 valid; }; +/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */ +struct hwrm_port_phy_cfg_cmd_err { + u8 code; + #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL + #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL + #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL + #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY + u8 unused_0[7]; +}; + /* hwrm_port_phy_qcfg_input (size:192b/24B) */ struct hwrm_port_phy_qcfg_input { __le16 req_type; @@ -2030,6 +2039,33 @@ struct hwrm_port_qstats_output { u8 valid; }; +/* hwrm_port_qstats_ext_input (size:320b/40B) */ +struct hwrm_port_qstats_ext_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0[2]; + __le64 tx_stat_host_addr; + __le64 rx_stat_host_addr; +}; + +/* hwrm_port_qstats_ext_output (size:128b/16B) */ +struct hwrm_port_qstats_ext_output { + __le16 error_code; + __le16 req_type; + __le16 
seq_id; + __le16 resp_len; + __le16 tx_stat_size; + __le16 rx_stat_size; + u8 unused_0[3]; + u8 valid; +}; + /* hwrm_port_lpbk_qstats_input (size:128b/16B) */ struct hwrm_port_lpbk_qstats_input { __le16 req_type; @@ -2552,7 +2588,11 @@ struct hwrm_queue_qportcfg_input { #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX __le16 port_id; - u8 unused_0[2]; + u8 drv_qmap_cap; + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL + #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED + u8 unused_0; }; /* hwrm_queue_qportcfg_output (size:256b/32B) */ @@ -2571,52 +2611,68 @@ struct hwrm_queue_qportcfg_output { u8 queue_cos2bw_cfg_allowed; u8 queue_id0; u8 queue_id0_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN u8 queue_id1; u8 queue_id1_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN u8 queue_id2; u8 queue_id2_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN u8 queue_id3; u8 queue_id3_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL - #define 
QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN u8 queue_id4; u8 queue_id4_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN u8 queue_id5; u8 queue_id5_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN u8 queue_id6; u8 queue_id6_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN u8 queue_id7; u8 queue_id7_service_profile; - #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL - #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST 
QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL + #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN u8 valid; }; @@ -5180,6 +5236,29 @@ struct hwrm_stat_ctx_clr_stats_output { u8 valid; }; +/* hwrm_pcie_qstats_input (size:256b/32B) */ +struct hwrm_pcie_qstats_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 pcie_stat_size; + u8 unused_0[6]; + __le64 pcie_stat_host_addr; +}; + +/* hwrm_pcie_qstats_output (size:128b/16B) */ +struct hwrm_pcie_qstats_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 pcie_stat_size; + u8 unused_0[5]; + u8 valid; +}; + /* tx_port_stats (size:3264b/408B) */ struct tx_port_stats { __le64 tx_64b_frames; @@ -5305,6 +5384,30 @@ struct rx_port_stats { __le64 rx_stat_err; }; +/* rx_port_stats_ext (size:320b/40B) */ +struct rx_port_stats_ext { + __le64 link_down_events; + __le64 continuous_pause_events; + __le64 resume_pause_events; + __le64 continuous_roce_pause_events; + __le64 resume_roce_pause_events; +}; + +/* pcie_ctx_hw_stats (size:768b/96B) */ +struct pcie_ctx_hw_stats { + __le64 pcie_pl_signal_integrity; + __le64 pcie_dl_signal_integrity; + __le64 pcie_tl_signal_integrity; + __le64 pcie_link_integrity; + __le64 pcie_tx_traffic_rate; + __le64 pcie_rx_traffic_rate; + __le64 pcie_tx_dllp_statistics; + __le64 pcie_rx_dllp_statistics; + __le64 pcie_equalization_time; + __le32 pcie_ltssm_histogram[4]; + __le64 pcie_recovery_histogram; +}; + /* hwrm_fw_reset_input (size:192b/24B) */ struct hwrm_fw_reset_input { __le16 req_type; @@ -5313,14 +5416,15 @@ struct hwrm_fw_reset_input { __le16 target_id; __le64 resp_addr; u8 embedded_proc_type; - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT u8 selfrst_status; #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL @@ -6253,8 +6357,7 @@ struct hwrm_selftest_exec_input { #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL - u8 pcie_lane_num; - u8 unused_0[6]; + u8 unused_0[7]; }; /* 
hwrm_selftest_exec_output (size:128b/16B) */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h index 73f2249555b5..83444811d3c6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h @@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type { #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) #define BNX_DIR_ATTR_PROP_STREAM (1 << 1) -#define BNX_PKG_LOG_MAX_LENGTH 4096 - enum bnxnvm_pkglog_field_index { BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index d87faad901fe..f952963d594e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -121,6 +121,23 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) return rc; } +int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_vf_info *vf; + + if (bnxt_vf_ndo_prep(bp, vf_id)) + return -EINVAL; + + vf = &bp->pf.vf[vf_id]; + if (trusted) + vf->flags |= BNXT_VF_TRUST; + else + vf->flags &= ~BNXT_VF_TRUST; + + return 0; +} + int bnxt_get_vf_config(struct net_device *dev, int vf_id, struct ifla_vf_info *ivi) { @@ -147,6 +164,7 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id, else ivi->qos = 0; ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK); + ivi->trusted = !!(vf->flags & BNXT_VF_TRUST); if (!(vf->flags & BNXT_VF_LINK_FORCED)) ivi->linkstate = IFLA_VF_LINK_STATE_AUTO; else if (vf->flags & BNXT_VF_LINK_UP) @@ -492,18 +510,16 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) } mutex_unlock(&bp->hwrm_cmd_lock); if (pf->active_vfs) { - u16 n = 1; + u16 n = pf->active_vfs; - if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL) - n = pf->active_vfs; - - hw_resc->max_tx_rings -= vf_tx_rings * n; - hw_resc->max_rx_rings -= vf_rx_rings * n; - hw_resc->max_hw_ring_grps -= vf_ring_grps * n; - hw_resc->max_cp_rings -= vf_cp_rings * n; + hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n; + hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n; + hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) * + n; + hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n; hw_resc->max_rsscos_ctxs -= pf->active_vfs; - hw_resc->max_stat_ctxs -= vf_stat_ctx * n; - hw_resc->max_vnics -= vf_vnics * n; + hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; + hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; rc = pf->active_vfs; } @@ -886,18 +902,19 @@ exec_fwd_resp_exit: return rc; } -static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf) +static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf) { u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input); struct hwrm_func_vf_cfg_input *req = (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr; - /* Only allow VF to set a valid MAC address if the PF assigned MAC - * address is zero + /* Allow VF to set a valid MAC address, if trust is set to on or + * if the PF 
assigned MAC address is zero */ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) { if (is_valid_ether_addr(req->dflt_mac_addr) && - !is_valid_ether_addr(vf->mac_addr)) { + ((vf->flags & BNXT_VF_TRUST) || + (!is_valid_ether_addr(vf->mac_addr)))) { ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr); return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size); } @@ -913,11 +930,17 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf) (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr; bool mac_ok = false; - /* VF MAC address must first match PF MAC address, if it is valid. + if (!is_valid_ether_addr((const u8 *)req->l2_addr)) + return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size); + + /* Allow VF to set a valid MAC address, if trust is set to on. + * Or VF MAC address must first match MAC address in PF's context. * Otherwise, it must match the VF MAC address if firmware spec >= * 1.2.2 */ - if (is_valid_ether_addr(vf->mac_addr)) { + if (vf->flags & BNXT_VF_TRUST) { + mac_ok = true; + } else if (is_valid_ether_addr(vf->mac_addr)) { if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr)) mac_ok = true; } else if (is_valid_ether_addr(vf->vf_mac_addr)) { @@ -951,7 +974,9 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, sizeof(phy_qcfg_resp)); mutex_unlock(&bp->hwrm_cmd_lock); + phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; + phy_qcfg_resp.valid = 1; if (vf->flags & BNXT_VF_LINK_UP) { /* if physical link is down, force link up on VF */ @@ -993,7 +1018,7 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) switch (req_type) { case HWRM_FUNC_VF_CFG: - rc = bnxt_vf_store_mac(bp, vf); + rc = bnxt_vf_configure_mac(bp, vf); break; case HWRM_CFA_L2_FILTER_ALLOC: rc = bnxt_vf_validate_set_mac(bp, vf); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h index dbc8d977fc5a..d10f6f6c7860 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h @@ -1,7 +1,7 @@ /* Broadcom NetXtreme-C/E network driver. 
* * Copyright (c) 2014-2016 Broadcom Corporation - * Copyright (c) 2016-2017 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -17,6 +17,7 @@ int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16); int bnxt_set_vf_bw(struct net_device *, int, int, int); int bnxt_set_vf_link_state(struct net_device *, int, int); int bnxt_set_vf_spoofchk(struct net_device *, int, bool); +int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust); int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs); void bnxt_sriov_disable(struct bnxt *); void bnxt_hwrm_exec_fwd_req(struct bnxt *); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index fbe6e208e17b..795f45024c20 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -349,6 +349,9 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle) if (rc) netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d", __func__, flow_handle, rc); + + if (rc) + rc = -EIO; return rc; } @@ -374,6 +377,30 @@ static bool is_wildcard(void *mask, int len) return true; } +static bool is_exactmatch(void *mask, int len) +{ + const u8 *p = mask; + int i; + + for (i = 0; i < len; i++) + if (p[i] != 0xff) + return false; + + return true; +} + +static bool bits_set(void *key, int len) +{ + const u8 *p = key; + int i; + + for (i = 0; i < len; i++) + if (p[i] != 0) + return true; + + return false; +} + static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, __le16 ref_flow_handle, __le32 tunnel_handle, __le16 *flow_handle) @@ -484,13 +511,15 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, req.action_flags = cpu_to_le16(action_flags); mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) *flow_handle = resp->flow_handle; - mutex_unlock(&bp->hwrm_cmd_lock); + if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) + rc = -ENOSPC; + else if (rc) + rc = -EIO; return rc; } @@ -561,6 +590,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); mutex_unlock(&bp->hwrm_cmd_lock); + if (rc) + rc = -EIO; return rc; } @@ -576,6 +607,9 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp, rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + + if (rc) + rc = -EIO; return rc; } @@ -624,6 +658,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); mutex_unlock(&bp->hwrm_cmd_lock); + if (rc) + rc = -EIO; return rc; } @@ -639,6 +675,9 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp, rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + + if (rc) + rc = -EIO; return rc; } @@ -749,6 +788,41 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow) return false; } + /* Currently source/dest MAC cannot be partial wildcard */ + if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) && + !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) { + netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n"); + return false; + } + if (bits_set(&flow->l2_key.dmac, 
sizeof(flow->l2_key.dmac)) && + !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) { + netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n"); + return false; + } + + /* Currently VLAN fields cannot be partial wildcard */ + if (bits_set(&flow->l2_key.inner_vlan_tci, + sizeof(flow->l2_key.inner_vlan_tci)) && + !is_exactmatch(&flow->l2_mask.inner_vlan_tci, + sizeof(flow->l2_mask.inner_vlan_tci))) { + netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n"); + return false; + } + if (bits_set(&flow->l2_key.inner_vlan_tpid, + sizeof(flow->l2_key.inner_vlan_tpid)) && + !is_exactmatch(&flow->l2_mask.inner_vlan_tpid, + sizeof(flow->l2_mask.inner_vlan_tpid))) { + netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n"); + return false; + } + + /* Currently Ethertype must be set */ + if (!is_exactmatch(&flow->l2_mask.ether_type, + sizeof(flow->l2_mask.ether_type))) { + netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n"); + return false; + } + return true; } @@ -977,8 +1051,10 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, /* Check if there's another flow using the same tunnel decap. * If not, add this tunnel to the table and resolve the other - * tunnel header fileds + * tunnel header fileds. Ignore src_port in the tunnel_key, + * since it is not required for decap filters. */ + decap_key->tp_src = 0; decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table, &tc_info->decap_ht_params, decap_key); @@ -1269,11 +1345,8 @@ static int bnxt_tc_del_flow(struct bnxt *bp, flow_node = rhashtable_lookup_fast(&tc_info->flow_table, &tc_flow_cmd->cookie, tc_info->flow_ht_params); - if (!flow_node) { - netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx", - tc_flow_cmd->cookie); + if (!flow_node) return -EINVAL; - } return __bnxt_tc_del_flow(bp, flow_node); } @@ -1290,11 +1363,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, flow_node = rhashtable_lookup_fast(&tc_info->flow_table, &tc_flow_cmd->cookie, tc_info->flow_ht_params); - if (!flow_node) { - netdev_info(bp->dev, "Error: no flow_node for cookie %lx", - tc_flow_cmd->cookie); + if (!flow_node) return -1; - } flow = &flow_node->flow; curr_stats = &flow->stats; @@ -1344,8 +1414,10 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, } else { netdev_info(bp->dev, "error rc=%d", rc); } - mutex_unlock(&bp->hwrm_cmd_lock); + + if (rc) + rc = -EIO; return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 997e10e8b863..347e4f946eb2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
* - * Copyright (c) 2016 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -101,13 +101,28 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id) return 0; } +static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent) +{ + struct bnxt_en_dev *edev = bp->edev; + int num_msix, idx, i; + + num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested; + idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base; + for (i = 0; i < num_msix; i++) { + ent[i].vector = bp->irq_tbl[idx + i].vector; + ent[i].ring_idx = idx + i; + ent[i].db_offset = (idx + i) * 0x80; + } +} + static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, struct bnxt_msix_entry *ent, int num_msix) { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); int max_idx, max_cp_rings; - int avail_msix, i, idx; + int avail_msix, idx; + int rc = 0; ASSERT_RTNL(); if (ulp_id != BNXT_ROCE_ULP) @@ -116,23 +131,47 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id, if (!(bp->flags & BNXT_FLAG_USING_MSIX)) return -ENODEV; + if (edev->ulp_tbl[ulp_id].msix_requested) + return -EAGAIN; + max_cp_rings = bnxt_get_max_func_cp_rings(bp); - max_idx = min_t(int, bp->total_irqs, max_cp_rings); - avail_msix = max_idx - bp->cp_nr_rings; + avail_msix = bnxt_get_avail_msix(bp, num_msix); if (!avail_msix) return -ENOMEM; if (avail_msix > num_msix) avail_msix = num_msix; - idx = max_idx - avail_msix; - for (i = 0; i < avail_msix; i++) { - ent[i].vector = bp->irq_tbl[idx + i].vector; - ent[i].ring_idx = idx + i; - ent[i].db_offset = (idx + i) * 0x80; + if (bp->flags & BNXT_FLAG_NEW_RM) { + idx = bp->cp_nr_rings; + } else { + max_idx = min_t(int, bp->total_irqs, max_cp_rings); + idx = max_idx - avail_msix; } - bnxt_set_max_func_irqs(bp, max_idx - avail_msix); - bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); + edev->ulp_tbl[ulp_id].msix_base = idx; edev->ulp_tbl[ulp_id].msix_requested = avail_msix; + if (bp->total_irqs < (idx + avail_msix)) { + if (netif_running(dev)) { + bnxt_close_nic(bp, true, false); + rc = bnxt_open_nic(bp, true, false); + } else { + rc = bnxt_reserve_rings(bp); + } + } + if (rc) { + edev->ulp_tbl[ulp_id].msix_requested = 0; + return -EAGAIN; + } + + if (bp->flags & BNXT_FLAG_NEW_RM) { + struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + + avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings; + edev->ulp_tbl[ulp_id].msix_requested = avail_msix; + } + bnxt_fill_msix_vecs(bp, ent); + bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix); + bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); + edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; return avail_msix; } @@ -146,11 +185,40 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) if (ulp_id != BNXT_ROCE_ULP) return -EINVAL; + if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) + return 0; + max_cp_rings = bnxt_get_max_func_cp_rings(bp); msix_requested = edev->ulp_tbl[ulp_id].msix_requested; bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); edev->ulp_tbl[ulp_id].msix_requested = 0; - bnxt_set_max_func_irqs(bp, bp->total_irqs); + bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested); + edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; + if (netif_running(dev)) { + bnxt_close_nic(bp, true, false); + bnxt_open_nic(bp, true, false); + } + return 0; +} + +int bnxt_get_ulp_msix_num(struct bnxt *bp) +{ + if 
(bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) { + struct bnxt_en_dev *edev = bp->edev; + + return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested; + } + return 0; +} + +int bnxt_get_ulp_msix_base(struct bnxt *bp) +{ + if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) { + struct bnxt_en_dev *edev = bp->edev; + + if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested) + return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base; + } return 0; } @@ -287,6 +355,58 @@ void bnxt_ulp_shutdown(struct bnxt *bp) } } +void bnxt_ulp_irq_stop(struct bnxt *bp) +{ + struct bnxt_en_dev *edev = bp->edev; + struct bnxt_ulp_ops *ops; + + if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) + return; + + if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) { + struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP]; + + if (!ulp->msix_requested) + return; + + ops = rtnl_dereference(ulp->ulp_ops); + if (!ops || !ops->ulp_irq_stop) + return; + ops->ulp_irq_stop(ulp->handle); + } +} + +void bnxt_ulp_irq_restart(struct bnxt *bp, int err) +{ + struct bnxt_en_dev *edev = bp->edev; + struct bnxt_ulp_ops *ops; + + if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED)) + return; + + if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) { + struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP]; + struct bnxt_msix_entry *ent = NULL; + + if (!ulp->msix_requested) + return; + + ops = rtnl_dereference(ulp->ulp_ops); + if (!ops || !ops->ulp_irq_restart) + return; + + if (!err) { + ent = kcalloc(ulp->msix_requested, sizeof(*ent), + GFP_KERNEL); + if (!ent) + return; + bnxt_fill_msix_vecs(bp, ent); + } + ops->ulp_irq_restart(ulp->handle, ent); + kfree(ent); + } +} + void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl) { u16 event_id = le16_to_cpu(cmpl->event_id); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index d2471067dc37..df48ac71729f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -1,6 +1,6 @@ /* Broadcom NetXtreme-C/E network driver. 
* - * Copyright (c) 2016 Broadcom Limited + * Copyright (c) 2016-2018 Broadcom Limited * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -20,6 +20,12 @@ struct hwrm_async_event_cmpl; struct bnxt; +struct bnxt_msix_entry { + u32 vector; + u32 ring_idx; + u32 db_offset; +}; + struct bnxt_ulp_ops { /* async_notifier() cannot sleep (in BH context) */ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *); @@ -27,12 +33,8 @@ struct bnxt_ulp_ops { void (*ulp_start)(void *); void (*ulp_sriov_config)(void *, int); void (*ulp_shutdown)(void *); -}; - -struct bnxt_msix_entry { - u32 vector; - u32 ring_idx; - u32 db_offset; + void (*ulp_irq_stop)(void *); + void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *); }; struct bnxt_fw_msg { @@ -49,6 +51,7 @@ struct bnxt_ulp { unsigned long *async_events_bmap; u16 max_async_event_id; u16 msix_requested; + u16 msix_base; atomic_t ref_count; }; @@ -60,6 +63,7 @@ struct bnxt_en_dev { #define BNXT_EN_FLAG_ROCEV2_CAP 0x2 #define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \ BNXT_EN_FLAG_ROCEV2_CAP) + #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4 const struct bnxt_en_ops *en_ops; struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP]; }; @@ -84,11 +88,15 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id) return false; } +int bnxt_get_ulp_msix_num(struct bnxt *bp); +int bnxt_get_ulp_msix_base(struct bnxt *bp); void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id); void bnxt_ulp_stop(struct bnxt *bp); void bnxt_ulp_start(struct bnxt *bp); void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); void bnxt_ulp_shutdown(struct bnxt *bp); +void bnxt_ulp_irq_stop(struct bnxt *bp); +void bnxt_ulp_irq_restart(struct bnxt *bp, int err); void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl); struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 26290403f38f..38f635cf8408 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -64,6 +64,31 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) return rc; } +static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, + u16 *max_mtu) +{ + struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcfg_input req = {0}; + u16 mtu; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); + req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid); + + mutex_lock(&bp->hwrm_cmd_lock); + + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + mtu = le16_to_cpu(resp->max_mtu_configured); + if (!mtu) + *max_mtu = BNXT_MAX_MTU; + else + *max_mtu = mtu; + } + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static int bnxt_vf_rep_open(struct net_device *dev) { struct bnxt_vf_rep *vf_rep = netdev_priv(dev); @@ -365,6 +390,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, struct net_device *dev) { struct net_device *pf_dev = bp->dev; + u16 max_mtu; dev->netdev_ops = &bnxt_vf_rep_netdev_ops; dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops; @@ -380,6 +406,10 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx, dev->perm_addr); ether_addr_copy(dev->dev_addr, dev->perm_addr); + /* Set VF-Rep's max-mtu to the 
corresponding VF's max-mtu */ + if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu)) + dev->max_mtu = max_mtu; + dev->min_mtu = ETH_ZLEN; } static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[]) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index b1e35a9accf1..0445f2c0c629 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -603,6 +603,8 @@ static int bcmgenet_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct bcmgenet_rx_ring *ring; + unsigned int i; ec->tx_max_coalesced_frames = bcmgenet_tdma_ring_readl(priv, DESC_INDEX, @@ -613,15 +615,57 @@ static int bcmgenet_get_coalesce(struct net_device *dev, ec->rx_coalesce_usecs = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000; + for (i = 0; i < priv->hw_params->rx_queues; i++) { + ring = &priv->rx_rings[i]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + } + ring = &priv->rx_rings[DESC_INDEX]; + ec->use_adaptive_rx_coalesce |= ring->dim.use_dim; + return 0; } +static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring, + u32 usecs, u32 pkts) +{ + struct bcmgenet_priv *priv = ring->priv; + unsigned int i = ring->index; + u32 reg; + + bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH); + + reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i); + reg &= ~DMA_TIMEOUT_MASK; + reg |= DIV_ROUND_UP(usecs * 1000, 8192); + bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i); +} + +static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, + struct ethtool_coalesce *ec) +{ + struct net_dim_cq_moder moder; + u32 usecs, pkts; + + ring->rx_coalesce_usecs = ec->rx_coalesce_usecs; + ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames; + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) { + moder = net_dim_get_def_profile(ring->dim.dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + ring->dim.use_dim = ec->use_adaptive_rx_coalesce; + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + static int bcmgenet_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) { struct bcmgenet_priv *priv = netdev_priv(dev); unsigned int i; - u32 reg; /* Base system clock is 125Mhz, DMA timeout is this reference clock * divided by 1024, which yields roughly 8.192us, our maximum value @@ -641,7 +685,8 @@ static int bcmgenet_set_coalesce(struct net_device *dev, * transmitted, or when the ring is empty. 
*/ if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high || - ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low) + ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low || + ec->use_adaptive_tx_coalesce) return -EOPNOTSUPP; /* Program all TX queues with the same values, as there is no @@ -655,25 +700,9 @@ static int bcmgenet_set_coalesce(struct net_device *dev, ec->tx_max_coalesced_frames, DMA_MBUF_DONE_THRESH); - for (i = 0; i < priv->hw_params->rx_queues; i++) { - bcmgenet_rdma_ring_writel(priv, i, - ec->rx_max_coalesced_frames, - DMA_MBUF_DONE_THRESH); - - reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i); - reg &= ~DMA_TIMEOUT_MASK; - reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192); - bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i); - } - - bcmgenet_rdma_ring_writel(priv, DESC_INDEX, - ec->rx_max_coalesced_frames, - DMA_MBUF_DONE_THRESH); - - reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT); - reg &= ~DMA_TIMEOUT_MASK; - reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192); - bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT); + for (i = 0; i < priv->hw_params->rx_queues; i++) + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec); + bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec); return 0; } @@ -1321,7 +1350,7 @@ static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev, dma_unmap_addr_set(cb, dma_addr, 0); } - return 0; + return NULL; } /* Simple helper to free a receive control block's resources */ @@ -1460,7 +1489,7 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *new_skb; u16 offset; u8 ip_proto; - u16 ip_ver; + __be16 ip_ver; u32 tx_csum_info; if (unlikely(skb_headroom(skb) < sizeof(*status))) { @@ -1480,12 +1509,12 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, status = (struct status_64 *)skb->data; if (skb->ip_summed == CHECKSUM_PARTIAL) { - ip_ver = htons(skb->protocol); + ip_ver = skb->protocol; switch (ip_ver) { - case ETH_P_IP: + case htons(ETH_P_IP): ip_proto = ip_hdr(skb)->protocol; break; - case ETH_P_IPV6: + case htons(ETH_P_IPV6): ip_proto = ipv6_hdr(skb)->nexthdr; break; default: @@ -1501,7 +1530,8 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, */ if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) { tx_csum_info |= STATUS_TX_CSUM_LV; - if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP) + if (ip_proto == IPPROTO_UDP && + ip_ver == htons(ETH_P_IP)) tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP; } else { tx_csum_info = 0; @@ -1713,6 +1743,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, unsigned long dma_flag; int len; unsigned int rxpktprocessed = 0, rxpkttoprocess; + unsigned int bytes_processed = 0; unsigned int p_index, mask; unsigned int discards; unsigned int chksum_ok = 0; @@ -1832,6 +1863,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring, len -= ETH_FCS_LEN; } + bytes_processed += len; + /*Finish setting up the received SKB and send it to the kernel*/ skb->protocol = eth_type_trans(skb, priv->dev); ring->packets++; @@ -1854,6 +1887,9 @@ next: bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX); } + ring->dim.bytes = bytes_processed; + ring->dim.packets = rxpktprocessed; + return rxpktprocessed; } @@ -1862,6 +1898,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) { struct bcmgenet_rx_ring *ring = container_of(napi, struct bcmgenet_rx_ring, napi); + struct net_dim_sample dim_sample; unsigned int work_done; work_done = 
bcmgenet_desc_rx(ring, budget); @@ -1871,9 +1908,29 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget) ring->int_enable(ring); } + if (ring->dim.use_dim) { + net_dim_sample(ring->dim.event_ctr, ring->dim.packets, + ring->dim.bytes, &dim_sample); + net_dim(&ring->dim.dim, dim_sample); + } + return work_done; } +static void bcmgenet_dim_work(struct work_struct *work) +{ + struct net_dim *dim = container_of(work, struct net_dim, work); + struct bcmgenet_net_dim *ndim = + container_of(dim, struct bcmgenet_net_dim, dim); + struct bcmgenet_rx_ring *ring = + container_of(ndim, struct bcmgenet_rx_ring, dim); + struct net_dim_cq_moder cur_profile = + net_dim_get_profile(dim->mode, dim->profile_ix); + + bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts); + dim->state = NET_DIM_START_MEASURE; +} + /* Assign skb to RX DMA descriptor. */ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, struct bcmgenet_rx_ring *ring) @@ -2022,6 +2079,37 @@ static void init_umac(struct bcmgenet_priv *priv) dev_dbg(kdev, "done init umac\n"); } +static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring, + void (*cb)(struct work_struct *work)) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + + INIT_WORK(&dim->dim.work, cb); + dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE; + dim->event_ctr = 0; + dim->packets = 0; + dim->bytes = 0; +} + +static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring) +{ + struct bcmgenet_net_dim *dim = &ring->dim; + struct net_dim_cq_moder moder; + u32 usecs, pkts; + + usecs = ring->rx_coalesce_usecs; + pkts = ring->rx_max_coalesced_frames; + + /* If DIM was enabled, re-apply default parameters */ + if (dim->use_dim) { + moder = net_dim_get_def_profile(dim->dim.mode); + usecs = moder.usec; + pkts = moder.pkts; + } + + bcmgenet_set_rx_coalesce(ring, usecs, pkts); +} + /* Initialize a Tx ring along with corresponding hardware registers */ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, unsigned int index, unsigned int size, @@ -2111,13 +2199,15 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, if (ret) return ret; + bcmgenet_init_dim(ring, bcmgenet_dim_work); + bcmgenet_init_rx_coalesce(ring); + /* Initialize Rx NAPI */ netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, NAPI_POLL_WEIGHT); bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); - bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); bcmgenet_rdma_ring_writel(priv, index, ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH), DMA_RING_BUF_SIZE); @@ -2276,10 +2366,12 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) for (i = 0; i < priv->hw_params->rx_queues; ++i) { ring = &priv->rx_rings[i]; napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); } ring = &priv->rx_rings[DESC_INDEX]; napi_disable(&ring->napi); + cancel_work_sync(&ring->dim.dim.work); } static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv) @@ -2557,6 +2649,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) continue; rx_ring = &priv->rx_rings[index]; + rx_ring->dim.event_ctr++; if (likely(napi_schedule_prep(&rx_ring->napi))) { rx_ring->int_disable(rx_ring); @@ -2601,6 +2694,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) if (status & UMAC_IRQ_RXDMA_DONE) { rx_ring = &priv->rx_rings[DESC_INDEX]; + rx_ring->dim.event_ctr++; if (likely(napi_schedule_prep(&rx_ring->napi))) { rx_ring->int_disable(rx_ring); @@ -3351,6 +3445,7 @@ static int 
@@ -3351,6 +3445,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
 	struct net_device *dev;
 	const void *macaddr;
 	struct resource *r;
+	unsigned int i;
 	int err = -EIO;
 	const char *phy_mode_str;
 
@@ -3479,6 +3574,11 @@ static int bcmgenet_probe(struct platform_device *pdev)
 	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
 	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
 
+	/* Set default coalescing parameters */
+	for (i = 0; i < priv->hw_params->rx_queues; i++)
+		priv->rx_rings[i].rx_max_coalesced_frames = 1;
+	priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
+
 	/* libphy will determine the link state */
 	netif_carrier_off(dev);
 
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 3c50431ccd2a..b773bc07edf7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -16,6 +16,7 @@
 #include <linux/mii.h>
 #include <linux/if_vlan.h>
 #include <linux/phy.h>
+#include <linux/net_dim.h>
 
 /* total number of Buffer Descriptors, same for Rx/Tx */
 #define TOTAL_DESC				256
@@ -572,6 +573,14 @@ struct bcmgenet_tx_ring {
 	struct bcmgenet_priv *priv;
 };
 
+struct bcmgenet_net_dim {
+	u16		use_dim;
+	u16		event_ctr;
+	unsigned long	packets;
+	unsigned long	bytes;
+	struct net_dim	dim;
+};
+
 struct bcmgenet_rx_ring {
 	struct napi_struct napi;	/* Rx NAPI struct */
 	unsigned long	bytes;
@@ -586,6 +595,9 @@ struct bcmgenet_rx_ring {
 	unsigned int	cb_ptr;		/* Rx ring initial CB ptr */
 	unsigned int	end_ptr;	/* Rx ring end CB ptr */
 	unsigned int	old_discards;
+	struct bcmgenet_net_dim dim;
+	u32		rx_max_coalesced_frames;
+	u32		rx_coalesce_usecs;
 	void (*int_enable)(struct bcmgenet_rx_ring *);
 	void (*int_disable)(struct bcmgenet_rx_ring *);
 	struct bcmgenet_priv *priv;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index ecdef42f0ae6..ef4a0c326736 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -63,24 +63,24 @@ MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
 
 /* 1 normal messages, 0 quiet .. 7 verbose. */
 static int debug = 1;
-module_param(debug, int, S_IRUGO);
+module_param(debug, int, 0444);
 MODULE_PARM_DESC(debug, "Debug messages");
 
 #ifdef CONFIG_SBMAC_COALESCE
 static int int_pktcnt_tx = 255;
-module_param(int_pktcnt_tx, int, S_IRUGO);
+module_param(int_pktcnt_tx, int, 0444);
 MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");
 
 static int int_timeout_tx = 255;
-module_param(int_timeout_tx, int, S_IRUGO);
+module_param(int_timeout_tx, int, 0444);
 MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");
 
 static int int_pktcnt_rx = 64;
-module_param(int_pktcnt_rx, int, S_IRUGO);
+module_param(int_pktcnt_rx, int, 0444);
 MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");
 
 static int int_timeout_rx = 64;
-module_param(int_timeout_rx, int, S_IRUGO);
+module_param(int_timeout_rx, int, 0444);
 MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
 #endif
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index c1841db1b500..9f59b1270a7c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
 
 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 
-		usleep_range(10, 20);
+		udelay(10);
 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
 	}
 
@@ -8733,14 +8733,15 @@ static void tg3_free_consistent(struct tg3 *tp)
 	tg3_mem_rx_release(tp);
 	tg3_mem_tx_release(tp);
 
-	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
-	tg3_full_lock(tp, 0);
+	/* tp->hw_stats can be referenced safely:
+	 * 1. under rtnl_lock
+	 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
+	 */
 	if (tp->hw_stats) {
 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
 				  tp->hw_stats, tp->stats_mapping);
 		tp->hw_stats = NULL;
 	}
-	tg3_full_unlock(tp);
 }
 
 /*
@@ -10799,11 +10800,11 @@ static ssize_t tg3_show_temp(struct device *dev,
 }
 
 
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
 			  TG3_TEMP_SENSOR_OFFSET);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
+static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
 			  TG3_TEMP_CAUTION_OFFSET);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
+static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
 			  TG3_TEMP_MAX_OFFSET);
 
 static struct attribute *tg3_attrs[] = {
@@ -14178,7 +14179,7 @@ static void tg3_get_stats64(struct net_device *dev,
 	struct tg3 *tp = netdev_priv(dev);
 
 	spin_lock_bh(&tp->lock);
-	if (!tp->hw_stats) {
+	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
 		*stats = tp->net_stats_prev;
 		spin_unlock_bh(&tp->lock);
 		return;
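The tg3_free_consistent() and tg3_get_stats64() hunks are two halves of one locking change: the free path no longer takes the full lock, and in exchange the stats reader refuses to dereference tp->hw_stats unless it holds tp->lock and still sees TG3_FLAG_INIT_COMPLETE (which, as the new comment implies, is cleared before the teardown path frees the stats block). A condensed sketch of the reader side, assuming that invariant; the function name is illustrative:

static void example_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_bh(&tp->lock);
	/* Only touch hw_stats while INIT_COMPLETE guarantees it is live */
	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
		*stats = tp->net_stats_prev;	/* fall back to snapshot */
		spin_unlock_bh(&tp->lock);
		return;
	}

	/* ... safe to read live counters from tp->hw_stats here ... */

	spin_unlock_bh(&tp->lock);
}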