Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c           | 130
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h           |  13
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c                |   7
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h                |   6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c                 |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h          |  12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c      |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h      |   4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c  |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c     |  16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c     |   4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c            | 341
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h            |  30
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h        |   5
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c    |  56
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h        | 289
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c      |  61
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h      |   3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c        | 144
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h        |  22
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c       | 145
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h       |  12
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c           |  10
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                  |   6
24 files changed, 1018 insertions(+), 310 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 3fc549b88c43..4a75b1de22e0 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -574,6 +574,34 @@ static int bcm_sysport_set_wol(struct net_device *dev,
return 0;
}
+static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
+ u32 usecs, u32 pkts)
+{
+ u32 reg;
+
+ reg = rdma_readl(priv, RDMA_MBDONE_INTR);
+ reg &= ~(RDMA_INTR_THRESH_MASK |
+ RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
+ reg |= pkts;
+ reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
+ rdma_writel(priv, reg, RDMA_MBDONE_INTR);
+}
+
+static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
+ struct ethtool_coalesce *ec)
+{
+ struct bcm_sysport_priv *priv = ring->priv;
+ u32 reg;
+
+ reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
+ reg &= ~(RING_INTR_THRESH_MASK |
+ RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
+ reg |= ec->tx_max_coalesced_frames;
+ reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
+ RING_TIMEOUT_SHIFT;
+ tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
+}
+
static int bcm_sysport_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
@@ -589,6 +617,7 @@ static int bcm_sysport_get_coalesce(struct net_device *dev,
ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
+ ec->use_adaptive_rx_coalesce = priv->dim.use_dim;
return 0;
}
@@ -597,8 +626,9 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct net_dim_cq_moder moder;
+ u32 usecs, pkts;
unsigned int i;
- u32 reg;
/* Base system clock is 125MHz, DMA timeout is this reference clock
* divided by 1024, which yields roughly 8.192 us, our maximum value has
@@ -611,26 +641,28 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
return -EINVAL;
if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
- (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
+ (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
+ ec->use_adaptive_tx_coalesce)
return -EINVAL;
- for (i = 0; i < dev->num_tx_queues; i++) {
- reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(i));
- reg &= ~(RING_INTR_THRESH_MASK |
- RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
- reg |= ec->tx_max_coalesced_frames;
- reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
- RING_TIMEOUT_SHIFT;
- tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(i));
+ for (i = 0; i < dev->num_tx_queues; i++)
+ bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);
+
+ priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
+ priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
+ usecs = priv->rx_coalesce_usecs;
+ pkts = priv->rx_max_coalesced_frames;
+
+ if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
+ moder = net_dim_get_def_profile(priv->dim.dim.mode);
+ usecs = moder.usec;
+ pkts = moder.pkts;
}
- reg = rdma_readl(priv, RDMA_MBDONE_INTR);
- reg &= ~(RDMA_INTR_THRESH_MASK |
- RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
- reg |= ec->rx_max_coalesced_frames;
- reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192) <<
- RDMA_TIMEOUT_SHIFT;
- rdma_writel(priv, reg, RDMA_MBDONE_INTR);
+ priv->dim.use_dim = ec->use_adaptive_rx_coalesce;
+
+ /* Apply desired coalescing parameters */
+ bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
return 0;
}
@@ -709,6 +741,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
struct bcm_sysport_stats64 *stats64 = &priv->stats64;
struct net_device *ndev = priv->netdev;
unsigned int processed = 0, to_process;
+ unsigned int processed_bytes = 0;
struct bcm_sysport_cb *cb;
struct sk_buff *skb;
unsigned int p_index;
@@ -800,6 +833,7 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
*/
skb_pull(skb, sizeof(*rsb) + 2);
len -= (sizeof(*rsb) + 2);
+ processed_bytes += len;
/* UniMAC may forward CRC */
if (priv->crc_fwd) {
@@ -824,6 +858,9 @@ next:
priv->rx_read_ptr = 0;
}
+ priv->dim.packets = processed;
+ priv->dim.bytes = processed_bytes;
+
return processed;
}
@@ -972,6 +1009,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
struct bcm_sysport_priv *priv =
container_of(napi, struct bcm_sysport_priv, napi);
+ struct net_dim_sample dim_sample;
unsigned int work_done = 0;
work_done = bcm_sysport_desc_rx(priv, budget);
@@ -994,6 +1032,12 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
}
+ if (priv->dim.use_dim) {
+ net_dim_sample(priv->dim.event_ctr, priv->dim.packets,
+ priv->dim.bytes, &dim_sample);
+ net_dim(&priv->dim.dim, dim_sample);
+ }
+
return work_done;
}
@@ -1012,6 +1056,20 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
+static void bcm_sysport_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct bcm_sysport_net_dim *ndim =
+ container_of(dim, struct bcm_sysport_net_dim, dim);
+ struct bcm_sysport_priv *priv =
+ container_of(ndim, struct bcm_sysport_priv, dim);
+ struct net_dim_cq_moder cur_profile =
+ net_dim_get_profile(dim->mode, dim->profile_ix);
+
+ bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
+ dim->state = NET_DIM_START_MEASURE;
+}
+
/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
@@ -1030,6 +1088,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
}
if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
+ priv->dim.event_ctr++;
if (likely(napi_schedule_prep(&priv->napi))) {
/* disable RX interrupts */
intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
@@ -1354,6 +1413,37 @@ out:
phy_print_status(phydev);
}
+static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
+ void (*cb)(struct work_struct *work))
+{
+ struct bcm_sysport_net_dim *dim = &priv->dim;
+
+ INIT_WORK(&dim->dim.work, cb);
+ dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ dim->event_ctr = 0;
+ dim->packets = 0;
+ dim->bytes = 0;
+}
+
+static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
+{
+ struct bcm_sysport_net_dim *dim = &priv->dim;
+ struct net_dim_cq_moder moder;
+ u32 usecs, pkts;
+
+ usecs = priv->rx_coalesce_usecs;
+ pkts = priv->rx_max_coalesced_frames;
+
+ /* If DIM was enabled, re-apply default parameters */
+ if (dim->use_dim) {
+ moder = net_dim_get_def_profile(dim->dim.mode);
+ usecs = moder.usec;
+ pkts = moder.pkts;
+ }
+
+ bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
+}
+
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
unsigned int index)
{
@@ -1594,8 +1684,6 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
rdma_writel(priv, 0, RDMA_END_ADDR_HI);
rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
- rdma_writel(priv, 1, RDMA_MBDONE_INTR);
-
netif_dbg(priv, hw, priv->netdev,
"RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
priv->num_rx_bds, priv->rx_bds);
@@ -1763,6 +1851,8 @@ static void bcm_sysport_netif_start(struct net_device *dev)
struct bcm_sysport_priv *priv = netdev_priv(dev);
/* Enable NAPI */
+ bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
+ bcm_sysport_init_rx_coalesce(priv);
napi_enable(&priv->napi);
/* Enable RX interrupt and TX ring full interrupt */
@@ -1948,6 +2038,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
/* stop all software from updating hardware */
netif_tx_stop_all_queues(dev);
napi_disable(&priv->napi);
+ cancel_work_sync(&priv->dim.dim.work);
phy_stop(dev->phydev);
/* mask all interrupts */
@@ -2267,6 +2358,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
/* libphy will adjust the link state accordingly */
netif_carrier_off(dev);
+ priv->rx_max_coalesced_frames = 1;
u64_stats_init(&priv->syncp);
priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
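The new bcm_sysport_set_rx_coalesce() above converts microseconds into RDMA timeout ticks: the 125 MHz system clock divided by 1024 gives one tick of 8.192 us, and the driver works in nanoseconds (usecs * 1000) so the rounding stays in integer arithmetic. A quick user-space sanity check of that arithmetic (illustrative only, not driver code):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* One RDMA timeout tick = 1024 cycles of the 125 MHz clock
	 * = 8.192 us = 8192 ns, hence the divide by 8192.
	 */
	unsigned int usecs[] = { 1, 8, 50, 100 };

	for (int i = 0; i < 4; i++) {
		unsigned int ticks = DIV_ROUND_UP(usecs[i] * 1000, 8192);

		printf("%3u us -> %2u tick(s) = %u.%03u us programmed\n",
		       usecs[i], ticks, ticks * 8192 / 1000,
		       ticks * 8192 % 1000);
	}
	return 0;
}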
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 19c91c76e327..d6e5d0cbf3a3 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -12,6 +12,7 @@
#define __BCM_SYSPORT_H
#include <linux/if_vlan.h>
+#include <linux/net_dim.h>
/* Receive/transmit descriptor format */
#define DESC_ADDR_HI_STATUS_LEN 0x00
@@ -695,6 +696,14 @@ struct bcm_sysport_hw_params {
unsigned int num_rx_desc_words;
};
+struct bcm_sysport_net_dim {
+ u16 use_dim;
+ u16 event_ctr;
+ unsigned long packets;
+ unsigned long bytes;
+ struct net_dim dim;
+};
+
/* Software view of the TX ring */
struct bcm_sysport_tx_ring {
spinlock_t lock; /* Ring lock for tx reclaim/xmit */
@@ -743,6 +752,10 @@ struct bcm_sysport_priv {
unsigned int rx_read_ptr;
unsigned int rx_c_index;
+ struct bcm_sysport_net_dim dim;
+ u32 rx_max_coalesced_frames;
+ u32 rx_coalesce_usecs;
+
/* PHY device */
struct device_node *phy_dn;
phy_interface_t phy_interface;
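The bcm_sysport_net_dim wrapper added here follows the generic net_dim contract: the NAPI poll feeds a sample (event count, packets, bytes) to net_dim(), which may queue the work item, and the work handler programs whatever moderation profile the algorithm selected. A hedged sketch of that flow using the include/linux/net_dim.h helpers the diff relies on; the hardware-programming step is a placeholder:

#include <linux/net_dim.h>

/* Step 1, at the end of NAPI poll: hand net_dim() a sample; it may
 * schedule dim->work if the profile should change.
 */
static void example_napi_tail(struct net_dim *dim, u16 event_ctr,
			      u64 packets, u64 bytes)
{
	struct net_dim_sample sample;

	net_dim_sample(event_ctr, packets, bytes, &sample);
	net_dim(dim, sample);
}

/* Step 2, in process context: apply the chosen profile and re-arm the
 * measurement window.
 */
static void example_dim_work(struct work_struct *work)
{
	struct net_dim *dim = container_of(work, struct net_dim, work);
	struct net_dim_cq_moder moder = net_dim_get_profile(dim->mode,
							    dim->profile_ix);

	/* program moder.usec / moder.pkts into the coalescing register */
	dim->state = NET_DIM_START_MEASURE;
}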
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 8eef9fb6b1fe..e6ea8e61f96d 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -533,7 +533,8 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
int i;
for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
- int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
+ u32 ctl1 = le32_to_cpu(dma_desc[i].ctl1);
+ unsigned int len = ctl1 & BGMAC_DESC_CTL1_LEN;
slot = &ring->slots[i];
dev_kfree_skb(slot->skb);
@@ -1190,7 +1191,7 @@ static int bgmac_open(struct net_device *net_dev)
bgmac_chip_init(bgmac);
err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
- KBUILD_MODNAME, net_dev);
+ net_dev->name, net_dev);
if (err < 0) {
dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
bgmac_dma_cleanup(bgmac);
@@ -1492,6 +1493,8 @@ int bgmac_enet_probe(struct bgmac *bgmac)
struct net_device *net_dev = bgmac->net_dev;
int err;
+ bgmac_chip_intrs_off(bgmac);
+
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
dev_set_drvdata(bgmac->dev, bgmac);
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4040d846da8e..40d02fec2747 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -479,9 +479,9 @@ struct bgmac_rx_header {
struct bgmac {
union {
struct {
- void *base;
- void *idm_base;
- void *nicpm_base;
+ void __iomem *base;
+ void __iomem *idm_base;
+ void __iomem *nicpm_base;
} plat;
struct {
struct bcma_device *core;
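Switching the plat MMIO pointers from void * to void __iomem * is a sparse-annotation fix: MMIO mappings must only be touched through the I/O accessors, and the __iomem address space lets sparse (make C=1) flag direct dereferences or stray casts. A minimal illustration of what the annotation buys:

#include <linux/io.h>

/* With __iomem in place, the accessor is the only clean way in; the
 * commented line would draw a sparse address-space warning.
 */
static u32 example_read_reg(void __iomem *base, unsigned int off)
{
	return readl(base + off);
	/* return *(u32 *)(base + off);  <-- sparse would complain */
}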
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 5e34b34f7740..9ffc4a8c5fc7 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -87,7 +87,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
static int disable_msi = 0;
-module_param(disable_msi, int, S_IRUGO);
+module_param(disable_msi, int, 0444);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
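The S_IRUGO -> 0444 conversions in this series are the checkpatch-preferred spelling of the same mode: S_IRUGO is S_IRUSR | S_IRGRP | S_IROTH, i.e. read for user, group, and other. A trivial user-space check of the equivalence:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	/* S_IRUGO in the kernel is S_IRUSR | S_IRGRP | S_IROTH */
	printf("%o\n", S_IRUSR | S_IRGRP | S_IROTH);	/* prints 444 */
	return 0;
}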
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 352beff796ae..d847e1b9c37b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -166,6 +166,12 @@ do { \
#define REG_RD8(bp, offset) readb(REG_ADDR(bp, offset))
#define REG_RD16(bp, offset) readw(REG_ADDR(bp, offset))
+#define REG_WR_RELAXED(bp, offset, val) \
+ writel_relaxed((u32)val, REG_ADDR(bp, offset))
+
+#define REG_WR16_RELAXED(bp, offset, val) \
+ writew_relaxed((u16)val, REG_ADDR(bp, offset))
+
#define REG_WR(bp, offset, val) writel((u32)val, REG_ADDR(bp, offset))
#define REG_WR8(bp, offset, val) writeb((u8)val, REG_ADDR(bp, offset))
#define REG_WR16(bp, offset, val) writew((u16)val, REG_ADDR(bp, offset))
@@ -758,10 +764,8 @@ struct bnx2x_fastpath {
#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
#error "Min DB doorbell stride is 8"
#endif
-#define DOORBELL(bp, cid, val) \
- do { \
- writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
- } while (0)
+#define DOORBELL_RELAXED(bp, cid, val) \
+ writel_relaxed((u32)(val), (bp)->doorbells + ((bp)->db_size * (cid)))
/* TX CSUM helpers */
#define SKB_CS_OFF(skb) (offsetof(struct tcphdr, check) - \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index d7c98e807ca8..95871576ab92 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4153,9 +4153,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
txdata->tx_db.data.prod += nbd;
- barrier();
+ /* make sure descriptor update is observed by HW */
+ wmb();
- DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+ DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
mmiowb();
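The DOORBELL_RELAXED() conversion trades writel()'s implicit barriers for explicit ones: the wmb() added just above is what makes the descriptor writes visible to the device before the doorbell, writel_relaxed() then avoids paying for a second barrier inside the accessor, and mmiowb() keeps doorbell writes from different CPUs ordered across the ring lock. A hedged sketch of the resulting pattern; the doorbell address is illustrative:

#include <linux/io.h>

static void example_ring_doorbell(void __iomem *db, u32 prod)
{
	wmb();				/* descriptors visible before doorbell */
	writel_relaxed(prod, db);	/* no redundant accessor barrier */
	mmiowb();			/* order MMIO vs. other CPUs' lock release */
}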
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a5265e1344f1..a8ce5c55bbb0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -522,8 +522,8 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
wmb();
for (i = 0; i < sizeof(rx_prods)/4; i++)
- REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
- ((u32 *)&rx_prods)[i]);
+ REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4,
+ ((u32 *)&rx_prods)[i]);
mmiowb(); /* keep prod updates ordered */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 1e33abde4a3e..da18aa239acb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2591,8 +2591,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
wmb();
txdata->tx_db.data.prod += 2;
- barrier();
- DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+ /* make sure descriptor update is observed by the HW */
+ wmb();
+ DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
mmiowb();
barrier();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b8388e93520a..c766ae23bc74 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -97,29 +97,29 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
int bnx2x_num_queues;
-module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
+module_param_named(num_queues, bnx2x_num_queues, int, 0444);
MODULE_PARM_DESC(num_queues,
" Set number of queues (default is as a number of CPUs)");
static int disable_tpa;
-module_param(disable_tpa, int, S_IRUGO);
+module_param(disable_tpa, int, 0444);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
-module_param(int_mode, int, S_IRUGO);
+module_param(int_mode, int, 0444);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
static int dropless_fc;
-module_param(dropless_fc, int, S_IRUGO);
+module_param(dropless_fc, int, 0444);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int mrrs = -1;
-module_param(mrrs, int, S_IRUGO);
+module_param(mrrs, int, 0444);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
-module_param(debug, int, S_IRUGO);
+module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, " Default debug msglevel");
static struct workqueue_struct *bnx2x_wq;
@@ -3817,8 +3817,8 @@ static void bnx2x_sp_prod_update(struct bnx2x *bp)
*/
mb();
- REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
- bp->spq_prod_idx);
+ REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+ bp->spq_prod_idx);
mmiowb();
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 76a4668c50fe..8e0a317b31f7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -170,7 +170,9 @@ static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
wmb();
/* Trigger the PF FW */
- writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);
+ writeb_relaxed(1, &zone_data->trigger.vf_pf_channel.addr_valid);
+
+ mmiowb();
/* Wait for PF to complete */
while ((tout >= 0) && (!*done)) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c7e5e6f09647..1991f0c7bc0e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1922,7 +1922,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
/* Sync BD data before updating doorbell */
wmb();
- bnxt_db_write(bp, db, DB_KEY_TX | prod);
+ bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod);
}
cpr->cp_raw_cons = raw_cons;
@@ -2317,6 +2317,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
if (rc)
return rc;
+ ring->grp_idx = i;
rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
mem_size = rxr->rx_agg_bmap_size / 8;
rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
@@ -2389,6 +2390,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
if (rc)
return rc;
+ ring->grp_idx = txr->bnapi->index;
if (bp->tx_push_size) {
dma_addr_t mapping;
@@ -2442,8 +2444,10 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
- int i, rc;
+ int i, rc, ulp_base_vec, ulp_msix;
+ ulp_msix = bnxt_get_ulp_msix_num(bp);
+ ulp_base_vec = bnxt_get_ulp_msix_base(bp);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
@@ -2458,6 +2462,11 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
rc = bnxt_alloc_ring(bp, ring);
if (rc)
return rc;
+
+ if (ulp_msix && i >= ulp_base_vec)
+ ring->map_idx = i + ulp_msix;
+ else
+ ring->map_idx = i;
}
return 0;
}
@@ -3059,12 +3068,21 @@ static void bnxt_free_stats(struct bnxt *bp)
u32 size, i;
struct pci_dev *pdev = bp->pdev;
+ bp->flags &= ~BNXT_FLAG_PORT_STATS;
+ bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+
if (bp->hw_rx_port_stats) {
dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
bp->hw_rx_port_stats,
bp->hw_rx_port_stats_map);
bp->hw_rx_port_stats = NULL;
- bp->flags &= ~BNXT_FLAG_PORT_STATS;
+ }
+
+ if (bp->hw_rx_port_stats_ext) {
+ dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
+ bp->hw_rx_port_stats_ext,
+ bp->hw_rx_port_stats_ext_map);
+ bp->hw_rx_port_stats_ext = NULL;
}
if (!bp->bnapi)
@@ -3120,6 +3138,21 @@ static int bnxt_alloc_stats(struct bnxt *bp)
bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
sizeof(struct rx_port_stats) + 512;
bp->flags |= BNXT_FLAG_PORT_STATS;
+
+ /* Display extended statistics only if FW supports it */
+ if (bp->hwrm_spec_code < 0x10804 ||
+ bp->hwrm_spec_code == 0x10900)
+ return 0;
+
+ bp->hw_rx_port_stats_ext =
+ dma_zalloc_coherent(&pdev->dev,
+ sizeof(struct rx_port_stats_ext),
+ &bp->hw_rx_port_stats_ext_map,
+ GFP_KERNEL);
+ if (!bp->hw_rx_port_stats_ext)
+ return 0;
+
+ bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
}
return 0;
}
@@ -3357,6 +3390,15 @@ static void bnxt_disable_int(struct bnxt *bp)
}
}
+static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
+{
+ struct bnxt_napi *bnapi = bp->bnapi[n];
+ struct bnxt_cp_ring_info *cpr;
+
+ cpr = &bnapi->cp_ring;
+ return cpr->cp_ring_struct.map_idx;
+}
+
static void bnxt_disable_int_sync(struct bnxt *bp)
{
int i;
@@ -3364,8 +3406,11 @@ static void bnxt_disable_int_sync(struct bnxt *bp)
atomic_inc(&bp->intr_sem);
bnxt_disable_int(bp);
- for (i = 0; i < bp->cp_nr_rings; i++)
- synchronize_irq(bp->irq_tbl[i].vector);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+
+ synchronize_irq(bp->irq_tbl[map_idx].vector);
+ }
}
static void bnxt_enable_int(struct bnxt *bp)
@@ -3398,7 +3443,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
int i, intr_process, rc, tmo_count;
struct input *req = msg;
u32 *data = msg;
- __le32 *resp_len, *valid;
+ __le32 *resp_len;
+ u8 *valid;
u16 cp_ring_id, len = 0;
struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
@@ -3450,6 +3496,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
i = 0;
tmo_count = timeout * 40;
+ resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
if (intr_process) {
/* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
@@ -3462,9 +3509,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
le16_to_cpu(req->req_type));
return -1;
}
+ len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+ HWRM_RESP_LEN_SFT;
+ valid = bp->hwrm_cmd_resp_addr + len - 1;
} else {
/* Check if response len is updated */
- resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
for (i = 0; i < tmo_count; i++) {
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
@@ -3480,10 +3529,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
return -1;
}
- /* Last word of resp contains valid bit */
- valid = bp->hwrm_cmd_resp_addr + len - 4;
+ /* Last byte of resp contains valid bit */
+ valid = bp->hwrm_cmd_resp_addr + len - 1;
for (i = 0; i < 5; i++) {
- if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+ /* make sure we read from updated DMA memory */
+ dma_rmb();
+ if (*valid)
break;
udelay(1);
}
@@ -3496,6 +3547,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
}
}
+ /* Zero valid bit for compatibility. Valid bit in an older spec
+ * may become a new field in a newer spec. We must make sure that
+ * a new field not implemented by old spec will read zero.
+ */
+ *valid = 0;
rc = le16_to_cpu(resp->error_code);
if (rc && !silent)
netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
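The HWRM completion check now polls a single valid byte at the end of the response instead of a 32-bit word, and issues dma_rmb() before each read so the CPU observes the firmware's DMA writes in order; once the byte is seen set, the driver also zeroes it so a field added by a newer spec reads as zero under an older one. A hedged sketch of the polling loop (buffer layout is illustrative):

#include <linux/delay.h>

static bool example_wait_hwrm_valid(u8 *valid, unsigned int tries)
{
	while (tries--) {
		/* make sure we read from updated DMA memory */
		dma_rmb();
		if (*valid)
			return true;
		udelay(1);
	}
	return false;
}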
@@ -3577,9 +3633,13 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
FUNC_DRV_RGTR_REQ_ENABLES_VER);
req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
- req.ver_maj = DRV_VER_MAJ;
- req.ver_min = DRV_VER_MIN;
- req.ver_upd = DRV_VER_UPD;
+ req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
+ req.ver_maj_8b = DRV_VER_MAJ;
+ req.ver_min_8b = DRV_VER_MIN;
+ req.ver_upd_8b = DRV_VER_UPD;
+ req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
+ req.ver_min = cpu_to_le16(DRV_VER_MIN);
+ req.ver_upd = cpu_to_le16(DRV_VER_UPD);
if (BNXT_PF(bp)) {
u32 data[8];
@@ -3998,6 +4058,13 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
return rc;
}
+static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
+{
+ if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
+ return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
+ return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
+}
+
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
unsigned int ring = 0, grp_idx;
@@ -4053,8 +4120,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
- req.flags |=
- cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
+ req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
@@ -4135,9 +4201,13 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
- if (resp->flags &
- cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
+ u32 flags = le32_to_cpu(resp->flags);
+
+ if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)
bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+ if (flags &
+ VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
+ bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4204,12 +4274,12 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
struct bnxt_ring_struct *ring,
- u32 ring_type, u32 map_index,
- u32 stats_ctx_id)
+ u32 ring_type, u32 map_index)
{
int rc = 0, err = 0;
struct hwrm_ring_alloc_input req = {0};
struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_ring_grp_info *grp_info;
u16 ring_id;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
@@ -4231,10 +4301,10 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
case HWRM_RING_ALLOC_TX:
req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
/* Association of transmit ring with completion ring */
- req.cmpl_ring_id =
- cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+ grp_info = &bp->grp_info[ring->grp_idx];
+ req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
req.length = cpu_to_le32(bp->tx_ring_mask + 1);
- req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+ req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
req.queue_id = cpu_to_le16(ring->queue_id);
break;
case HWRM_RING_ALLOC_RX:
@@ -4321,10 +4391,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+ u32 map_idx = ring->map_idx;
- cpr->cp_doorbell = bp->bar1 + i * 0x80;
- rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
- INVALID_STATS_CTX_ID);
+ cpr->cp_doorbell = bp->bar1 + map_idx * 0x80;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL,
+ map_idx);
if (rc)
goto err_out;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
@@ -4340,11 +4411,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
- u32 map_idx = txr->bnapi->index;
- u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
+ u32 map_idx = i;
rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
- map_idx, fw_stats_ctx);
+ map_idx);
if (rc)
goto err_out;
txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
@@ -4356,7 +4426,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
u32 map_idx = rxr->bnapi->index;
rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
- map_idx, INVALID_STATS_CTX_ID);
+ map_idx);
if (rc)
goto err_out;
rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
@@ -4369,13 +4439,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring =
&rxr->rx_agg_ring_struct;
- u32 grp_idx = rxr->bnapi->index;
+ u32 grp_idx = ring->grp_idx;
u32 map_idx = grp_idx + bp->rx_nr_rings;
rc = hwrm_ring_alloc_send_msg(bp, ring,
HWRM_RING_ALLOC_AGG,
- map_idx,
- INVALID_STATS_CTX_ID);
+ map_idx);
if (rc)
goto err_out;
@@ -4669,20 +4738,59 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
}
+static int bnxt_cp_rings_in_use(struct bnxt *bp)
+{
+ int cp = bp->cp_nr_rings;
+ int ulp_msix, ulp_base;
+
+ ulp_msix = bnxt_get_ulp_msix_num(bp);
+ if (ulp_msix) {
+ ulp_base = bnxt_get_ulp_msix_base(bp);
+ cp += ulp_msix;
+ if ((ulp_base + ulp_msix) > cp)
+ cp = ulp_base + ulp_msix;
+ }
+ return cp;
+}
+
+static bool bnxt_need_reserve_rings(struct bnxt *bp)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int cp = bnxt_cp_rings_in_use(bp);
+ int rx = bp->rx_nr_rings;
+ int vnic = 1, grp = rx;
+
+ if (bp->hwrm_spec_code < 0x10601)
+ return false;
+
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+ return true;
+
+ if (bp->flags & BNXT_FLAG_RFS)
+ vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+ if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
+ hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
+ return true;
+ return false;
+}
+
static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
bool shared);
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int cp = bnxt_cp_rings_in_use(bp);
int tx = bp->tx_nr_rings;
int rx = bp->rx_nr_rings;
- int cp = bp->cp_nr_rings;
int grp, rx_rings, rc;
bool sh = false;
int vnic = 1;
- if (bp->hwrm_spec_code < 0x10601)
+ if (!bnxt_need_reserve_rings(bp))
return 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -4691,14 +4799,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
-
grp = bp->rx_nr_rings;
- if (tx == hw_resc->resv_tx_rings &&
- (!(bp->flags & BNXT_FLAG_NEW_RM) ||
- (rx == hw_resc->resv_rx_rings &&
- grp == hw_resc->resv_hw_ring_grps &&
- cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
- return 0;
rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
if (rc)
@@ -4742,30 +4843,6 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
return rc;
}
-static bool bnxt_need_reserve_rings(struct bnxt *bp)
-{
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
- int rx = bp->rx_nr_rings;
- int vnic = 1;
-
- if (bp->hwrm_spec_code < 0x10601)
- return false;
-
- if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
- return true;
-
- if (bp->flags & BNXT_FLAG_RFS)
- vnic = rx + 1;
- if (bp->flags & BNXT_FLAG_AGG_RINGS)
- rx <<= 1;
- if ((bp->flags & BNXT_FLAG_NEW_RM) &&
- (hw_resc->resv_rx_rings != rx ||
- hw_resc->resv_cp_rings != bp->cp_nr_rings ||
- hw_resc->resv_vnics != vnic))
- return true;
- return false;
-}
-
static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
int ring_grps, int cp_rings, int vnics)
{
@@ -5055,7 +5132,7 @@ func_qcfg_exit:
return rc;
}
-static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
{
struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_resource_qcaps_input req = {0};
@@ -5072,6 +5149,10 @@ static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
goto hwrm_func_resc_qcaps_exit;
}
+ hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
+ if (!all)
+ goto hwrm_func_resc_qcaps_exit;
+
hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
@@ -5178,7 +5259,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
if (rc)
return rc;
if (bp->hwrm_spec_code >= 0x10803) {
- rc = bnxt_hwrm_func_resc_qcaps(bp);
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
if (!rc)
bp->flags |= BNXT_FLAG_NEW_RM;
}
@@ -5326,6 +5407,21 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
return rc;
}
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
+{
+ struct hwrm_port_qstats_ext_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
+
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
+ req.port_id = cpu_to_le16(pf->port_id);
+ req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
+ req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
if (bp->vxlan_port_cnt) {
@@ -5418,10 +5514,9 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
- req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
+ req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
if (size == 128)
- req.cache_linesize =
- FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
+ req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
@@ -5740,6 +5835,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
}
for (i = 0; i < bp->cp_nr_rings; i++) {
+ int map_idx = bnxt_cp_num_to_irq_num(bp, i);
char *attr;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -5749,9 +5845,9 @@ static void bnxt_setup_msix(struct bnxt *bp)
else
attr = "tx";
- snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
- i);
- bp->irq_tbl[i].handler = bnxt_msix;
+ snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
+ attr, i);
+ bp->irq_tbl[map_idx].handler = bnxt_msix;
}
}
@@ -5812,7 +5908,7 @@ void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
bp->hw_resc.max_cp_rings = max;
}
-static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -5824,12 +5920,44 @@ void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
bp->hw_resc.max_irqs = max_irqs;
}
+int bnxt_get_avail_msix(struct bnxt *bp, int num)
+{
+ int max_cp = bnxt_get_max_func_cp_rings(bp);
+ int max_irq = bnxt_get_max_func_irqs(bp);
+ int total_req = bp->cp_nr_rings + num;
+ int max_idx, avail_msix;
+
+ max_idx = min_t(int, bp->total_irqs, max_cp);
+ avail_msix = max_idx - bp->cp_nr_rings;
+ if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
+ return avail_msix;
+
+ if (max_irq < total_req) {
+ num = max_irq - bp->cp_nr_rings;
+ if (num <= 0)
+ return 0;
+ }
+ return num;
+}
+
+static int bnxt_get_num_msix(struct bnxt *bp)
+{
+ if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ return bnxt_get_max_func_irqs(bp);
+
+ return bnxt_cp_rings_in_use(bp);
+}
+
static int bnxt_init_msix(struct bnxt *bp)
{
- int i, total_vecs, rc = 0, min = 1;
+ int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
struct msix_entry *msix_ent;
- total_vecs = bnxt_get_max_func_irqs(bp);
+ total_vecs = bnxt_get_num_msix(bp);
+ max = bnxt_get_max_func_irqs(bp);
+ if (total_vecs > max)
+ total_vecs = max;
+
msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
if (!msix_ent)
return -ENOMEM;
@@ -5843,7 +5971,8 @@ static int bnxt_init_msix(struct bnxt *bp)
min = 2;
total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
- if (total_vecs < 0) {
+ ulp_msix = bnxt_get_ulp_msix_num(bp);
+ if (total_vecs < 0 || total_vecs < ulp_msix) {
rc = -ENODEV;
goto msix_setup_exit;
}
@@ -5856,7 +5985,7 @@ static int bnxt_init_msix(struct bnxt *bp)
bp->total_irqs = total_vecs;
/* Trim rings based upon num of vectors allocated */
rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
- total_vecs, min == 1);
+ total_vecs - ulp_msix, min == 1);
if (rc)
goto msix_setup_exit;
@@ -5920,9 +6049,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_USING_MSIX;
}
-static int bnxt_reserve_rings(struct bnxt *bp)
+int bnxt_reserve_rings(struct bnxt *bp)
{
- int orig_cp = bp->hw_resc.resv_cp_rings;
int tcs = netdev_get_num_tc(bp->dev);
int rc;
@@ -5934,9 +6062,12 @@ static int bnxt_reserve_rings(struct bnxt *bp)
netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
return rc;
}
- if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
+ if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+ bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
+ bnxt_ulp_irq_restart(bp, rc);
if (rc)
return rc;
}
@@ -5963,7 +6094,9 @@ static void bnxt_free_irq(struct bnxt *bp)
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
- irq = &bp->irq_tbl[i];
+ int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+
+ irq = &bp->irq_tbl[map_idx];
if (irq->requested) {
if (irq->have_cpumask) {
irq_set_affinity_hint(irq->vector, NULL);
@@ -5982,14 +6115,25 @@ static int bnxt_request_irq(struct bnxt *bp)
int i, j, rc = 0;
unsigned long flags = 0;
#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+ struct cpu_rmap *rmap;
#endif
+ rc = bnxt_setup_int_mode(bp);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+ rc);
+ return rc;
+ }
+#ifdef CONFIG_RFS_ACCEL
+ rmap = bp->dev->rx_cpu_rmap;
+#endif
if (!(bp->flags & BNXT_FLAG_USING_MSIX))
flags = IRQF_SHARED;
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_irq *irq = &bp->irq_tbl[i];
+ int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+ struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
+
#ifdef CONFIG_RFS_ACCEL
if (rmap && bp->bnapi[i]->rx_ring) {
rc = irq_cpu_rmap_add(rmap, irq->vector);
@@ -6709,13 +6853,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = bnxt_reserve_rings(bp);
if (rc)
return rc;
-
- rc = bnxt_setup_int_mode(bp);
- if (rc) {
- netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
- rc);
- return rc;
- }
}
if ((bp->flags & BNXT_FLAG_RFS) &&
!(bp->flags & BNXT_FLAG_USING_MSIX)) {
@@ -7478,8 +7615,10 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
- if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+ if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp);
+ bnxt_hwrm_port_qstats_ext(bp);
+ }
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
int rc;
@@ -8193,6 +8332,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_set_vf_rate = bnxt_set_vf_bw,
.ndo_set_vf_link_state = bnxt_set_vf_link_state,
.ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
+ .ndo_set_vf_trust = bnxt_set_vf_trust,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bnxt_poll_controller,
@@ -8390,9 +8530,15 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
dflt_rings = netif_get_num_default_rss_queues();
- /* Reduce default rings to reduce memory usage on multi-port cards */
- if (bp->port_count > 1)
- dflt_rings = min_t(int, dflt_rings, 4);
+ /* Reduce default rings on multi-port cards so that total default
+ * rings do not exceed CPU count.
+ */
+ if (bp->port_count > 1) {
+ int max_rings =
+ max_t(int, num_online_cpus() / bp->port_count, 1);
+
+ dflt_rings = min_t(int, dflt_rings, max_rings);
+ }
rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
if (rc)
return rc;
@@ -8431,16 +8577,15 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
int rc;
ASSERT_RTNL();
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
- return 0;
-
bnxt_hwrm_func_qcaps(bp);
if (netif_running(bp->dev))
__bnxt_close_nic(bp, true, false);
+ bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
+ bnxt_ulp_irq_restart(bp, rc);
if (netif_running(bp->dev)) {
if (rc)
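Taken together, the bnxt.c changes let the RDMA (ULP) driver own a contiguous block of MSI-X vectors inside the table, so an L2 completion ring's vector is no longer simply its own index: rings at or above the ULP base are shifted up by the ULP vector count, which is exactly what bnxt_cp_num_to_irq_num() resolves. A small user-space demo of the mapping with illustrative numbers:

#include <stdio.h>

int main(void)
{
	/* illustrative: 8 L2 completion rings, 2 ULP vectors at base 6 */
	int cp_nr_rings = 8, ulp_msix = 2, ulp_base_vec = 6;

	for (int i = 0; i < cp_nr_rings; i++) {
		int map_idx = (ulp_msix && i >= ulp_base_vec) ?
			      i + ulp_msix : i;

		printf("cp ring %d -> MSI-X vector %d\n", i, map_idx);
	}
	/* vectors 6 and 7 are left for the RDMA driver */
	return 0;
}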
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5e3d62189cab..3d55d3b56865 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,11 +12,11 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.9.0"
+#define DRV_MODULE_VERSION "1.9.1"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 9
-#define DRV_VER_UPD 0
+#define DRV_VER_UPD 1
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
@@ -573,6 +573,10 @@ struct bnxt_ring_struct {
void **vmem;
u16 fw_ring_id; /* Ring id filled by Chimp FW */
+ union {
+ u16 grp_idx;
+ u16 map_idx; /* Used by cmpl rings */
+ };
u8 queue_id;
};
@@ -786,6 +790,7 @@ struct bnxt_hw_resc {
u16 min_tx_rings;
u16 max_tx_rings;
u16 resv_tx_rings;
+ u16 max_tx_sch_inputs;
u16 min_rx_rings;
u16 max_rx_rings;
u16 resv_rx_rings;
@@ -815,6 +820,7 @@ struct bnxt_vf_info {
#define BNXT_VF_SPOOFCHK 0x2
#define BNXT_VF_LINK_FORCED 0x4
#define BNXT_VF_LINK_UP 0x8
+#define BNXT_VF_TRUST 0x10
u32 func_flags; /* func cfg flags */
u32 min_tx_rate;
u32 max_tx_rate;
@@ -1151,7 +1157,9 @@ struct bnxt {
#define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_DIM 0x2000000
+ #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_NEW_RM 0x8000000
+ #define BNXT_FLAG_PORT_STATS_EXT 0x10000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1271,8 +1279,10 @@ struct bnxt {
struct rx_port_stats *hw_rx_port_stats;
struct tx_port_stats *hw_tx_port_stats;
+ struct rx_port_stats_ext *hw_rx_port_stats_ext;
dma_addr_t hw_rx_port_stats_map;
dma_addr_t hw_tx_port_stats_map;
+ dma_addr_t hw_rx_port_stats_ext_map;
int hw_port_stats_size;
u16 hwrm_max_req_len;
@@ -1383,6 +1393,9 @@ struct bnxt {
((offsetof(struct tx_port_stats, counter) + \
sizeof(struct rx_port_stats) + 512) / 8)
+#define BNXT_RX_STATS_EXT_OFFSET(counter) \
+ (offsetof(struct rx_port_stats_ext, counter) / 8)
+
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
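BNXT_RX_STATS_EXT_OFFSET() divides the byte offset by 8 because the firmware stats block is laid out as consecutive 64-bit little-endian counters, so the macro yields a direct index into the block viewed as an array of __le64. A user-space illustration with a stand-in struct (demo_stats is hypothetical, not the real rx_port_stats_ext layout):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_stats {
	uint64_t link_down_events;
	uint64_t continuous_pause_events;
	uint64_t resume_pause_events;
};

#define DEMO_STATS_OFFSET(c)	(offsetof(struct demo_stats, c) / 8)

int main(void)
{
	struct demo_stats s = { 1, 2, 3 };
	uint64_t *blk = (uint64_t *)&s;	/* stats block as a u64 array */

	printf("resume_pause_events = %llu (index %zu)\n",
	       (unsigned long long)blk[DEMO_STATS_OFFSET(resume_pause_events)],
	       DEMO_STATS_OFFSET(resume_pause_events));
	return 0;
}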
@@ -1402,6 +1415,15 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}
+/* For TX and RX ring doorbells with no ordering guarantee */
+static inline void bnxt_db_write_relaxed(struct bnxt *bp, void __iomem *db,
+ u32 val)
+{
+ writel_relaxed(val, db);
+ if (bp->flags & BNXT_FLAG_DOUBLE_DB)
+ writel_relaxed(val, db);
+}
+
/* For TX and RX ring doorbells */
static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val)
{
@@ -1432,13 +1454,17 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
+unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
+int bnxt_get_avail_msix(struct bnxt *bp, int num);
+int bnxt_reserve_rings(struct bnxt *bp);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index d2e0af960bf5..69efde785f23 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -34,7 +34,8 @@ struct bnxt_cos2bw_cfg {
};
#define BNXT_LLQ(q_profile) \
- ((q_profile) == QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS)
+ ((q_profile) == \
+ QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE)
#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1801582076be..8d8ccd67e0e2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -137,6 +137,9 @@ reset_coalesce:
#define BNXT_TX_STATS_ENTRY(counter) \
{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
+#define BNXT_RX_STATS_EXT_ENTRY(counter) \
+ { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
+
static const struct {
long offset;
char string[ETH_GSTRING_LEN];
@@ -181,6 +184,8 @@ static const struct {
BNXT_RX_STATS_ENTRY(rx_bytes),
BNXT_RX_STATS_ENTRY(rx_runt_bytes),
BNXT_RX_STATS_ENTRY(rx_runt_frames),
+ BNXT_RX_STATS_ENTRY(rx_stat_discard),
+ BNXT_RX_STATS_ENTRY(rx_stat_err),
BNXT_TX_STATS_ENTRY(tx_64b_frames),
BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
@@ -216,9 +221,24 @@ static const struct {
BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
BNXT_TX_STATS_ENTRY(tx_total_collisions),
BNXT_TX_STATS_ENTRY(tx_bytes),
+ BNXT_TX_STATS_ENTRY(tx_xthol_frames),
+ BNXT_TX_STATS_ENTRY(tx_stat_discard),
+ BNXT_TX_STATS_ENTRY(tx_stat_error),
+};
+
+static const struct {
+ long offset;
+ char string[ETH_GSTRING_LEN];
+} bnxt_port_stats_ext_arr[] = {
+ BNXT_RX_STATS_EXT_ENTRY(link_down_events),
+ BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
+ BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
+ BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
+ BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
};
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
+#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
static int bnxt_get_num_stats(struct bnxt *bp)
{
@@ -227,6 +247,9 @@ static int bnxt_get_num_stats(struct bnxt *bp)
if (bp->flags & BNXT_FLAG_PORT_STATS)
num_stats += BNXT_NUM_PORT_STATS;
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT)
+ num_stats += BNXT_NUM_PORT_STATS_EXT;
+
return num_stats;
}
@@ -274,6 +297,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
bnxt_port_stats_arr[i].offset));
}
}
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+ __le64 *port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
+
+ for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++, j++) {
+ buf[j] = le64_to_cpu(*(port_stats_ext +
+ bnxt_port_stats_ext_arr[i].offset));
+ }
+ }
}
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -334,6 +365,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
buf += ETH_GSTRING_LEN;
}
}
+ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
+ for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++) {
+ strcpy(buf, bnxt_port_stats_ext_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
break;
case ETH_SS_TEST:
if (bp->num_tests)
@@ -388,15 +425,26 @@ static void bnxt_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int max_rx_rings, max_tx_rings, tcs;
+ int max_tx_sch_inputs;
+
+ /* Get the most up-to-date max_tx_sch_inputs. */
+ if (bp->flags & BNXT_FLAG_NEW_RM)
+ bnxt_hwrm_func_resc_qcaps(bp, false);
+ max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
+ if (max_tx_sch_inputs)
+ max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
max_rx_rings = 0;
max_tx_rings = 0;
}
+ if (max_tx_sch_inputs)
+ max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
tcs = netdev_get_num_tc(dev);
if (tcs > 1)
@@ -2535,16 +2583,20 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return -EOPNOTSUPP;
rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
- if (!rc)
+ if (!rc) {
netdev_info(dev, "Reset request successful. Reload driver to complete reset\n");
+ *flags = 0;
+ }
} else if (*flags == ETH_RESET_AP) {
/* This feature is not supported in older firmware versions */
if (bp->hwrm_spec_code < 0x10803)
return -EOPNOTSUPP;
rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
- if (!rc)
+ if (!rc) {
netdev_info(dev, "Reset Application Processor request successful.\n");
+ *flags = 0;
+ }
} else {
rc = -EINVAL;
}
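Clearing *flags after a successful reset follows the ethtool contract for ETHTOOL_RESET: the driver clears the bits for the reset types it actually performed, and user space reads whatever is left as "not done". A hedged sketch of a handler honoring that convention (example_reset is illustrative, not the bnxt function):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int example_reset(struct net_device *dev, u32 *flags)
{
	if (*flags == ETH_RESET_ALL) {
		/* ... perform the full reset here ... */
		*flags = 0;	/* everything requested was completed */
		return 0;
	}
	return -EOPNOTSUPP;	/* unsupported reset combination */
}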
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 82d17f8cc0db..0fe0ea8dce6c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -188,6 +188,7 @@ struct cmd_nums {
#define HWRM_STAT_CTX_FREE 0xb1UL
#define HWRM_STAT_CTX_QUERY 0xb2UL
#define HWRM_STAT_CTX_CLR_STATS 0xb3UL
+ #define HWRM_PORT_QSTATS_EXT 0xb4UL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_SET_TIME 0xc8UL
@@ -199,6 +200,7 @@ struct cmd_nums {
#define HWRM_REJECT_FWD_RESP 0xd1UL
#define HWRM_FWD_RESP 0xd2UL
#define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
+ #define HWRM_OEM_CMD 0xd4UL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
@@ -271,6 +273,7 @@ struct cmd_nums {
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
+ #define HWRM_PCIE_QSTATS 0x204UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -341,9 +344,9 @@ struct hwrm_err_output {
#define HWRM_RESP_VALID_KEY 1
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 9
-#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 0
-#define HWRM_VERSION_STR "1.9.0.0"
+#define HWRM_VERSION_UPDATE 1
+#define HWRM_VERSION_RSVD 15
+#define HWRM_VERSION_STR "1.9.1.15"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -616,30 +619,6 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change {
#define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
};
-/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
-struct hwrm_async_event_cmpl_pf_drvr_unload {
- __le16 type;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
- __le16 event_id;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
- __le32 event_data2;
- u8 opaque_v;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
- u8 timestamp_lo;
- __le16 timestamp_hi;
- __le32 event_data1;
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
-};
-
/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
@@ -854,6 +833,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
#define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
#define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
+ #define FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED 0x10000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -966,10 +946,14 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
#define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
- u8 cache_linesize;
- #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
- #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
- #define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128
+ u8 options;
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_MASK 0xfcUL
+ #define FUNC_QCFG_RESP_OPTIONS_RSVD_SFT 2
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
@@ -1124,10 +1108,14 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
#define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
#define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
- u8 cache_linesize;
- #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
- #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
- #define FUNC_CFG_REQ_CACHE_LINESIZE_LAST FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128
+ u8 options;
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_MASK 0x3UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SFT 0
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64 0x0UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128 0x1UL
+ #define FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_LAST FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_MASK 0xfcUL
+ #define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 2
__le16 num_mcast_filters;
};
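With cache_linesize folded into a 2-bit field of the new options byte, readers have to mask and shift rather than compare the whole byte. A hedged one-liner showing how a consumer of hwrm_func_qcfg_output would pull the line size back out (resp is assumed to point at a completed response):

	u8 cache_linesize = (resp->options &
			     FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_MASK) >>
			    FUNC_QCFG_RESP_OPTIONS_CACHE_LINESIZE_SFT;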
@@ -1248,7 +1236,7 @@ struct hwrm_func_vf_vnic_ids_query_output {
u8 valid;
};
-/* hwrm_func_drv_rgtr_input (size:832b/104B) */
+/* hwrm_func_drv_rgtr_input (size:896b/112B) */
struct hwrm_func_drv_rgtr_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1256,8 +1244,9 @@ struct hwrm_func_drv_rgtr_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1277,14 +1266,18 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
#define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
#define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
u8 unused_0[3];
__le32 timestamp;
u8 unused_1[4];
__le32 vf_req_fwd[8];
__le32 async_event_fwd[8];
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
};
/* hwrm_func_drv_rgtr_output (size:128b/16B) */
@@ -1379,7 +1372,7 @@ struct hwrm_func_drv_qver_input {
u8 unused_0[2];
};
-/* hwrm_func_drv_qver_output (size:128b/16B) */
+/* hwrm_func_drv_qver_output (size:192b/24B) */
struct hwrm_func_drv_qver_output {
__le16 error_code;
__le16 req_type;
@@ -1398,11 +1391,15 @@ struct hwrm_func_drv_qver_output {
#define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
#define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
#define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
- u8 ver_maj;
- u8 ver_min;
- u8 ver_upd;
+ u8 ver_maj_8b;
+ u8 ver_min_8b;
+ u8 ver_upd_8b;
u8 unused_0[2];
u8 valid;
+ __le16 ver_maj;
+ __le16 ver_min;
+ __le16 ver_upd;
+ __le16 ver_patch;
};
/* hwrm_func_resource_qcaps_input (size:192b/24B) */
@@ -1416,7 +1413,7 @@ struct hwrm_func_resource_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_resource_qcaps_output (size:384b/48B) */
+/* hwrm_func_resource_qcaps_output (size:448b/56B) */
struct hwrm_func_resource_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1425,9 +1422,10 @@ struct hwrm_func_resource_qcaps_output {
__le16 max_vfs;
__le16 max_msix;
__le16 vf_reservation_strategy;
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
- #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC 0x2UL
+ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
__le16 min_rsscos_ctx;
__le16 max_rsscos_ctx;
__le16 min_cmpl_rings;
@@ -1444,7 +1442,8 @@ struct hwrm_func_resource_qcaps_output {
__le16 max_stat_ctx;
__le16 min_hw_ring_grps;
__le16 max_hw_ring_grps;
- u8 unused_0;
+ __le16 max_tx_scheduler_inputs;
+ u8 unused_0[7];
u8 valid;
};
@@ -1627,6 +1626,16 @@ struct hwrm_port_phy_cfg_output {
u8 valid;
};
+/* hwrm_port_phy_cfg_cmd_err (size:64b/8B) */
+struct hwrm_port_phy_cfg_cmd_err {
+ u8 code;
+ #define PORT_PHY_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED 0x1UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_RETRY 0x2UL
+ #define PORT_PHY_CFG_CMD_ERR_CODE_LAST PORT_PHY_CFG_CMD_ERR_CODE_RETRY
+ u8 unused_0[7];
+};
+
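Note: this adds a command-specific error block for HWRM_PORT_PHY_CFG. A sketch of how a caller might decode it on failure, assuming the error structure overlays the response buffer as with other *_cmd_err types:

	struct hwrm_port_phy_cfg_cmd_err *err =
		(struct hwrm_port_phy_cfg_cmd_err *)bp->hwrm_cmd_resp_addr;

	switch (err->code) {
	case PORT_PHY_CFG_CMD_ERR_CODE_ILLEGAL_SPEED:
		netdev_err(bp->dev, "unsupported speed requested\n");
		break;
	case PORT_PHY_CFG_CMD_ERR_CODE_RETRY:
		/* transient condition; the command may be retried */
		break;
	default:
		break;
	}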
/* hwrm_port_phy_qcfg_input (size:192b/24B) */
struct hwrm_port_phy_qcfg_input {
__le16 req_type;
@@ -2030,6 +2039,33 @@ struct hwrm_port_qstats_output {
u8 valid;
};
+/* hwrm_port_qstats_ext_input (size:320b/40B) */
+struct hwrm_port_qstats_ext_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 unused_0[2];
+ __le64 tx_stat_host_addr;
+ __le64 rx_stat_host_addr;
+};
+
+/* hwrm_port_qstats_ext_output (size:128b/16B) */
+struct hwrm_port_qstats_ext_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 tx_stat_size;
+ __le16 rx_stat_size;
+ u8 unused_0[3];
+ u8 valid;
+};
+
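Note: the extended query DMAs TX/RX stat blocks to host memory and echoes back how many bytes firmware actually filled, keeping drivers forward-compatible as the blocks grow. A sketch using the HWRM helpers of this era; hw_rx_port_stats_ext_map is assumed to be a DMA-mapped rx_port_stats_ext buffer:

	struct hwrm_port_qstats_ext_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);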
/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
struct hwrm_port_lpbk_qstats_input {
__le16 req_type;
@@ -2552,7 +2588,11 @@ struct hwrm_queue_qportcfg_input {
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
__le16 port_id;
- u8 unused_0[2];
+ u8 drv_qmap_cap;
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_DISABLED 0x0UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED 0x1UL
+ #define QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_LAST QUEUE_QPORTCFG_REQ_DRV_QMAP_CAP_ENABLED
+ u8 unused_0;
};
/* hwrm_queue_qportcfg_output (size:256b/32B) */
@@ -2571,52 +2611,68 @@ struct hwrm_queue_qportcfg_output {
u8 queue_cos2bw_cfg_allowed;
u8 queue_id0;
u8 queue_id0_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
u8 queue_id1;
u8 queue_id1_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
u8 queue_id2;
u8 queue_id2_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
u8 queue_id3;
u8 queue_id3_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
u8 queue_id4;
u8 queue_id4_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
u8 queue_id5;
u8 queue_id5_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
u8 queue_id6;
u8 queue_id6_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
u8 queue_id7;
u8 queue_id7_service_profile;
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY_ROCE_CNP 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
u8 valid;
};
@@ -5180,6 +5236,29 @@ struct hwrm_stat_ctx_clr_stats_output {
u8 valid;
};
+/* hwrm_pcie_qstats_input (size:256b/32B) */
+struct hwrm_pcie_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 pcie_stat_size;
+ u8 unused_0[6];
+ __le64 pcie_stat_host_addr;
+};
+
+/* hwrm_pcie_qstats_output (size:128b/16B) */
+struct hwrm_pcie_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 pcie_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
/* tx_port_stats (size:3264b/408B) */
struct tx_port_stats {
__le64 tx_64b_frames;
@@ -5305,6 +5384,30 @@ struct rx_port_stats {
__le64 rx_stat_err;
};
+/* rx_port_stats_ext (size:320b/40B) */
+struct rx_port_stats_ext {
+ __le64 link_down_events;
+ __le64 continuous_pause_events;
+ __le64 resume_pause_events;
+ __le64 continuous_roce_pause_events;
+ __le64 resume_roce_pause_events;
+};
+
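Note: firmware may implement fewer extended counters than the host structure declares, so a consumer should trust only the size echoed back in rx_stat_size. A hedged sketch, where resp and rx_ext are assumed response/buffer pointers:

	u64 link_down = 0;
	int n = le16_to_cpu(resp->rx_stat_size) / sizeof(__le64);

	if (n > 0)
		link_down = le64_to_cpu(rx_ext->link_down_events);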
+/* pcie_ctx_hw_stats (size:768b/96B) */
+struct pcie_ctx_hw_stats {
+ __le64 pcie_pl_signal_integrity;
+ __le64 pcie_dl_signal_integrity;
+ __le64 pcie_tl_signal_integrity;
+ __le64 pcie_link_integrity;
+ __le64 pcie_tx_traffic_rate;
+ __le64 pcie_rx_traffic_rate;
+ __le64 pcie_tx_dllp_statistics;
+ __le64 pcie_rx_dllp_statistics;
+ __le64 pcie_equalization_time;
+ __le32 pcie_ltssm_histogram[4];
+ __le64 pcie_recovery_histogram;
+};
+
/* hwrm_fw_reset_input (size:192b/24B) */
struct hwrm_fw_reset_input {
__le16 req_type;
@@ -5313,14 +5416,15 @@ struct hwrm_fw_reset_input {
__le16 target_id;
__le64 resp_addr;
u8 embedded_proc_type;
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT 0x7UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT
u8 selfrst_status;
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
@@ -6253,8 +6357,7 @@ struct hwrm_selftest_exec_input {
#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
#define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL
#define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL
- u8 pcie_lane_num;
- u8 unused_0[6];
+ u8 unused_0[7];
};
/* hwrm_selftest_exec_output (size:128b/16B) */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index d87faad901fe..f952963d594e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -121,6 +121,23 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
return rc;
}
+int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_vf_info *vf;
+
+ if (bnxt_vf_ndo_prep(bp, vf_id))
+ return -EINVAL;
+
+ vf = &bp->pf.vf[vf_id];
+ if (trusted)
+ vf->flags |= BNXT_VF_TRUST;
+ else
+ vf->flags &= ~BNXT_VF_TRUST;
+
+ return 0;
+}
+
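Note: bnxt_set_vf_trust() only latches the per-VF flag; enforcement happens in the MAC-handling paths below. Wired up as the standard ndo hook (done elsewhere in this series), it becomes reachable from userspace, e.g. ip link set <pf> vf 0 trust on:

	static const struct net_device_ops bnxt_netdev_ops = {
		/* ... */
		.ndo_set_vf_trust	= bnxt_set_vf_trust,
	};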
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
struct ifla_vf_info *ivi)
{
@@ -147,6 +164,7 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
else
ivi->qos = 0;
ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
+ ivi->trusted = !!(vf->flags & BNXT_VF_TRUST);
if (!(vf->flags & BNXT_VF_LINK_FORCED))
ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
else if (vf->flags & BNXT_VF_LINK_UP)
@@ -492,18 +510,16 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (pf->active_vfs) {
- u16 n = 1;
+ u16 n = pf->active_vfs;

- if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
- n = pf->active_vfs;
-
- hw_resc->max_tx_rings -= vf_tx_rings * n;
- hw_resc->max_rx_rings -= vf_rx_rings * n;
- hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
- hw_resc->max_cp_rings -= vf_cp_rings * n;
+ hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
+ hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
+ hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
+ n;
+ hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
hw_resc->max_rsscos_ctxs -= pf->active_vfs;
- hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
- hw_resc->max_vnics -= vf_vnics * n;
+ hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
+ hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
rc = pf->active_vfs;
}
@@ -886,18 +902,19 @@ exec_fwd_resp_exit:
return rc;
}
-static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
struct hwrm_func_vf_cfg_input *req =
(struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
- /* Only allow VF to set a valid MAC address if the PF assigned MAC
- * address is zero
+ /* Allow the VF to set a valid MAC address if the VF is trusted or
+ * if the PF-assigned MAC address is zero
*/
if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
if (is_valid_ether_addr(req->dflt_mac_addr) &&
- !is_valid_ether_addr(vf->mac_addr)) {
+ ((vf->flags & BNXT_VF_TRUST) ||
+ (!is_valid_ether_addr(vf->mac_addr)))) {
ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
}
@@ -913,11 +930,17 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
bool mac_ok = false;
- /* VF MAC address must first match PF MAC address, if it is valid.
+ if (!is_valid_ether_addr((const u8 *)req->l2_addr))
+ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+
+ /* Allow the VF to set a valid MAC address if it is trusted, or
+ * if the address matches the MAC address in the PF's context.
* Otherwise, it must match the VF MAC address if firmware spec >=
* 1.2.2
*/
- if (is_valid_ether_addr(vf->mac_addr)) {
+ if (vf->flags & BNXT_VF_TRUST) {
+ mac_ok = true;
+ } else if (is_valid_ether_addr(vf->mac_addr)) {
if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
mac_ok = true;
} else if (is_valid_ether_addr(vf->vf_mac_addr)) {
@@ -951,7 +974,9 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
sizeof(phy_qcfg_resp));
mutex_unlock(&bp->hwrm_cmd_lock);
+ phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+ phy_qcfg_resp.valid = 1;
if (vf->flags & BNXT_VF_LINK_UP) {
/* if physical link is down, force link up on VF */
@@ -993,7 +1018,7 @@ static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
switch (req_type) {
case HWRM_FUNC_VF_CFG:
- rc = bnxt_vf_store_mac(bp, vf);
+ rc = bnxt_vf_configure_mac(bp, vf);
break;
case HWRM_CFA_L2_FILTER_ALLOC:
rc = bnxt_vf_validate_set_mac(bp, vf);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index dbc8d977fc5a..d10f6f6c7860 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016-2017 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -17,6 +17,7 @@ int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
int bnxt_set_vf_bw(struct net_device *, int, int, int);
int bnxt_set_vf_link_state(struct net_device *, int, int);
int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
+int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 997e10e8b863..347e4f946eb2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2016 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -101,13 +101,28 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
return 0;
}
+static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ int num_msix, idx, i;
+
+ num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
+ idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ for (i = 0; i < num_msix; i++) {
+ ent[i].vector = bp->irq_tbl[idx + i].vector;
+ ent[i].ring_idx = idx + i;
+ ent[i].db_offset = (idx + i) * 0x80;
+ }
+}
+
static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_msix_entry *ent, int num_msix)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
int max_idx, max_cp_rings;
- int avail_msix, i, idx;
+ int avail_msix, idx;
+ int rc = 0;
ASSERT_RTNL();
if (ulp_id != BNXT_ROCE_ULP)
@@ -116,23 +131,47 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
if (!(bp->flags & BNXT_FLAG_USING_MSIX))
return -ENODEV;
+ if (edev->ulp_tbl[ulp_id].msix_requested)
+ return -EAGAIN;
+
max_cp_rings = bnxt_get_max_func_cp_rings(bp);
- max_idx = min_t(int, bp->total_irqs, max_cp_rings);
- avail_msix = max_idx - bp->cp_nr_rings;
+ avail_msix = bnxt_get_avail_msix(bp, num_msix);
if (!avail_msix)
return -ENOMEM;
if (avail_msix > num_msix)
avail_msix = num_msix;
- idx = max_idx - avail_msix;
- for (i = 0; i < avail_msix; i++) {
- ent[i].vector = bp->irq_tbl[idx + i].vector;
- ent[i].ring_idx = idx + i;
- ent[i].db_offset = (idx + i) * 0x80;
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ idx = bp->cp_nr_rings;
+ } else {
+ max_idx = min_t(int, bp->total_irqs, max_cp_rings);
+ idx = max_idx - avail_msix;
}
- bnxt_set_max_func_irqs(bp, max_idx - avail_msix);
- bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
+ edev->ulp_tbl[ulp_id].msix_base = idx;
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+ if (bp->total_irqs < (idx + avail_msix)) {
+ if (netif_running(dev)) {
+ bnxt_close_nic(bp, true, false);
+ rc = bnxt_open_nic(bp, true, false);
+ } else {
+ rc = bnxt_reserve_rings(bp);
+ }
+ }
+ if (rc) {
+ edev->ulp_tbl[ulp_id].msix_requested = 0;
+ return -EAGAIN;
+ }
+
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
+ edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+ }
+ bnxt_fill_msix_vecs(bp, ent);
+ bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
+ bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
+ edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
return avail_msix;
}
@@ -146,11 +185,40 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
if (ulp_id != BNXT_ROCE_ULP)
return -EINVAL;
+ if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ return 0;
+
max_cp_rings = bnxt_get_max_func_cp_rings(bp);
msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
edev->ulp_tbl[ulp_id].msix_requested = 0;
- bnxt_set_max_func_irqs(bp, bp->total_irqs);
+ bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
+ edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
+ if (netif_running(dev)) {
+ bnxt_close_nic(bp, true, false);
+ bnxt_open_nic(bp, true, false);
+ }
+ return 0;
+}
+
+int bnxt_get_ulp_msix_num(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_en_dev *edev = bp->edev;
+
+ return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
+ }
+ return 0;
+}
+
+int bnxt_get_ulp_msix_base(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_en_dev *edev = bp->edev;
+
+ if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ }
return 0;
}
@@ -287,6 +355,58 @@ void bnxt_ulp_shutdown(struct bnxt *bp)
}
}
+void bnxt_ulp_irq_stop(struct bnxt *bp)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+
+ if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ return;
+
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+
+ if (!ulp->msix_requested)
+ return;
+
+ ops = rtnl_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_irq_stop)
+ return;
+ ops->ulp_irq_stop(ulp->handle);
+ }
+}
+
+void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+
+ if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ return;
+
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+ struct bnxt_msix_entry *ent = NULL;
+
+ if (!ulp->msix_requested)
+ return;
+
+ ops = rtnl_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_irq_restart)
+ return;
+
+ if (!err) {
+ ent = kcalloc(ulp->msix_requested, sizeof(*ent),
+ GFP_KERNEL);
+ if (!ent)
+ return;
+ bnxt_fill_msix_vecs(bp, ent);
+ }
+ ops->ulp_irq_restart(ulp->handle, ent);
+ kfree(ent);
+ }
+}
+
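Note: these two hooks let the L2 driver quiesce the RoCE driver around a ring reservation or reset and hand it a freshly filled vector table afterwards. A minimal sketch of the ULP side, with illustrative device type and helpers:

	static void bnxt_re_sketch_irq_stop(void *handle)
	{
		struct bnxt_re_dev *rdev = handle;	/* illustrative */

		/* stop using completion vectors; they may be remapped */
		bnxt_re_sketch_disable_irqs(rdev);	/* illustrative helper */
	}

	static void bnxt_re_sketch_irq_restart(void *handle,
					       struct bnxt_msix_entry *ent)
	{
		struct bnxt_re_dev *rdev = handle;

		if (!ent)
			return;	/* reservation failed; stay quiesced */
		bnxt_re_sketch_remap_irqs(rdev, ent);	/* illustrative helper */
	}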
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index d2471067dc37..df48ac71729f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2016 Broadcom Limited
+ * Copyright (c) 2016-2018 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,6 +20,12 @@
struct hwrm_async_event_cmpl;
struct bnxt;
+struct bnxt_msix_entry {
+ u32 vector;
+ u32 ring_idx;
+ u32 db_offset;
+};
+
struct bnxt_ulp_ops {
/* async_notifier() cannot sleep (in BH context) */
void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
@@ -27,12 +33,8 @@ struct bnxt_ulp_ops {
void (*ulp_start)(void *);
void (*ulp_sriov_config)(void *, int);
void (*ulp_shutdown)(void *);
-};
-
-struct bnxt_msix_entry {
- u32 vector;
- u32 ring_idx;
- u32 db_offset;
+ void (*ulp_irq_stop)(void *);
+ void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
struct bnxt_fw_msg {
@@ -49,6 +51,7 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
+ u16 msix_base;
atomic_t ref_count;
};
@@ -60,6 +63,7 @@ struct bnxt_en_dev {
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
+ #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
};
@@ -84,11 +88,15 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
return false;
}
+int bnxt_get_ulp_msix_num(struct bnxt *bp);
+int bnxt_get_ulp_msix_base(struct bnxt *bp);
void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
void bnxt_ulp_shutdown(struct bnxt *bp);
+void bnxt_ulp_irq_stop(struct bnxt *bp);
+void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b1e35a9accf1..264fb37dd341 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -603,6 +603,8 @@ static int bcmgenet_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rx_ring *ring;
+ unsigned int i;
ec->tx_max_coalesced_frames =
bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
@@ -613,15 +615,57 @@ static int bcmgenet_get_coalesce(struct net_device *dev,
ec->rx_coalesce_usecs =
bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
+ for (i = 0; i < priv->hw_params->rx_queues; i++) {
+ ring = &priv->rx_rings[i];
+ ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
+ }
+ ring = &priv->rx_rings[DESC_INDEX];
+ ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
+
return 0;
}
+static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
+ u32 usecs, u32 pkts)
+{
+ struct bcmgenet_priv *priv = ring->priv;
+ unsigned int i = ring->index;
+ u32 reg;
+
+ bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);
+
+ reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
+ reg &= ~DMA_TIMEOUT_MASK;
+ reg |= DIV_ROUND_UP(usecs * 1000, 8192);
+ bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
+}
+
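Note: the timeout register holds 8.192 us ticks (the 125 MHz system clock divided by 1024), so the conversion above rounds the requested microseconds up to the next tick. A worked instance:

	u32 usecs = 100;
	u32 ticks = DIV_ROUND_UP(usecs * 1000, 8192);	/* = 13 */
	/* effective timeout: 13 * 8.192 us ~= 106.5 us */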
+static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
+ struct ethtool_coalesce *ec)
+{
+ struct net_dim_cq_moder moder;
+ u32 usecs, pkts;
+
+ ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
+ ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
+ usecs = ring->rx_coalesce_usecs;
+ pkts = ring->rx_max_coalesced_frames;
+
+ if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
+ moder = net_dim_get_def_profile(ring->dim.dim.mode);
+ usecs = moder.usec;
+ pkts = moder.pkts;
+ }
+
+ ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
+ bcmgenet_set_rx_coalesce(ring, usecs, pkts);
+}
+
static int bcmgenet_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned int i;
- u32 reg;
/* Base system clock is 125 MHz, DMA timeout is this reference clock
* divided by 1024, which yields roughly 8.192 us, our maximum value
@@ -641,7 +685,8 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
* transmitted, or when the ring is empty.
*/
if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
- ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
+ ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low ||
+ ec->use_adaptive_tx_coalesce)
return -EOPNOTSUPP;
/* Program all TX queues with the same values, as there is no
@@ -655,25 +700,9 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
ec->tx_max_coalesced_frames,
DMA_MBUF_DONE_THRESH);
- for (i = 0; i < priv->hw_params->rx_queues; i++) {
- bcmgenet_rdma_ring_writel(priv, i,
- ec->rx_max_coalesced_frames,
- DMA_MBUF_DONE_THRESH);
-
- reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
- reg &= ~DMA_TIMEOUT_MASK;
- reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
- bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
- }
-
- bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
- ec->rx_max_coalesced_frames,
- DMA_MBUF_DONE_THRESH);
-
- reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
- reg &= ~DMA_TIMEOUT_MASK;
- reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
- bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);
+ for (i = 0; i < priv->hw_params->rx_queues; i++)
+ bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
+ bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
return 0;
}
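Note: with DIM plumbed into set_coalesce, adaptive RX moderation can be toggled from userspace with ethtool -C <iface> adaptive-rx on|off, while rx-usecs/rx-frames continue to set the static parameters.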
@@ -1321,7 +1350,7 @@ static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
dma_unmap_addr_set(cb, dma_addr, 0);
}
- return 0;
+ return NULL;
}
/* Simple helper to free a receive control block's resources */
@@ -1713,6 +1742,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
unsigned long dma_flag;
int len;
unsigned int rxpktprocessed = 0, rxpkttoprocess;
+ unsigned int bytes_processed = 0;
unsigned int p_index, mask;
unsigned int discards;
unsigned int chksum_ok = 0;
@@ -1832,6 +1862,8 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
len -= ETH_FCS_LEN;
}
+ bytes_processed += len;
+
/*Finish setting up the received SKB and send it to the kernel*/
skb->protocol = eth_type_trans(skb, priv->dev);
ring->packets++;
@@ -1854,6 +1886,9 @@ next:
bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
}
+ ring->dim.bytes = bytes_processed;
+ ring->dim.packets = rxpktprocessed;
+
return rxpktprocessed;
}
@@ -1862,6 +1897,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
struct bcmgenet_rx_ring *ring = container_of(napi,
struct bcmgenet_rx_ring, napi);
+ struct net_dim_sample dim_sample;
unsigned int work_done;
work_done = bcmgenet_desc_rx(ring, budget);
@@ -1871,9 +1907,29 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
ring->int_enable(ring);
}
+ if (ring->dim.use_dim) {
+ net_dim_sample(ring->dim.event_ctr, ring->dim.packets,
+ ring->dim.bytes, &dim_sample);
+ net_dim(&ring->dim.dim, dim_sample);
+ }
+
return work_done;
}
+static void bcmgenet_dim_work(struct work_struct *work)
+{
+ struct net_dim *dim = container_of(work, struct net_dim, work);
+ struct bcmgenet_net_dim *ndim =
+ container_of(dim, struct bcmgenet_net_dim, dim);
+ struct bcmgenet_rx_ring *ring =
+ container_of(ndim, struct bcmgenet_rx_ring, dim);
+ struct net_dim_cq_moder cur_profile =
+ net_dim_get_profile(dim->mode, dim->profile_ix);
+
+ bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
+ dim->state = NET_DIM_START_MEASURE;
+}
+
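Note: taken together with the ISR and NAPI changes below, the adaptive path is: the interrupt handler bumps event_ctr, the poll loop feeds a sample to net_dim(), and when the algorithm settles on a new profile it schedules this work item to reprogram the ring. A condensed sketch of the consumer side of include/linux/net_dim.h:

	/* per interrupt */
	ring->dim.event_ctr++;

	/* per NAPI poll */
	net_dim_sample(ring->dim.event_ctr, ring->dim.packets,
		       ring->dim.bytes, &dim_sample);
	net_dim(&ring->dim.dim, dim_sample);	/* may schedule dim.work */

	/* in the work handler, apply the profile DIM selected */
	moder = net_dim_get_profile(dim->mode, dim->profile_ix);
	bcmgenet_set_rx_coalesce(ring, moder.usec, moder.pkts);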
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
struct bcmgenet_rx_ring *ring)
@@ -2022,6 +2078,37 @@ static void init_umac(struct bcmgenet_priv *priv)
dev_dbg(kdev, "done init umac\n");
}
+static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
+ void (*cb)(struct work_struct *work))
+{
+ struct bcmgenet_net_dim *dim = &ring->dim;
+
+ INIT_WORK(&dim->dim.work, cb);
+ dim->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ dim->event_ctr = 0;
+ dim->packets = 0;
+ dim->bytes = 0;
+}
+
+static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
+{
+ struct bcmgenet_net_dim *dim = &ring->dim;
+ struct net_dim_cq_moder moder;
+ u32 usecs, pkts;
+
+ usecs = ring->rx_coalesce_usecs;
+ pkts = ring->rx_max_coalesced_frames;
+
+ /* If DIM was enabled, re-apply default parameters */
+ if (dim->use_dim) {
+ moder = net_dim_get_def_profile(dim->dim.mode);
+ usecs = moder.usec;
+ pkts = moder.pkts;
+ }
+
+ bcmgenet_set_rx_coalesce(ring, usecs, pkts);
+}
+
/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
unsigned int index, unsigned int size,
@@ -2111,13 +2198,15 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
if (ret)
return ret;
+ bcmgenet_init_dim(ring, bcmgenet_dim_work);
+ bcmgenet_init_rx_coalesce(ring);
+
/* Initialize Rx NAPI */
netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
NAPI_POLL_WEIGHT);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
- bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
bcmgenet_rdma_ring_writel(priv, index,
((size << DMA_RING_SIZE_SHIFT) |
RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
@@ -2276,10 +2365,12 @@ static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
for (i = 0; i < priv->hw_params->rx_queues; ++i) {
ring = &priv->rx_rings[i];
napi_disable(&ring->napi);
+ cancel_work_sync(&ring->dim.dim.work);
}
ring = &priv->rx_rings[DESC_INDEX];
napi_disable(&ring->napi);
+ cancel_work_sync(&ring->dim.dim.work);
}
static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
@@ -2557,6 +2648,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
continue;
rx_ring = &priv->rx_rings[index];
+ rx_ring->dim.event_ctr++;
if (likely(napi_schedule_prep(&rx_ring->napi))) {
rx_ring->int_disable(rx_ring);
@@ -2601,6 +2693,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
if (status & UMAC_IRQ_RXDMA_DONE) {
rx_ring = &priv->rx_rings[DESC_INDEX];
+ rx_ring->dim.event_ctr++;
if (likely(napi_schedule_prep(&rx_ring->napi))) {
rx_ring->int_disable(rx_ring);
@@ -3351,6 +3444,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
struct net_device *dev;
const void *macaddr;
struct resource *r;
+ unsigned int i;
int err = -EIO;
const char *phy_mode_str;
@@ -3479,6 +3573,11 @@ static int bcmgenet_probe(struct platform_device *pdev)
netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
+ /* Set default coalescing parameters */
+ for (i = 0; i < priv->hw_params->rx_queues; i++)
+ priv->rx_rings[i].rx_max_coalesced_frames = 1;
+ priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
+
/* libphy will determine the link state */
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 3c50431ccd2a..b773bc07edf7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -16,6 +16,7 @@
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/phy.h>
+#include <linux/net_dim.h>
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -572,6 +573,14 @@ struct bcmgenet_tx_ring {
struct bcmgenet_priv *priv;
};
+struct bcmgenet_net_dim {
+ u16 use_dim;
+ u16 event_ctr;
+ unsigned long packets;
+ unsigned long bytes;
+ struct net_dim dim;
+};
+
struct bcmgenet_rx_ring {
struct napi_struct napi; /* Rx NAPI struct */
unsigned long bytes;
@@ -586,6 +595,9 @@ struct bcmgenet_rx_ring {
unsigned int cb_ptr; /* Rx ring initial CB ptr */
unsigned int end_ptr; /* Rx ring end CB ptr */
unsigned int old_discards;
+ struct bcmgenet_net_dim dim;
+ u32 rx_max_coalesced_frames;
+ u32 rx_coalesce_usecs;
void (*int_enable)(struct bcmgenet_rx_ring *);
void (*int_disable)(struct bcmgenet_rx_ring *);
struct bcmgenet_priv *priv;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index ecdef42f0ae6..ef4a0c326736 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -63,24 +63,24 @@ MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
/* 0 quiet, 1 normal messages .. 7 verbose. */
static int debug = 1;
-module_param(debug, int, S_IRUGO);
+module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug messages");
#ifdef CONFIG_SBMAC_COALESCE
static int int_pktcnt_tx = 255;
-module_param(int_pktcnt_tx, int, S_IRUGO);
+module_param(int_pktcnt_tx, int, 0444);
MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");
static int int_timeout_tx = 255;
-module_param(int_timeout_tx, int, S_IRUGO);
+module_param(int_timeout_tx, int, 0444);
MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");
static int int_pktcnt_rx = 64;
-module_param(int_pktcnt_rx, int, S_IRUGO);
+module_param(int_pktcnt_rx, int, 0444);
MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");
static int int_timeout_rx = 64;
-module_param(int_timeout_rx, int, S_IRUGO);
+module_param(int_timeout_rx, int, 0444);
MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
#endif
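Note: these are mechanical conversions: S_IRUGO expands to (S_IRUSR | S_IRGRP | S_IROTH), i.e. 0400 | 0040 | 0004 == 0444, so the octal literal is equivalent and is the form checkpatch now prefers for permission arguments.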
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f2593978ae75..08bbb639be1a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -10799,11 +10799,11 @@ static ssize_t tg3_show_temp(struct device *dev,
}
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
TG3_TEMP_SENSOR_OFFSET);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
+static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
TG3_TEMP_CAUTION_OFFSET);
-static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
+static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
TG3_TEMP_MAX_OFFSET);
static struct attribute *tg3_attrs[] = {