Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r-- | drivers/net/ethernet/broadcom/Kconfig             |   3
-rw-r--r-- | drivers/net/ethernet/broadcom/b44.c               |   8
-rw-r--r-- | drivers/net/ethernet/broadcom/bcm4908_enet.c      |  57
-rw-r--r-- | drivers/net/ethernet/broadcom/bcmsysport.c        |  23
-rw-r--r-- | drivers/net/ethernet/broadcom/bcmsysport.h        |  11
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2.c              |   7
-rw-r--r-- | drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c  |   9
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.c         | 112
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt.h         |   3
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c |   4
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 134
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h     | 281
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c     |  39
-rw-r--r-- | drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h     |   7
-rw-r--r-- | drivers/net/ethernet/broadcom/cnic.c              |   8
-rw-r--r-- | drivers/net/ethernet/broadcom/genet/bcmgenet.c    |  49
-rw-r--r-- | drivers/net/ethernet/broadcom/genet/bcmmii.c      |  19
-rw-r--r-- | drivers/net/ethernet/broadcom/tg3.c               |  22
18 files changed, 589 insertions, 207 deletions
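
The b44.c and bcmsysport.c hunks in the diff below convert the statistics readers from the u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() helpers to the plain u64_stats_fetch_begin()/u64_stats_fetch_retry() pair. The sketch below shows the reader-side retry loop after that conversion; the foo_* structure, function, and field names are hypothetical and not taken from the patch.

```c
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>

/* Hypothetical per-device counters protected by a u64_stats_sync. */
struct foo_hw_stats {
	struct u64_stats_sync syncp;
	u64 rx_packets;
	u64 rx_bytes;
};

static void foo_get_stats64(struct foo_hw_stats *hw,
			    struct rtnl_link_stats64 *stats)
{
	unsigned int start;

	do {
		/* Retry the snapshot if a writer updated the counters
		 * while we were reading them.
		 */
		start = u64_stats_fetch_begin(&hw->syncp);
		stats->rx_packets = hw->rx_packets;
		stats->rx_bytes   = hw->rx_bytes;
	} while (u64_stats_fetch_retry(&hw->syncp, start));
}
```

The calling convention is unchanged; only the _irq suffix is dropped, so the conversion in each driver is a mechanical rename of the two helpers.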
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 55dfdb34e37b..f4ca0c6c0f51 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -71,13 +71,14 @@ config BCM63XX_ENET config BCMGENET tristate "Broadcom GENET internal MAC support" depends on HAS_IOMEM + depends on PTP_1588_CLOCK_OPTIONAL || !ARCH_BCM2835 select MII select PHYLIB select FIXED_PHY select BCM7XXX_PHY select MDIO_BCM_UNIMAC select DIMLIB - select BROADCOM_PHY if (ARCH_BCM2835 && PTP_1588_CLOCK_OPTIONAL) + select BROADCOM_PHY if ARCH_BCM2835 help This driver supports the built-in Ethernet MACs found in the Broadcom BCM7xxx Set Top Box family chipset. diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 7f876721596c..b751dc8486dc 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1680,7 +1680,7 @@ static void b44_get_stats64(struct net_device *dev, unsigned int start; do { - start = u64_stats_fetch_begin_irq(&hwstat->syncp); + start = u64_stats_fetch_begin(&hwstat->syncp); /* Convert HW stats into rtnl_link_stats64 stats. */ nstat->rx_packets = hwstat->rx_pkts; @@ -1714,7 +1714,7 @@ static void b44_get_stats64(struct net_device *dev, /* Carrier lost counter seems to be broken for some devices */ nstat->tx_carrier_errors = hwstat->tx_carrier_lost; #endif - } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); + } while (u64_stats_fetch_retry(&hwstat->syncp, start)); } @@ -2082,12 +2082,12 @@ static void b44_get_ethtool_stats(struct net_device *dev, do { data_src = &hwstat->tx_good_octets; data_dst = data; - start = u64_stats_fetch_begin_irq(&hwstat->syncp); + start = u64_stats_fetch_begin(&hwstat->syncp); for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++) *data_dst++ = *data_src++; - } while (u64_stats_fetch_retry_irq(&hwstat->syncp, start)); + } while (u64_stats_fetch_retry(&hwstat->syncp, start)); } static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index a737b1913cf9..33d86683af50 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -36,13 +36,24 @@ #define ENET_MAX_ETH_OVERHEAD (ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \ ETH_FCS_LEN + 4) /* 32 */ +#define ENET_RX_SKB_BUF_SIZE (NET_SKB_PAD + NET_IP_ALIGN + \ + ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \ + ENET_MTU_MAX + ETH_FCS_LEN + 4) +#define ENET_RX_SKB_BUF_ALLOC_SIZE (SKB_DATA_ALIGN(ENET_RX_SKB_BUF_SIZE) + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define ENET_RX_BUF_DMA_OFFSET (NET_SKB_PAD + NET_IP_ALIGN) +#define ENET_RX_BUF_DMA_SIZE (ENET_RX_SKB_BUF_SIZE - ENET_RX_BUF_DMA_OFFSET) + struct bcm4908_enet_dma_ring_bd { __le32 ctl; __le32 addr; } __packed; struct bcm4908_enet_dma_ring_slot { - struct sk_buff *skb; + union { + void *buf; /* RX */ + struct sk_buff *skb; /* TX */ + }; unsigned int len; dma_addr_t dma_addr; }; @@ -260,22 +271,21 @@ static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int u32 tmp; int err; - slot->len = ENET_MTU_MAX + ENET_MAX_ETH_OVERHEAD; - - slot->skb = netdev_alloc_skb(enet->netdev, slot->len); - if (!slot->skb) + slot->buf = napi_alloc_frag(ENET_RX_SKB_BUF_ALLOC_SIZE); + if (!slot->buf) return -ENOMEM; - slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE); + slot->dma_addr = dma_map_single(dev, slot->buf + 
ENET_RX_BUF_DMA_OFFSET, + ENET_RX_BUF_DMA_SIZE, DMA_FROM_DEVICE); err = dma_mapping_error(dev, slot->dma_addr); if (err) { dev_err(dev, "Failed to map DMA buffer: %d\n", err); - kfree_skb(slot->skb); - slot->skb = NULL; + skb_free_frag(slot->buf); + slot->buf = NULL; return err; } - tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT; + tmp = ENET_RX_BUF_DMA_SIZE << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT; tmp |= DMA_CTL_STATUS_OWN; if (idx == enet->rx_ring.length - 1) tmp |= DMA_CTL_STATUS_WRAP; @@ -315,11 +325,11 @@ static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet) for (i = rx_ring->length - 1; i >= 0; i--) { slot = &rx_ring->slots[i]; - if (!slot->skb) + if (!slot->buf) continue; dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE); - kfree_skb(slot->skb); - slot->skb = NULL; + skb_free_frag(slot->buf); + slot->buf = NULL; } } @@ -495,6 +505,7 @@ static int bcm4908_enet_stop(struct net_device *netdev) netif_carrier_off(netdev); napi_disable(&rx_ring->napi); napi_disable(&tx_ring->napi); + netdev_reset_queue(netdev); bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring); bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring); @@ -554,6 +565,8 @@ static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_devic if (ring->write_idx + 1 == ring->length - 1) tmp |= DMA_CTL_STATUS_WRAP; + netdev_sent_queue(enet->netdev, skb->len); + buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr); buf_desc->ctl = cpu_to_le32(tmp); @@ -575,6 +588,7 @@ static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight) while (handled < weight) { struct bcm4908_enet_dma_ring_bd *buf_desc; struct bcm4908_enet_dma_ring_slot slot; + struct sk_buff *skb; u32 ctl; int len; int err; @@ -598,16 +612,24 @@ static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight) if (len < ETH_ZLEN || (ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) { - kfree_skb(slot.skb); + skb_free_frag(slot.buf); enet->netdev->stats.rx_dropped++; break; } - dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE); + dma_unmap_single(dev, slot.dma_addr, ENET_RX_BUF_DMA_SIZE, DMA_FROM_DEVICE); + + skb = build_skb(slot.buf, ENET_RX_SKB_BUF_ALLOC_SIZE); + if (unlikely(!skb)) { + skb_free_frag(slot.buf); + enet->netdev->stats.rx_dropped++; + break; + } + skb_reserve(skb, ENET_RX_BUF_DMA_OFFSET); + skb_put(skb, len - ETH_FCS_LEN); + skb->protocol = eth_type_trans(skb, enet->netdev); - skb_put(slot.skb, len - ETH_FCS_LEN); - slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev); - netif_receive_skb(slot.skb); + netif_receive_skb(skb); enet->netdev->stats.rx_packets++; enet->netdev->stats.rx_bytes += len; @@ -652,6 +674,7 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight) tx_ring->read_idx = 0; } + netdev_completed_queue(enet->netdev, handled, bytes); enet->netdev->stats.tx_packets += handled; enet->netdev->stats.tx_bytes += bytes; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 425d6ccd5413..38d0cdaf22a5 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -295,6 +295,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { /* RBUF misc statistics */ STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), + /* RDMA misc statistics */ + STAT_RDMA("rdma_ovflow_cnt", mib.rdma_ovflow_cnt, RDMA_OVFL_DISC_CNTR), 
STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), @@ -333,6 +335,7 @@ static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type) case BCM_SYSPORT_STAT_NETDEV64: case BCM_SYSPORT_STAT_RXCHK: case BCM_SYSPORT_STAT_RBUF: + case BCM_SYSPORT_STAT_RDMA: case BCM_SYSPORT_STAT_SOFT: return true; default: @@ -436,6 +439,14 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) if (val == ~0) rbuf_writel(priv, 0, s->reg_offset); break; + case BCM_SYSPORT_STAT_RDMA: + if (!priv->is_lite) + continue; + + val = rdma_readl(priv, s->reg_offset); + if (val == ~0) + rdma_writel(priv, 0, s->reg_offset); + break; } j += s->stat_sizeof; @@ -457,10 +468,10 @@ static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv, for (q = 0; q < priv->netdev->num_tx_queues; q++) { ring = &priv->tx_rings[q]; do { - start = u64_stats_fetch_begin_irq(&priv->syncp); + start = u64_stats_fetch_begin(&priv->syncp); bytes = ring->bytes; packets = ring->packets; - } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); + } while (u64_stats_fetch_retry(&priv->syncp, start)); *tx_bytes += bytes; *tx_packets += packets; @@ -504,9 +515,9 @@ static void bcm_sysport_get_stats(struct net_device *dev, if (s->stat_sizeof == sizeof(u64) && s->type == BCM_SYSPORT_STAT_NETDEV64) { do { - start = u64_stats_fetch_begin_irq(syncp); + start = u64_stats_fetch_begin(syncp); data[i] = *(u64 *)p; - } while (u64_stats_fetch_retry_irq(syncp, start)); + } while (u64_stats_fetch_retry(syncp, start)); } else data[i] = *(u32 *)p; j++; @@ -1878,10 +1889,10 @@ static void bcm_sysport_get_stats64(struct net_device *dev, &stats->tx_packets); do { - start = u64_stats_fetch_begin_irq(&priv->syncp); + start = u64_stats_fetch_begin(&priv->syncp); stats->rx_packets = stats64->rx_packets; stats->rx_bytes = stats64->rx_bytes; - } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); + } while (u64_stats_fetch_retry(&priv->syncp, start)); } static void bcm_sysport_netif_start(struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 5af16e5f9ad0..335cf6631db5 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -290,6 +290,7 @@ struct bcm_rsb { #define RDMA_WRITE_PTR_HI 0x1010 #define RDMA_WRITE_PTR_LO 0x1014 +#define RDMA_OVFL_DISC_CNTR 0x1018 #define RDMA_PROD_INDEX 0x1018 #define RDMA_PROD_INDEX_MASK 0xffff @@ -565,6 +566,7 @@ struct bcm_sysport_mib { u32 rxchk_other_pkt_disc; u32 rbuf_ovflow_cnt; u32 rbuf_err_cnt; + u32 rdma_ovflow_cnt; u32 alloc_rx_buff_failed; u32 rx_dma_failed; u32 tx_dma_failed; @@ -581,6 +583,7 @@ enum bcm_sysport_stat_type { BCM_SYSPORT_STAT_RUNT, BCM_SYSPORT_STAT_RXCHK, BCM_SYSPORT_STAT_RBUF, + BCM_SYSPORT_STAT_RDMA, BCM_SYSPORT_STAT_SOFT, }; @@ -627,6 +630,14 @@ enum bcm_sysport_stat_type { .reg_offset = ofs, \ } +#define STAT_RDMA(str, m, ofs) { \ + .stat_string = str, \ + .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \ + .stat_offset = offsetof(struct bcm_sysport_priv, m), \ + .type = BCM_SYSPORT_STAT_RDMA, \ + .reg_offset = ofs, \ +} + /* TX bytes and packets */ #define NUM_SYSPORT_TXQ_STAT 2 diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index fec57f1982c8..9f473854b0f4 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -3045,7 +3045,7 @@ 
error: dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, DMA_FROM_DEVICE); - skb = build_skb(data, 0); + skb = slab_build_skb(data); if (!skb) { kfree(data); goto error; @@ -5415,8 +5415,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) bp->rx_buf_use_size = rx_size; /* hw alignment + build_skb() overhead*/ - bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) + - NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + bp->rx_buf_size = kmalloc_size_roundup( + SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) + + NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET; bp->rx_ring_size = size; bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 51b1690fd045..5d1e4fe335aa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -13671,19 +13671,20 @@ static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir, return bnx2x_func_state_change(bp, &func_params); } -static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int bnx2x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); int rc; int drift_dir = 1; int val, period, period1, period2, dif, dif1, dif2; int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0; + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); - DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb); + DP(BNX2X_MSG_PTP, "PTP adjfine called, ppb = %d\n", ppb); if (!netif_running(bp->dev)) { DP(BNX2X_MSG_PTP, - "PTP adjfreq called while the interface is down\n"); + "PTP adjfine called while the interface is down\n"); return -ENETDOWN; } @@ -13818,7 +13819,7 @@ void bnx2x_register_phc(struct bnx2x *bp) bp->ptp_clock_info.n_ext_ts = 0; bp->ptp_clock_info.n_per_out = 0; bp->ptp_clock_info.pps = 0; - bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; + bp->ptp_clock_info.adjfine = bnx2x_ptp_adjfine; bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime; bp->ptp_clock_info.settime64 = bnx2x_ptp_settime; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9f8a6ce4b356..4c7d07c684c4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -389,6 +389,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } + if (unlikely(ipv6_hopopt_jumbo_remove(skb))) + goto tx_free; + length = skb->len; len = skb_headlen(skb); last_frag = skb_shinfo(skb)->nr_frags; @@ -5250,7 +5253,7 @@ int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) return 1; } -static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) +static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) { bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG); u16 i, j; @@ -5263,8 +5266,8 @@ static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) } } -static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, - struct bnxt_vnic_info *vnic) +static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, + struct bnxt_vnic_info *vnic) { __le16 *ring_tbl = vnic->rss_table; struct bnxt_rx_ring_info *rxr; @@ -5285,12 +5288,27 @@ static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, } } -static void 
bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) +static void +__bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req, + struct bnxt_vnic_info *vnic) { if (bp->flags & BNXT_FLAG_CHIP_P5) - __bnxt_fill_hw_rss_tbl_p5(bp, vnic); + bnxt_fill_hw_rss_tbl_p5(bp, vnic); else - __bnxt_fill_hw_rss_tbl(bp, vnic); + bnxt_fill_hw_rss_tbl(bp, vnic); + + if (bp->rss_hash_delta) { + req->hash_type = cpu_to_le32(bp->rss_hash_delta); + if (bp->rss_hash_cfg & bp->rss_hash_delta) + req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE; + else + req->flags |= VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE; + } else { + req->hash_type = cpu_to_le32(bp->rss_hash_cfg); + } + req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); + req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); } static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) @@ -5307,14 +5325,8 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) if (rc) return rc; - if (set_rss) { - bnxt_fill_hw_rss_tbl(bp, vnic); - req->hash_type = cpu_to_le32(bp->rss_hash_cfg); - req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); - req->hash_key_tbl_addr = - cpu_to_le64(vnic->rss_hash_key_dma_addr); - } + if (set_rss) + __bnxt_hwrm_vnic_set_rss(bp, req, vnic); req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); return hwrm_req_send(bp, req); } @@ -5335,10 +5347,7 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) if (!set_rss) return hwrm_req_send(bp, req); - bnxt_fill_hw_rss_tbl(bp, vnic); - req->hash_type = cpu_to_le32(bp->rss_hash_cfg); - req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); + __bnxt_hwrm_vnic_set_rss(bp, req, vnic); ring_tbl_map = vnic->rss_table_dma_addr; nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); @@ -5357,6 +5366,25 @@ exit: return rc; } +static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct hwrm_vnic_rss_qcfg_output *resp; + struct hwrm_vnic_rss_qcfg_input *req; + + if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG)) + return; + + /* all contexts configured to same hash_type, zero always exists */ + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + resp = hwrm_req_hold(bp, req); + if (!hwrm_req_send(bp, req)) { + bp->rss_hash_cfg = le32_to_cpu(resp->hash_type) ?: bp->rss_hash_cfg; + bp->rss_hash_delta = 0; + } + hwrm_req_drop(bp, req); +} + static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; @@ -5614,6 +5642,8 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) (BNXT_CHIP_P5_THOR(bp) && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))) bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP; + if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP) + bp->fw_cap |= BNXT_FW_CAP_RSS_HASH_TYPE_DELTA; bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported); if (bp->max_tpa_v2) { if (BNXT_CHIP_P5_THOR(bp)) @@ -6958,8 +6988,11 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; } - if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) + if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) { bp->flags |= BNXT_FLAG_MULTI_HOST; + 
if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) + bp->fw_cap &= ~BNXT_FW_CAP_PTP_RTC; + } if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; @@ -8808,6 +8841,8 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init) rc = bnxt_setup_vnic(bp, 0); if (rc) goto err_out; + if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA) + bnxt_hwrm_update_rss_hash_cfg(bp); if (bp->flags & BNXT_FLAG_RFS) { rc = bnxt_alloc_rfs_vnics(bp); @@ -11283,6 +11318,7 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, u8 **nextp) { struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off); + struct hop_jumbo_hdr *jhdr; int hdr_count = 0; u8 *nexthdr; int start; @@ -11310,9 +11346,27 @@ static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off, if (hdrlen > 64) return false; + + /* The ext header may be a hop-by-hop header inserted for + * big TCP purposes. This will be removed before sending + * from NIC, so do not count it. + */ + if (*nexthdr == NEXTHDR_HOP) { + if (likely(skb->len <= GRO_LEGACY_MAX_SIZE)) + goto increment_hdr; + + jhdr = (struct hop_jumbo_hdr *)hp; + if (jhdr->tlv_type != IPV6_TLV_JUMBO || jhdr->hdrlen != 0 || + jhdr->nexthdr != IPPROTO_TCP) + goto increment_hdr; + + goto next_hdr; + } +increment_hdr: + hdr_count++; +next_hdr: nexthdr = &hp->nexthdr; start += hdrlen; - hdr_count++; } if (nextp) { /* Caller will check inner protocol */ @@ -12252,6 +12306,8 @@ static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp) VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 | VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 | VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6; + if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA) + bp->rss_hash_delta = bp->rss_hash_cfg; if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) { bp->flags |= BNXT_FLAG_UDP_RSS_CAP; bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 | @@ -13082,13 +13138,6 @@ int bnxt_get_port_parent_id(struct net_device *dev, return 0; } -static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev) -{ - struct bnxt *bp = netdev_priv(dev); - - return &bp->dl_port; -} - static const struct net_device_ops bnxt_netdev_ops = { .ndo_open = bnxt_open, .ndo_start_xmit = bnxt_start_xmit, @@ -13120,7 +13169,6 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_xdp_xmit = bnxt_xdp_xmit, .ndo_bridge_getlink = bnxt_bridge_getlink, .ndo_bridge_setlink = bnxt_bridge_setlink, - .ndo_get_devlink_port = bnxt_get_devlink_port, }; static void bnxt_remove_one(struct pci_dev *pdev) @@ -13131,9 +13179,6 @@ static void bnxt_remove_one(struct pci_dev *pdev) if (BNXT_PF(bp)) bnxt_sriov_disable(bp); - if (BNXT_PF(bp)) - devlink_port_type_clear(&bp->dl_port); - bnxt_ptp_clear(bp); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); @@ -13546,6 +13591,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; bp = netdev_priv(dev); + SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port); bp->board_idx = ent->driver_data; bp->msg_enable = BNXT_DEF_MSG_ENABLE; bnxt_set_max_func_irqs(bp, max_irqs); @@ -13633,6 +13679,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features &= ~NETIF_F_LRO; dev->priv_flags |= IFF_UNICAST_FLT; + netif_set_tso_max_size(dev, GSO_MAX_SIZE); + #ifdef CONFIG_BNXT_SRIOV init_waitqueue_head(&bp->sriov_cfg_wait); #endif @@ -13721,8 +13769,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_cleanup; - if (BNXT_PF(bp)) - 
devlink_port_type_eth_set(&bp->dl_port, bp->dev); bnxt_dl_fw_reporters_create(bp); bnxt_print_device_info(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index d5fa43cfe524..41c6dd0ae447 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1901,6 +1901,7 @@ struct bnxt { u16 *rss_indir_tbl; u16 rss_indir_tbl_entries; u32 rss_hash_cfg; + u32 rss_hash_delta; u16 max_mtu; u8 max_tc; @@ -1966,6 +1967,7 @@ struct bnxt { #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 0x00010000 #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000 #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000 + #define BNXT_FW_CAP_RSS_HASH_TYPE_DELTA 0x00080000 #define BNXT_FW_CAP_ERR_RECOVER_RELOAD 0x00100000 #define BNXT_FW_CAP_HOT_RESET 0x00200000 #define BNXT_FW_CAP_PTP_RTC 0x00400000 @@ -2117,6 +2119,7 @@ struct bnxt { #define BNXT_PHY_FL_NO_FCS PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS #define BNXT_PHY_FL_NO_PAUSE (PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED << 8) #define BNXT_PHY_FL_NO_PFC (PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED << 8) +#define BNXT_PHY_FL_BANK_SEL (PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED << 8) u8 num_tests; struct bnxt_test_info *test_info; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 8a6f788f6294..26913dc816d3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -892,10 +892,6 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, u32 ver = 0; int rc; - rc = devlink_info_driver_name_put(req, DRV_MODULE_NAME); - if (rc) - return rc; - if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_DSN_VALID)) { sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X", bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4], diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8cad15c458b3..cbf17fcfb7ab 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1234,6 +1234,8 @@ static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd) if (bp->rss_hash_cfg == rss_hash_cfg) return 0; + if (bp->fw_cap & BNXT_FW_CAP_RSS_HASH_TYPE_DELTA) + bp->rss_hash_delta = bp->rss_hash_cfg ^ rss_hash_cfg; bp->rss_hash_cfg = rss_hash_cfg; if (netif_running(bp->dev)) { bnxt_close_nic(bp, false, false); @@ -2005,6 +2007,14 @@ static void bnxt_get_fec_stats(struct net_device *dev, rx = bp->rx_port_stats_ext.sw_stats; fec_stats->corrected_bits.total = *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits)); + + if (bp->fw_rx_stats_ext_size <= BNXT_RX_STATS_EXT_NUM_LEGACY) + return; + + fec_stats->corrected_blocks.total = + *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks)); + fec_stats->uncorrectable_blocks.total = + *(rx + BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks)); } static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, @@ -2514,6 +2524,7 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, #define MSG_INTERNAL_ERR "PKG install error : Internal error" #define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram" #define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram" +#define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error" #define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected" #define MSG_GENERIC_FAILURE_ERR 
"HWRM_NVM_INSTALL_UPDATE failure" @@ -2564,6 +2575,32 @@ static int nvm_update_err_to_stderr(struct net_device *dev, u8 result, #define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE)) #define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST)) +static int bnxt_resize_update_entry(struct net_device *dev, size_t fw_size, + struct netlink_ext_ack *extack) +{ + u32 item_len; + int rc; + + rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, NULL, + &item_len, NULL); + if (rc) { + BNXT_NVM_ERR_MSG(dev, extack, MSG_NO_PKG_UPDATE_AREA_ERR); + return rc; + } + + if (fw_size > item_len) { + rc = bnxt_flash_nvram(dev, BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, 0, 1, + round_up(fw_size, 4096), NULL, 0); + if (rc) { + BNXT_NVM_ERR_MSG(dev, extack, MSG_RESIZE_UPDATE_ERR); + return rc; + } + } + return 0; +} + int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, u32 install_type, struct netlink_ext_ack *extack) { @@ -2580,6 +2617,11 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware u16 index; int rc; + /* resize before flashing larger image than available space */ + rc = bnxt_resize_update_entry(dev, fw->size, extack); + if (rc) + return rc; + bnxt_hwrm_fw_set_time(bp); rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY); @@ -3146,8 +3188,9 @@ static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata) } static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, - u16 page_number, u16 start_addr, - u16 data_length, u8 *buf) + u16 page_number, u8 bank, + u16 start_addr, u16 data_length, + u8 *buf) { struct hwrm_port_phy_i2c_read_output *output; struct hwrm_port_phy_i2c_read_input *req; @@ -3168,8 +3211,13 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, data_length -= xfer_size; req->page_offset = cpu_to_le16(start_addr + byte_offset); req->data_length = xfer_size; - req->enables = cpu_to_le32(start_addr + byte_offset ? - PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0); + req->enables = + cpu_to_le32((start_addr + byte_offset ? + PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : + 0) | + (bank ? 
+ PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER : + 0)); rc = hwrm_req_send(bp, req); if (!rc) memcpy(buf + byte_offset, output->data, xfer_size); @@ -3199,7 +3247,7 @@ static int bnxt_get_module_info(struct net_device *dev, if (bp->hwrm_spec_code < 0x10202) return -EOPNOTSUPP; - rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, 0, SFF_DIAG_SUPPORT_OFFSET + 1, data); if (!rc) { @@ -3244,7 +3292,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev, if (start < ETH_MODULE_SFF_8436_LEN) { if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN) length = ETH_MODULE_SFF_8436_LEN - start; - rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0, start, length, data); if (rc) return rc; @@ -3256,12 +3304,68 @@ static int bnxt_get_module_eeprom(struct net_device *dev, /* Read A2 portion of the EEPROM */ if (length) { start -= ETH_MODULE_SFF_8436_LEN; - rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 0, 0, start, length, data); } return rc; } +static int bnxt_get_module_status(struct bnxt *bp, struct netlink_ext_ack *extack) +{ + if (bp->link_info.module_status <= + PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG) + return 0; + + switch (bp->link_info.module_status) { + case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN: + NL_SET_ERR_MSG_MOD(extack, "Transceiver module is powering down"); + break; + case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED: + NL_SET_ERR_MSG_MOD(extack, "Transceiver module not inserted"); + break; + case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT: + NL_SET_ERR_MSG_MOD(extack, "Transceiver module disabled due to current fault"); + break; + default: + NL_SET_ERR_MSG_MOD(extack, "Unknown error"); + break; + } + return -EINVAL; +} + +static int bnxt_get_module_eeprom_by_page(struct net_device *dev, + const struct ethtool_module_eeprom *page_data, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + rc = bnxt_get_module_status(bp, extack); + if (rc) + return rc; + + if (bp->hwrm_spec_code < 0x10202) { + NL_SET_ERR_MSG_MOD(extack, "Firmware version too old"); + return -EINVAL; + } + + if (page_data->bank && !(bp->phy_flags & BNXT_PHY_FL_BANK_SEL)) { + NL_SET_ERR_MSG_MOD(extack, "Firmware not capable for bank selection"); + return -EINVAL; + } + + rc = bnxt_read_sfp_module_eeprom_info(bp, page_data->i2c_address << 1, + page_data->page, page_data->bank, + page_data->offset, + page_data->length, + page_data->data); + if (rc) { + NL_SET_ERR_MSG_MOD(extack, "Module`s eeprom read failed"); + return rc; + } + return page_data->length; +} + static int bnxt_nway_reset(struct net_device *dev) { int rc = 0; @@ -4018,6 +4122,20 @@ static void bnxt_get_rmon_stats(struct net_device *dev, *ranges = bnxt_rmon_ranges; } +static void bnxt_get_link_ext_stats(struct net_device *dev, + struct ethtool_link_ext_stats *stats) +{ + struct bnxt *bp = netdev_priv(dev); + u64 *rx; + + if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS_EXT)) + return; + + rx = bp->rx_port_stats_ext.sw_stats; + stats->link_down_events = + *(rx + BNXT_RX_STATS_EXT_OFFSET(link_down_events)); +} + void bnxt_ethtool_free(struct bnxt *bp) { kfree(bp->test_info); @@ -4067,10 +4185,12 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_eeprom = bnxt_get_eeprom, .set_eeprom = bnxt_set_eeprom, .get_link = bnxt_get_link, + .get_link_ext_stats = bnxt_get_link_ext_stats, 
.get_eee = bnxt_get_eee, .set_eee = bnxt_set_eee, .get_module_info = bnxt_get_module_info, .get_module_eeprom = bnxt_get_module_eeprom, + .get_module_eeprom_by_page = bnxt_get_module_eeprom_by_page, .nway_reset = bnxt_nway_reset, .set_phys_id = bnxt_set_phys_id, .self_test = bnxt_self_test, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index b753032a1047..2686a714a59f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -254,6 +254,8 @@ struct cmd_nums { #define HWRM_PORT_DSC_DUMP 0xd9UL #define HWRM_PORT_EP_TX_QCFG 0xdaUL #define HWRM_PORT_EP_TX_CFG 0xdbUL + #define HWRM_PORT_CFG 0xdcUL + #define HWRM_PORT_QCFG 0xddUL #define HWRM_TEMP_MONITOR_QUERY 0xe0UL #define HWRM_REG_POWER_QUERY 0xe1UL #define HWRM_CORE_FREQUENCY_QUERY 0xe2UL @@ -379,6 +381,8 @@ struct cmd_nums { #define HWRM_FUNC_BACKING_STORE_QCAPS_V2 0x1a8UL #define HWRM_FUNC_DBR_PACING_NQLIST_QUERY 0x1a9UL #define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL + #define HWRM_FUNC_SYNCE_CFG 0x1abUL + #define HWRM_FUNC_SYNCE_QCFG 0x1acUL #define HWRM_SELFTEST_QLIST 0x200UL #define HWRM_SELFTEST_EXEC 0x201UL #define HWRM_SELFTEST_IRQ 0x202UL @@ -417,6 +421,8 @@ struct cmd_nums { #define HWRM_TF_SESSION_RESC_FREE 0x2ceUL #define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL #define HWRM_TF_SESSION_RESC_INFO 0x2d0UL + #define HWRM_TF_SESSION_HOTUP_STATE_SET 0x2d1UL + #define HWRM_TF_SESSION_HOTUP_STATE_GET 0x2d2UL #define HWRM_TF_TBL_TYPE_GET 0x2daUL #define HWRM_TF_TBL_TYPE_SET 0x2dbUL #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL @@ -440,6 +446,25 @@ struct cmd_nums { #define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL #define HWRM_TF_IF_TBL_SET 0x2feUL #define HWRM_TF_IF_TBL_GET 0x2ffUL + #define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL + #define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL + #define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL + #define HWRM_TFC_TBL_SCOPE_DECONFIG 0x383UL + #define HWRM_TFC_TBL_SCOPE_FID_ADD 0x384UL + #define HWRM_TFC_TBL_SCOPE_FID_REM 0x385UL + #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC 0x386UL + #define HWRM_TFC_TBL_SCOPE_POOL_FREE 0x387UL + #define HWRM_TFC_SESSION_ID_ALLOC 0x388UL + #define HWRM_TFC_SESSION_FID_ADD 0x389UL + #define HWRM_TFC_SESSION_FID_REM 0x38aUL + #define HWRM_TFC_IDENT_ALLOC 0x38bUL + #define HWRM_TFC_IDENT_FREE 0x38cUL + #define HWRM_TFC_IDX_TBL_ALLOC 0x38dUL + #define HWRM_TFC_IDX_TBL_ALLOC_SET 0x38eUL + #define HWRM_TFC_IDX_TBL_SET 0x38fUL + #define HWRM_TFC_IDX_TBL_GET 0x390UL + #define HWRM_TFC_IDX_TBL_FREE 0x391UL + #define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL #define HWRM_SV 0x400UL #define HWRM_DBG_READ_DIRECT 0xff10UL #define HWRM_DBG_READ_INDIRECT 0xff11UL @@ -546,8 +571,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 2 -#define HWRM_VERSION_RSVD 95 -#define HWRM_VERSION_STR "1.10.2.95" +#define HWRM_VERSION_RSVD 118 +#define HWRM_VERSION_STR "1.10.2.118" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -1657,6 +1682,10 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED 0x8UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED 0x10UL #define FUNC_QCAPS_RESP_FLAGS_EXT2_GENERIC_STATS_SUPPORTED 0x20UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED 0x40UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL + #define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL __le16 
tunnel_disable_flag; #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL #define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL @@ -1804,7 +1833,20 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL - u8 unused_2[3]; + u8 db_page_size; + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4KB 0x0UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_8KB 0x1UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_16KB 0x2UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_32KB 0x3UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_64KB 0x4UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_128KB 0x5UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_256KB 0x6UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_512KB 0x7UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_1MB 0x8UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL + #define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB + u8 unused_2[2]; __le32 partition_min_bw; #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0 @@ -1876,6 +1918,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL + #define FUNC_CFG_REQ_FLAGS_KEY_CTX_ASSETS_TEST 0x80000000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_ADMIN_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -2021,12 +2064,26 @@ struct hwrm_func_cfg_input { __le16 num_tx_key_ctxs; __le16 num_rx_key_ctxs; __le32 enables2; - #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL + #define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL + #define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL u8 port_kdnet_mode; #define FUNC_CFG_REQ_PORT_KDNET_MODE_DISABLED 0x0UL #define FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED 0x1UL #define FUNC_CFG_REQ_PORT_KDNET_MODE_LAST FUNC_CFG_REQ_PORT_KDNET_MODE_ENABLED - u8 unused_0[7]; + u8 db_page_size; + #define FUNC_CFG_REQ_DB_PAGE_SIZE_4KB 0x0UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_8KB 0x1UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_16KB 0x2UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_32KB 0x3UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_64KB 0x4UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_128KB 0x5UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_256KB 0x6UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_512KB 0x7UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_1MB 0x8UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL + #define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB + u8 unused_0[6]; }; /* hwrm_func_cfg_output (size:128b/16B) */ @@ -2060,10 +2117,9 @@ struct hwrm_func_qstats_input { __le64 resp_addr; __le16 fid; u8 flags; - #define FUNC_QSTATS_REQ_FLAGS_UNUSED 0x0UL - #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL - #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL - #define FUNC_QSTATS_REQ_FLAGS_LAST FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK + #define FUNC_QSTATS_REQ_FLAGS_ROCE_ONLY 0x1UL + #define FUNC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x2UL + #define FUNC_QSTATS_REQ_FLAGS_L2_ONLY 0x4UL u8 unused_0[5]; }; @@ -2093,7 +2149,8 @@ struct hwrm_func_qstats_output { __le64 rx_agg_bytes; __le64 rx_agg_events; __le64 rx_agg_aborts; - u8 unused_0[7]; + u8 clear_seq; + u8 unused_0[6]; u8 valid; }; @@ -2106,10 +2163,8 @@ struct hwrm_func_qstats_ext_input { __le64 resp_addr; __le16 fid; u8 flags; - #define FUNC_QSTATS_EXT_REQ_FLAGS_UNUSED 0x0UL - #define 
FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL - #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL - #define FUNC_QSTATS_EXT_REQ_FLAGS_LAST FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK + #define FUNC_QSTATS_EXT_REQ_FLAGS_ROCE_ONLY 0x1UL + #define FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x2UL u8 unused_0[1]; __le32 enables; #define FUNC_QSTATS_EXT_REQ_ENABLES_SCHQ_ID 0x1UL @@ -2210,6 +2265,7 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL #define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL #define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL + #define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -3155,19 +3211,23 @@ struct hwrm_func_ptp_pin_qcfg_output { #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT 0x4UL #define FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN1_USAGE_SYNC_OUT u8 pin2_usage; - #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL - #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL - #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL - #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL - #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL - #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT u8 pin3_usage; - #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL - #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL - #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL - #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL - #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL - #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_LAST FUNC_PTP_PIN_QCFG_RESP_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT u8 unused_0; u8 valid; }; @@ -3215,23 +3275,27 @@ struct hwrm_func_ptp_pin_cfg_input { #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED 0x1UL #define FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_STATE_ENABLED u8 pin2_usage; - #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL - #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL - #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL - #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL - #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL - #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_NONE 0x0UL + #define 
FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN2_USAGE_SYNCE_SECONDARY_CLOCK_OUT u8 pin3_state; #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_DISABLED 0x0UL #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED 0x1UL #define FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_STATE_ENABLED u8 pin3_usage; - #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL - #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL - #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL - #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL - #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL - #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_NONE 0x0UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_IN 0x1UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_PPS_OUT 0x2UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_IN 0x3UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNC_OUT 0x4UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_PRIMARY_CLOCK_OUT 0x5UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT 0x6UL + #define FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_LAST FUNC_PTP_PIN_CFG_REQ_PIN3_USAGE_SYNCE_SECONDARY_CLOCK_OUT u8 unused_0[4]; }; @@ -3319,9 +3383,9 @@ struct hwrm_func_ptp_ts_query_output { __le16 seq_id; __le16 resp_len; __le64 pps_event_ts; - __le64 ptm_res_local_ts; - __le64 ptm_pmstr_ts; - __le32 ptm_mstr_prop_dly; + __le64 ptm_local_ts; + __le64 ptm_system_ts; + __le32 ptm_link_delay; u8 unused_0[3]; u8 valid; }; @@ -3417,7 +3481,9 @@ struct hwrm_func_backing_store_cfg_v2_input { #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID __le16 instance; __le32 flags; - #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_CFG_ALL_DONE 0x2UL + #define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_BS_EXTEND 0x4UL __le64 page_dir; __le32 num_entries; __le16 entry_size; @@ -3853,7 +3919,7 @@ struct hwrm_port_phy_qcfg_input { u8 unused_0[6]; }; -/* hwrm_port_phy_qcfg_output (size:768b/96B) */ +/* hwrm_port_phy_qcfg_output (size:832b/104B) */ struct hwrm_port_phy_qcfg_output { __le16 error_code; __le16 req_type; @@ -4150,6 +4216,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL + u8 link_down_reason; + #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL + u8 unused_0[7]; u8 valid; }; @@ -4422,9 +4491,7 @@ struct hwrm_port_qstats_input { __le64 resp_addr; __le16 port_id; u8 flags; - #define PORT_QSTATS_REQ_FLAGS_UNUSED 0x0UL - #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL - #define PORT_QSTATS_REQ_FLAGS_LAST PORT_QSTATS_REQ_FLAGS_COUNTER_MASK + #define PORT_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL u8 unused_0[5]; __le64 tx_stat_host_addr; __le64 rx_stat_host_addr; @@ -4552,9 +4619,7 @@ struct hwrm_port_qstats_ext_input { __le16 tx_stat_size; __le16 rx_stat_size; u8 flags; - #define PORT_QSTATS_EXT_REQ_FLAGS_UNUSED 
0x0UL - #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL - #define PORT_QSTATS_EXT_REQ_FLAGS_LAST PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK + #define PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK 0x1UL u8 unused_0; __le64 tx_stat_host_addr; __le64 rx_stat_host_addr; @@ -4613,9 +4678,7 @@ struct hwrm_port_ecn_qstats_input { __le16 port_id; __le16 ecn_stat_buf_size; u8 flags; - #define PORT_ECN_QSTATS_REQ_FLAGS_UNUSED 0x0UL - #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL - #define PORT_ECN_QSTATS_REQ_FLAGS_LAST PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK + #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL u8 unused_0[3]; __le64 ecn_stat_host_addr; }; @@ -4814,8 +4877,9 @@ struct hwrm_port_phy_qcaps_output { #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL __le16 flags2; - #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL + #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL u8 internal_port_cnt; u8 valid; }; @@ -4830,9 +4894,10 @@ struct hwrm_port_phy_i2c_read_input { __le32 flags; __le32 enables; #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL + #define PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER 0x2UL __le16 port_id; u8 i2c_slave_addr; - u8 unused_0; + u8 bank_number; __le16 page_number; __le16 page_offset; u8 data_length; @@ -6537,6 +6602,7 @@ struct hwrm_vnic_qcaps_output { #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV4_CAP 0x400000UL #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL #define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL + #define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL __le16 max_aggs_supported; u8 unused_1[5]; u8 valid; @@ -6702,6 +6768,53 @@ struct hwrm_vnic_rss_cfg_cmd_err { u8 unused_0[7]; }; +/* hwrm_vnic_rss_qcfg_input (size:192b/24B) */ +struct hwrm_vnic_rss_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 rss_ctx_idx; + __le16 vnic_id; + u8 unused_0[4]; +}; + +/* hwrm_vnic_rss_qcfg_output (size:512b/64B) */ +struct hwrm_vnic_rss_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 hash_type; + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV4 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV4 0x2UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV4 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_TCP_IPV6 0x10UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_UDP_IPV6 0x20UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_IPV6_FLOW_LABEL 0x40UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV4 0x80UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV4 0x100UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_AH_SPI_IPV6 0x200UL + #define VNIC_RSS_QCFG_RESP_HASH_TYPE_ESP_SPI_IPV6 0x400UL + u8 unused_0[4]; + __le32 hash_key[10]; + u8 hash_mode_flags; + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_DEFAULT 0x1UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_4 0x2UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_INNERMOST_2 0x4UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_4 0x8UL + #define VNIC_RSS_QCFG_RESP_HASH_MODE_FLAGS_OUTERMOST_2 0x10UL + u8 ring_select_mode; + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ 0x0UL + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_XOR 
0x1UL + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM 0x2UL + #define VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_LAST VNIC_RSS_QCFG_RESP_RING_SELECT_MODE_TOEPLITZ_CHECKSUM + u8 unused_1[5]; + u8 valid; +}; + /* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */ struct hwrm_vnic_plcmodes_cfg_input { __le16 req_type; @@ -6827,6 +6940,7 @@ struct hwrm_ring_alloc_input { #define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL #define RING_ALLOC_REQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION 0x2UL #define RING_ALLOC_REQ_FLAGS_NQ_DBR_PACING 0x4UL + #define RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE 0x8UL __le64 page_tbl_addr; __le32 fbo; u8 page_size; @@ -7626,7 +7740,10 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMP 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_ICMPV6 0x3aUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD 0xffUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_RSVD __le16 dst_id; __le16 mirror_vnic_id; u8 tunnel_type; @@ -8337,6 +8454,7 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output { #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NIC_FLOW_STATS_SUPPORTED 0x80000UL + #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED 0x100000UL u8 unused_0[3]; u8 valid; }; @@ -8355,7 +8473,9 @@ struct hwrm_tunnel_dst_port_query_input { #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI u8 unused_0[7]; }; @@ -8367,7 +8487,16 @@ struct hwrm_tunnel_dst_port_query_output { __le16 resp_len; __le16 tunnel_dst_port_id; __be16 tunnel_dst_port_val; - u8 unused_0[3]; + u8 upar_in_use; + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR0 0x1UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR1 0x2UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR2 0x4UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR3 0x8UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR4 0x10UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR5 0x20UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR6 0x40UL + #define TUNNEL_DST_PORT_QUERY_RESP_UPAR_IN_USE_UPAR7 0x80UL + u8 unused_0[2]; u8 valid; }; @@ -8385,7 +8514,9 @@ struct hwrm_tunnel_dst_port_alloc_input { #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL + #define 
TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI u8 unused_0; __be16 tunnel_dst_port_val; u8 unused_1[4]; @@ -8398,7 +8529,21 @@ struct hwrm_tunnel_dst_port_alloc_output { __le16 seq_id; __le16 resp_len; __le16 tunnel_dst_port_id; - u8 unused_0[5]; + u8 error_info; + #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_SUCCESS 0x0UL + #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_ALLOCATED 0x1UL + #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE 0x2UL + #define TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_ALLOC_RESP_ERROR_INFO_ERR_NO_RESOURCE + u8 upar_in_use; + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR0 0x1UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR1 0x2UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR2 0x4UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR3 0x8UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR4 0x10UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR5 0x20UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR6 0x40UL + #define TUNNEL_DST_PORT_ALLOC_RESP_UPAR_IN_USE_UPAR7 0x80UL + u8 unused_0[3]; u8 valid; }; @@ -8416,7 +8561,9 @@ struct hwrm_tunnel_dst_port_free_input { #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI u8 unused_0; __le16 tunnel_dst_port_id; u8 unused_1[4]; @@ -8428,7 +8575,12 @@ struct hwrm_tunnel_dst_port_free_output { __le16 req_type; __le16 seq_id; __le16 resp_len; - u8 unused_1[7]; + u8 error_info; + #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_SUCCESS 0x0UL + #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_OWNER 0x1UL + #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED 0x2UL + #define TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_LAST TUNNEL_DST_PORT_FREE_RESP_ERROR_INFO_ERR_NOT_ALLOCATED + u8 unused_1[6]; u8 valid; }; @@ -8686,9 +8838,7 @@ struct hwrm_stat_generic_qstats_input { __le64 resp_addr; __le16 generic_stat_size; u8 flags; - #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER 0x0UL - #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL - #define STAT_GENERIC_QSTATS_REQ_FLAGS_LAST STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK + #define STAT_GENERIC_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL u8 unused_0[5]; __le64 generic_stat_host_addr; }; @@ -10202,6 +10352,7 @@ struct fw_status_reg { #define FW_STATUS_REG_SHUTDOWN 0x100000UL #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL #define FW_STATUS_REG_RECOVERING 0x400000UL + #define FW_STATUS_REG_MANU_DEBUG_STATUS 0x800000UL }; /* hcomm_status (size:64b/8B) */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 2132ce63193c..4ec8bba18cdd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -14,6 +14,7 @@ #include <linux/net_tstamp.h> #include <linux/timekeeping.h> #include <linux/ptp_classify.h> +#include <linux/clocksource.h> #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_hwrm.h" @@ -204,24 +205,33 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) return 0; } -static int 
bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb) +static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) { struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); struct hwrm_port_mac_cfg_input *req; struct bnxt *bp = ptp->bp; - int rc; + int rc = 0; - rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); - if (rc) - return rc; + if (!(ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) { + spin_lock_bh(&ptp->ptp_lock); + timecounter_read(&ptp->tc); + ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm); + spin_unlock_bh(&ptp->ptp_lock); + } else { + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); - req->ptp_freq_adj_ppb = cpu_to_le32(ppb); - req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); - rc = hwrm_req_send(ptp->bp, req); - if (rc) - netdev_err(ptp->bp->dev, - "ptp adjfreq failed. rc = %d\n", rc); + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + req->ptp_freq_adj_ppb = cpu_to_le32(ppb); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); + rc = hwrm_req_send(ptp->bp, req); + if (rc) + netdev_err(ptp->bp->dev, + "ptp adjfine failed. rc = %d\n", rc); + } return rc; } @@ -749,7 +759,7 @@ static const struct ptp_clock_info bnxt_ptp_caps = { .n_per_out = 0, .n_pins = 0, .pps = 0, - .adjfreq = bnxt_ptp_adjfreq, + .adjfine = bnxt_ptp_adjfine, .adjtime = bnxt_ptp_adjtime, .do_aux_work = bnxt_ptp_ts_aux_work, .gettimex64 = bnxt_ptp_gettimex, @@ -846,8 +856,9 @@ static void bnxt_ptp_timecounter_init(struct bnxt *bp, bool init_tc) memset(&ptp->cc, 0, sizeof(ptp->cc)); ptp->cc.read = bnxt_cc_read; ptp->cc.mask = CYCLECOUNTER_MASK(48); - ptp->cc.shift = 0; - ptp->cc.mult = 1; + ptp->cc.shift = BNXT_CYCLES_SHIFT; + ptp->cc.mult = clocksource_khz2mult(BNXT_DEVCLK_FREQ, ptp->cc.shift); + ptp->cmult = ptp->cc.mult; ptp->next_overflow_check = jiffies + BNXT_PHC_OVERFLOW_PERIOD; } if (init_tc) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index 4ce0a14c1e23..34162e07a119 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -17,6 +17,8 @@ #define BNXT_PTP_GRC_WIN_BASE 0x6000 #define BNXT_MAX_PHC_DRIFT 31000000 +#define BNXT_CYCLES_SHIFT 23 +#define BNXT_DEVCLK_FREQ 1000000 #define BNXT_LO_TIMER_MASK 0x0000ffffffffUL #define BNXT_HI_TIMER_MASK 0xffff00000000UL @@ -88,8 +90,9 @@ struct bnxt_ptp_cfg { u64 old_time; unsigned long next_period; unsigned long next_overflow_check; - /* 48-bit PHC overflows in 78 hours. Check overflow every 19 hours. */ - #define BNXT_PHC_OVERFLOW_PERIOD (19 * 3600 * HZ) + u32 cmult; + /* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index 4ce0a14c1e23..34162e07a119 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -17,6 +17,8 @@
 #define BNXT_PTP_GRC_WIN_BASE 0x6000
 #define BNXT_MAX_PHC_DRIFT 31000000
+#define BNXT_CYCLES_SHIFT 23
+#define BNXT_DEVCLK_FREQ 1000000
 #define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
 #define BNXT_HI_TIMER_MASK 0xffff00000000UL
@@ -88,8 +90,9 @@ struct bnxt_ptp_cfg {
 u64 old_time;
 unsigned long next_period;
 unsigned long next_overflow_check;
- /* 48-bit PHC overflows in 78 hours. Check overflow every 19 hours. */
- #define BNXT_PHC_OVERFLOW_PERIOD (19 * 3600 * HZ)
+ u32 cmult;
+ /* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */
+ #define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
 u16 tx_seqid;
 u16 tx_hdr_off;
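For context on the shorter overflow period defined above: with BNXT_DEVCLK_FREQ = 1000000 kHz (a 1 GHz free-running counter) and BNXT_CYCLES_SHIFT = 23, clocksource_khz2mult() yields a multiplier of roughly 2^23, so the 64-bit cycles * mult product inside the timecounter saturates after about 2^(64-23) ~= 2.2 * 10^12 cycles, i.e. roughly 2199 s (~36.6 minutes) at 1 GHz. Checking at least every 18 minutes keeps each accumulated delta comfortably inside that window, which is what the new BNXT_PHC_OVERFLOW_PERIOD of (18 * 60 * HZ) is for.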
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 2198e35d9e18..7926aaef8f0c 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1027,16 +1027,14 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
 udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
 udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
- &udev->l2_ring_map,
- GFP_KERNEL | __GFP_COMP);
+ &udev->l2_ring_map, GFP_KERNEL);
 if (!udev->l2_ring)
 return -ENOMEM;
 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
 udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
 udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
- &udev->l2_buf_map,
- GFP_KERNEL | __GFP_COMP);
+ &udev->l2_buf_map, GFP_KERNEL);
 if (!udev->l2_buf) {
 __cnic_free_uio_rings(udev);
 return -ENOMEM;
@@ -4105,7 +4103,7 @@ static int cnic_cm_alloc_mem(struct cnic_dev *dev)
 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++)
 atomic_set(&cp->csk_tbl[i].ref_count, 0);
- port_id = prandom_u32_max(CNIC_LOCAL_PORT_RANGE);
+ port_id = get_random_u32_below(CNIC_LOCAL_PORT_RANGE);
 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
 CNIC_LOCAL_PORT_MIN, port_id)) {
 cnic_cm_free_mem(dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 25c450606985..21973046b12b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -117,24 +117,6 @@ static inline void dmadesc_set(struct bcmgenet_priv *priv,
 dmadesc_set_length_status(priv, d, val);
 }
-static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
- void __iomem *d)
-{
- dma_addr_t addr;
-
- addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);
-
- /* Register writes to GISB bus can take couple hundred nanoseconds
- * and are done for each packet, save these expensive writes unless
- * the platform is explicitly configured for 64-bits/LPAE.
- */
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
- if (priv->hw_params->flags & GENET_HAS_40BITS)
- addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
-#endif
- return addr;
-}
-
 #define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"
 #define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
@@ -1387,7 +1369,8 @@ static int bcmgenet_validate_flow(struct net_device *dev,
 struct ethtool_usrip4_spec *l4_mask;
 struct ethhdr *eth_mask;
- if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
+ cmd->fs.location != RX_CLS_LOC_ANY) {
 netdev_err(dev, "rxnfc: Invalid location (%d)\n",
 cmd->fs.location);
 return -EINVAL;
@@ -1452,7 +1435,7 @@ static int bcmgenet_insert_flow(struct net_device *dev,
 {
 struct bcmgenet_priv *priv = netdev_priv(dev);
 struct bcmgenet_rxnfc_rule *loc_rule;
- int err;
+ int err, i;
 if (priv->hw_params->hfb_filter_size < 128) {
 netdev_err(dev, "rxnfc: Not supported by this device\n");
@@ -1470,7 +1453,29 @@ static int bcmgenet_insert_flow(struct net_device *dev,
 if (err)
 return err;
- loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+ if (cmd->fs.location == RX_CLS_LOC_ANY) {
+ list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
+ cmd->fs.location = loc_rule->fs.location;
+ err = memcmp(&loc_rule->fs, &cmd->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+ if (!err)
+ /* rule exists so return current location */
+ return 0;
+ }
+ for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+ loc_rule = &priv->rxnfc_rules[i];
+ if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
+ cmd->fs.location = i;
+ break;
+ }
+ }
+ if (i == MAX_NUM_OF_FS_RULES) {
+ cmd->fs.location = RX_CLS_LOC_ANY;
+ return -ENOSPC;
+ }
+ } else {
+ loc_rule = &priv->rxnfc_rules[cmd->fs.location];
+ }
 if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
 bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
 if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
@@ -1583,7 +1588,7 @@ static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
 break;
 case ETHTOOL_GRXCLSRLCNT:
 cmd->rule_cnt = bcmgenet_get_num_flows(priv);
- cmd->data = MAX_NUM_OF_FS_RULES;
+ cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
 break;
 case ETHTOOL_GRXCLSRULE:
 err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
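With the bcmgenet changes above, userspace can pass the special RX_CLS_LOC_ANY location and let the driver pick a free HFB slot (or return the slot of an identical existing rule), and ETHTOOL_GRXCLSRLCNT now advertises RX_CLS_LOC_SPECIAL in cmd->data to signal that support. A rough userspace sketch of that flow, illustrative only; the helper name is hypothetical, fs is assumed to be a fully filled match specification, sock is any AF_INET datagram socket, and error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int example_insert_rule_any(int sock, const char *ifname,
                                   const struct ethtool_rx_flow_spec *fs)
{
        struct ethtool_rxnfc nfc;
        struct ifreq ifr;

        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_SRXCLSRLINS;
        nfc.fs = *fs;
        nfc.fs.location = RX_CLS_LOC_ANY;       /* let the driver pick a slot */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&nfc;

        if (ioctl(sock, SIOCETHTOOL, &ifr) < 0)
                return -1;

        /* on success the driver-chosen slot should be reported back here */
        printf("rule installed at location %u\n", nfc.fs.location);
        return 0;
}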
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 7ded559842e8..b615176338b2 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -72,7 +72,6 @@ static void bcmgenet_mac_config(struct net_device *dev)
 * Receive clock is provided by the PHY.
 */
 reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg &= ~OOB_DISABLE;
 reg |= RGMII_LINK;
 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
@@ -95,10 +94,18 @@
 */
 void bcmgenet_mii_setup(struct net_device *dev)
 {
+ struct bcmgenet_priv *priv = netdev_priv(dev);
 struct phy_device *phydev = dev->phydev;
+ u32 reg;
- if (phydev->link)
+ if (phydev->link) {
 bcmgenet_mac_config(dev);
+ } else {
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ }
+
 phy_print_status(phydev);
 }
@@ -266,18 +273,20 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
 (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
 /* This is an external PHY (xMII), so we need to enable the RGMII
- * block for the interface to work
+ * block for the interface to work, unconditionally clear the
+ * Out-of-band disable since we do not need it.
 */
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
 if (priv->ext_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
 reg &= ~ID_MODE_DIS;
 reg |= id_mode_dis;
 if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
 reg |= RGMII_MODE_EN_V123;
 else
 reg |= RGMII_MODE_EN;
- bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 }
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 if (init)
 dev_info(kdev, "configuring instance for %s\n", phy_name);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 4179a12fc881..59debdc344a5 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6179,34 +6179,26 @@ static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
 return 0;
 }
-static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 {
 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
- bool neg_adj = false;
- u32 correction = 0;
-
- if (ppb < 0) {
- neg_adj = true;
- ppb = -ppb;
- }
+ u64 correction;
+ bool neg_adj;
 /* Frequency adjustment is performed using hardware with a 24 bit
 * accumulator and a programmable correction value. On each clk, the
 * correction value gets added to the accumulator and when it
 * overflows, the time counter is incremented/decremented.
- *
- * So conversion from ppb to correction value is
- * ppb * (1 << 24) / 1000000000
 */
- correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
- TG3_EAV_REF_CLK_CORRECT_MASK;
+ neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
 tg3_full_lock(tp, 0);
 if (correction)
 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
 TG3_EAV_REF_CLK_CORRECT_EN |
- (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
+ (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
+ ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
 else
 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
@@ -6330,7 +6322,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
 .n_per_out = 1,
 .n_pins = 0,
 .pps = 0,
- .adjfreq = tg3_ptp_adjfreq,
+ .adjfine = tg3_ptp_adjfine,
 .adjtime = tg3_ptp_adjtime,
 .gettimex64 = tg3_ptp_gettimex,
 .settime64 = tg3_ptp_settime,
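The tg3 conversion above keeps the same programming model: the correction register holds a 24-bit fraction that is added into the hardware accumulator on every reference-clock tick, so a correction value c shifts the clock by c / 2^24, i.e. about 0.06 ppm per LSB. diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction) computes the magnitude as |scaled_ppm| * 2^24 / (10^6 * 2^16) and returns whether the adjustment is negative, which is equivalent to the old ppb * (1 << 24) / 1000000000 formula; a +1 ppm request (scaled_ppm = 65536), for example, yields a correction of roughly 16.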