Diffstat (limited to 'drivers/net/ethernet/broadcom')
28 files changed, 4636 insertions, 846 deletions
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 948586bf1b5b..75ca3ddda1f5 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -255,4 +255,16 @@ config BNXT_HWMON
 	  Say Y if you want to expose the thermal sensor data on
 	  NetXtreme-C/E devices, via the hwmon sysfs interface.
 
+config BCMASP
+	tristate "Broadcom ASP 2.0 Ethernet support"
+	depends on ARCH_BRCMSTB || COMPILE_TEST
+	default ARCH_BRCMSTB
+	depends on OF
+	select MII
+	select PHYLIB
+	select MDIO_BCM_UNIMAC
+	help
+	  This configuration enables the Broadcom ASP 2.0 Ethernet controller
+	  driver which is present in Broadcom STB SoCs such as 72165.
+
 endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 0ddfb5b5d53c..bac5cb6ad0cd 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
 obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
 obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
 obj-$(CONFIG_BNXT) += bnxt/
+obj-$(CONFIG_BCMASP) += asp2/
diff --git a/drivers/net/ethernet/broadcom/asp2/Makefile b/drivers/net/ethernet/broadcom/asp2/Makefile
new file mode 100644
index 000000000000..e07550315f83
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCMASP) += bcm-asp.o
+bcm-asp-objs := bcmasp.o bcmasp_intf.o bcmasp_ethtool.o
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
new file mode 100644
index 000000000000..d63d321f3e7b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -0,0 +1,1437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Broadcom STB ASP 2.0 Driver
+ *
+ * Copyright (c) 2023 Broadcom
+ */
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+
+#include "bcmasp.h"
+#include "bcmasp_intf_defs.h"
+
+static void _intr2_mask_clear(struct bcmasp_priv *priv, u32 mask)
+{
+	intr2_core_wl(priv, mask, ASP_INTR2_MASK_CLEAR);
+	priv->irq_mask &= ~mask;
+}
+
+static void _intr2_mask_set(struct bcmasp_priv *priv, u32 mask)
+{
+	intr2_core_wl(priv, mask, ASP_INTR2_MASK_SET);
+	priv->irq_mask |= mask;
+}
+
+void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en)
+{
+	struct bcmasp_priv *priv = intf->parent;
+
+	if (en)
+		_intr2_mask_clear(priv, ASP_INTR2_TX_DESC(intf->channel));
+	else
+		_intr2_mask_set(priv, ASP_INTR2_TX_DESC(intf->channel));
+}
+EXPORT_SYMBOL_GPL(bcmasp_enable_tx_irq);
+
+void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en)
+{
+	struct bcmasp_priv *priv = intf->parent;
+
+	if (en)
+		_intr2_mask_clear(priv, ASP_INTR2_RX_ECH(intf->channel));
+	else
+		_intr2_mask_set(priv, ASP_INTR2_RX_ECH(intf->channel));
+}
+EXPORT_SYMBOL_GPL(bcmasp_enable_rx_irq);
+
+static void bcmasp_intr2_mask_set_all(struct bcmasp_priv *priv)
+{
+	_intr2_mask_set(priv, 0xffffffff);
+	priv->irq_mask = 0xffffffff;
+}
+
+static void bcmasp_intr2_clear_all(struct bcmasp_priv *priv)
+{
+	intr2_core_wl(priv, 0xffffffff, ASP_INTR2_CLEAR);
+}
+
+static void bcmasp_intr2_handling(struct bcmasp_intf *intf, u32 status)
+{
+	if (status & ASP_INTR2_RX_ECH(intf->channel)) {
+		if
(likely(napi_schedule_prep(&intf->rx_napi))) { + bcmasp_enable_rx_irq(intf, 0); + __napi_schedule_irqoff(&intf->rx_napi); + } + } + + if (status & ASP_INTR2_TX_DESC(intf->channel)) { + if (likely(napi_schedule_prep(&intf->tx_napi))) { + bcmasp_enable_tx_irq(intf, 0); + __napi_schedule_irqoff(&intf->tx_napi); + } + } +} + +static irqreturn_t bcmasp_isr(int irq, void *data) +{ + struct bcmasp_priv *priv = data; + struct bcmasp_intf *intf; + u32 status; + + status = intr2_core_rl(priv, ASP_INTR2_STATUS) & + ~intr2_core_rl(priv, ASP_INTR2_MASK_STATUS); + + intr2_core_wl(priv, status, ASP_INTR2_CLEAR); + + if (unlikely(status == 0)) { + dev_warn(&priv->pdev->dev, "l2 spurious interrupt\n"); + return IRQ_NONE; + } + + /* Handle intferfaces */ + list_for_each_entry(intf, &priv->intfs, list) + bcmasp_intr2_handling(intf, status); + + return IRQ_HANDLED; +} + +void bcmasp_flush_rx_port(struct bcmasp_intf *intf) +{ + struct bcmasp_priv *priv = intf->parent; + u32 mask; + + switch (intf->port) { + case 0: + mask = ASP_CTRL_UMAC0_FLUSH_MASK; + break; + case 1: + mask = ASP_CTRL_UMAC1_FLUSH_MASK; + break; + case 2: + mask = ASP_CTRL_SPB_FLUSH_MASK; + break; + default: + /* Not valid port */ + return; + } + + rx_ctrl_core_wl(priv, mask, priv->hw_info->rx_ctrl_flush); +} + +static void bcmasp_netfilt_hw_en_wake(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt) +{ + rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L3_1(64), + ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index)); + + rx_filter_core_wl(priv, ASP_RX_FILTER_NET_OFFSET_L2(32) | + ASP_RX_FILTER_NET_OFFSET_L3_0(32) | + ASP_RX_FILTER_NET_OFFSET_L3_1(96) | + ASP_RX_FILTER_NET_OFFSET_L4(32), + ASP_RX_FILTER_NET_OFFSET(nfilt->hw_index + 1)); + + rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) | + ASP_RX_FILTER_NET_CFG_EN | + ASP_RX_FILTER_NET_CFG_L2_EN | + ASP_RX_FILTER_NET_CFG_L3_EN | + ASP_RX_FILTER_NET_CFG_L4_EN | + ASP_RX_FILTER_NET_CFG_L3_FRM(2) | + ASP_RX_FILTER_NET_CFG_L4_FRM(2) | + ASP_RX_FILTER_NET_CFG_UMC(nfilt->port), + ASP_RX_FILTER_NET_CFG(nfilt->hw_index)); + + rx_filter_core_wl(priv, ASP_RX_FILTER_NET_CFG_CH(nfilt->port + 8) | + ASP_RX_FILTER_NET_CFG_EN | + ASP_RX_FILTER_NET_CFG_L2_EN | + ASP_RX_FILTER_NET_CFG_L3_EN | + ASP_RX_FILTER_NET_CFG_L4_EN | + ASP_RX_FILTER_NET_CFG_L3_FRM(2) | + ASP_RX_FILTER_NET_CFG_L4_FRM(2) | + ASP_RX_FILTER_NET_CFG_UMC(nfilt->port), + ASP_RX_FILTER_NET_CFG(nfilt->hw_index + 1)); +} + +#define MAX_WAKE_FILTER_SIZE 256 +enum asp_netfilt_reg_type { + ASP_NETFILT_MATCH = 0, + ASP_NETFILT_MASK, + ASP_NETFILT_MAX +}; + +static int bcmasp_netfilt_get_reg_offset(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + enum asp_netfilt_reg_type reg_type, + u32 offset) +{ + u32 block_index, filter_sel; + + if (offset < 32) { + block_index = ASP_RX_FILTER_NET_L2; + filter_sel = nfilt->hw_index; + } else if (offset < 64) { + block_index = ASP_RX_FILTER_NET_L2; + filter_sel = nfilt->hw_index + 1; + } else if (offset < 96) { + block_index = ASP_RX_FILTER_NET_L3_0; + filter_sel = nfilt->hw_index; + } else if (offset < 128) { + block_index = ASP_RX_FILTER_NET_L3_0; + filter_sel = nfilt->hw_index + 1; + } else if (offset < 160) { + block_index = ASP_RX_FILTER_NET_L3_1; + filter_sel = nfilt->hw_index; + } else if (offset < 192) { + block_index = ASP_RX_FILTER_NET_L3_1; + filter_sel = nfilt->hw_index + 1; + } else if (offset < 224) { + block_index = ASP_RX_FILTER_NET_L4; + filter_sel = nfilt->hw_index; + } else if (offset < 256) { + block_index = ASP_RX_FILTER_NET_L4; + filter_sel = nfilt->hw_index + 
1; + } else { + return -EINVAL; + } + + switch (reg_type) { + case ASP_NETFILT_MATCH: + return ASP_RX_FILTER_NET_PAT(filter_sel, block_index, + (offset % 32)); + case ASP_NETFILT_MASK: + return ASP_RX_FILTER_NET_MASK(filter_sel, block_index, + (offset % 32)); + default: + return -EINVAL; + } +} + +static void bcmasp_netfilt_wr(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + enum asp_netfilt_reg_type reg_type, + u32 val, u32 offset) +{ + int reg_offset; + + /* HW only accepts 4 byte aligned writes */ + if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE) + return; + + reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type, + offset); + + rx_filter_core_wl(priv, val, reg_offset); +} + +static u32 bcmasp_netfilt_rd(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + enum asp_netfilt_reg_type reg_type, + u32 offset) +{ + int reg_offset; + + /* HW only accepts 4 byte aligned writes */ + if (!IS_ALIGNED(offset, 4) || offset > MAX_WAKE_FILTER_SIZE) + return 0; + + reg_offset = bcmasp_netfilt_get_reg_offset(priv, nfilt, reg_type, + offset); + + return rx_filter_core_rl(priv, reg_offset); +} + +static int bcmasp_netfilt_wr_m_wake(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + u32 offset, void *match, void *mask, + size_t size) +{ + u32 shift, mask_val = 0, match_val = 0; + bool first_byte = true; + + if ((offset + size) > MAX_WAKE_FILTER_SIZE) + return -EINVAL; + + while (size--) { + /* The HW only accepts 4 byte aligned writes, so if we + * begin unaligned or if remaining bytes less than 4, + * we need to read then write to avoid losing current + * register state + */ + if (first_byte && (!IS_ALIGNED(offset, 4) || size < 3)) { + match_val = bcmasp_netfilt_rd(priv, nfilt, + ASP_NETFILT_MATCH, + ALIGN_DOWN(offset, 4)); + mask_val = bcmasp_netfilt_rd(priv, nfilt, + ASP_NETFILT_MASK, + ALIGN_DOWN(offset, 4)); + } + + shift = (3 - (offset % 4)) * 8; + match_val &= ~GENMASK(shift + 7, shift); + mask_val &= ~GENMASK(shift + 7, shift); + match_val |= (u32)(*((u8 *)match) << shift); + mask_val |= (u32)(*((u8 *)mask) << shift); + + /* If last byte or last byte of word, write to reg */ + if (!size || ((offset % 4) == 3)) { + bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH, + match_val, ALIGN_DOWN(offset, 4)); + bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK, + mask_val, ALIGN_DOWN(offset, 4)); + first_byte = true; + } else { + first_byte = false; + } + + offset++; + match++; + mask++; + } + + return 0; +} + +static void bcmasp_netfilt_reset_hw(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt) +{ + int i; + + for (i = 0; i < MAX_WAKE_FILTER_SIZE; i += 4) { + bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MATCH, 0, i); + bcmasp_netfilt_wr(priv, nfilt, ASP_NETFILT_MASK, 0, i); + } +} + +static void bcmasp_netfilt_tcpip4_wr(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + struct ethtool_tcpip4_spec *match, + struct ethtool_tcpip4_spec *mask, + u32 offset) +{ + __be16 val_16, mask_16; + + val_16 = htons(ETH_P_IP); + mask_16 = htons(0xFFFF); + bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset, + &val_16, &mask_16, sizeof(val_16)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1, + &match->tos, &mask->tos, + sizeof(match->tos)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12, + &match->ip4src, &mask->ip4src, + sizeof(match->ip4src)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16, + &match->ip4dst, &mask->ip4dst, + sizeof(match->ip4dst)); + bcmasp_netfilt_wr_m_wake(priv, 
nfilt, ETH_HLEN + offset + 20, + &match->psrc, &mask->psrc, + sizeof(match->psrc)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 22, + &match->pdst, &mask->pdst, + sizeof(match->pdst)); +} + +static void bcmasp_netfilt_tcpip6_wr(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt, + struct ethtool_tcpip6_spec *match, + struct ethtool_tcpip6_spec *mask, + u32 offset) +{ + __be16 val_16, mask_16; + + val_16 = htons(ETH_P_IPV6); + mask_16 = htons(0xFFFF); + bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset, + &val_16, &mask_16, sizeof(val_16)); + val_16 = htons(match->tclass << 4); + mask_16 = htons(mask->tclass << 4); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset, + &val_16, &mask_16, sizeof(val_16)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 8, + &match->ip6src, &mask->ip6src, + sizeof(match->ip6src)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 24, + &match->ip6dst, &mask->ip6dst, + sizeof(match->ip6dst)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 40, + &match->psrc, &mask->psrc, + sizeof(match->psrc)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 42, + &match->pdst, &mask->pdst, + sizeof(match->pdst)); +} + +static int bcmasp_netfilt_wr_to_hw(struct bcmasp_priv *priv, + struct bcmasp_net_filter *nfilt) +{ + struct ethtool_rx_flow_spec *fs = &nfilt->fs; + unsigned int offset = 0; + __be16 val_16, mask_16; + u8 val_8, mask_8; + + /* Currently only supports wake filters */ + if (!nfilt->wake_filter) + return -EINVAL; + + bcmasp_netfilt_reset_hw(priv, nfilt); + + if (fs->flow_type & FLOW_MAC_EXT) { + bcmasp_netfilt_wr_m_wake(priv, nfilt, 0, &fs->h_ext.h_dest, + &fs->m_ext.h_dest, + sizeof(fs->h_ext.h_dest)); + } + + if ((fs->flow_type & FLOW_EXT) && + (fs->m_ext.vlan_etype || fs->m_ext.vlan_tci)) { + bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2), + &fs->h_ext.vlan_etype, + &fs->m_ext.vlan_etype, + sizeof(fs->h_ext.vlan_etype)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ((ETH_ALEN * 2) + 2), + &fs->h_ext.vlan_tci, + &fs->m_ext.vlan_tci, + sizeof(fs->h_ext.vlan_tci)); + offset += VLAN_HLEN; + } + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + bcmasp_netfilt_wr_m_wake(priv, nfilt, 0, + &fs->h_u.ether_spec.h_dest, + &fs->m_u.ether_spec.h_dest, + sizeof(fs->h_u.ether_spec.h_dest)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_ALEN, + &fs->h_u.ether_spec.h_source, + &fs->m_u.ether_spec.h_source, + sizeof(fs->h_u.ether_spec.h_source)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset, + &fs->h_u.ether_spec.h_proto, + &fs->m_u.ether_spec.h_proto, + sizeof(fs->h_u.ether_spec.h_proto)); + + break; + case IP_USER_FLOW: + val_16 = htons(ETH_P_IP); + mask_16 = htons(0xFFFF); + bcmasp_netfilt_wr_m_wake(priv, nfilt, (ETH_ALEN * 2) + offset, + &val_16, &mask_16, sizeof(val_16)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 1, + &fs->h_u.usr_ip4_spec.tos, + &fs->m_u.usr_ip4_spec.tos, + sizeof(fs->h_u.usr_ip4_spec.tos)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9, + &fs->h_u.usr_ip4_spec.proto, + &fs->m_u.usr_ip4_spec.proto, + sizeof(fs->h_u.usr_ip4_spec.proto)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 12, + &fs->h_u.usr_ip4_spec.ip4src, + &fs->m_u.usr_ip4_spec.ip4src, + sizeof(fs->h_u.usr_ip4_spec.ip4src)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 16, + &fs->h_u.usr_ip4_spec.ip4dst, + &fs->m_u.usr_ip4_spec.ip4dst, + sizeof(fs->h_u.usr_ip4_spec.ip4dst)); + if 
(!fs->m_u.usr_ip4_spec.l4_4_bytes) + break; + + /* Only supports 20 byte IPv4 header */ + val_8 = 0x45; + mask_8 = 0xFF; + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset, + &val_8, &mask_8, sizeof(val_8)); + bcmasp_netfilt_wr_m_wake(priv, nfilt, + ETH_HLEN + 20 + offset, + &fs->h_u.usr_ip4_spec.l4_4_bytes, + &fs->m_u.usr_ip4_spec.l4_4_bytes, + sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes) + ); + break; + case TCP_V4_FLOW: + val_8 = IPPROTO_TCP; + mask_8 = 0xFF; + bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.tcp_ip4_spec, + &fs->m_u.tcp_ip4_spec, offset); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9, + &val_8, &mask_8, sizeof(val_8)); + break; + case UDP_V4_FLOW: + val_8 = IPPROTO_UDP; + mask_8 = 0xFF; + bcmasp_netfilt_tcpip4_wr(priv, nfilt, &fs->h_u.udp_ip4_spec, + &fs->m_u.udp_ip4_spec, offset); + + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 9, + &val_8, &mask_8, sizeof(val_8)); + break; + case TCP_V6_FLOW: + val_8 = IPPROTO_TCP; + mask_8 = 0xFF; + bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.tcp_ip6_spec, + &fs->m_u.tcp_ip6_spec, offset); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6, + &val_8, &mask_8, sizeof(val_8)); + break; + case UDP_V6_FLOW: + val_8 = IPPROTO_UDP; + mask_8 = 0xFF; + bcmasp_netfilt_tcpip6_wr(priv, nfilt, &fs->h_u.udp_ip6_spec, + &fs->m_u.udp_ip6_spec, offset); + bcmasp_netfilt_wr_m_wake(priv, nfilt, ETH_HLEN + offset + 6, + &val_8, &mask_8, sizeof(val_8)); + break; + } + + bcmasp_netfilt_hw_en_wake(priv, nfilt); + + return 0; +} + +void bcmasp_netfilt_suspend(struct bcmasp_intf *intf) +{ + struct bcmasp_priv *priv = intf->parent; + bool write = false; + int ret, i; + + /* Write all filters to HW */ + for (i = 0; i < NUM_NET_FILTERS; i++) { + /* If the filter does not match the port, skip programming. 
*/ + if (!priv->net_filters[i].claimed || + priv->net_filters[i].port != intf->port) + continue; + + if (i > 0 && (i % 2) && + priv->net_filters[i].wake_filter && + priv->net_filters[i - 1].wake_filter) + continue; + + ret = bcmasp_netfilt_wr_to_hw(priv, &priv->net_filters[i]); + if (!ret) + write = true; + } + + /* Successfully programmed at least one wake filter + * so enable top level wake config + */ + if (write) + rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN | + ASP_RX_FILTER_LNR_MD | + ASP_RX_FILTER_GEN_WK_EN | + ASP_RX_FILTER_NT_FLT_EN), + ASP_RX_FILTER_BLK_CTRL); +} + +void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, + u32 *rule_cnt) +{ + struct bcmasp_priv *priv = intf->parent; + int j = 0, i; + + for (i = 0; i < NUM_NET_FILTERS; i++) { + if (!priv->net_filters[i].claimed || + priv->net_filters[i].port != intf->port) + continue; + + if (i > 0 && (i % 2) && + priv->net_filters[i].wake_filter && + priv->net_filters[i - 1].wake_filter) + continue; + + rule_locs[j++] = priv->net_filters[i].fs.location; + } + + *rule_cnt = j; +} + +int bcmasp_netfilt_get_active(struct bcmasp_intf *intf) +{ + struct bcmasp_priv *priv = intf->parent; + int cnt = 0, i; + + for (i = 0; i < NUM_NET_FILTERS; i++) { + if (!priv->net_filters[i].claimed || + priv->net_filters[i].port != intf->port) + continue; + + /* Skip over a wake filter pair */ + if (i > 0 && (i % 2) && + priv->net_filters[i].wake_filter && + priv->net_filters[i - 1].wake_filter) + continue; + + cnt++; + } + + return cnt; +} + +bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf, + struct ethtool_rx_flow_spec *fs) +{ + struct bcmasp_priv *priv = intf->parent; + struct ethtool_rx_flow_spec *cur; + size_t fs_size = 0; + int i; + + for (i = 0; i < NUM_NET_FILTERS; i++) { + if (!priv->net_filters[i].claimed || + priv->net_filters[i].port != intf->port) + continue; + + cur = &priv->net_filters[i].fs; + + if (cur->flow_type != fs->flow_type || + cur->ring_cookie != fs->ring_cookie) + continue; + + switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + fs_size = sizeof(struct ethhdr); + break; + case IP_USER_FLOW: + fs_size = sizeof(struct ethtool_usrip4_spec); + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + fs_size = sizeof(struct ethtool_tcpip6_spec); + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + fs_size = sizeof(struct ethtool_tcpip4_spec); + break; + default: + continue; + } + + if (memcmp(&cur->h_u, &fs->h_u, fs_size) || + memcmp(&cur->m_u, &fs->m_u, fs_size)) + continue; + + if (cur->flow_type & FLOW_EXT) { + if (cur->h_ext.vlan_etype != fs->h_ext.vlan_etype || + cur->m_ext.vlan_etype != fs->m_ext.vlan_etype || + cur->h_ext.vlan_tci != fs->h_ext.vlan_tci || + cur->m_ext.vlan_tci != fs->m_ext.vlan_tci || + cur->h_ext.data[0] != fs->h_ext.data[0]) + continue; + } + if (cur->flow_type & FLOW_MAC_EXT) { + if (memcmp(&cur->h_ext.h_dest, + &fs->h_ext.h_dest, ETH_ALEN) || + memcmp(&cur->m_ext.h_dest, + &fs->m_ext.h_dest, ETH_ALEN)) + continue; + } + + return true; + } + + return false; +} + +/* If no network filter found, return open filter. 
+ * If no more open filters return NULL + */ +struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf, + u32 loc, bool wake_filter, + bool init) +{ + struct bcmasp_net_filter *nfilter = NULL; + struct bcmasp_priv *priv = intf->parent; + int i, open_index = -1; + + /* Check whether we exceed the filter table capacity */ + if (loc != RX_CLS_LOC_ANY && loc >= NUM_NET_FILTERS) + return ERR_PTR(-EINVAL); + + /* If the filter location is busy (already claimed) and we are initializing + * the filter (insertion), return a busy error code. + */ + if (loc != RX_CLS_LOC_ANY && init && priv->net_filters[loc].claimed) + return ERR_PTR(-EBUSY); + + /* We need two filters for wake-up, so we cannot use an odd filter */ + if (wake_filter && loc != RX_CLS_LOC_ANY && (loc % 2)) + return ERR_PTR(-EINVAL); + + /* Initialize the loop index based on the desired location or from 0 */ + i = loc == RX_CLS_LOC_ANY ? 0 : loc; + + for ( ; i < NUM_NET_FILTERS; i++) { + /* Found matching network filter */ + if (!init && + priv->net_filters[i].claimed && + priv->net_filters[i].hw_index == i && + priv->net_filters[i].port == intf->port) + return &priv->net_filters[i]; + + /* If we don't need a new filter or new filter already found */ + if (!init || open_index >= 0) + continue; + + /* Wake filter conslidates two filters to cover more bytes + * Wake filter is open if... + * 1. It is an even filter + * 2. The current and next filter is not claimed + */ + if (wake_filter && !(i % 2) && !priv->net_filters[i].claimed && + !priv->net_filters[i + 1].claimed) + open_index = i; + else if (!priv->net_filters[i].claimed) + open_index = i; + } + + if (open_index >= 0) { + nfilter = &priv->net_filters[open_index]; + nfilter->claimed = true; + nfilter->port = intf->port; + nfilter->hw_index = open_index; + } + + if (wake_filter && open_index >= 0) { + /* Claim next filter */ + priv->net_filters[open_index + 1].claimed = true; + priv->net_filters[open_index + 1].wake_filter = true; + nfilter->wake_filter = true; + } + + return nfilter ? 
nfilter : ERR_PTR(-EINVAL); +} + +void bcmasp_netfilt_release(struct bcmasp_intf *intf, + struct bcmasp_net_filter *nfilt) +{ + struct bcmasp_priv *priv = intf->parent; + + if (nfilt->wake_filter) { + memset(&priv->net_filters[nfilt->hw_index + 1], 0, + sizeof(struct bcmasp_net_filter)); + } + + memset(nfilt, 0, sizeof(struct bcmasp_net_filter)); +} + +static void bcmasp_addr_to_uint(unsigned char *addr, u32 *high, u32 *low) +{ + *high = (u32)(addr[0] << 8 | addr[1]); + *low = (u32)(addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | + addr[5]); +} + +static void bcmasp_set_mda_filter(struct bcmasp_intf *intf, + const unsigned char *addr, + unsigned char *mask, + unsigned int i) +{ + struct bcmasp_priv *priv = intf->parent; + u32 addr_h, addr_l, mask_h, mask_l; + + /* Set local copy */ + ether_addr_copy(priv->mda_filters[i].mask, mask); + ether_addr_copy(priv->mda_filters[i].addr, addr); + + /* Write to HW */ + bcmasp_addr_to_uint(priv->mda_filters[i].mask, &mask_h, &mask_l); + bcmasp_addr_to_uint(priv->mda_filters[i].addr, &addr_h, &addr_l); + rx_filter_core_wl(priv, addr_h, ASP_RX_FILTER_MDA_PAT_H(i)); + rx_filter_core_wl(priv, addr_l, ASP_RX_FILTER_MDA_PAT_L(i)); + rx_filter_core_wl(priv, mask_h, ASP_RX_FILTER_MDA_MSK_H(i)); + rx_filter_core_wl(priv, mask_l, ASP_RX_FILTER_MDA_MSK_L(i)); +} + +static void bcmasp_en_mda_filter(struct bcmasp_intf *intf, bool en, + unsigned int i) +{ + struct bcmasp_priv *priv = intf->parent; + + if (priv->mda_filters[i].en == en) + return; + + priv->mda_filters[i].en = en; + priv->mda_filters[i].port = intf->port; + + rx_filter_core_wl(priv, ((intf->channel + 8) | + (en << ASP_RX_FILTER_MDA_CFG_EN_SHIFT) | + ASP_RX_FILTER_MDA_CFG_UMC_SEL(intf->port)), + ASP_RX_FILTER_MDA_CFG(i)); +} + +/* There are 32 MDA filters shared between all ports, we reserve 4 filters per + * port for the following. + * - Promisc: Filter to allow all packets when promisc is enabled + * - All Multicast + * - Broadcast + * - Own address + * + * The reserved filters are identified as so. 
+ * - Promisc: (index * 4) + 0 + * - All Multicast: (index * 4) + 1 + * - Broadcast: (index * 4) + 2 + * - Own address: (index * 4) + 3 + */ +enum asp_rx_filter_id { + ASP_RX_FILTER_MDA_PROMISC = 0, + ASP_RX_FILTER_MDA_ALLMULTI, + ASP_RX_FILTER_MDA_BROADCAST, + ASP_RX_FILTER_MDA_OWN_ADDR, + ASP_RX_FILTER_MDA_RES_MAX, +}; + +#define ASP_RX_FILT_MDA(intf, name) (((intf)->index * \ + ASP_RX_FILTER_MDA_RES_MAX) \ + + ASP_RX_FILTER_MDA_##name) + +static int bcmasp_total_res_mda_cnt(struct bcmasp_priv *priv) +{ + return list_count_nodes(&priv->intfs) * ASP_RX_FILTER_MDA_RES_MAX; +} + +void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en) +{ + unsigned int i = ASP_RX_FILT_MDA(intf, PROMISC); + unsigned char promisc[ETH_ALEN]; + + eth_zero_addr(promisc); + /* Set mask to 00:00:00:00:00:00 to match all packets */ + bcmasp_set_mda_filter(intf, promisc, promisc, i); + bcmasp_en_mda_filter(intf, en, i); +} + +void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en) +{ + unsigned char allmulti[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00}; + unsigned int i = ASP_RX_FILT_MDA(intf, ALLMULTI); + + /* Set mask to 01:00:00:00:00:00 to match all multicast */ + bcmasp_set_mda_filter(intf, allmulti, allmulti, i); + bcmasp_en_mda_filter(intf, en, i); +} + +void bcmasp_set_broad(struct bcmasp_intf *intf, bool en) +{ + unsigned int i = ASP_RX_FILT_MDA(intf, BROADCAST); + unsigned char addr[ETH_ALEN]; + + eth_broadcast_addr(addr); + bcmasp_set_mda_filter(intf, addr, addr, i); + bcmasp_en_mda_filter(intf, en, i); +} + +void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr, + bool en) +{ + unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + unsigned int i = ASP_RX_FILT_MDA(intf, OWN_ADDR); + + bcmasp_set_mda_filter(intf, addr, mask, i); + bcmasp_en_mda_filter(intf, en, i); +} + +void bcmasp_disable_all_filters(struct bcmasp_intf *intf) +{ + struct bcmasp_priv *priv = intf->parent; + unsigned int i; + int res_count; + + res_count = bcmasp_total_res_mda_cnt(intf->parent); + + /* Disable all filters held by this port */ + for (i = res_count; i < NUM_MDA_FILTERS; i++) { + if (priv->mda_filters[i].en && + priv->mda_filters[i].port == intf->port) + bcmasp_en_mda_filter(intf, 0, i); + } +} + +static int bcmasp_combine_set_filter(struct bcmasp_intf *intf, + unsigned char *addr, unsigned char *mask, + int i) +{ + struct bcmasp_priv *priv = intf->parent; + u64 addr1, addr2, mask1, mask2, mask3; + + /* Switch to u64 to help with the calculations */ + addr1 = ether_addr_to_u64(priv->mda_filters[i].addr); + mask1 = ether_addr_to_u64(priv->mda_filters[i].mask); + addr2 = ether_addr_to_u64(addr); + mask2 = ether_addr_to_u64(mask); + + /* Check if one filter resides within the other */ + mask3 = mask1 & mask2; + if (mask3 == mask1 && ((addr1 & mask1) == (addr2 & mask1))) { + /* Filter 2 resides within filter 1, so everything is good */ + return 0; + } else if (mask3 == mask2 && ((addr1 & mask2) == (addr2 & mask2))) { + /* Filter 1 resides within filter 2, so swap filters */ + bcmasp_set_mda_filter(intf, addr, mask, i); + return 0; + } + + /* Unable to combine */ + return -EINVAL; +} + +int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr, + unsigned char *mask) +{ + struct bcmasp_priv *priv = intf->parent; + int ret, res_count; + unsigned int i; + + res_count = bcmasp_total_res_mda_cnt(intf->parent); + + for (i = res_count; i < NUM_MDA_FILTERS; i++) { + /* If filter not enabled or belongs to another port skip */ + if (!priv->mda_filters[i].en || + priv->mda_filters[i].port != 
intf->port) + continue; + + /* Attempt to combine filters */ + ret = bcmasp_combine_set_filter(intf, addr, mask, i); + if (!ret) { + intf->mib.filters_combine_cnt++; + return 0; + } + } + + /* Create new filter if possible */ + for (i = res_count; i < NUM_MDA_FILTERS; i++) { + if (priv->mda_filters[i].en) + continue; + + bcmasp_set_mda_filter(intf, addr, mask, i); + bcmasp_en_mda_filter(intf, 1, i); + return 0; + } + + /* No room for new filter */ + return -EINVAL; +} + +static void bcmasp_core_init_filters(struct bcmasp_priv *priv) +{ + unsigned int i; + + /* Disable all filters and reset software view since the HW + * can lose context while in deep sleep suspend states + */ + for (i = 0; i < NUM_MDA_FILTERS; i++) { + rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_MDA_CFG(i)); + priv->mda_filters[i].en = 0; + } + + for (i = 0; i < NUM_NET_FILTERS; i++) + rx_filter_core_wl(priv, 0x0, ASP_RX_FILTER_NET_CFG(i)); + + /* Top level filter enable bit should be enabled at all times, set + * GEN_WAKE_CLEAR to clear the network filter wake-up which would + * otherwise be sticky + */ + rx_filter_core_wl(priv, (ASP_RX_FILTER_OPUT_EN | + ASP_RX_FILTER_MDA_EN | + ASP_RX_FILTER_GEN_WK_CLR | + ASP_RX_FILTER_NT_FLT_EN), + ASP_RX_FILTER_BLK_CTRL); +} + +/* ASP core initialization */ +static void bcmasp_core_init(struct bcmasp_priv *priv) +{ + tx_analytics_core_wl(priv, 0x0, ASP_TX_ANALYTICS_CTRL); + rx_analytics_core_wl(priv, 0x4, ASP_RX_ANALYTICS_CTRL); + + rx_edpkt_core_wl(priv, (ASP_EDPKT_HDR_SZ_128 << ASP_EDPKT_HDR_SZ_SHIFT), + ASP_EDPKT_HDR_CFG); + rx_edpkt_core_wl(priv, + (ASP_EDPKT_ENDI_BT_SWP_WD << ASP_EDPKT_ENDI_DESC_SHIFT), + ASP_EDPKT_ENDI); + + rx_edpkt_core_wl(priv, 0x1b, ASP_EDPKT_BURST_BUF_PSCAL_TOUT); + rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_WRITE_TOUT); + rx_edpkt_core_wl(priv, 0x3e8, ASP_EDPKT_BURST_BUF_READ_TOUT); + + rx_edpkt_core_wl(priv, ASP_EDPKT_ENABLE_EN, ASP_EDPKT_ENABLE); + + /* Disable and clear both UniMAC's wake-up interrupts to avoid + * sticky interrupts. + */ + _intr2_mask_set(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE); + intr2_core_wl(priv, ASP_INTR2_UMC0_WAKE | ASP_INTR2_UMC1_WAKE, + ASP_INTR2_CLEAR); +} + +static void bcmasp_core_clock_select(struct bcmasp_priv *priv, bool slow) +{ + u32 reg; + + reg = ctrl_core_rl(priv, ASP_CTRL_CORE_CLOCK_SELECT); + if (slow) + reg &= ~ASP_CTRL_CORE_CLOCK_SELECT_MAIN; + else + reg |= ASP_CTRL_CORE_CLOCK_SELECT_MAIN; + ctrl_core_wl(priv, reg, ASP_CTRL_CORE_CLOCK_SELECT); +} + +static void bcmasp_core_clock_set_ll(struct bcmasp_priv *priv, u32 clr, u32 set) +{ + u32 reg; + + reg = ctrl_core_rl(priv, ASP_CTRL_CLOCK_CTRL); + reg &= ~clr; + reg |= set; + ctrl_core_wl(priv, reg, ASP_CTRL_CLOCK_CTRL); + + reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0); + reg &= ~clr; + reg |= set; + ctrl_core_wl(priv, reg, ASP_CTRL_SCRATCH_0); +} + +static void bcmasp_core_clock_set(struct bcmasp_priv *priv, u32 clr, u32 set) +{ + unsigned long flags; + + spin_lock_irqsave(&priv->clk_lock, flags); + bcmasp_core_clock_set_ll(priv, clr, set); + spin_unlock_irqrestore(&priv->clk_lock, flags); +} + +void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en) +{ + u32 intf_mask = ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(intf->port); + struct bcmasp_priv *priv = intf->parent; + unsigned long flags; + u32 reg; + + /* When enabling an interface, if the RX or TX clocks were not enabled, + * enable them. Conversely, while disabling an interface, if this is + * the last one enabled, we can turn off the shared RX and TX clocks as + * well. 
We control enable bits which is why we test for equality on + * the RGMII clock bit mask. + */ + spin_lock_irqsave(&priv->clk_lock, flags); + if (en) { + intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE | + ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE; + bcmasp_core_clock_set_ll(priv, intf_mask, 0); + } else { + reg = ctrl_core_rl(priv, ASP_CTRL_SCRATCH_0) | intf_mask; + if ((reg & ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK) == + ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK) + intf_mask |= ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE | + ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE; + bcmasp_core_clock_set_ll(priv, 0, intf_mask); + } + spin_unlock_irqrestore(&priv->clk_lock, flags); +} + +static irqreturn_t bcmasp_isr_wol(int irq, void *data) +{ + struct bcmasp_priv *priv = data; + u32 status; + + /* No L3 IRQ, so we good */ + if (priv->wol_irq <= 0) + goto irq_handled; + + status = wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_STATUS) & + ~wakeup_intr2_core_rl(priv, ASP_WAKEUP_INTR2_MASK_STATUS); + wakeup_intr2_core_wl(priv, status, ASP_WAKEUP_INTR2_CLEAR); + +irq_handled: + pm_wakeup_event(&priv->pdev->dev, 0); + return IRQ_HANDLED; +} + +static int bcmasp_get_and_request_irq(struct bcmasp_priv *priv, int i) +{ + struct platform_device *pdev = priv->pdev; + int irq, ret; + + irq = platform_get_irq_optional(pdev, i); + if (irq < 0) + return irq; + + ret = devm_request_irq(&pdev->dev, irq, bcmasp_isr_wol, 0, + pdev->name, priv); + if (ret) + return ret; + + return irq; +} + +static void bcmasp_init_wol_shared(struct bcmasp_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + int irq; + + irq = bcmasp_get_and_request_irq(priv, 1); + if (irq < 0) { + dev_warn(dev, "Failed to init WoL irq: %d\n", irq); + return; + } + + priv->wol_irq = irq; + priv->wol_irq_enabled_mask = 0; + device_set_wakeup_capable(&pdev->dev, 1); +} + +static void bcmasp_enable_wol_shared(struct bcmasp_intf *intf, bool en) +{ + struct bcmasp_priv *priv = intf->parent; + struct device *dev = &priv->pdev->dev; + + if (en) { + if (priv->wol_irq_enabled_mask) { + set_bit(intf->port, &priv->wol_irq_enabled_mask); + return; + } + + /* First enable */ + set_bit(intf->port, &priv->wol_irq_enabled_mask); + enable_irq_wake(priv->wol_irq); + device_set_wakeup_enable(dev, 1); + } else { + if (!priv->wol_irq_enabled_mask) + return; + + clear_bit(intf->port, &priv->wol_irq_enabled_mask); + if (priv->wol_irq_enabled_mask) + return; + + /* Last disable */ + disable_irq_wake(priv->wol_irq); + device_set_wakeup_enable(dev, 0); + } +} + +static void bcmasp_wol_irq_destroy_shared(struct bcmasp_priv *priv) +{ + if (priv->wol_irq > 0) + free_irq(priv->wol_irq, priv); +} + +static void bcmasp_init_wol_per_intf(struct bcmasp_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + struct bcmasp_intf *intf; + int irq; + + list_for_each_entry(intf, &priv->intfs, list) { + irq = bcmasp_get_and_request_irq(priv, intf->port + 1); + if (irq < 0) { + dev_warn(dev, "Failed to init WoL irq(port %d): %d\n", + intf->port, irq); + continue; + } + + intf->wol_irq = irq; + intf->wol_irq_enabled = false; + device_set_wakeup_capable(&pdev->dev, 1); + } +} + +static void bcmasp_enable_wol_per_intf(struct bcmasp_intf *intf, bool en) +{ + struct device *dev = &intf->parent->pdev->dev; + + if (en ^ intf->wol_irq_enabled) + irq_set_irq_wake(intf->wol_irq, en); + + intf->wol_irq_enabled = en; + device_set_wakeup_enable(dev, en); +} + +static void bcmasp_wol_irq_destroy_per_intf(struct bcmasp_priv *priv) +{ + struct bcmasp_intf *intf; 
+ + list_for_each_entry(intf, &priv->intfs, list) { + if (intf->wol_irq > 0) + free_irq(intf->wol_irq, priv); + } +} + +static struct bcmasp_hw_info v20_hw_info = { + .rx_ctrl_flush = ASP_RX_CTRL_FLUSH, + .umac2fb = UMAC2FB_OFFSET, + .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT, + .rx_ctrl_fb_filt_out_frame_count = ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT, + .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH, +}; + +static const struct bcmasp_plat_data v20_plat_data = { + .init_wol = bcmasp_init_wol_per_intf, + .enable_wol = bcmasp_enable_wol_per_intf, + .destroy_wol = bcmasp_wol_irq_destroy_per_intf, + .hw_info = &v20_hw_info, +}; + +static struct bcmasp_hw_info v21_hw_info = { + .rx_ctrl_flush = ASP_RX_CTRL_FLUSH_2_1, + .umac2fb = UMAC2FB_OFFSET_2_1, + .rx_ctrl_fb_out_frame_count = ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1, + .rx_ctrl_fb_filt_out_frame_count = + ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1, + .rx_ctrl_fb_rx_fifo_depth = ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1, +}; + +static const struct bcmasp_plat_data v21_plat_data = { + .init_wol = bcmasp_init_wol_shared, + .enable_wol = bcmasp_enable_wol_shared, + .destroy_wol = bcmasp_wol_irq_destroy_shared, + .hw_info = &v21_hw_info, +}; + +static const struct of_device_id bcmasp_of_match[] = { + { .compatible = "brcm,asp-v2.0", .data = &v20_plat_data }, + { .compatible = "brcm,asp-v2.1", .data = &v21_plat_data }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, bcmasp_of_match); + +static const struct of_device_id bcmasp_mdio_of_match[] = { + { .compatible = "brcm,asp-v2.1-mdio", }, + { .compatible = "brcm,asp-v2.0-mdio", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, bcmasp_mdio_of_match); + +static void bcmasp_remove_intfs(struct bcmasp_priv *priv) +{ + struct bcmasp_intf *intf, *n; + + list_for_each_entry_safe(intf, n, &priv->intfs, list) { + list_del(&intf->list); + bcmasp_interface_destroy(intf); + } +} + +static int bcmasp_probe(struct platform_device *pdev) +{ + struct device_node *ports_node, *intf_node; + const struct bcmasp_plat_data *pdata; + struct device *dev = &pdev->dev; + struct bcmasp_priv *priv; + struct bcmasp_intf *intf; + int ret = 0, count = 0; + unsigned int i; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->irq = platform_get_irq(pdev, 0); + if (priv->irq <= 0) + return -EINVAL; + + priv->clk = devm_clk_get_optional_enabled(dev, "sw_asp"); + if (IS_ERR(priv->clk)) + return dev_err_probe(dev, PTR_ERR(priv->clk), + "failed to request clock\n"); + + /* Base from parent node */ + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return dev_err_probe(dev, PTR_ERR(priv->base), "failed to iomap\n"); + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); + if (ret) + return dev_err_probe(dev, ret, "unable to set DMA mask: %d\n", ret); + + dev_set_drvdata(&pdev->dev, priv); + priv->pdev = pdev; + spin_lock_init(&priv->mda_lock); + spin_lock_init(&priv->clk_lock); + mutex_init(&priv->wol_lock); + mutex_init(&priv->net_lock); + INIT_LIST_HEAD(&priv->intfs); + + pdata = device_get_match_data(&pdev->dev); + if (!pdata) + return dev_err_probe(dev, -EINVAL, "unable to find platform data\n"); + + priv->init_wol = pdata->init_wol; + priv->enable_wol = pdata->enable_wol; + priv->destroy_wol = pdata->destroy_wol; + priv->hw_info = pdata->hw_info; + + /* Enable all clocks to ensure successful probing */ + bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0); + + /* Switch to the main clock */ + 
bcmasp_core_clock_select(priv, false); + + bcmasp_intr2_mask_set_all(priv); + bcmasp_intr2_clear_all(priv); + + ret = devm_request_irq(&pdev->dev, priv->irq, bcmasp_isr, 0, + pdev->name, priv); + if (ret) + return dev_err_probe(dev, ret, "failed to request ASP interrupt: %d", ret); + + /* Register mdio child nodes */ + of_platform_populate(dev->of_node, bcmasp_mdio_of_match, NULL, dev); + + /* ASP specific initialization, Needs to be done regardless of + * how many interfaces come up. + */ + bcmasp_core_init(priv); + bcmasp_core_init_filters(priv); + + ports_node = of_find_node_by_name(dev->of_node, "ethernet-ports"); + if (!ports_node) { + dev_warn(dev, "No ports found\n"); + return -EINVAL; + } + + i = 0; + for_each_available_child_of_node(ports_node, intf_node) { + intf = bcmasp_interface_create(priv, intf_node, i); + if (!intf) { + dev_err(dev, "Cannot create eth interface %d\n", i); + bcmasp_remove_intfs(priv); + goto of_put_exit; + } + list_add_tail(&intf->list, &priv->intfs); + i++; + } + + /* Check and enable WoL */ + priv->init_wol(priv); + + /* Drop the clock reference count now and let ndo_open()/ndo_close() + * manage it for us from now on. + */ + bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE); + + clk_disable_unprepare(priv->clk); + + /* Now do the registration of the network ports which will take care + * of managing the clock properly. + */ + list_for_each_entry(intf, &priv->intfs, list) { + ret = register_netdev(intf->ndev); + if (ret) { + netdev_err(intf->ndev, + "failed to register net_device: %d\n", ret); + priv->destroy_wol(priv); + bcmasp_remove_intfs(priv); + goto of_put_exit; + } + count++; + } + + dev_info(dev, "Initialized %d port(s)\n", count); + +of_put_exit: + of_node_put(ports_node); + return ret; +} + +static int bcmasp_remove(struct platform_device *pdev) +{ + struct bcmasp_priv *priv = dev_get_drvdata(&pdev->dev); + + if (!priv) + return 0; + + priv->destroy_wol(priv); + bcmasp_remove_intfs(priv); + + return 0; +} + +static void bcmasp_shutdown(struct platform_device *pdev) +{ + bcmasp_remove(pdev); +} + +static int __maybe_unused bcmasp_suspend(struct device *d) +{ + struct bcmasp_priv *priv = dev_get_drvdata(d); + struct bcmasp_intf *intf; + int ret; + + list_for_each_entry(intf, &priv->intfs, list) { + ret = bcmasp_interface_suspend(intf); + if (ret) + break; + } + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + /* Whether Wake-on-LAN is enabled or not, we can always disable + * the shared TX clock + */ + bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE); + + bcmasp_core_clock_select(priv, true); + + clk_disable_unprepare(priv->clk); + + return ret; +} + +static int __maybe_unused bcmasp_resume(struct device *d) +{ + struct bcmasp_priv *priv = dev_get_drvdata(d); + struct bcmasp_intf *intf; + int ret; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + /* Switch to the main clock domain */ + bcmasp_core_clock_select(priv, false); + + /* Re-enable all clocks for re-initialization */ + bcmasp_core_clock_set(priv, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE, 0); + + bcmasp_core_init(priv); + bcmasp_core_init_filters(priv); + + /* And disable them to let the network devices take care of them */ + bcmasp_core_clock_set(priv, 0, ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE); + + clk_disable_unprepare(priv->clk); + + list_for_each_entry(intf, &priv->intfs, list) { + ret = bcmasp_interface_resume(intf); + if (ret) + break; + } + + return ret; +} + +static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops, + bcmasp_suspend, 
bcmasp_resume); + +static struct platform_driver bcmasp_driver = { + .probe = bcmasp_probe, + .remove = bcmasp_remove, + .shutdown = bcmasp_shutdown, + .driver = { + .name = "brcm,asp-v2", + .of_match_table = bcmasp_of_match, + .pm = &bcmasp_pm_ops, + }, +}; +module_platform_driver(bcmasp_driver); + +MODULE_DESCRIPTION("Broadcom ASP 2.0 Ethernet controller driver"); +MODULE_ALIAS("platform:brcm,asp-v2"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h new file mode 100644 index 000000000000..5b512f7f5e94 --- /dev/null +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h @@ -0,0 +1,586 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BCMASP_H +#define __BCMASP_H + +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/io-64-nonatomic-hi-lo.h> +#include <uapi/linux/ethtool.h> + +#define ASP_INTR2_OFFSET 0x1000 +#define ASP_INTR2_STATUS 0x0 +#define ASP_INTR2_SET 0x4 +#define ASP_INTR2_CLEAR 0x8 +#define ASP_INTR2_MASK_STATUS 0xc +#define ASP_INTR2_MASK_SET 0x10 +#define ASP_INTR2_MASK_CLEAR 0x14 + +#define ASP_INTR2_RX_ECH(intr) BIT(intr) +#define ASP_INTR2_TX_DESC(intr) BIT((intr) + 14) +#define ASP_INTR2_UMC0_WAKE BIT(22) +#define ASP_INTR2_UMC1_WAKE BIT(28) + +#define ASP_WAKEUP_INTR2_OFFSET 0x1200 +#define ASP_WAKEUP_INTR2_STATUS 0x0 +#define ASP_WAKEUP_INTR2_SET 0x4 +#define ASP_WAKEUP_INTR2_CLEAR 0x8 +#define ASP_WAKEUP_INTR2_MASK_STATUS 0xc +#define ASP_WAKEUP_INTR2_MASK_SET 0x10 +#define ASP_WAKEUP_INTR2_MASK_CLEAR 0x14 +#define ASP_WAKEUP_INTR2_MPD_0 BIT(0) +#define ASP_WAKEUP_INTR2_MPD_1 BIT(1) +#define ASP_WAKEUP_INTR2_FILT_0 BIT(2) +#define ASP_WAKEUP_INTR2_FILT_1 BIT(3) +#define ASP_WAKEUP_INTR2_FW BIT(4) + +#define ASP_TX_ANALYTICS_OFFSET 0x4c000 +#define ASP_TX_ANALYTICS_CTRL 0x0 + +#define ASP_RX_ANALYTICS_OFFSET 0x98000 +#define ASP_RX_ANALYTICS_CTRL 0x0 + +#define ASP_RX_CTRL_OFFSET 0x9f000 +#define ASP_RX_CTRL_UMAC_0_FRAME_COUNT 0x8 +#define ASP_RX_CTRL_UMAC_1_FRAME_COUNT 0xc +#define ASP_RX_CTRL_FB_0_FRAME_COUNT 0x14 +#define ASP_RX_CTRL_FB_1_FRAME_COUNT 0x18 +#define ASP_RX_CTRL_FB_8_FRAME_COUNT 0x1c +/* asp2.1 diverges offsets here */ +/* ASP2.0 */ +#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT 0x20 +#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT 0x24 +#define ASP_RX_CTRL_FLUSH 0x28 +#define ASP_CTRL_UMAC0_FLUSH_MASK (BIT(0) | BIT(12)) +#define ASP_CTRL_UMAC1_FLUSH_MASK (BIT(1) | BIT(13)) +#define ASP_CTRL_SPB_FLUSH_MASK (BIT(8) | BIT(20)) +#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH 0x30 +/* ASP2.1 */ +#define ASP_RX_CTRL_FB_9_FRAME_COUNT_2_1 0x20 +#define ASP_RX_CTRL_FB_10_FRAME_COUNT_2_1 0x24 +#define ASP_RX_CTRL_FB_OUT_FRAME_COUNT_2_1 0x28 +#define ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT_2_1 0x2c +#define ASP_RX_CTRL_FLUSH_2_1 0x30 +#define ASP_RX_CTRL_FB_RX_FIFO_DEPTH_2_1 0x38 + +#define ASP_RX_FILTER_OFFSET 0x80000 +#define ASP_RX_FILTER_BLK_CTRL 0x0 +#define ASP_RX_FILTER_OPUT_EN BIT(0) +#define ASP_RX_FILTER_MDA_EN BIT(1) +#define ASP_RX_FILTER_LNR_MD BIT(2) +#define ASP_RX_FILTER_GEN_WK_EN BIT(3) +#define ASP_RX_FILTER_GEN_WK_CLR BIT(4) +#define ASP_RX_FILTER_NT_FLT_EN BIT(5) +#define ASP_RX_FILTER_MDA_CFG(sel) (((sel) * 0x14) + 0x100) +#define ASP_RX_FILTER_MDA_CFG_EN_SHIFT 8 +#define ASP_RX_FILTER_MDA_CFG_UMC_SEL(sel) ((sel) > 1 ? 
BIT(17) : \ + BIT((sel) + 9)) +#define ASP_RX_FILTER_MDA_PAT_H(sel) (((sel) * 0x14) + 0x104) +#define ASP_RX_FILTER_MDA_PAT_L(sel) (((sel) * 0x14) + 0x108) +#define ASP_RX_FILTER_MDA_MSK_H(sel) (((sel) * 0x14) + 0x10c) +#define ASP_RX_FILTER_MDA_MSK_L(sel) (((sel) * 0x14) + 0x110) +#define ASP_RX_FILTER_MDA_CFG(sel) (((sel) * 0x14) + 0x100) +#define ASP_RX_FILTER_MDA_PAT_H(sel) (((sel) * 0x14) + 0x104) +#define ASP_RX_FILTER_MDA_PAT_L(sel) (((sel) * 0x14) + 0x108) +#define ASP_RX_FILTER_MDA_MSK_H(sel) (((sel) * 0x14) + 0x10c) +#define ASP_RX_FILTER_MDA_MSK_L(sel) (((sel) * 0x14) + 0x110) +#define ASP_RX_FILTER_NET_CFG(sel) (((sel) * 0xa04) + 0x400) +#define ASP_RX_FILTER_NET_CFG_CH(sel) ((sel) << 0) +#define ASP_RX_FILTER_NET_CFG_EN BIT(9) +#define ASP_RX_FILTER_NET_CFG_L2_EN BIT(10) +#define ASP_RX_FILTER_NET_CFG_L3_EN BIT(11) +#define ASP_RX_FILTER_NET_CFG_L4_EN BIT(12) +#define ASP_RX_FILTER_NET_CFG_L3_FRM(sel) ((sel) << 13) +#define ASP_RX_FILTER_NET_CFG_L4_FRM(sel) ((sel) << 15) +#define ASP_RX_FILTER_NET_CFG_UMC(sel) BIT((sel) + 19) +#define ASP_RX_FILTER_NET_CFG_DMA_EN BIT(27) + +#define ASP_RX_FILTER_NET_OFFSET_MAX 32 +#define ASP_RX_FILTER_NET_PAT(sel, block, off) \ + (((sel) * 0xa04) + ((block) * 0x200) + (off) + 0x600) +#define ASP_RX_FILTER_NET_MASK(sel, block, off) \ + (((sel) * 0xa04) + ((block) * 0x200) + (off) + 0x700) + +#define ASP_RX_FILTER_NET_OFFSET(sel) (((sel) * 0xa04) + 0xe00) +#define ASP_RX_FILTER_NET_OFFSET_L2(val) ((val) << 0) +#define ASP_RX_FILTER_NET_OFFSET_L3_0(val) ((val) << 8) +#define ASP_RX_FILTER_NET_OFFSET_L3_1(val) ((val) << 16) +#define ASP_RX_FILTER_NET_OFFSET_L4(val) ((val) << 24) + +enum asp_rx_net_filter_block { + ASP_RX_FILTER_NET_L2 = 0, + ASP_RX_FILTER_NET_L3_0, + ASP_RX_FILTER_NET_L3_1, + ASP_RX_FILTER_NET_L4, + ASP_RX_FILTER_NET_BLOCK_MAX +}; + +#define ASP_EDPKT_OFFSET 0x9c000 +#define ASP_EDPKT_ENABLE 0x4 +#define ASP_EDPKT_ENABLE_EN BIT(0) +#define ASP_EDPKT_HDR_CFG 0xc +#define ASP_EDPKT_HDR_SZ_SHIFT 2 +#define ASP_EDPKT_HDR_SZ_32 0 +#define ASP_EDPKT_HDR_SZ_64 1 +#define ASP_EDPKT_HDR_SZ_96 2 +#define ASP_EDPKT_HDR_SZ_128 3 +#define ASP_EDPKT_BURST_BUF_PSCAL_TOUT 0x10 +#define ASP_EDPKT_BURST_BUF_WRITE_TOUT 0x14 +#define ASP_EDPKT_BURST_BUF_READ_TOUT 0x18 +#define ASP_EDPKT_RX_TS_COUNTER 0x38 +#define ASP_EDPKT_ENDI 0x48 +#define ASP_EDPKT_ENDI_DESC_SHIFT 8 +#define ASP_EDPKT_ENDI_NO_BT_SWP 0 +#define ASP_EDPKT_ENDI_BT_SWP_WD 1 +#define ASP_EDPKT_RX_PKT_CNT 0x138 +#define ASP_EDPKT_HDR_EXTR_CNT 0x13c +#define ASP_EDPKT_HDR_OUT_CNT 0x140 + +#define ASP_CTRL 0x101000 +#define ASP_CTRL_ASP_SW_INIT 0x04 +#define ASP_CTRL_ASP_SW_INIT_ACPUSS_CORE BIT(0) +#define ASP_CTRL_ASP_SW_INIT_ASP_TX BIT(1) +#define ASP_CTRL_ASP_SW_INIT_AS_RX BIT(2) +#define ASP_CTRL_ASP_SW_INIT_ASP_RGMII_UMAC0 BIT(3) +#define ASP_CTRL_ASP_SW_INIT_ASP_RGMII_UMAC1 BIT(4) +#define ASP_CTRL_ASP_SW_INIT_ASP_XMEMIF BIT(5) +#define ASP_CTRL_CLOCK_CTRL 0x04 +#define ASP_CTRL_CLOCK_CTRL_ASP_TX_DISABLE BIT(0) +#define ASP_CTRL_CLOCK_CTRL_ASP_RX_DISABLE BIT(1) +#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT 2 +#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_MASK (0x7 << ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT) +#define ASP_CTRL_CLOCK_CTRL_ASP_RGMII_DIS(x) BIT(ASP_CTRL_CLOCK_CTRL_ASP_RGMII_SHIFT + (x)) +#define ASP_CTRL_CLOCK_CTRL_ASP_ALL_DISABLE GENMASK(4, 0) +#define ASP_CTRL_CORE_CLOCK_SELECT 0x08 +#define ASP_CTRL_CORE_CLOCK_SELECT_MAIN BIT(0) +#define ASP_CTRL_SCRATCH_0 0x0c + +struct bcmasp_tx_cb { + struct sk_buff *skb; + unsigned int bytes_sent; + bool last; + + 
DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +struct bcmasp_res { + /* Per interface resources */ + /* Port */ + void __iomem *umac; + void __iomem *umac2fb; + void __iomem *rgmii; + + /* TX slowpath/configuration */ + void __iomem *tx_spb_ctrl; + void __iomem *tx_spb_top; + void __iomem *tx_epkt_core; + void __iomem *tx_pause_ctrl; +}; + +#define DESC_ADDR(x) ((x) & GENMASK_ULL(39, 0)) +#define DESC_FLAGS(x) ((x) & GENMASK_ULL(63, 40)) + +struct bcmasp_desc { + u64 buf; + #define DESC_CHKSUM BIT_ULL(40) + #define DESC_CRC_ERR BIT_ULL(41) + #define DESC_RX_SYM_ERR BIT_ULL(42) + #define DESC_NO_OCT_ALN BIT_ULL(43) + #define DESC_PKT_TRUC BIT_ULL(44) + /* 39:0 (TX/RX) bits 0-39 of buf addr + * 40 (RX) checksum + * 41 (RX) crc_error + * 42 (RX) rx_symbol_error + * 43 (RX) non_octet_aligned + * 44 (RX) pkt_truncated + * 45 Reserved + * 56:46 (RX) mac_filter_id + * 60:57 (RX) rx_port_num (0-unicmac0, 1-unimac1) + * 61 Reserved + * 63:62 (TX) forward CRC, overwrite CRC + */ + u32 size; + u32 flags; + #define DESC_INT_EN BIT(0) + #define DESC_SOF BIT(1) + #define DESC_EOF BIT(2) + #define DESC_EPKT_CMD BIT(3) + #define DESC_SCRAM_ST BIT(8) + #define DESC_SCRAM_END BIT(9) + #define DESC_PCPP BIT(10) + #define DESC_PPPP BIT(11) + /* 0 (TX) tx_int_en + * 1 (TX/RX) SOF + * 2 (TX/RX) EOF + * 3 (TX) epkt_command + * 6:4 (TX) PA + * 7 (TX) pause at desc end + * 8 (TX) scram_start + * 9 (TX) scram_end + * 10 (TX) PCPP + * 11 (TX) PPPP + * 14:12 Reserved + * 15 (TX) pid ch Valid + * 19:16 (TX) data_pkt_type + * 32:20 (TX) pid_channel (RX) nw_filter_id + */ +}; + +struct bcmasp_intf; + +struct bcmasp_intf_stats64 { + /* Rx Stats */ + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_errors; + u64_stats_t rx_dropped; + u64_stats_t rx_crc_errs; + u64_stats_t rx_sym_errs; + + /* Tx Stats*/ + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + + struct u64_stats_sync syncp; +}; + +struct bcmasp_mib_counters { + u32 edpkt_ts; + u32 edpkt_rx_pkt_cnt; + u32 edpkt_hdr_ext_cnt; + u32 edpkt_hdr_out_cnt; + u32 umac_frm_cnt; + u32 fb_frm_cnt; + u32 fb_rx_fifo_depth; + u32 fb_out_frm_cnt; + u32 fb_filt_out_frm_cnt; + u32 alloc_rx_skb_failed; + u32 tx_dma_failed; + u32 mc_filters_full_cnt; + u32 uc_filters_full_cnt; + u32 filters_combine_cnt; + u32 promisc_filters_cnt; + u32 tx_realloc_offload_failed; + u32 tx_timeout_cnt; +}; + +struct bcmasp_intf_ops { + unsigned long (*rx_desc_read)(struct bcmasp_intf *intf); + void (*rx_buffer_write)(struct bcmasp_intf *intf, dma_addr_t addr); + void (*rx_desc_write)(struct bcmasp_intf *intf, dma_addr_t addr); + unsigned long (*tx_read)(struct bcmasp_intf *intf); + void (*tx_write)(struct bcmasp_intf *intf, dma_addr_t addr); +}; + +struct bcmasp_priv; + +struct bcmasp_intf { + struct list_head list; + struct net_device *ndev; + struct bcmasp_priv *parent; + + /* ASP Ch */ + int channel; + int port; + const struct bcmasp_intf_ops *ops; + + /* Used for splitting shared resources */ + int index; + + struct napi_struct tx_napi; + /* TX ring, starts on a new cacheline boundary */ + void __iomem *tx_spb_dma; + int tx_spb_index; + int tx_spb_clean_index; + struct bcmasp_desc *tx_spb_cpu; + dma_addr_t tx_spb_dma_addr; + dma_addr_t tx_spb_dma_valid; + dma_addr_t tx_spb_dma_read; + struct bcmasp_tx_cb *tx_cbs; + + /* RX ring, starts on a new cacheline boundary */ + void __iomem *rx_edpkt_cfg; + void __iomem *rx_edpkt_dma; + int rx_edpkt_index; + int rx_buf_order; + struct bcmasp_desc *rx_edpkt_cpu; + dma_addr_t rx_edpkt_dma_addr; + dma_addr_t 
rx_edpkt_dma_read; + + /* RX buffer prefetcher ring*/ + void *rx_ring_cpu; + dma_addr_t rx_ring_dma; + dma_addr_t rx_ring_dma_valid; + struct napi_struct rx_napi; + + struct bcmasp_res res; + unsigned int crc_fwd; + + /* PHY device */ + struct device_node *phy_dn; + struct device_node *ndev_dn; + phy_interface_t phy_interface; + bool internal_phy; + int old_pause; + int old_link; + int old_duplex; + + u32 msg_enable; + + /* Statistics */ + struct bcmasp_intf_stats64 stats64; + struct bcmasp_mib_counters mib; + + u32 wolopts; + u8 sopass[SOPASS_MAX]; + /* Used if per intf wol irq */ + int wol_irq; + unsigned int wol_irq_enabled:1; + + struct ethtool_eee eee; +}; + +#define NUM_NET_FILTERS 32 +struct bcmasp_net_filter { + struct ethtool_rx_flow_spec fs; + + bool claimed; + bool wake_filter; + + int port; + unsigned int hw_index; +}; + +#define NUM_MDA_FILTERS 32 +struct bcmasp_mda_filter { + /* Current owner of this filter */ + int port; + bool en; + u8 addr[ETH_ALEN]; + u8 mask[ETH_ALEN]; +}; + +struct bcmasp_hw_info { + u32 rx_ctrl_flush; + u32 umac2fb; + u32 rx_ctrl_fb_out_frame_count; + u32 rx_ctrl_fb_filt_out_frame_count; + u32 rx_ctrl_fb_rx_fifo_depth; +}; + +struct bcmasp_plat_data { + void (*init_wol)(struct bcmasp_priv *priv); + void (*enable_wol)(struct bcmasp_intf *intf, bool en); + void (*destroy_wol)(struct bcmasp_priv *priv); + struct bcmasp_hw_info *hw_info; +}; + +struct bcmasp_priv { + struct platform_device *pdev; + struct clk *clk; + + int irq; + u32 irq_mask; + + /* Used if shared wol irq */ + struct mutex wol_lock; + int wol_irq; + unsigned long wol_irq_enabled_mask; + + void (*init_wol)(struct bcmasp_priv *priv); + void (*enable_wol)(struct bcmasp_intf *intf, bool en); + void (*destroy_wol)(struct bcmasp_priv *priv); + + void __iomem *base; + struct bcmasp_hw_info *hw_info; + + struct list_head intfs; + + struct bcmasp_mda_filter mda_filters[NUM_MDA_FILTERS]; + + /* MAC destination address filters lock */ + spinlock_t mda_lock; + + /* Protects accesses to ASP_CTRL_CLOCK_CTRL */ + spinlock_t clk_lock; + + struct bcmasp_net_filter net_filters[NUM_NET_FILTERS]; + + /* Network filter lock */ + struct mutex net_lock; +}; + +static inline unsigned long bcmasp_intf_rx_desc_read(struct bcmasp_intf *intf) +{ + return intf->ops->rx_desc_read(intf); +} + +static inline void bcmasp_intf_rx_buffer_write(struct bcmasp_intf *intf, + dma_addr_t addr) +{ + intf->ops->rx_buffer_write(intf, addr); +} + +static inline void bcmasp_intf_rx_desc_write(struct bcmasp_intf *intf, + dma_addr_t addr) +{ + intf->ops->rx_desc_write(intf, addr); +} + +static inline unsigned long bcmasp_intf_tx_read(struct bcmasp_intf *intf) +{ + return intf->ops->tx_read(intf); +} + +static inline void bcmasp_intf_tx_write(struct bcmasp_intf *intf, + dma_addr_t addr) +{ + intf->ops->tx_write(intf, addr); +} + +#define __BCMASP_IO_MACRO(name, m) \ +static inline u32 name##_rl(struct bcmasp_intf *intf, u32 off) \ +{ \ + u32 reg = readl_relaxed(intf->m + off); \ + return reg; \ +} \ +static inline void name##_wl(struct bcmasp_intf *intf, u32 val, u32 off)\ +{ \ + writel_relaxed(val, intf->m + off); \ +} + +#define BCMASP_IO_MACRO(name) __BCMASP_IO_MACRO(name, res.name) +#define BCMASP_FP_IO_MACRO(name) __BCMASP_IO_MACRO(name, name) + +BCMASP_IO_MACRO(umac); +BCMASP_IO_MACRO(umac2fb); +BCMASP_IO_MACRO(rgmii); +BCMASP_FP_IO_MACRO(tx_spb_dma); +BCMASP_IO_MACRO(tx_spb_ctrl); +BCMASP_IO_MACRO(tx_spb_top); +BCMASP_IO_MACRO(tx_epkt_core); +BCMASP_IO_MACRO(tx_pause_ctrl); +BCMASP_FP_IO_MACRO(rx_edpkt_dma); 
+BCMASP_FP_IO_MACRO(rx_edpkt_cfg); + +#define __BCMASP_FP_IO_MACRO_Q(name, m) \ +static inline u64 name##_rq(struct bcmasp_intf *intf, u32 off) \ +{ \ + u64 reg = readq_relaxed(intf->m + off); \ + return reg; \ +} \ +static inline void name##_wq(struct bcmasp_intf *intf, u64 val, u32 off)\ +{ \ + writeq_relaxed(val, intf->m + off); \ +} + +#define BCMASP_FP_IO_MACRO_Q(name) __BCMASP_FP_IO_MACRO_Q(name, name) + +BCMASP_FP_IO_MACRO_Q(tx_spb_dma); +BCMASP_FP_IO_MACRO_Q(rx_edpkt_dma); +BCMASP_FP_IO_MACRO_Q(rx_edpkt_cfg); + +#define PKT_OFFLOAD_NOP (0 << 28) +#define PKT_OFFLOAD_HDR_OP (1 << 28) +#define PKT_OFFLOAD_HDR_WRBACK BIT(19) +#define PKT_OFFLOAD_HDR_COUNT(x) ((x) << 16) +#define PKT_OFFLOAD_HDR_SIZE_1(x) ((x) << 4) +#define PKT_OFFLOAD_HDR_SIZE_2(x) (x) +#define PKT_OFFLOAD_HDR2_SIZE_2(x) ((x) << 24) +#define PKT_OFFLOAD_HDR2_SIZE_3(x) ((x) << 12) +#define PKT_OFFLOAD_HDR2_SIZE_4(x) (x) +#define PKT_OFFLOAD_EPKT_OP (2 << 28) +#define PKT_OFFLOAD_EPKT_WRBACK BIT(23) +#define PKT_OFFLOAD_EPKT_IP(x) ((x) << 21) +#define PKT_OFFLOAD_EPKT_TP(x) ((x) << 19) +#define PKT_OFFLOAD_EPKT_LEN(x) ((x) << 16) +#define PKT_OFFLOAD_EPKT_CSUM_L3 BIT(15) +#define PKT_OFFLOAD_EPKT_CSUM_L2 BIT(14) +#define PKT_OFFLOAD_EPKT_ID(x) ((x) << 12) +#define PKT_OFFLOAD_EPKT_SEQ(x) ((x) << 10) +#define PKT_OFFLOAD_EPKT_TS(x) ((x) << 8) +#define PKT_OFFLOAD_EPKT_BLOC(x) (x) +#define PKT_OFFLOAD_END_OP (7 << 28) + +struct bcmasp_pkt_offload { + __be32 nop; + __be32 header; + __be32 header2; + __be32 epkt; + __be32 end; +}; + +#define BCMASP_CORE_IO_MACRO(name, offset) \ +static inline u32 name##_core_rl(struct bcmasp_priv *priv, \ + u32 off) \ +{ \ + u32 reg = readl_relaxed(priv->base + (offset) + off); \ + return reg; \ +} \ +static inline void name##_core_wl(struct bcmasp_priv *priv, \ + u32 val, u32 off) \ +{ \ + writel_relaxed(val, priv->base + (offset) + off); \ +} + +BCMASP_CORE_IO_MACRO(intr2, ASP_INTR2_OFFSET); +BCMASP_CORE_IO_MACRO(wakeup_intr2, ASP_WAKEUP_INTR2_OFFSET); +BCMASP_CORE_IO_MACRO(tx_analytics, ASP_TX_ANALYTICS_OFFSET); +BCMASP_CORE_IO_MACRO(rx_analytics, ASP_RX_ANALYTICS_OFFSET); +BCMASP_CORE_IO_MACRO(rx_ctrl, ASP_RX_CTRL_OFFSET); +BCMASP_CORE_IO_MACRO(rx_filter, ASP_RX_FILTER_OFFSET); +BCMASP_CORE_IO_MACRO(rx_edpkt, ASP_EDPKT_OFFSET); +BCMASP_CORE_IO_MACRO(ctrl, ASP_CTRL); + +struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, + struct device_node *ndev_dn, int i); + +void bcmasp_interface_destroy(struct bcmasp_intf *intf); + +void bcmasp_enable_tx_irq(struct bcmasp_intf *intf, int en); + +void bcmasp_enable_rx_irq(struct bcmasp_intf *intf, int en); + +void bcmasp_flush_rx_port(struct bcmasp_intf *intf); + +extern const struct ethtool_ops bcmasp_ethtool_ops; + +int bcmasp_interface_suspend(struct bcmasp_intf *intf); + +int bcmasp_interface_resume(struct bcmasp_intf *intf); + +void bcmasp_set_promisc(struct bcmasp_intf *intf, bool en); + +void bcmasp_set_allmulti(struct bcmasp_intf *intf, bool en); + +void bcmasp_set_broad(struct bcmasp_intf *intf, bool en); + +void bcmasp_set_oaddr(struct bcmasp_intf *intf, const unsigned char *addr, + bool en); + +int bcmasp_set_en_mda_filter(struct bcmasp_intf *intf, unsigned char *addr, + unsigned char *mask); + +void bcmasp_disable_all_filters(struct bcmasp_intf *intf); + +void bcmasp_core_clock_set_intf(struct bcmasp_intf *intf, bool en); + +struct bcmasp_net_filter *bcmasp_netfilt_get_init(struct bcmasp_intf *intf, + u32 loc, bool wake_filter, + bool init); + +bool bcmasp_netfilt_check_dup(struct bcmasp_intf *intf, + struct 
ethtool_rx_flow_spec *fs); + +void bcmasp_netfilt_release(struct bcmasp_intf *intf, + struct bcmasp_net_filter *nfilt); + +int bcmasp_netfilt_get_active(struct bcmasp_intf *intf); + +void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs, + u32 *rule_cnt); + +void bcmasp_netfilt_suspend(struct bcmasp_intf *intf); + +void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable); +#endif diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c new file mode 100644 index 000000000000..c4f1604d5ab3 --- /dev/null +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) "bcmasp_ethtool: " fmt + +#include <asm-generic/unaligned.h> +#include <linux/ethtool.h> +#include <linux/netdevice.h> +#include <linux/platform_device.h> + +#include "bcmasp.h" +#include "bcmasp_intf_defs.h" + +enum bcmasp_stat_type { + BCMASP_STAT_RX_EDPKT, + BCMASP_STAT_RX_CTRL, + BCMASP_STAT_RX_CTRL_PER_INTF, + BCMASP_STAT_SOFT, +}; + +struct bcmasp_stats { + char stat_string[ETH_GSTRING_LEN]; + enum bcmasp_stat_type type; + u32 reg_offset; +}; + +#define STAT_BCMASP_SOFT_MIB(str) { \ + .stat_string = str, \ + .type = BCMASP_STAT_SOFT, \ +} + +#define STAT_BCMASP_OFFSET(str, _type, offset) { \ + .stat_string = str, \ + .type = _type, \ + .reg_offset = offset, \ +} + +#define STAT_BCMASP_RX_EDPKT(str, offset) \ + STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_EDPKT, offset) +#define STAT_BCMASP_RX_CTRL(str, offset) \ + STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL, offset) +#define STAT_BCMASP_RX_CTRL_PER_INTF(str, offset) \ + STAT_BCMASP_OFFSET(str, BCMASP_STAT_RX_CTRL_PER_INTF, offset) + +/* Must match the order of struct bcmasp_mib_counters */ +static const struct bcmasp_stats bcmasp_gstrings_stats[] = { + /* EDPKT counters */ + STAT_BCMASP_RX_EDPKT("RX Time Stamp", ASP_EDPKT_RX_TS_COUNTER), + STAT_BCMASP_RX_EDPKT("RX PKT Count", ASP_EDPKT_RX_PKT_CNT), + STAT_BCMASP_RX_EDPKT("RX PKT Buffered", ASP_EDPKT_HDR_EXTR_CNT), + STAT_BCMASP_RX_EDPKT("RX PKT Pushed to DRAM", ASP_EDPKT_HDR_OUT_CNT), + /* ASP RX control */ + STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Unimac", + ASP_RX_CTRL_UMAC_0_FRAME_COUNT), + STAT_BCMASP_RX_CTRL_PER_INTF("Frames From Port", + ASP_RX_CTRL_FB_0_FRAME_COUNT), + STAT_BCMASP_RX_CTRL_PER_INTF("RX Buffer FIFO Depth", + ASP_RX_CTRL_FB_RX_FIFO_DEPTH), + STAT_BCMASP_RX_CTRL("Frames Out(Buffer)", + ASP_RX_CTRL_FB_OUT_FRAME_COUNT), + STAT_BCMASP_RX_CTRL("Frames Out(Filters)", + ASP_RX_CTRL_FB_FILT_OUT_FRAME_COUNT), + /* Software maintained statistics */ + STAT_BCMASP_SOFT_MIB("RX SKB Alloc Failed"), + STAT_BCMASP_SOFT_MIB("TX DMA Failed"), + STAT_BCMASP_SOFT_MIB("Multicast Filters Full"), + STAT_BCMASP_SOFT_MIB("Unicast Filters Full"), + STAT_BCMASP_SOFT_MIB("MDA Filters Combined"), + STAT_BCMASP_SOFT_MIB("Promisc Filter Set"), + STAT_BCMASP_SOFT_MIB("TX Realloc For Offload Failed"), + STAT_BCMASP_SOFT_MIB("Tx Timeout Count"), +}; + +#define BCMASP_STATS_LEN ARRAY_SIZE(bcmasp_gstrings_stats) + +static u16 bcmasp_stat_fixup_offset(struct bcmasp_intf *intf, + const struct bcmasp_stats *s) +{ + struct bcmasp_priv *priv = intf->parent; + + if (!strcmp("Frames Out(Buffer)", s->stat_string)) + return priv->hw_info->rx_ctrl_fb_out_frame_count; + + if (!strcmp("Frames Out(Filters)", s->stat_string)) + return priv->hw_info->rx_ctrl_fb_filt_out_frame_count; + + if (!strcmp("RX Buffer FIFO Depth", s->stat_string)) + return priv->hw_info->rx_ctrl_fb_rx_fifo_depth; + + 
return s->reg_offset; +} + +static int bcmasp_get_sset_count(struct net_device *dev, int string_set) +{ + switch (string_set) { + case ETH_SS_STATS: + return BCMASP_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void bcmasp_get_strings(struct net_device *dev, u32 stringset, + u8 *data) +{ + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCMASP_STATS_LEN; i++) { + memcpy(data + i * ETH_GSTRING_LEN, + bcmasp_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + default: + return; + } +} + +static void bcmasp_update_mib_counters(struct bcmasp_intf *intf) +{ + unsigned int i; + + for (i = 0; i < BCMASP_STATS_LEN; i++) { + const struct bcmasp_stats *s; + u32 offset, val; + char *p; + + s = &bcmasp_gstrings_stats[i]; + offset = bcmasp_stat_fixup_offset(intf, s); + switch (s->type) { + case BCMASP_STAT_SOFT: + continue; + case BCMASP_STAT_RX_EDPKT: + val = rx_edpkt_core_rl(intf->parent, offset); + break; + case BCMASP_STAT_RX_CTRL: + val = rx_ctrl_core_rl(intf->parent, offset); + break; + case BCMASP_STAT_RX_CTRL_PER_INTF: + offset += sizeof(u32) * intf->port; + val = rx_ctrl_core_rl(intf->parent, offset); + break; + default: + continue; + } + p = (char *)(&intf->mib) + (i * sizeof(u32)); + put_unaligned(val, (u32 *)p); + } +} + +static void bcmasp_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + unsigned int i; + char *p; + + if (netif_running(dev)) + bcmasp_update_mib_counters(intf); + + for (i = 0; i < BCMASP_STATS_LEN; i++) { + p = (char *)(&intf->mib) + (i * sizeof(u32)); + data[i] = *(u32 *)p; + } +} + +static void bcmasp_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, "bcmasp", sizeof(info->driver)); + strscpy(info->bus_info, dev_name(dev->dev.parent), + sizeof(info->bus_info)); +} + +static u32 bcmasp_get_msglevel(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + return intf->msg_enable; +} + +static void bcmasp_set_msglevel(struct net_device *dev, u32 level) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + intf->msg_enable = level; +} + +#define BCMASP_SUPPORTED_WAKE (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER) +static void bcmasp_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + wol->supported = BCMASP_SUPPORTED_WAKE; + wol->wolopts = intf->wolopts; + memset(wol->sopass, 0, sizeof(wol->sopass)); + + if (wol->wolopts & WAKE_MAGICSECURE) + memcpy(wol->sopass, intf->sopass, sizeof(intf->sopass)); +} + +static int bcmasp_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct bcmasp_priv *priv = intf->parent; + struct device *kdev = &priv->pdev->dev; + + if (!device_can_wakeup(kdev)) + return -EOPNOTSUPP; + + /* Interface Specific */ + intf->wolopts = wol->wolopts; + if (intf->wolopts & WAKE_MAGICSECURE) + memcpy(intf->sopass, wol->sopass, sizeof(wol->sopass)); + + mutex_lock(&priv->wol_lock); + priv->enable_wol(intf, !!intf->wolopts); + mutex_unlock(&priv->wol_lock); + + return 0; +} + +static int bcmasp_flow_insert(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct bcmasp_net_filter *nfilter; + u32 loc = cmd->fs.location; + bool wake = false; + + if (cmd->fs.ring_cookie == RX_CLS_FLOW_WAKE) + wake = true; + + /* Currently only supports WAKE filters */ + if (!wake) + return 
-EOPNOTSUPP; + + switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + case ETHER_FLOW: + case IP_USER_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + break; + default: + return -EOPNOTSUPP; + } + + /* Check if filter already exists */ + if (bcmasp_netfilt_check_dup(intf, &cmd->fs)) + return -EINVAL; + + nfilter = bcmasp_netfilt_get_init(intf, loc, wake, true); + if (IS_ERR(nfilter)) + return PTR_ERR(nfilter); + + /* Return the location where we did insert the filter */ + cmd->fs.location = nfilter->hw_index; + memcpy(&nfilter->fs, &cmd->fs, sizeof(struct ethtool_rx_flow_spec)); + + /* Since we only support wake filters, defer register programming till + * suspend time. + */ + return 0; +} + +static int bcmasp_flow_delete(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct bcmasp_net_filter *nfilter; + + nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false); + if (IS_ERR(nfilter)) + return PTR_ERR(nfilter); + + bcmasp_netfilt_release(intf, nfilter); + + return 0; +} + +static int bcmasp_flow_get(struct bcmasp_intf *intf, struct ethtool_rxnfc *cmd) +{ + struct bcmasp_net_filter *nfilter; + + nfilter = bcmasp_netfilt_get_init(intf, cmd->fs.location, false, false); + if (IS_ERR(nfilter)) + return PTR_ERR(nfilter); + + memcpy(&cmd->fs, &nfilter->fs, sizeof(nfilter->fs)); + + cmd->data = NUM_NET_FILTERS; + + return 0; +} + +static int bcmasp_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + mutex_lock(&intf->parent->net_lock); + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = bcmasp_flow_insert(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = bcmasp_flow_delete(dev, cmd); + break; + default: + break; + } + + mutex_unlock(&intf->parent->net_lock); + + return ret; +} + +static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + int err = 0; + + mutex_lock(&intf->parent->net_lock); + + switch (cmd->cmd) { + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = bcmasp_netfilt_get_active(intf); + /* We support specifying rule locations */ + cmd->data |= RX_CLS_LOC_SPECIAL; + break; + case ETHTOOL_GRXCLSRULE: + err = bcmasp_flow_get(intf, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt); + cmd->data = NUM_NET_FILTERS; + break; + default: + err = -EOPNOTSUPP; + break; + } + + mutex_unlock(&intf->parent->net_lock); + + return err; +} + +void bcmasp_eee_enable_set(struct bcmasp_intf *intf, bool enable) +{ + u32 reg; + + reg = umac_rl(intf, UMC_EEE_CTRL); + if (enable) + reg |= EEE_EN; + else + reg &= ~EEE_EN; + umac_wl(intf, reg, UMC_EEE_CTRL); + + intf->eee.eee_enabled = enable; + intf->eee.eee_active = enable; +} + +static int bcmasp_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct ethtool_eee *p = &intf->eee; + + if (!dev->phydev) + return -ENODEV; + + e->eee_enabled = p->eee_enabled; + e->eee_active = p->eee_active; + e->tx_lpi_enabled = p->tx_lpi_enabled; + e->tx_lpi_timer = umac_rl(intf, UMC_EEE_LPI_TIMER); + + return phy_ethtool_get_eee(dev->phydev, e); +} + +static int bcmasp_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct ethtool_eee *p = &intf->eee; + int ret; + + if (!dev->phydev) + return -ENODEV; + + 
if (!p->eee_enabled) { + bcmasp_eee_enable_set(intf, false); + } else { + ret = phy_init_eee(dev->phydev, 0); + if (ret) { + netif_err(intf, hw, dev, + "EEE initialization failed: %d\n", ret); + return ret; + } + + umac_wl(intf, e->tx_lpi_timer, UMC_EEE_LPI_TIMER); + intf->eee.eee_active = ret >= 0; + intf->eee.tx_lpi_enabled = e->tx_lpi_enabled; + bcmasp_eee_enable_set(intf, true); + } + + return phy_ethtool_set_eee(dev->phydev, e); +} + +static void bcmasp_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + mac_stats->FramesTransmittedOK = umac_rl(intf, UMC_GTPOK); + mac_stats->SingleCollisionFrames = umac_rl(intf, UMC_GTSCL); + mac_stats->MultipleCollisionFrames = umac_rl(intf, UMC_GTMCL); + mac_stats->FramesReceivedOK = umac_rl(intf, UMC_GRPOK); + mac_stats->FrameCheckSequenceErrors = umac_rl(intf, UMC_GRFCS); + mac_stats->AlignmentErrors = umac_rl(intf, UMC_GRALN); + mac_stats->OctetsTransmittedOK = umac_rl(intf, UMC_GTBYT); + mac_stats->FramesWithDeferredXmissions = umac_rl(intf, UMC_GTDRF); + mac_stats->LateCollisions = umac_rl(intf, UMC_GTLCL); + mac_stats->FramesAbortedDueToXSColls = umac_rl(intf, UMC_GTXCL); + mac_stats->OctetsReceivedOK = umac_rl(intf, UMC_GRBYT); + mac_stats->MulticastFramesXmittedOK = umac_rl(intf, UMC_GTMCA); + mac_stats->BroadcastFramesXmittedOK = umac_rl(intf, UMC_GTBCA); + mac_stats->FramesWithExcessiveDeferral = umac_rl(intf, UMC_GTEDF); + mac_stats->MulticastFramesReceivedOK = umac_rl(intf, UMC_GRMCA); + mac_stats->BroadcastFramesReceivedOK = umac_rl(intf, UMC_GRBCA); +} + +static const struct ethtool_rmon_hist_range bcmasp_rmon_ranges[] = { + { 0, 64}, + { 65, 127}, + { 128, 255}, + { 256, 511}, + { 512, 1023}, + { 1024, 1518}, + { 1519, 1522}, + {} +}; + +static void bcmasp_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + *ranges = bcmasp_rmon_ranges; + + rmon_stats->undersize_pkts = umac_rl(intf, UMC_RRUND); + rmon_stats->oversize_pkts = umac_rl(intf, UMC_GROVR); + rmon_stats->fragments = umac_rl(intf, UMC_RRFRG); + rmon_stats->jabbers = umac_rl(intf, UMC_GRJBR); + + rmon_stats->hist[0] = umac_rl(intf, UMC_GR64); + rmon_stats->hist[1] = umac_rl(intf, UMC_GR127); + rmon_stats->hist[2] = umac_rl(intf, UMC_GR255); + rmon_stats->hist[3] = umac_rl(intf, UMC_GR511); + rmon_stats->hist[4] = umac_rl(intf, UMC_GR1023); + rmon_stats->hist[5] = umac_rl(intf, UMC_GR1518); + rmon_stats->hist[6] = umac_rl(intf, UMC_GRMGV); + + rmon_stats->hist_tx[0] = umac_rl(intf, UMC_TR64); + rmon_stats->hist_tx[1] = umac_rl(intf, UMC_TR127); + rmon_stats->hist_tx[2] = umac_rl(intf, UMC_TR255); + rmon_stats->hist_tx[3] = umac_rl(intf, UMC_TR511); + rmon_stats->hist_tx[4] = umac_rl(intf, UMC_TR1023); + rmon_stats->hist_tx[5] = umac_rl(intf, UMC_TR1518); + rmon_stats->hist_tx[6] = umac_rl(intf, UMC_TRMGV); +} + +static void bcmasp_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + ctrl_stats->MACControlFramesTransmitted = umac_rl(intf, UMC_GTXCF); + ctrl_stats->MACControlFramesReceived = umac_rl(intf, UMC_GRXCF); + ctrl_stats->UnsupportedOpcodesReceived = umac_rl(intf, UMC_GRXUO); +} + +const struct ethtool_ops bcmasp_ethtool_ops = { + .get_drvinfo = bcmasp_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + 
.set_link_ksettings = phy_ethtool_set_link_ksettings, + .get_msglevel = bcmasp_get_msglevel, + .set_msglevel = bcmasp_set_msglevel, + .get_wol = bcmasp_get_wol, + .set_wol = bcmasp_set_wol, + .get_rxnfc = bcmasp_get_rxnfc, + .set_rxnfc = bcmasp_set_rxnfc, + .set_eee = bcmasp_set_eee, + .get_eee = bcmasp_get_eee, + .get_eth_mac_stats = bcmasp_get_eth_mac_stats, + .get_rmon_stats = bcmasp_get_rmon_stats, + .get_eth_ctrl_stats = bcmasp_get_eth_ctrl_stats, + .get_strings = bcmasp_get_strings, + .get_ethtool_stats = bcmasp_get_ethtool_stats, + .get_sset_count = bcmasp_get_sset_count, +}; diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c new file mode 100644 index 000000000000..53e542881255 --- /dev/null +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -0,0 +1,1415 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) "bcmasp_intf: " fmt + +#include <asm/byteorder.h> +#include <linux/brcmphy.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/of_net.h> +#include <linux/of_mdio.h> +#include <linux/phy.h> +#include <linux/phy_fixed.h> +#include <linux/ptp_classify.h> +#include <linux/platform_device.h> +#include <net/ip.h> +#include <net/ipv6.h> + +#include "bcmasp.h" +#include "bcmasp_intf_defs.h" + +static int incr_ring(int index, int ring_count) +{ + index++; + if (index == ring_count) + return 0; + + return index; +} + +/* Points to last byte of descriptor */ +static dma_addr_t incr_last_byte(dma_addr_t addr, dma_addr_t beg, + int ring_count) +{ + dma_addr_t end = beg + (ring_count * DESC_SIZE); + + addr += DESC_SIZE; + if (addr > end) + return beg + DESC_SIZE - 1; + + return addr; +} + +/* Points to first byte of descriptor */ +static dma_addr_t incr_first_byte(dma_addr_t addr, dma_addr_t beg, + int ring_count) +{ + dma_addr_t end = beg + (ring_count * DESC_SIZE); + + addr += DESC_SIZE; + if (addr >= end) + return beg; + + return addr; +} + +static void bcmasp_enable_tx(struct bcmasp_intf *intf, int en) +{ + if (en) { + tx_spb_ctrl_wl(intf, TX_SPB_CTRL_ENABLE_EN, TX_SPB_CTRL_ENABLE); + tx_epkt_core_wl(intf, (TX_EPKT_C_CFG_MISC_EN | + TX_EPKT_C_CFG_MISC_PT | + (intf->port << TX_EPKT_C_CFG_MISC_PS_SHIFT)), + TX_EPKT_C_CFG_MISC); + } else { + tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE); + tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC); + } +} + +static void bcmasp_enable_rx(struct bcmasp_intf *intf, int en) +{ + if (en) + rx_edpkt_cfg_wl(intf, RX_EDPKT_CFG_ENABLE_EN, + RX_EDPKT_CFG_ENABLE); + else + rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE); +} + +static void bcmasp_set_rx_mode(struct net_device *dev) +{ + unsigned char mask[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; + struct bcmasp_intf *intf = netdev_priv(dev); + struct netdev_hw_addr *ha; + int ret; + + spin_lock_bh(&intf->parent->mda_lock); + + bcmasp_disable_all_filters(intf); + + if (dev->flags & IFF_PROMISC) + goto set_promisc; + + bcmasp_set_promisc(intf, 0); + + bcmasp_set_broad(intf, 1); + + bcmasp_set_oaddr(intf, dev->dev_addr, 1); + + if (dev->flags & IFF_ALLMULTI) { + bcmasp_set_allmulti(intf, 1); + } else { + bcmasp_set_allmulti(intf, 0); + + netdev_for_each_mc_addr(ha, dev) { + ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask); + if (ret) { + intf->mib.mc_filters_full_cnt++; + goto set_promisc; + } + } + } + + netdev_for_each_uc_addr(ha, dev) { + ret = bcmasp_set_en_mda_filter(intf, ha->addr, mask); + if (ret) { + intf->mib.uc_filters_full_cnt++; + goto 
set_promisc; + } + } + + spin_unlock_bh(&intf->parent->mda_lock); + return; + +set_promisc: + bcmasp_set_promisc(intf, 1); + intf->mib.promisc_filters_cnt++; + + /* disable all filters used by this port */ + bcmasp_disable_all_filters(intf); + + spin_unlock_bh(&intf->parent->mda_lock); +} + +static void bcmasp_clean_txcb(struct bcmasp_intf *intf, int index) +{ + struct bcmasp_tx_cb *txcb = &intf->tx_cbs[index]; + + txcb->skb = NULL; + dma_unmap_addr_set(txcb, dma_addr, 0); + dma_unmap_len_set(txcb, dma_len, 0); + txcb->last = false; +} + +static int tx_spb_ring_full(struct bcmasp_intf *intf, int cnt) +{ + int next_index, i; + + /* Check if we have enough room for cnt descriptors */ + for (i = 0; i < cnt; i++) { + next_index = incr_ring(intf->tx_spb_index, DESC_RING_COUNT); + if (next_index == intf->tx_spb_clean_index) + return 1; + } + + return 0; +} + +static struct sk_buff *bcmasp_csum_offload(struct net_device *dev, + struct sk_buff *skb, + bool *csum_hw) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + u32 header = 0, header2 = 0, epkt = 0; + struct bcmasp_pkt_offload *offload; + unsigned int header_cnt = 0; + u8 ip_proto; + int ret; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return skb; + + ret = skb_cow_head(skb, sizeof(*offload)); + if (ret < 0) { + intf->mib.tx_realloc_offload_failed++; + goto help; + } + + switch (skb->protocol) { + case htons(ETH_P_IP): + header |= PKT_OFFLOAD_HDR_SIZE_2((ip_hdrlen(skb) >> 8) & 0xf); + header2 |= PKT_OFFLOAD_HDR2_SIZE_2(ip_hdrlen(skb) & 0xff); + epkt |= PKT_OFFLOAD_EPKT_IP(0) | PKT_OFFLOAD_EPKT_CSUM_L2; + ip_proto = ip_hdr(skb)->protocol; + header_cnt += 2; + break; + case htons(ETH_P_IPV6): + header |= PKT_OFFLOAD_HDR_SIZE_2((IP6_HLEN >> 8) & 0xf); + header2 |= PKT_OFFLOAD_HDR2_SIZE_2(IP6_HLEN & 0xff); + epkt |= PKT_OFFLOAD_EPKT_IP(1) | PKT_OFFLOAD_EPKT_CSUM_L2; + ip_proto = ipv6_hdr(skb)->nexthdr; + header_cnt += 2; + break; + default: + goto help; + } + + switch (ip_proto) { + case IPPROTO_TCP: + header2 |= PKT_OFFLOAD_HDR2_SIZE_3(tcp_hdrlen(skb)); + epkt |= PKT_OFFLOAD_EPKT_TP(0) | PKT_OFFLOAD_EPKT_CSUM_L3; + header_cnt++; + break; + case IPPROTO_UDP: + header2 |= PKT_OFFLOAD_HDR2_SIZE_3(UDP_HLEN); + epkt |= PKT_OFFLOAD_EPKT_TP(1) | PKT_OFFLOAD_EPKT_CSUM_L3; + header_cnt++; + break; + default: + goto help; + } + + offload = (struct bcmasp_pkt_offload *)skb_push(skb, sizeof(*offload)); + + header |= PKT_OFFLOAD_HDR_OP | PKT_OFFLOAD_HDR_COUNT(header_cnt) | + PKT_OFFLOAD_HDR_SIZE_1(ETH_HLEN); + epkt |= PKT_OFFLOAD_EPKT_OP; + + offload->nop = htonl(PKT_OFFLOAD_NOP); + offload->header = htonl(header); + offload->header2 = htonl(header2); + offload->epkt = htonl(epkt); + offload->end = htonl(PKT_OFFLOAD_END_OP); + *csum_hw = true; + + return skb; + +help: + skb_checksum_help(skb); + + return skb; +} + +static unsigned long bcmasp_rx_edpkt_dma_rq(struct bcmasp_intf *intf) +{ + return rx_edpkt_dma_rq(intf, RX_EDPKT_DMA_VALID); +} + +static void bcmasp_rx_edpkt_cfg_wq(struct bcmasp_intf *intf, dma_addr_t addr) +{ + rx_edpkt_cfg_wq(intf, addr, RX_EDPKT_RING_BUFFER_READ); +} + +static void bcmasp_rx_edpkt_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr) +{ + rx_edpkt_dma_wq(intf, addr, RX_EDPKT_DMA_READ); +} + +static unsigned long bcmasp_tx_spb_dma_rq(struct bcmasp_intf *intf) +{ + return tx_spb_dma_rq(intf, TX_SPB_DMA_READ); +} + +static void bcmasp_tx_spb_dma_wq(struct bcmasp_intf *intf, dma_addr_t addr) +{ + tx_spb_dma_wq(intf, addr, TX_SPB_DMA_VALID); +} + +static const struct bcmasp_intf_ops bcmasp_intf_ops = { + .rx_desc_read = 
bcmasp_rx_edpkt_dma_rq, + .rx_buffer_write = bcmasp_rx_edpkt_cfg_wq, + .rx_desc_write = bcmasp_rx_edpkt_dma_wq, + .tx_read = bcmasp_tx_spb_dma_rq, + .tx_write = bcmasp_tx_spb_dma_wq, +}; + +static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + unsigned int total_bytes, size; + int spb_index, nr_frags, i, j; + struct bcmasp_tx_cb *txcb; + dma_addr_t mapping, valid; + struct bcmasp_desc *desc; + bool csum_hw = false; + struct device *kdev; + skb_frag_t *frag; + + kdev = &intf->parent->pdev->dev; + + nr_frags = skb_shinfo(skb)->nr_frags; + + if (tx_spb_ring_full(intf, nr_frags + 1)) { + netif_stop_queue(dev); + if (net_ratelimit()) + netdev_err(dev, "Tx Ring Full!\n"); + return NETDEV_TX_BUSY; + } + + /* Save skb len before adding csum offload header */ + total_bytes = skb->len; + skb = bcmasp_csum_offload(dev, skb, &csum_hw); + if (!skb) + return NETDEV_TX_OK; + + spb_index = intf->tx_spb_index; + valid = intf->tx_spb_dma_valid; + for (i = 0; i <= nr_frags; i++) { + if (!i) { + size = skb_headlen(skb); + if (!nr_frags && size < (ETH_ZLEN + ETH_FCS_LEN)) { + if (skb_put_padto(skb, ETH_ZLEN + ETH_FCS_LEN)) + return NETDEV_TX_OK; + size = skb->len; + } + mapping = dma_map_single(kdev, skb->data, size, + DMA_TO_DEVICE); + } else { + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + mapping = skb_frag_dma_map(kdev, frag, 0, size, + DMA_TO_DEVICE); + } + + if (dma_mapping_error(kdev, mapping)) { + intf->mib.tx_dma_failed++; + spb_index = intf->tx_spb_index; + for (j = 0; j < i; j++) { + bcmasp_clean_txcb(intf, spb_index); + spb_index = incr_ring(spb_index, + DESC_RING_COUNT); + } + /* Rewind so we do not have a hole */ + spb_index = intf->tx_spb_index; + return NETDEV_TX_OK; + } + + txcb = &intf->tx_cbs[spb_index]; + desc = &intf->tx_spb_cpu[spb_index]; + memset(desc, 0, sizeof(*desc)); + txcb->skb = skb; + txcb->bytes_sent = total_bytes; + dma_unmap_addr_set(txcb, dma_addr, mapping); + dma_unmap_len_set(txcb, dma_len, size); + if (!i) { + desc->flags |= DESC_SOF; + if (csum_hw) + desc->flags |= DESC_EPKT_CMD; + } + + if (i == nr_frags) { + desc->flags |= DESC_EOF; + txcb->last = true; + } + + desc->buf = mapping; + desc->size = size; + desc->flags |= DESC_INT_EN; + + netif_dbg(intf, tx_queued, dev, + "%s dma_buf=%pad dma_len=0x%x flags=0x%x index=0x%x\n", + __func__, &mapping, desc->size, desc->flags, + spb_index); + + spb_index = incr_ring(spb_index, DESC_RING_COUNT); + valid = incr_last_byte(valid, intf->tx_spb_dma_addr, + DESC_RING_COUNT); + } + + /* Ensure all descriptors have been written to DRAM for the + * hardware to see up-to-date contents. 
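+ * The descriptor writes must be visible before the TX_SPB_DMA_VALID
+ * (doorbell) pointer is advanced below.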
+ */ + wmb(); + + intf->tx_spb_index = spb_index; + intf->tx_spb_dma_valid = valid; + bcmasp_intf_tx_write(intf, intf->tx_spb_dma_valid); + + if (tx_spb_ring_full(intf, MAX_SKB_FRAGS + 1)) + netif_stop_queue(dev); + + return NETDEV_TX_OK; +} + +static void bcmasp_netif_start(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + bcmasp_set_rx_mode(dev); + napi_enable(&intf->tx_napi); + napi_enable(&intf->rx_napi); + + bcmasp_enable_rx_irq(intf, 1); + bcmasp_enable_tx_irq(intf, 1); + + phy_start(dev->phydev); +} + +static void umac_reset(struct bcmasp_intf *intf) +{ + umac_wl(intf, 0x0, UMC_CMD); + umac_wl(intf, UMC_CMD_SW_RESET, UMC_CMD); + usleep_range(10, 100); + umac_wl(intf, 0x0, UMC_CMD); +} + +static void umac_set_hw_addr(struct bcmasp_intf *intf, + const unsigned char *addr) +{ + u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | + addr[3]; + u32 mac1 = (addr[4] << 8) | addr[5]; + + umac_wl(intf, mac0, UMC_MAC0); + umac_wl(intf, mac1, UMC_MAC1); +} + +static void umac_enable_set(struct bcmasp_intf *intf, u32 mask, + unsigned int enable) +{ + u32 reg; + + reg = umac_rl(intf, UMC_CMD); + if (enable) + reg |= mask; + else + reg &= ~mask; + umac_wl(intf, reg, UMC_CMD); + + /* UniMAC stops on a packet boundary, wait for a full-sized packet + * to be processed (1 msec). + */ + if (enable == 0) + usleep_range(1000, 2000); +} + +static void umac_init(struct bcmasp_intf *intf) +{ + umac_wl(intf, 0x800, UMC_FRM_LEN); + umac_wl(intf, 0xffff, UMC_PAUSE_CNTRL); + umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ); + umac_enable_set(intf, UMC_CMD_PROMISC, 1); +} + +static int bcmasp_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmasp_intf *intf = + container_of(napi, struct bcmasp_intf, tx_napi); + struct bcmasp_intf_stats64 *stats = &intf->stats64; + struct device *kdev = &intf->parent->pdev->dev; + unsigned long read, released = 0; + struct bcmasp_tx_cb *txcb; + struct bcmasp_desc *desc; + dma_addr_t mapping; + + read = bcmasp_intf_tx_read(intf); + while (intf->tx_spb_dma_read != read) { + txcb = &intf->tx_cbs[intf->tx_spb_clean_index]; + mapping = dma_unmap_addr(txcb, dma_addr); + + dma_unmap_single(kdev, mapping, + dma_unmap_len(txcb, dma_len), + DMA_TO_DEVICE); + + if (txcb->last) { + dev_consume_skb_any(txcb->skb); + + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_packets); + u64_stats_add(&stats->tx_bytes, txcb->bytes_sent); + u64_stats_update_end(&stats->syncp); + } + + desc = &intf->tx_spb_cpu[intf->tx_spb_clean_index]; + + netif_dbg(intf, tx_done, intf->ndev, + "%s dma_buf=%pad dma_len=0x%x flags=0x%x c_index=0x%x\n", + __func__, &mapping, desc->size, desc->flags, + intf->tx_spb_clean_index); + + bcmasp_clean_txcb(intf, intf->tx_spb_clean_index); + released++; + + intf->tx_spb_clean_index = incr_ring(intf->tx_spb_clean_index, + DESC_RING_COUNT); + intf->tx_spb_dma_read = incr_first_byte(intf->tx_spb_dma_read, + intf->tx_spb_dma_addr, + DESC_RING_COUNT); + } + + /* Ensure all descriptors have been written to DRAM for the hardware + * to see updated contents. 
+ */ + wmb(); + + napi_complete(&intf->tx_napi); + + bcmasp_enable_tx_irq(intf, 1); + + if (released) + netif_wake_queue(intf->ndev); + + return 0; +} + +static int bcmasp_rx_poll(struct napi_struct *napi, int budget) +{ + struct bcmasp_intf *intf = + container_of(napi, struct bcmasp_intf, rx_napi); + struct bcmasp_intf_stats64 *stats = &intf->stats64; + struct device *kdev = &intf->parent->pdev->dev; + unsigned long processed = 0; + struct bcmasp_desc *desc; + struct sk_buff *skb; + dma_addr_t valid; + void *data; + u64 flags; + u32 len; + + valid = bcmasp_intf_rx_desc_read(intf) + 1; + if (valid == intf->rx_edpkt_dma_addr + DESC_RING_SIZE) + valid = intf->rx_edpkt_dma_addr; + + while ((processed < budget) && (valid != intf->rx_edpkt_dma_read)) { + desc = &intf->rx_edpkt_cpu[intf->rx_edpkt_index]; + + /* Ensure that descriptor has been fully written to DRAM by + * hardware before reading by the CPU + */ + rmb(); + + /* Calculate virt addr by offsetting from physical addr */ + data = intf->rx_ring_cpu + + (DESC_ADDR(desc->buf) - intf->rx_ring_dma); + + flags = DESC_FLAGS(desc->buf); + if (unlikely(flags & (DESC_CRC_ERR | DESC_RX_SYM_ERR))) { + if (net_ratelimit()) { + netif_err(intf, rx_status, intf->ndev, + "flags=0x%llx\n", flags); + } + + u64_stats_update_begin(&stats->syncp); + if (flags & DESC_CRC_ERR) + u64_stats_inc(&stats->rx_crc_errs); + if (flags & DESC_RX_SYM_ERR) + u64_stats_inc(&stats->rx_sym_errs); + u64_stats_update_end(&stats->syncp); + + goto next; + } + + dma_sync_single_for_cpu(kdev, DESC_ADDR(desc->buf), desc->size, + DMA_FROM_DEVICE); + + len = desc->size; + + skb = napi_alloc_skb(napi, len); + if (!skb) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_dropped); + u64_stats_update_end(&stats->syncp); + intf->mib.alloc_rx_skb_failed++; + + goto next; + } + + skb_put(skb, len); + memcpy(skb->data, data, len); + + skb_pull(skb, 2); + len -= 2; + if (likely(intf->crc_fwd)) { + skb_trim(skb, len - ETH_FCS_LEN); + len -= ETH_FCS_LEN; + } + + if ((intf->ndev->features & NETIF_F_RXCSUM) && + (desc->buf & DESC_CHKSUM)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + skb->protocol = eth_type_trans(skb, intf->ndev); + + napi_gro_receive(napi, skb); + + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_packets); + u64_stats_add(&stats->rx_bytes, len); + u64_stats_update_end(&stats->syncp); + +next: + bcmasp_intf_rx_buffer_write(intf, (DESC_ADDR(desc->buf) + + desc->size)); + + processed++; + intf->rx_edpkt_dma_read = + incr_first_byte(intf->rx_edpkt_dma_read, + intf->rx_edpkt_dma_addr, + DESC_RING_COUNT); + intf->rx_edpkt_index = incr_ring(intf->rx_edpkt_index, + DESC_RING_COUNT); + } + + bcmasp_intf_rx_desc_write(intf, intf->rx_edpkt_dma_read); + + if (processed < budget) { + napi_complete_done(&intf->rx_napi, processed); + bcmasp_enable_rx_irq(intf, 1); + } + + return processed; +} + +static void bcmasp_adj_link(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct phy_device *phydev = dev->phydev; + u32 cmd_bits = 0, reg; + int changed = 0; + + if (intf->old_link != phydev->link) { + changed = 1; + intf->old_link = phydev->link; + } + + if (intf->old_duplex != phydev->duplex) { + changed = 1; + intf->old_duplex = phydev->duplex; + } + + switch (phydev->speed) { + case SPEED_2500: + cmd_bits = UMC_CMD_SPEED_2500; + break; + case SPEED_1000: + cmd_bits = UMC_CMD_SPEED_1000; + break; + case SPEED_100: + cmd_bits = UMC_CMD_SPEED_100; + break; + case SPEED_10: + cmd_bits = UMC_CMD_SPEED_10; + break; + default: + break; + 
} + cmd_bits <<= UMC_CMD_SPEED_SHIFT; + + if (phydev->duplex == DUPLEX_HALF) + cmd_bits |= UMC_CMD_HD_EN; + + if (intf->old_pause != phydev->pause) { + changed = 1; + intf->old_pause = phydev->pause; + } + + if (!phydev->pause) + cmd_bits |= UMC_CMD_RX_PAUSE_IGNORE | UMC_CMD_TX_PAUSE_IGNORE; + + if (!changed) + return; + + if (phydev->link) { + reg = umac_rl(intf, UMC_CMD); + reg &= ~((UMC_CMD_SPEED_MASK << UMC_CMD_SPEED_SHIFT) | + UMC_CMD_HD_EN | UMC_CMD_RX_PAUSE_IGNORE | + UMC_CMD_TX_PAUSE_IGNORE); + reg |= cmd_bits; + umac_wl(intf, reg, UMC_CMD); + + intf->eee.eee_active = phy_init_eee(phydev, 0) >= 0; + bcmasp_eee_enable_set(intf, intf->eee.eee_active); + } + + reg = rgmii_rl(intf, RGMII_OOB_CNTRL); + if (phydev->link) + reg |= RGMII_LINK; + else + reg &= ~RGMII_LINK; + rgmii_wl(intf, reg, RGMII_OOB_CNTRL); + + if (changed) + phy_print_status(phydev); +} + +static int bcmasp_init_rx(struct bcmasp_intf *intf) +{ + struct device *kdev = &intf->parent->pdev->dev; + struct page *buffer_pg; + dma_addr_t dma; + void *p; + u32 reg; + int ret; + + intf->rx_buf_order = get_order(RING_BUFFER_SIZE); + buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order); + + dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(kdev, dma)) { + __free_pages(buffer_pg, intf->rx_buf_order); + return -ENOMEM; + } + intf->rx_ring_cpu = page_to_virt(buffer_pg); + intf->rx_ring_dma = dma; + intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1; + + p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr, + GFP_KERNEL); + if (!p) { + ret = -ENOMEM; + goto free_rx_ring; + } + intf->rx_edpkt_cpu = p; + + netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll); + + intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr; + intf->rx_edpkt_index = 0; + + /* Make sure channels are disabled */ + rx_edpkt_cfg_wl(intf, 0x0, RX_EDPKT_CFG_ENABLE); + + /* Rx SPB */ + rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_READ); + rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_WRITE); + rx_edpkt_cfg_wq(intf, intf->rx_ring_dma, RX_EDPKT_RING_BUFFER_BASE); + rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid, + RX_EDPKT_RING_BUFFER_END); + rx_edpkt_cfg_wq(intf, intf->rx_ring_dma_valid, + RX_EDPKT_RING_BUFFER_VALID); + + /* EDPKT */ + rx_edpkt_cfg_wl(intf, (RX_EDPKT_CFG_CFG0_RBUF_4K << + RX_EDPKT_CFG_CFG0_DBUF_SHIFT) | + (RX_EDPKT_CFG_CFG0_64_ALN << + RX_EDPKT_CFG_CFG0_BALN_SHIFT) | + (RX_EDPKT_CFG_CFG0_EFRM_STUF), + RX_EDPKT_CFG_CFG0); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1), + RX_EDPKT_DMA_END); + rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1), + RX_EDPKT_DMA_VALID); + + reg = UMAC2FB_CFG_DEFAULT_EN | + ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT); + reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT); + umac2fb_wl(intf, reg, UMAC2FB_CFG); + + return 0; + +free_rx_ring: + dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE, + DMA_FROM_DEVICE); + __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order); + + return ret; +} + +static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf) +{ + struct device *kdev = &intf->parent->pdev->dev; + + dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu, + intf->rx_edpkt_dma_addr); + dma_unmap_page(kdev, intf->rx_ring_dma, 
RING_BUFFER_SIZE, + DMA_FROM_DEVICE); + __free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order); +} + +static int bcmasp_init_tx(struct bcmasp_intf *intf) +{ + struct device *kdev = &intf->parent->pdev->dev; + void *p; + int ret; + + p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr, + GFP_KERNEL); + if (!p) + return -ENOMEM; + + intf->tx_spb_cpu = p; + intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1; + intf->tx_spb_dma_read = intf->tx_spb_dma_addr; + + intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb), + GFP_KERNEL); + if (!intf->tx_cbs) { + ret = -ENOMEM; + goto free_tx_spb; + } + + intf->tx_spb_index = 0; + intf->tx_spb_clean_index = 0; + + netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll); + + /* Make sure channels are disabled */ + tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE); + tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC); + + /* Tx SPB */ + tx_spb_ctrl_wl(intf, ((intf->channel + 8) << TX_SPB_CTRL_XF_BID_SHIFT), + TX_SPB_CTRL_XF_CTRL2); + tx_pause_ctrl_wl(intf, (1 << (intf->channel + 8)), TX_PAUSE_MAP_VECTOR); + tx_spb_top_wl(intf, 0x1e, TX_SPB_TOP_BLKOUT); + tx_spb_top_wl(intf, 0x0, TX_SPB_TOP_SPRE_BW_CTRL); + + tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_READ); + tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE); + tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END); + tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID); + + return 0; + +free_tx_spb: + dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, + intf->tx_spb_dma_addr); + + return ret; +} + +static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf) +{ + struct device *kdev = &intf->parent->pdev->dev; + + /* Free descriptors */ + dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu, + intf->tx_spb_dma_addr); + + /* Free cbs */ + kfree(intf->tx_cbs); +} + +static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable) +{ + u32 mask = RGMII_EPHY_CFG_IDDQ_BIAS | RGMII_EPHY_CFG_EXT_PWRDOWN | + RGMII_EPHY_CFG_IDDQ_GLOBAL; + u32 reg; + + reg = rgmii_rl(intf, RGMII_EPHY_CNTRL); + if (enable) { + reg &= ~RGMII_EPHY_CK25_DIS; + rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); + mdelay(1); + + reg &= ~mask; + reg |= RGMII_EPHY_RESET; + rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); + mdelay(1); + + reg &= ~RGMII_EPHY_RESET; + } else { + reg |= mask | RGMII_EPHY_RESET; + rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); + mdelay(1); + reg |= RGMII_EPHY_CK25_DIS; + } + rgmii_wl(intf, reg, RGMII_EPHY_CNTRL); + mdelay(1); + + /* Set or clear the LED control override to avoid lighting up LEDs + * while the EPHY is powered off and drawing unnecessary current. 
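+ * The override is set when the EPHY is being disabled and cleared again
+ * when it is re-enabled.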
+ */ + reg = rgmii_rl(intf, RGMII_SYS_LED_CNTRL); + if (enable) + reg &= ~RGMII_SYS_LED_CNTRL_LINK_OVRD; + else + reg |= RGMII_SYS_LED_CNTRL_LINK_OVRD; + rgmii_wl(intf, reg, RGMII_SYS_LED_CNTRL); +} + +static void bcmasp_rgmii_mode_en_set(struct bcmasp_intf *intf, bool enable) +{ + u32 reg; + + reg = rgmii_rl(intf, RGMII_OOB_CNTRL); + reg &= ~RGMII_OOB_DIS; + if (enable) + reg |= RGMII_MODE_EN; + else + reg &= ~RGMII_MODE_EN; + rgmii_wl(intf, reg, RGMII_OOB_CNTRL); +} + +static void bcmasp_netif_deinit(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + u32 reg, timeout = 1000; + + napi_disable(&intf->tx_napi); + + bcmasp_enable_tx(intf, 0); + + /* Flush any TX packets in the pipe */ + tx_spb_dma_wl(intf, TX_SPB_DMA_FIFO_FLUSH, TX_SPB_DMA_FIFO_CTRL); + do { + reg = tx_spb_dma_rl(intf, TX_SPB_DMA_FIFO_STATUS); + if (!(reg & TX_SPB_DMA_FIFO_FLUSH)) + break; + usleep_range(1000, 2000); + } while (timeout-- > 0); + tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL); + + umac_enable_set(intf, UMC_CMD_TX_EN, 0); + + phy_stop(dev->phydev); + + umac_enable_set(intf, UMC_CMD_RX_EN, 0); + + bcmasp_flush_rx_port(intf); + usleep_range(1000, 2000); + bcmasp_enable_rx(intf, 0); + + napi_disable(&intf->rx_napi); + + /* Disable interrupts */ + bcmasp_enable_tx_irq(intf, 0); + bcmasp_enable_rx_irq(intf, 0); + + netif_napi_del(&intf->tx_napi); + bcmasp_reclaim_free_all_tx(intf); + + netif_napi_del(&intf->rx_napi); + bcmasp_reclaim_free_all_rx(intf); +} + +static int bcmasp_stop(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + netif_dbg(intf, ifdown, dev, "bcmasp stop\n"); + + /* Stop tx from updating HW */ + netif_tx_disable(dev); + + bcmasp_netif_deinit(dev); + + phy_disconnect(dev->phydev); + + /* Disable internal EPHY or external PHY */ + if (intf->internal_phy) + bcmasp_ephy_enable_set(intf, false); + else + bcmasp_rgmii_mode_en_set(intf, false); + + /* Disable the interface clocks */ + bcmasp_core_clock_set_intf(intf, false); + + clk_disable_unprepare(intf->parent->clk); + + return 0; +} + +static void bcmasp_configure_port(struct bcmasp_intf *intf) +{ + u32 reg, id_mode_dis = 0; + + reg = rgmii_rl(intf, RGMII_PORT_CNTRL); + reg &= ~RGMII_PORT_MODE_MASK; + + switch (intf->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + /* RGMII_NO_ID: TXC transitions at the same time as TXD + * (requires PCB or receiver-side delay) + * RGMII: Add 2ns delay on TXC (90 degree shift) + * + * ID is implicitly disabled for 100Mbps (RG)MII operation. 
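+ * The id_mode_dis value chosen here is applied to RGMII_OOB_CNTRL after
+ * the switch statement.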
+ */ + id_mode_dis = RGMII_ID_MODE_DIS; + fallthrough; + case PHY_INTERFACE_MODE_RGMII_TXID: + reg |= RGMII_PORT_MODE_EXT_GPHY; + break; + case PHY_INTERFACE_MODE_MII: + reg |= RGMII_PORT_MODE_EXT_EPHY; + break; + default: + break; + } + + if (intf->internal_phy) + reg |= RGMII_PORT_MODE_EPHY; + + rgmii_wl(intf, reg, RGMII_PORT_CNTRL); + + reg = rgmii_rl(intf, RGMII_OOB_CNTRL); + reg &= ~RGMII_ID_MODE_DIS; + reg |= id_mode_dis; + rgmii_wl(intf, reg, RGMII_OOB_CNTRL); +} + +static int bcmasp_netif_init(struct net_device *dev, bool phy_connect) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + phy_interface_t phy_iface = intf->phy_interface; + u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE | + PHY_BRCM_DIS_TXCRXC_NOENRGY | + PHY_BRCM_IDDQ_SUSPEND; + struct phy_device *phydev = NULL; + int ret; + + /* Always enable interface clocks */ + bcmasp_core_clock_set_intf(intf, true); + + /* Enable internal PHY or external PHY before any MAC activity */ + if (intf->internal_phy) + bcmasp_ephy_enable_set(intf, true); + else + bcmasp_rgmii_mode_en_set(intf, true); + bcmasp_configure_port(intf); + + /* This is an ugly quirk but we have not been correctly + * interpreting the phy_interface values and we have done that + * across different drivers, so at least we are consistent in + * our mistakes. + * + * When the Generic PHY driver is in use either the PHY has + * been strapped or programmed correctly by the boot loader so + * we should stick to our incorrect interpretation since we + * have validated it. + * + * Now when a dedicated PHY driver is in use, we need to + * reverse the meaning of the phy_interface_mode values to + * something that the PHY driver will interpret and act on such + * that we have two mistakes canceling themselves so to speak. + * We only do this for the two modes that GENET driver + * officially supports on Broadcom STB chips: + * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID. + * Other modes are not *officially* supported with the boot + * loader and the scripted environment generating Device Tree + * blobs for those platforms. + * + * Note that internal PHY and fixed-link configurations are not + * affected because they use different phy_interface_t values + * or the Generic PHY driver. 
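+ * The swap itself is performed in the switch statement below.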
+ */ + switch (phy_iface) { + case PHY_INTERFACE_MODE_RGMII: + phy_iface = PHY_INTERFACE_MODE_RGMII_ID; + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + phy_iface = PHY_INTERFACE_MODE_RGMII_RXID; + break; + default: + break; + } + + if (phy_connect) { + phydev = of_phy_connect(dev, intf->phy_dn, + bcmasp_adj_link, phy_flags, + phy_iface); + if (!phydev) { + ret = -ENODEV; + netdev_err(dev, "could not attach to PHY\n"); + goto err_phy_disable; + } + } else if (!intf->wolopts) { + ret = phy_resume(dev->phydev); + if (ret) + goto err_phy_disable; + } + + umac_reset(intf); + + umac_init(intf); + + /* Disable the UniMAC RX/TX */ + umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 0); + + umac_set_hw_addr(intf, dev->dev_addr); + + intf->old_duplex = -1; + intf->old_link = -1; + intf->old_pause = -1; + + ret = bcmasp_init_tx(intf); + if (ret) + goto err_phy_disconnect; + + /* Turn on asp */ + bcmasp_enable_tx(intf, 1); + + ret = bcmasp_init_rx(intf); + if (ret) + goto err_reclaim_tx; + + bcmasp_enable_rx(intf, 1); + + /* Turn on UniMAC TX/RX */ + umac_enable_set(intf, (UMC_CMD_RX_EN | UMC_CMD_TX_EN), 1); + + intf->crc_fwd = !!(umac_rl(intf, UMC_CMD) & UMC_CMD_CRC_FWD); + + bcmasp_netif_start(dev); + + netif_start_queue(dev); + + return 0; + +err_reclaim_tx: + bcmasp_reclaim_free_all_tx(intf); +err_phy_disconnect: + if (phydev) + phy_disconnect(phydev); +err_phy_disable: + if (intf->internal_phy) + bcmasp_ephy_enable_set(intf, false); + else + bcmasp_rgmii_mode_en_set(intf, false); + return ret; +} + +static int bcmasp_open(struct net_device *dev) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + int ret; + + netif_dbg(intf, ifup, dev, "bcmasp open\n"); + + ret = clk_prepare_enable(intf->parent->clk); + if (ret) + return ret; + + ret = bcmasp_netif_init(dev, true); + if (ret) + clk_disable_unprepare(intf->parent->clk); + + return ret; +} + +static void bcmasp_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + netif_dbg(intf, tx_err, dev, "transmit timeout!\n"); + intf->mib.tx_timeout_cnt++; +} + +static int bcmasp_get_phys_port_name(struct net_device *dev, + char *name, size_t len) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + + if (snprintf(name, len, "p%d", intf->port) >= len) + return -EINVAL; + + return 0; +} + +static void bcmasp_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct bcmasp_intf *intf = netdev_priv(dev); + struct bcmasp_intf_stats64 *lstats; + unsigned int start; + + lstats = &intf->stats64; + + do { + start = u64_stats_fetch_begin(&lstats->syncp); + stats->rx_packets = u64_stats_read(&lstats->rx_packets); + stats->rx_bytes = u64_stats_read(&lstats->rx_bytes); + stats->rx_dropped = u64_stats_read(&lstats->rx_dropped); + stats->rx_crc_errors = u64_stats_read(&lstats->rx_crc_errs); + stats->rx_frame_errors = u64_stats_read(&lstats->rx_sym_errs); + stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors; + + stats->tx_packets = u64_stats_read(&lstats->tx_packets); + stats->tx_bytes = u64_stats_read(&lstats->tx_bytes); + } while (u64_stats_fetch_retry(&lstats->syncp, start)); +} + +static const struct net_device_ops bcmasp_netdev_ops = { + .ndo_open = bcmasp_open, + .ndo_stop = bcmasp_stop, + .ndo_start_xmit = bcmasp_xmit, + .ndo_tx_timeout = bcmasp_tx_timeout, + .ndo_set_rx_mode = bcmasp_set_rx_mode, + .ndo_get_phys_port_name = bcmasp_get_phys_port_name, + .ndo_eth_ioctl = phy_do_ioctl_running, + .ndo_set_mac_address = eth_mac_addr, + .ndo_get_stats64 = 
bcmasp_get_stats64, +}; + +static void bcmasp_map_res(struct bcmasp_priv *priv, struct bcmasp_intf *intf) +{ + /* Per port */ + intf->res.umac = priv->base + UMC_OFFSET(intf); + intf->res.umac2fb = priv->base + (priv->hw_info->umac2fb + + (intf->port * 0x4)); + intf->res.rgmii = priv->base + RGMII_OFFSET(intf); + + /* Per ch */ + intf->tx_spb_dma = priv->base + TX_SPB_DMA_OFFSET(intf); + intf->res.tx_spb_ctrl = priv->base + TX_SPB_CTRL_OFFSET(intf); + intf->res.tx_spb_top = priv->base + TX_SPB_TOP_OFFSET(intf); + intf->res.tx_epkt_core = priv->base + TX_EPKT_C_OFFSET(intf); + intf->res.tx_pause_ctrl = priv->base + TX_PAUSE_CTRL_OFFSET(intf); + + intf->rx_edpkt_dma = priv->base + RX_EDPKT_DMA_OFFSET(intf); + intf->rx_edpkt_cfg = priv->base + RX_EDPKT_CFG_OFFSET(intf); +} + +#define MAX_IRQ_STR_LEN 64 +struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, + struct device_node *ndev_dn, int i) +{ + struct device *dev = &priv->pdev->dev; + struct bcmasp_intf *intf; + struct net_device *ndev; + int ch, port, ret; + + if (of_property_read_u32(ndev_dn, "reg", &port)) { + dev_warn(dev, "%s: invalid port number\n", ndev_dn->name); + goto err; + } + + if (of_property_read_u32(ndev_dn, "brcm,channel", &ch)) { + dev_warn(dev, "%s: invalid ch number\n", ndev_dn->name); + goto err; + } + + ndev = alloc_etherdev(sizeof(struct bcmasp_intf)); + if (!ndev) { + dev_warn(dev, "%s: unable to alloc ndev\n", ndev_dn->name); + goto err; + } + intf = netdev_priv(ndev); + + intf->parent = priv; + intf->ndev = ndev; + intf->channel = ch; + intf->port = port; + intf->ndev_dn = ndev_dn; + intf->index = i; + + ret = of_get_phy_mode(ndev_dn, &intf->phy_interface); + if (ret < 0) { + dev_err(dev, "invalid PHY mode property\n"); + goto err_free_netdev; + } + + if (intf->phy_interface == PHY_INTERFACE_MODE_INTERNAL) + intf->internal_phy = true; + + intf->phy_dn = of_parse_phandle(ndev_dn, "phy-handle", 0); + if (!intf->phy_dn && of_phy_is_fixed_link(ndev_dn)) { + ret = of_phy_register_fixed_link(ndev_dn); + if (ret) { + dev_warn(dev, "%s: failed to register fixed PHY\n", + ndev_dn->name); + goto err_free_netdev; + } + intf->phy_dn = ndev_dn; + } + + /* Map resource */ + bcmasp_map_res(priv, intf); + + if ((!phy_interface_mode_is_rgmii(intf->phy_interface) && + intf->phy_interface != PHY_INTERFACE_MODE_MII && + intf->phy_interface != PHY_INTERFACE_MODE_INTERNAL) || + (intf->port != 1 && intf->internal_phy)) { + netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n", + phy_modes(intf->phy_interface), intf->port); + ret = -EINVAL; + goto err_free_netdev; + } + + ret = of_get_ethdev_address(ndev_dn, ndev); + if (ret) { + netdev_warn(ndev, "using random Ethernet MAC\n"); + eth_hw_addr_random(ndev); + } + + SET_NETDEV_DEV(ndev, dev); + intf->ops = &bcmasp_intf_ops; + ndev->netdev_ops = &bcmasp_netdev_ops; + ndev->ethtool_ops = &bcmasp_ethtool_ops; + intf->msg_enable = netif_msg_init(-1, NETIF_MSG_DRV | + NETIF_MSG_PROBE | + NETIF_MSG_LINK); + ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + ndev->hw_features |= ndev->features; + ndev->needed_headroom += sizeof(struct bcmasp_pkt_offload); + + return intf; + +err_free_netdev: + free_netdev(ndev); +err: + return NULL; +} + +void bcmasp_interface_destroy(struct bcmasp_intf *intf) +{ + if (intf->ndev->reg_state == NETREG_REGISTERED) + unregister_netdev(intf->ndev); + if (of_phy_is_fixed_link(intf->ndev_dn)) + of_phy_deregister_fixed_link(intf->ndev_dn); + free_netdev(intf->ndev); +} + +static void 
bcmasp_suspend_to_wol(struct bcmasp_intf *intf) +{ + struct net_device *ndev = intf->ndev; + u32 reg; + + reg = umac_rl(intf, UMC_MPD_CTRL); + if (intf->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) + reg |= UMC_MPD_CTRL_MPD_EN; + reg &= ~UMC_MPD_CTRL_PSW_EN; + if (intf->wolopts & WAKE_MAGICSECURE) { + /* Program the SecureOn password */ + umac_wl(intf, get_unaligned_be16(&intf->sopass[0]), + UMC_PSW_MS); + umac_wl(intf, get_unaligned_be32(&intf->sopass[2]), + UMC_PSW_LS); + reg |= UMC_MPD_CTRL_PSW_EN; + } + umac_wl(intf, reg, UMC_MPD_CTRL); + + if (intf->wolopts & WAKE_FILTER) + bcmasp_netfilt_suspend(intf); + + /* UniMAC receive needs to be turned on */ + umac_enable_set(intf, UMC_CMD_RX_EN, 1); + + if (intf->parent->wol_irq > 0) { + wakeup_intr2_core_wl(intf->parent, 0xffffffff, + ASP_WAKEUP_INTR2_MASK_CLEAR); + } + + netif_dbg(intf, wol, ndev, "entered WOL mode\n"); +} + +int bcmasp_interface_suspend(struct bcmasp_intf *intf) +{ + struct device *kdev = &intf->parent->pdev->dev; + struct net_device *dev = intf->ndev; + int ret = 0; + + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + + bcmasp_netif_deinit(dev); + + if (!intf->wolopts) { + ret = phy_suspend(dev->phydev); + if (ret) + goto out; + + if (intf->internal_phy) + bcmasp_ephy_enable_set(intf, false); + else + bcmasp_rgmii_mode_en_set(intf, false); + + /* If Wake-on-LAN is disabled, we can safely + * disable the network interface clocks. + */ + bcmasp_core_clock_set_intf(intf, false); + } + + if (device_may_wakeup(kdev) && intf->wolopts) + bcmasp_suspend_to_wol(intf); + + clk_disable_unprepare(intf->parent->clk); + + return ret; + +out: + bcmasp_netif_init(dev, false); + return ret; +} + +static void bcmasp_resume_from_wol(struct bcmasp_intf *intf) +{ + u32 reg; + + reg = umac_rl(intf, UMC_MPD_CTRL); + reg &= ~UMC_MPD_CTRL_MPD_EN; + umac_wl(intf, reg, UMC_MPD_CTRL); + + if (intf->parent->wol_irq > 0) { + wakeup_intr2_core_wl(intf->parent, 0xffffffff, + ASP_WAKEUP_INTR2_MASK_SET); + } +} + +int bcmasp_interface_resume(struct bcmasp_intf *intf) +{ + struct net_device *dev = intf->ndev; + int ret; + + if (!netif_running(dev)) + return 0; + + ret = clk_prepare_enable(intf->parent->clk); + if (ret) + return ret; + + ret = bcmasp_netif_init(dev, false); + if (ret) + goto out; + + bcmasp_resume_from_wol(intf); + + if (intf->eee.eee_enabled) + bcmasp_eee_enable_set(intf, true); + + netif_device_attach(dev); + + return 0; + +out: + clk_disable_unprepare(intf->parent->clk); + return ret; +} diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h new file mode 100644 index 000000000000..ad742612895f --- /dev/null +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf_defs.h @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BCMASP_INTF_DEFS_H +#define __BCMASP_INTF_DEFS_H + +#define UMC_OFFSET(intf) \ + ((((intf)->port) * 0x800) + 0xc000) +#define UMC_CMD 0x008 +#define UMC_CMD_TX_EN BIT(0) +#define UMC_CMD_RX_EN BIT(1) +#define UMC_CMD_SPEED_SHIFT 0x2 +#define UMC_CMD_SPEED_MASK 0x3 +#define UMC_CMD_SPEED_10 0x0 +#define UMC_CMD_SPEED_100 0x1 +#define UMC_CMD_SPEED_1000 0x2 +#define UMC_CMD_SPEED_2500 0x3 +#define UMC_CMD_PROMISC BIT(4) +#define UMC_CMD_PAD_EN BIT(5) +#define UMC_CMD_CRC_FWD BIT(6) +#define UMC_CMD_PAUSE_FWD BIT(7) +#define UMC_CMD_RX_PAUSE_IGNORE BIT(8) +#define UMC_CMD_TX_ADDR_INS BIT(9) +#define UMC_CMD_HD_EN BIT(10) +#define UMC_CMD_SW_RESET BIT(13) +#define UMC_CMD_LCL_LOOP_EN BIT(15) +#define UMC_CMD_AUTO_CONFIG 
BIT(22) +#define UMC_CMD_CNTL_FRM_EN BIT(23) +#define UMC_CMD_NO_LEN_CHK BIT(24) +#define UMC_CMD_RMT_LOOP_EN BIT(25) +#define UMC_CMD_PRBL_EN BIT(27) +#define UMC_CMD_TX_PAUSE_IGNORE BIT(28) +#define UMC_CMD_TX_RX_EN BIT(29) +#define UMC_CMD_RUNT_FILTER_DIS BIT(30) +#define UMC_MAC0 0x0c +#define UMC_MAC1 0x10 +#define UMC_FRM_LEN 0x14 +#define UMC_EEE_CTRL 0x64 +#define EN_LPI_RX_PAUSE BIT(0) +#define EN_LPI_TX_PFC BIT(1) +#define EN_LPI_TX_PAUSE BIT(2) +#define EEE_EN BIT(3) +#define RX_FIFO_CHECK BIT(4) +#define EEE_TX_CLK_DIS BIT(5) +#define DIS_EEE_10M BIT(6) +#define LP_IDLE_PREDICTION_MODE BIT(7) +#define UMC_EEE_LPI_TIMER 0x68 +#define UMC_PAUSE_CNTRL 0x330 +#define UMC_TX_FLUSH 0x334 +#define UMC_GR64 0x400 +#define UMC_GR127 0x404 +#define UMC_GR255 0x408 +#define UMC_GR511 0x40c +#define UMC_GR1023 0x410 +#define UMC_GR1518 0x414 +#define UMC_GRMGV 0x418 +#define UMC_GR2047 0x41c +#define UMC_GR4095 0x420 +#define UMC_GR9216 0x424 +#define UMC_GRPKT 0x428 +#define UMC_GRBYT 0x42c +#define UMC_GRMCA 0x430 +#define UMC_GRBCA 0x434 +#define UMC_GRFCS 0x438 +#define UMC_GRXCF 0x43c +#define UMC_GRXPF 0x440 +#define UMC_GRXUO 0x444 +#define UMC_GRALN 0x448 +#define UMC_GRFLR 0x44c +#define UMC_GRCDE 0x450 +#define UMC_GRFCR 0x454 +#define UMC_GROVR 0x458 +#define UMC_GRJBR 0x45c +#define UMC_GRMTUE 0x460 +#define UMC_GRPOK 0x464 +#define UMC_GRUC 0x468 +#define UMC_GRPPP 0x46c +#define UMC_GRMCRC 0x470 +#define UMC_TR64 0x480 +#define UMC_TR127 0x484 +#define UMC_TR255 0x488 +#define UMC_TR511 0x48c +#define UMC_TR1023 0x490 +#define UMC_TR1518 0x494 +#define UMC_TRMGV 0x498 +#define UMC_TR2047 0x49c +#define UMC_TR4095 0x4a0 +#define UMC_TR9216 0x4a4 +#define UMC_GTPKT 0x4a8 +#define UMC_GTMCA 0x4ac +#define UMC_GTBCA 0x4b0 +#define UMC_GTXPF 0x4b4 +#define UMC_GTXCF 0x4b8 +#define UMC_GTFCS 0x4bc +#define UMC_GTOVR 0x4c0 +#define UMC_GTDRF 0x4c4 +#define UMC_GTEDF 0x4c8 +#define UMC_GTSCL 0x4cc +#define UMC_GTMCL 0x4d0 +#define UMC_GTLCL 0x4d4 +#define UMC_GTXCL 0x4d8 +#define UMC_GTFRG 0x4dc +#define UMC_GTNCL 0x4e0 +#define UMC_GTJBR 0x4e4 +#define UMC_GTBYT 0x4e8 +#define UMC_GTPOK 0x4ec +#define UMC_GTUC 0x4f0 +#define UMC_RRPKT 0x500 +#define UMC_RRUND 0x504 +#define UMC_RRFRG 0x508 +#define UMC_RRBYT 0x50c +#define UMC_MIB_CNTRL 0x580 +#define UMC_MIB_CNTRL_RX_CNT_RST BIT(0) +#define UMC_MIB_CNTRL_RUNT_CNT_RST BIT(1) +#define UMC_MIB_CNTRL_TX_CNT_RST BIT(2) +#define UMC_RX_MAX_PKT_SZ 0x608 +#define UMC_MPD_CTRL 0x620 +#define UMC_MPD_CTRL_MPD_EN BIT(0) +#define UMC_MPD_CTRL_PSW_EN BIT(27) +#define UMC_PSW_MS 0x624 +#define UMC_PSW_LS 0x628 + +#define UMAC2FB_OFFSET_2_1 0x9f044 +#define UMAC2FB_OFFSET 0x9f03c +#define UMAC2FB_CFG 0x0 +#define UMAC2FB_CFG_OPUT_EN BIT(0) +#define UMAC2FB_CFG_VLAN_EN BIT(1) +#define UMAC2FB_CFG_SNAP_EN BIT(2) +#define UMAC2FB_CFG_BCM_TG_EN BIT(3) +#define UMAC2FB_CFG_IPUT_EN BIT(4) +#define UMAC2FB_CFG_CHID_SHIFT 8 +#define UMAC2FB_CFG_OK_SEND_SHIFT 24 +#define UMAC2FB_CFG_DEFAULT_EN \ + (UMAC2FB_CFG_OPUT_EN | UMAC2FB_CFG_VLAN_EN \ + | UMAC2FB_CFG_SNAP_EN | UMAC2FB_CFG_IPUT_EN) + +#define RGMII_OFFSET(intf) \ + ((((intf)->port) * 0x100) + 0xd000) +#define RGMII_EPHY_CNTRL 0x00 +#define RGMII_EPHY_CFG_IDDQ_BIAS BIT(0) +#define RGMII_EPHY_CFG_EXT_PWRDOWN BIT(1) +#define RGMII_EPHY_CFG_FORCE_DLL_EN BIT(2) +#define RGMII_EPHY_CFG_IDDQ_GLOBAL BIT(3) +#define RGMII_EPHY_CK25_DIS BIT(4) +#define RGMII_EPHY_RESET BIT(7) +#define RGMII_OOB_CNTRL 0x0c +#define RGMII_LINK BIT(4) +#define RGMII_OOB_DIS BIT(5) +#define RGMII_MODE_EN BIT(6) +#define 
RGMII_ID_MODE_DIS BIT(16) + +#define RGMII_PORT_CNTRL 0x60 +#define RGMII_PORT_MODE_EPHY 0 +#define RGMII_PORT_MODE_GPHY 1 +#define RGMII_PORT_MODE_EXT_EPHY 2 +#define RGMII_PORT_MODE_EXT_GPHY 3 +#define RGMII_PORT_MODE_EXT_RVMII 4 +#define RGMII_PORT_MODE_MASK GENMASK(2, 0) + +#define RGMII_SYS_LED_CNTRL 0x74 +#define RGMII_SYS_LED_CNTRL_LINK_OVRD BIT(15) + +#define TX_SPB_DMA_OFFSET(intf) \ + ((((intf)->channel) * 0x30) + 0x48180) +#define TX_SPB_DMA_READ 0x00 +#define TX_SPB_DMA_BASE 0x08 +#define TX_SPB_DMA_END 0x10 +#define TX_SPB_DMA_VALID 0x18 +#define TX_SPB_DMA_FIFO_CTRL 0x20 +#define TX_SPB_DMA_FIFO_FLUSH BIT(0) +#define TX_SPB_DMA_FIFO_STATUS 0x24 + +#define TX_SPB_CTRL_OFFSET(intf) \ + ((((intf)->channel) * 0x68) + 0x49340) +#define TX_SPB_CTRL_ENABLE 0x0 +#define TX_SPB_CTRL_ENABLE_EN BIT(0) +#define TX_SPB_CTRL_XF_CTRL2 0x20 +#define TX_SPB_CTRL_XF_BID_SHIFT 16 + +#define TX_SPB_TOP_OFFSET(intf) \ + ((((intf)->channel) * 0x1c) + 0x4a0e0) +#define TX_SPB_TOP_BLKOUT 0x0 +#define TX_SPB_TOP_SPRE_BW_CTRL 0x4 + +#define TX_EPKT_C_OFFSET(intf) \ + ((((intf)->channel) * 0x120) + 0x40900) +#define TX_EPKT_C_CFG_MISC 0x0 +#define TX_EPKT_C_CFG_MISC_EN BIT(0) +#define TX_EPKT_C_CFG_MISC_PT BIT(1) +#define TX_EPKT_C_CFG_MISC_PS_SHIFT 14 +#define TX_EPKT_C_CFG_MISC_FD_SHIFT 20 + +#define TX_PAUSE_CTRL_OFFSET(intf) \ + ((((intf)->channel * 0xc) + 0x49a20)) +#define TX_PAUSE_MAP_VECTOR 0x8 + +#define RX_EDPKT_DMA_OFFSET(intf) \ + ((((intf)->channel) * 0x38) + 0x9ca00) +#define RX_EDPKT_DMA_WRITE 0x00 +#define RX_EDPKT_DMA_READ 0x08 +#define RX_EDPKT_DMA_BASE 0x10 +#define RX_EDPKT_DMA_END 0x18 +#define RX_EDPKT_DMA_VALID 0x20 +#define RX_EDPKT_DMA_FULLNESS 0x28 +#define RX_EDPKT_DMA_MIN_THRES 0x2c +#define RX_EDPKT_DMA_CH_XONOFF 0x30 + +#define RX_EDPKT_CFG_OFFSET(intf) \ + ((((intf)->channel) * 0x70) + 0x9c600) +#define RX_EDPKT_CFG_CFG0 0x0 +#define RX_EDPKT_CFG_CFG0_DBUF_SHIFT 9 +#define RX_EDPKT_CFG_CFG0_RBUF 0x0 +#define RX_EDPKT_CFG_CFG0_RBUF_4K 0x1 +#define RX_EDPKT_CFG_CFG0_BUF_4K 0x2 +/* EFRM STUFF, 0 = no byte stuff, 1 = two byte stuff */ +#define RX_EDPKT_CFG_CFG0_EFRM_STUF BIT(11) +#define RX_EDPKT_CFG_CFG0_BALN_SHIFT 12 +#define RX_EDPKT_CFG_CFG0_NO_ALN 0 +#define RX_EDPKT_CFG_CFG0_4_ALN 2 +#define RX_EDPKT_CFG_CFG0_64_ALN 6 +#define RX_EDPKT_RING_BUFFER_WRITE 0x38 +#define RX_EDPKT_RING_BUFFER_READ 0x40 +#define RX_EDPKT_RING_BUFFER_BASE 0x48 +#define RX_EDPKT_RING_BUFFER_END 0x50 +#define RX_EDPKT_RING_BUFFER_VALID 0x58 +#define RX_EDPKT_CFG_ENABLE 0x6c +#define RX_EDPKT_CFG_ENABLE_EN BIT(0) + +#define RX_SPB_DMA_OFFSET(intf) \ + ((((intf)->channel) * 0x30) + 0xa0000) +#define RX_SPB_DMA_READ 0x00 +#define RX_SPB_DMA_BASE 0x08 +#define RX_SPB_DMA_END 0x10 +#define RX_SPB_DMA_VALID 0x18 +#define RX_SPB_DMA_FIFO_CTRL 0x20 +#define RX_SPB_DMA_FIFO_FLUSH BIT(0) +#define RX_SPB_DMA_FIFO_STATUS 0x24 + +#define RX_SPB_CTRL_OFFSET(intf) \ + ((((intf)->channel - 6) * 0x68) + 0xa1000) +#define RX_SPB_CTRL_ENABLE 0x00 +#define RX_SPB_CTRL_ENABLE_EN BIT(0) + +#define RX_PAUSE_CTRL_OFFSET(intf) \ + ((((intf)->channel - 6) * 0x4) + 0xa1138) +#define RX_PAUSE_MAP_VECTOR 0x00 + +#define RX_SPB_TOP_CTRL_OFFSET(intf) \ + ((((intf)->channel - 6) * 0x14) + 0xa2000) +#define RX_SPB_TOP_BLKOUT 0x00 + +#define NUM_4K_BUFFERS 32 +#define RING_BUFFER_SIZE (PAGE_SIZE * NUM_4K_BUFFERS) + +#define DESC_RING_COUNT (64 * NUM_4K_BUFFERS) +#define DESC_SIZE 16 +#define DESC_RING_SIZE (DESC_RING_COUNT * DESC_SIZE) + +#endif diff --git a/drivers/net/ethernet/broadcom/b44.c 
b/drivers/net/ethernet/broadcom/b44.c index 392ec09a1d8a..3e4fb3c3e834 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1793,11 +1793,9 @@ static int b44_nway_reset(struct net_device *dev) b44_readphy(bp, MII_BMCR, &bmcr); b44_readphy(bp, MII_BMCR, &bmcr); r = -EINVAL; - if (bmcr & BMCR_ANENABLE) { - b44_writephy(bp, MII_BMCR, - bmcr | BMCR_ANRESTART); - r = 0; - } + if (bmcr & BMCR_ANENABLE) + r = b44_writephy(bp, MII_BMCR, + bmcr | BMCR_ANRESTART); spin_unlock_irq(&bp->lock); return r; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 2cf96892e565..a741070f1f9a 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1940,7 +1940,6 @@ static struct platform_driver bcm63xx_enet_driver = { .remove = bcm_enet_remove, .driver = { .name = "bcm63xx_enet", - .owner = THIS_MODULE, }, }; @@ -2761,7 +2760,6 @@ static struct platform_driver bcm63xx_enetsw_driver = { .remove = bcm_enetsw_remove, .driver = { .name = "bcm63xx_enetsw", - .owner = THIS_MODULE, }, }; @@ -2791,7 +2789,6 @@ struct platform_driver bcm63xx_enet_shared_driver = { .probe = bcm_enet_shared_probe, .driver = { .name = "bcm63xx_enet_shared", - .owner = THIS_MODULE, }, }; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 10c7c232cc4e..448a1b90de5e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1448,9 +1448,9 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) int err; phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phy_dev || IS_ERR(phy_dev)) { + if (IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); - return -ENODEV; + return PTR_ERR(phy_dev); } err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 8bcde0a6e011..e2a4e1088b7f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1508,6 +1508,8 @@ struct bnx2x { bool cnic_loaded; struct cnic_eth_dev *(*cnic_probe)(struct net_device *); + bool nic_stopped; + /* Flag that indicates that we can start looking for FCoE L2 queue * completions in the default status block. 
*/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 6ea5521074d3..e9c1e1bb5580 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2715,6 +2715,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) bnx2x_add_all_napi(bp); DP(NETIF_MSG_IFUP, "napi added\n"); bnx2x_napi_enable(bp); + bp->nic_stopped = false; if (IS_PF(bp)) { /* set pf load just before approaching the MCP */ @@ -2960,6 +2961,7 @@ load_error2: load_error1: bnx2x_napi_disable(bp); bnx2x_del_all_napi(bp); + bp->nic_stopped = true; /* clear pf_load status, as it was already set */ if (IS_PF(bp)) @@ -3095,14 +3097,17 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) if (!CHIP_IS_E1x(bp)) bnx2x_pf_disable(bp); - /* Disable HW interrupts, NAPI */ - bnx2x_netif_stop(bp, 1); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - if (CNIC_LOADED(bp)) - bnx2x_del_all_napi_cnic(bp); - /* Release IRQs */ - bnx2x_free_irq(bp); + if (!bp->nic_stopped) { + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); + /* Release IRQs */ + bnx2x_free_irq(bp); + bp->nic_stopped = true; + } /* Report UNLOAD_DONE to MCP */ bnx2x_send_unload_done(bp, false); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 1e7a6f1d4223..0d8e61c63c7c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9474,15 +9474,18 @@ unload_error: } } - /* Disable HW interrupts, NAPI */ - bnx2x_netif_stop(bp, 1); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - if (CNIC_LOADED(bp)) - bnx2x_del_all_napi_cnic(bp); + if (!bp->nic_stopped) { + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); - /* Release IRQs */ - bnx2x_free_irq(bp); + /* Release IRQs */ + bnx2x_free_irq(bp); + bp->nic_stopped = true; + } /* Reset the chip, unless PCI function is offline. If we reach this * point following a PCI error handling, it means device is really @@ -14238,13 +14241,16 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev) } bnx2x_drain_tx_queues(bp); bnx2x_send_unload_req(bp, UNLOAD_RECOVERY); - bnx2x_netif_stop(bp, 1); - bnx2x_del_all_napi(bp); + if (!bp->nic_stopped) { + bnx2x_netif_stop(bp, 1); + bnx2x_del_all_napi(bp); - if (CNIC_LOADED(bp)) - bnx2x_del_all_napi_cnic(bp); + if (CNIC_LOADED(bp)) + bnx2x_del_all_napi_cnic(bp); - bnx2x_free_irq(bp); + bnx2x_free_irq(bp); + bp->nic_stopped = true; + } /* Report UNLOAD_DONE to MCP */ bnx2x_send_unload_done(bp, true); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 542c69822649..8e04552d2216 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -890,7 +890,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp, (struct eth_classify_rules_ramrod_data *)(raw->rdata); int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd; union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; - bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? 
true : false; + bool add = cmd == BNX2X_VLAN_MAC_ADD; unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags; u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac; @@ -1075,7 +1075,7 @@ static void bnx2x_set_one_vlan_e2(struct bnx2x *bp, int rule_cnt = rule_idx + 1; union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; - bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; + bool add = cmd == BNX2X_VLAN_MAC_ADD; u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan; /* Reset the ramrod data buffer for the first rule */ @@ -1125,7 +1125,7 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp, int rule_cnt = rule_idx + 1; union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx]; enum bnx2x_vlan_mac_cmd cmd = elem->cmd_data.vlan_mac.cmd; - bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false; + bool add = cmd == BNX2X_VLAN_MAC_ADD; u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan; u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac; u16 inner_mac; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 0657a0f5170f..8946a931e87e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -529,13 +529,16 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp) bnx2x_vfpf_finalize(bp, &req->first_tlv); free_irq: - /* Disable HW interrupts, NAPI */ - bnx2x_netif_stop(bp, 0); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - - /* Release IRQs */ - bnx2x_free_irq(bp); + if (!bp->nic_stopped) { + /* Disable HW interrupts, NAPI */ + bnx2x_netif_stop(bp, 0); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); + + /* Release IRQs */ + bnx2x_free_irq(bp); + bp->nic_stopped = true; + } } static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e5b54e6025be..5cc0dbe12132 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -54,7 +54,7 @@ #include <net/pkt_cls.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> -#include <net/page_pool.h> +#include <net/page_pool/helpers.h> #include <linux/align.h> #include <net/netdev_queues.h> @@ -293,6 +293,60 @@ static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx) BNXT_DB_CQ(db, idx); } +static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) +{ + if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) + return; + + if (BNXT_PF(bp)) + queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); + else + schedule_delayed_work(&bp->fw_reset_task, delay); +} + +static void __bnxt_queue_sp_work(struct bnxt *bp) +{ + if (BNXT_PF(bp)) + queue_work(bnxt_pf_wq, &bp->sp_task); + else + schedule_work(&bp->sp_task); +} + +static void bnxt_queue_sp_work(struct bnxt *bp, unsigned int event) +{ + set_bit(event, &bp->sp_event); + __bnxt_queue_sp_work(bp); +} + +static void bnxt_sched_reset_rxr(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + if (!rxr->bnapi->in_reset) { + rxr->bnapi->in_reset = true; + if (bp->flags & BNXT_FLAG_CHIP_P5) + set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); + else + set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); + __bnxt_queue_sp_work(bp); + } + rxr->rx_next_cons = 0xffff; +} + +void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + int idx) +{ + struct bnxt_napi *bnapi = txr->bnapi; + + if 
(bnapi->tx_fault) + return; + + netdev_err(bp->dev, "Invalid Tx completion (ring:%d tx_pkts:%d cons:%u prod:%u i:%d)", + txr->txq_index, bnapi->tx_pkts, + txr->tx_cons, txr->tx_prod, idx); + WARN_ON_ONCE(1); + bnapi->tx_fault = 1; + bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); +} + const u16 bnxt_lhint_arr[] = { TX_BD_FLAGS_LHINT_512_AND_SMALLER, TX_BD_FLAGS_LHINT_512_TO_1023, @@ -633,12 +687,13 @@ tx_kick_pending: return NETDEV_TX_OK; } -static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) +static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring; struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); u16 cons = txr->tx_cons; struct pci_dev *pdev = bp->pdev; + int nr_pkts = bnapi->tx_pkts; int i; unsigned int tx_bytes = 0; @@ -652,6 +707,11 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) skb = tx_buf->skb; tx_buf->skb = NULL; + if (unlikely(!skb)) { + bnxt_sched_reset_txr(bp, txr, i); + return; + } + tx_bytes += skb->len; if (tx_buf->is_push) { @@ -685,9 +745,10 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) next_tx_int: cons = NEXT_TX(cons); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); } + bnapi->tx_pkts = 0; WRITE_ONCE(txr->tx_cons, cons); __netif_txq_completed_wake(txq, nr_pkts, tx_bytes, @@ -697,21 +758,22 @@ next_tx_int: static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, struct bnxt_rx_ring_info *rxr, + unsigned int *offset, gfp_t gfp) { - struct device *dev = &bp->pdev->dev; struct page *page; - page = page_pool_dev_alloc_pages(rxr->page_pool); + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { + page = page_pool_dev_alloc_frag(rxr->page_pool, offset, + BNXT_RX_PAGE_SIZE); + } else { + page = page_pool_dev_alloc_pages(rxr->page_pool); + *offset = 0; + } if (!page) return NULL; - *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - if (dma_mapping_error(dev, *mapping)) { - page_pool_recycle_direct(rxr->page_pool, page); - return NULL; - } + *mapping = page_pool_get_dma_addr(page) + *offset; return page; } @@ -747,15 +809,16 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, dma_addr_t mapping; if (BNXT_RX_PAGE_MODE(bp)) { + unsigned int offset; struct page *page = - __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); + __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); if (!page) return -ENOMEM; mapping += bp->rx_dma_offset; rx_buf->data = page; - rx_buf->data_ptr = page_address(page) + bp->rx_offset; + rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset; } else { u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp); @@ -808,48 +871,15 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, struct rx_bd *rxbd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)]; struct bnxt_sw_rx_agg_bd *rx_agg_buf; - struct pci_dev *pdev = bp->pdev; struct page *page; dma_addr_t mapping; u16 sw_prod = rxr->rx_sw_agg_prod; unsigned int offset = 0; - if (BNXT_RX_PAGE_MODE(bp)) { - page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); + page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp); - if (!page) - return -ENOMEM; - - } else { - if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) { - page = rxr->rx_page; - if (!page) { - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - rxr->rx_page = page; - rxr->rx_page_offset = 0; - } - offset = rxr->rx_page_offset; - rxr->rx_page_offset += BNXT_RX_PAGE_SIZE; - if 
(rxr->rx_page_offset == PAGE_SIZE) - rxr->rx_page = NULL; - else - get_page(page); - } else { - page = alloc_page(gfp); - if (!page) - return -ENOMEM; - } - - mapping = dma_map_page_attrs(&pdev->dev, page, offset, - BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, - DMA_ATTR_WEAK_ORDERING); - if (dma_mapping_error(&pdev->dev, mapping)) { - __free_page(page); - return -EIO; - } - } + if (!page) + return -ENOMEM; if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap))) sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod); @@ -962,15 +992,15 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp, return NULL; } dma_addr -= bp->rx_dma_offset; - dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - skb = build_skb(page_address(page), PAGE_SIZE); + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, + bp->rx_dir); + skb = napi_build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE); if (!skb) { page_pool_recycle_direct(rxr->page_pool, page); return NULL; } skb_mark_for_recycle(skb); - skb_reserve(skb, bp->rx_dma_offset); + skb_reserve(skb, bp->rx_offset); __skb_put(skb, len); return skb; @@ -996,8 +1026,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, return NULL; } dma_addr -= bp->rx_dma_offset; - dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); + dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE, + bp->rx_dir); if (unlikely(!payload)) payload = eth_get_headlen(bp->dev, data_ptr, len); @@ -1010,7 +1040,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp, skb_mark_for_recycle(skb); off = (void *)data_ptr - page_address(page); - skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE); + skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE); memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN, payload + NET_IP_ALIGN); @@ -1039,7 +1069,7 @@ static struct sk_buff *bnxt_rx_skb(struct bnxt *bp, return NULL; } - skb = build_skb(data, bp->rx_buf_size); + skb = napi_build_skb(data, bp->rx_buf_size); dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); if (!skb) { @@ -1113,9 +1143,8 @@ static u32 __bnxt_rx_agg_pages(struct bnxt *bp, return 0; } - dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, - bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); + dma_sync_single_for_cpu(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, + bp->rx_dir); total_frag_len += frag_len; prod = NEXT_RX_AGG(prod); @@ -1135,13 +1164,14 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp, total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx, agg_bufs, tpa, NULL); if (!total_frag_len) { + skb_mark_for_recycle(skb); dev_kfree_skb(skb); return NULL; } skb->data_len += total_frag_len; skb->len += total_frag_len; - skb->truesize += PAGE_SIZE * agg_bufs; + skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs; return skb; } @@ -1234,38 +1264,6 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, return 0; } -static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay) -{ - if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))) - return; - - if (BNXT_PF(bp)) - queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay); - else - schedule_delayed_work(&bp->fw_reset_task, delay); -} - -static void bnxt_queue_sp_work(struct bnxt *bp) -{ - if (BNXT_PF(bp)) - queue_work(bnxt_pf_wq, &bp->sp_task); - else - schedule_work(&bp->sp_task); -} - -static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) -{ - 
if (!rxr->bnapi->in_reset) { - rxr->bnapi->in_reset = true; - if (bp->flags & BNXT_FLAG_CHIP_P5) - set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); - else - set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } - rxr->rx_next_cons = 0xffff; -} - static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id) { struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; @@ -1320,7 +1318,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n", cons, rxr->rx_next_cons, TPA_START_ERROR_CODE(tpa_start1)); - bnxt_sched_reset(bp, rxr); + bnxt_sched_reset_rxr(bp, rxr); return; } /* Store cfa_code in tpa_info to use in tpa_end @@ -1679,7 +1677,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, tpa_info->data_ptr = new_data + bp->rx_offset; tpa_info->mapping = new_mapping; - skb = build_skb(data, bp->rx_buf_size); + skb = napi_build_skb(data, bp->rx_buf_size); dma_unmap_single_attrs(&bp->pdev->dev, mapping, bp->rx_buf_use_size, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); @@ -1757,6 +1755,7 @@ static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, return; } skb_record_rx_queue(skb, bnapi->index); + skb_mark_for_recycle(skb); napi_gro_receive(&bnapi->napi, skb); } @@ -1844,7 +1843,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (rxr->rx_next_cons != 0xffff) netdev_warn(bp->dev, "RX cons %x != expected cons %x\n", cons, rxr->rx_next_cons); - bnxt_sched_reset(bp, rxr); + bnxt_sched_reset_rxr(bp, rxr); if (rc1) return rc1; goto next_rx_no_prod_no_len; @@ -1882,7 +1881,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) { netdev_warn_once(bp->dev, "RX buffer error %x\n", rx_err); - bnxt_sched_reset(bp, rxr); + bnxt_sched_reset_rxr(bp, rxr); } } goto next_rx_no_len; @@ -2329,7 +2328,7 @@ static int bnxt_async_event_process(struct bnxt *bp, goto async_event_process_exit; } rxr = bp->bnapi[grp_idx]->rx_ring; - bnxt_sched_reset(bp, rxr); + bnxt_sched_reset_rxr(bp, rxr); goto async_event_process_exit; } case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: { @@ -2384,7 +2383,7 @@ static int bnxt_async_event_process(struct bnxt *bp, default: goto async_event_process_exit; } - bnxt_queue_sp_work(bp); + __bnxt_queue_sp_work(bp); async_event_process_exit: return 0; } @@ -2413,8 +2412,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) } set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); - set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT); break; case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: @@ -2569,12 +2567,11 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, return rx_pkts; } -static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi) +static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, + int budget) { - if (bnapi->tx_pkts) { - bnapi->tx_int(bp, bnapi, bnapi->tx_pkts); - bnapi->tx_pkts = 0; - } + if (bnapi->tx_pkts && !bnapi->tx_fault) + bnapi->tx_int(bp, bnapi, budget); if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; @@ -2603,7 +2600,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, */ bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons); - __bnxt_poll_work_done(bp, bnapi); + __bnxt_poll_work_done(bp, bnapi, budget); return 
rx_pkts; } @@ -2734,7 +2731,7 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) } static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, - u64 dbr_type) + u64 dbr_type, int budget) { struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; int i; @@ -2750,7 +2747,7 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi, cpr2->had_work_done = 0; } } - __bnxt_poll_work_done(bp, bnapi); + __bnxt_poll_work_done(bp, bnapi, budget); } static int bnxt_poll_p5(struct napi_struct *napi, int budget) @@ -2780,7 +2777,8 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) if (cpr->has_more_work) break; - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL); + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, + budget); cpr->cp_raw_cons = raw_cons; if (napi_complete_done(napi, work_done)) BNXT_DB_NQ_ARM_P5(&cpr->cp_db, @@ -2810,7 +2808,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget) } raw_cons = NEXT_RAW_CMP(raw_cons); } - __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ); + __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget); if (raw_cons != cpr->cp_raw_cons) { cpr->cp_raw_cons = raw_cons; BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons); @@ -2942,10 +2940,6 @@ skip_rx_tpa_free: rx_buf->data = NULL; if (BNXT_RX_PAGE_MODE(bp)) { - mapping -= bp->rx_dma_offset; - dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE, - bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); page_pool_recycle_direct(rxr->page_pool, data); } else { dma_unmap_single_attrs(&pdev->dev, mapping, @@ -2966,30 +2960,13 @@ skip_rx_buf_free: if (!page) continue; - if (BNXT_RX_PAGE_MODE(bp)) { - dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, - BNXT_RX_PAGE_SIZE, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - rx_agg_buf->page = NULL; - __clear_bit(i, rxr->rx_agg_bmap); - - page_pool_recycle_direct(rxr->page_pool, page); - } else { - dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, - BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, - DMA_ATTR_WEAK_ORDERING); - rx_agg_buf->page = NULL; - __clear_bit(i, rxr->rx_agg_bmap); + rx_agg_buf->page = NULL; + __clear_bit(i, rxr->rx_agg_bmap); - __free_page(page); - } + page_pool_recycle_direct(rxr->page_pool, page); } skip_rx_agg_free: - if (rxr->rx_page) { - __free_page(rxr->rx_page); - rxr->rx_page = NULL; - } map = rxr->rx_tpa_idx_map; if (map) memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); @@ -3208,11 +3185,17 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp, { struct page_pool_params pp = { 0 }; - pp.pool_size = bp->rx_ring_size; + pp.pool_size = bp->rx_agg_ring_size; + if (BNXT_RX_PAGE_MODE(bp)) + pp.pool_size += bp->rx_ring_size; pp.nid = dev_to_node(&bp->pdev->dev); pp.napi = &rxr->bnapi->napi; pp.dev = &bp->pdev->dev; - pp.dma_dir = DMA_BIDIRECTIONAL; + pp.dma_dir = bp->rx_dir; + pp.max_len = PAGE_SIZE; + pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV; + if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) + pp.flags |= PP_FLAG_PAGE_FRAG; rxr->page_pool = page_pool_create(&pp); if (IS_ERR(rxr->page_pool)) { @@ -3989,26 +3972,29 @@ void bnxt_set_ring_params(struct bnxt *bp) */ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) { + struct net_device *dev = bp->dev; + if (page_mode) { bp->flags &= ~BNXT_FLAG_AGG_RINGS; bp->flags |= BNXT_FLAG_RX_PAGE_MODE; - if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { + if (bp->xdp_prog->aux->xdp_has_frags) + dev->max_mtu = min_t(u16, bp->max_mtu, BNXT_MAX_MTU); + else + dev->max_mtu = + min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); + if (dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { bp->flags |= 
BNXT_FLAG_JUMBO; bp->rx_skb_func = bnxt_rx_multi_page_skb; - bp->dev->max_mtu = - min_t(u16, bp->max_mtu, BNXT_MAX_MTU); } else { bp->flags |= BNXT_FLAG_NO_AGG_RINGS; bp->rx_skb_func = bnxt_rx_page_skb; - bp->dev->max_mtu = - min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); } bp->rx_dir = DMA_BIDIRECTIONAL; /* Disable LRO or GRO_HW */ - netdev_update_features(bp->dev); + netdev_update_features(dev); } else { - bp->dev->max_mtu = bp->max_mtu; + dev->max_mtu = bp->max_mtu; bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; bp->rx_dir = DMA_FROM_DEVICE; bp->rx_skb_func = bnxt_rx_skb; @@ -9407,10 +9393,16 @@ static void bnxt_disable_napi(struct bnxt *bp) return; for (i = 0; i < bp->cp_nr_rings; i++) { - struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring; + struct bnxt_napi *bnapi = bp->bnapi[i]; + struct bnxt_cp_ring_info *cpr; - napi_disable(&bp->bnapi[i]->napi); - if (bp->bnapi[i]->rx_ring) + cpr = &bnapi->cp_ring; + if (bnapi->tx_fault) + cpr->sw_stats.tx.tx_resets++; + if (bnapi->in_reset) + cpr->sw_stats.rx.rx_resets++; + napi_disable(&bnapi->napi); + if (bnapi->rx_ring) cancel_work_sync(&cpr->dim.work); } } @@ -9424,11 +9416,13 @@ static void bnxt_enable_napi(struct bnxt *bp) struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr; + bnapi->tx_fault = 0; + cpr = &bnapi->cp_ring; - if (bnapi->in_reset) - cpr->sw_stats.rx.rx_resets++; bnapi->in_reset = false; + bnapi->tx_pkts = 0; + if (bnapi->rx_ring) { INIT_WORK(&cpr->dim.work, bnxt_dim_work); cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; @@ -10693,8 +10687,10 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bnxt_free_skbs(bp); /* Save ring stats before shutdown */ - if (bp->bnapi && irq_re_init) + if (bp->bnapi && irq_re_init) { bnxt_get_ring_stats(bp, &bp->net_stats_prev); + bnxt_get_ring_err_stats(bp, &bp->ring_err_stats_prev); + } if (irq_re_init) { bnxt_free_irq(bp); bnxt_del_napi(bp); @@ -10943,6 +10939,35 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) clear_bit(BNXT_STATE_READ_STATS, &bp->state); } +static void bnxt_get_one_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats, + struct bnxt_cp_ring_info *cpr) +{ + struct bnxt_sw_stats *sw_stats = &cpr->sw_stats; + u64 *hw_stats = cpr->stats.sw_stats; + + stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors; + stats->rx_total_resets += sw_stats->rx.rx_resets; + stats->rx_total_buf_errors += sw_stats->rx.rx_buf_errors; + stats->rx_total_oom_discards += sw_stats->rx.rx_oom_discards; + stats->rx_total_netpoll_discards += sw_stats->rx.rx_netpoll_discards; + stats->rx_total_ring_discards += + BNXT_GET_RING_STATS64(hw_stats, rx_discard_pkts); + stats->tx_total_resets += sw_stats->tx.tx_resets; + stats->tx_total_ring_discards += + BNXT_GET_RING_STATS64(hw_stats, tx_discard_pkts); + stats->total_missed_irqs += sw_stats->cmn.missed_irqs; +} + +void bnxt_get_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats) +{ + int i; + + for (i = 0; i < bp->cp_nr_rings; i++) + bnxt_get_one_ring_err_stats(bp, stats, &bp->bnapi[i]->cp_ring); +} + static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) { struct net_device *dev = bp->dev; @@ -11031,8 +11056,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) if (mask != vnic->rx_mask || uc_update || mc_update) { vnic->rx_mask = mask; - set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); } } @@ -11597,8 +11621,7 @@ static void bnxt_tx_timeout(struct net_device 
*dev, unsigned int txqueue) struct bnxt *bp = netdev_priv(dev); netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); - set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_RESET_TASK_SP_EVENT); } static void bnxt_fw_health_check(struct bnxt *bp) @@ -11635,8 +11658,7 @@ static void bnxt_fw_health_check(struct bnxt *bp) return; fw_reset: - set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_FW_EXCEPTION_SP_EVENT); } static void bnxt_timer(struct timer_list *t) @@ -11653,21 +11675,15 @@ static void bnxt_timer(struct timer_list *t) if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) bnxt_fw_health_check(bp); - if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) { - set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } + if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) + bnxt_queue_sp_work(bp, BNXT_PERIODIC_STATS_SP_EVENT); - if (bnxt_tc_flower_enabled(bp)) { - set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } + if (bnxt_tc_flower_enabled(bp)) + bnxt_queue_sp_work(bp, BNXT_FLOW_STATS_SP_EVENT); #ifdef CONFIG_RFS_ACCEL - if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) { - set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } + if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) + bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); #endif /*CONFIG_RFS_ACCEL*/ if (bp->link_info.phy_retry) { @@ -11675,21 +11691,17 @@ static void bnxt_timer(struct timer_list *t) bp->link_info.phy_retry = false; netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n"); } else { - set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_UPDATE_PHY_SP_EVENT); } } - if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) { - set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } + if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) + bnxt_queue_sp_work(bp, BNXT_RX_MASK_SP_EVENT); if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev && - netif_carrier_ok(dev)) { - set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); - } + netif_carrier_ok(dev)) + bnxt_queue_sp_work(bp, BNXT_RING_COAL_NOW_SP_EVENT); + bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -12968,8 +12980,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, bp->ntp_fltr_count++; spin_unlock_bh(&bp->ntp_fltr_lock); - set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); - bnxt_queue_sp_work(bp); + bnxt_queue_sp_work(bp, BNXT_RX_NTP_FLTR_SP_EVENT); return new_fltr->sw_id; @@ -13101,9 +13112,6 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; - if (nla_len(attr) < sizeof(mode)) - return -EINVAL; - mode = nla_get_u16(attr); if (mode == bp->br_mode) break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 080e73496066..84cbcfa61bc1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -919,9 +919,6 @@ struct bnxt_rx_ring_info { unsigned long *rx_agg_bmap; u16 rx_agg_bmap_size; - struct page *rx_page; - unsigned int rx_page_offset; - dma_addr_t rx_desc_mapping[MAX_RX_PAGES]; dma_addr_t rx_agg_desc_mapping[MAX_RX_AGG_PAGES]; @@ -942,15 +939,32 @@ struct bnxt_rx_sw_stats { u64 rx_netpoll_discards; }; 
+struct bnxt_tx_sw_stats { + u64 tx_resets; +}; + struct bnxt_cmn_sw_stats { u64 missed_irqs; }; struct bnxt_sw_stats { struct bnxt_rx_sw_stats rx; + struct bnxt_tx_sw_stats tx; struct bnxt_cmn_sw_stats cmn; }; +struct bnxt_total_ring_err_stats { + u64 rx_total_l4_csum_errors; + u64 rx_total_resets; + u64 rx_total_buf_errors; + u64 rx_total_oom_discards; + u64 rx_total_netpoll_discards; + u64 rx_total_ring_discards; + u64 tx_total_resets; + u64 tx_total_ring_discards; + u64 total_missed_irqs; +}; + struct bnxt_stats_mem { u64 *sw_stats; u64 *hw_masks; @@ -1005,9 +1019,10 @@ struct bnxt_napi { struct bnxt_tx_ring_info *tx_ring; void (*tx_int)(struct bnxt *, struct bnxt_napi *, - int); + int budget); int tx_pkts; u8 events; + u8 tx_fault:1; u32 flags; #define BNXT_NAPI_FLAG_XDP 0x1 @@ -2020,6 +2035,8 @@ struct bnxt { u8 pri2cos_idx[8]; u8 pri2cos_valid; + struct bnxt_total_ring_err_stats ring_err_stats_prev; + u16 hwrm_max_req_len; u16 hwrm_max_ext_req_len; unsigned int hwrm_cmd_timeout; @@ -2329,6 +2346,8 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num); int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init); void bnxt_tx_disable(struct bnxt *bp); void bnxt_tx_enable(struct bnxt *bp); +void bnxt_sched_reset_txr(struct bnxt *bp, struct bnxt_tx_ring_info *txr, + int idx); void bnxt_report_link(struct bnxt *bp); int bnxt_update_link(struct bnxt *bp, bool chng_link_state); int bnxt_hwrm_set_pause(struct bnxt *); @@ -2344,6 +2363,8 @@ int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); void bnxt_reenable_sriov(struct bnxt *bp); int bnxt_close_nic(struct bnxt *, bool, bool); +void bnxt_get_ring_err_stats(struct bnxt *bp, + struct bnxt_total_ring_err_stats *stats); int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, u32 *reg_buf); void bnxt_fw_exception(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index caab3d626a2a..63e067038385 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -98,7 +98,6 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, { struct hwrm_queue_cos2bw_cfg_input *req; struct bnxt_cos2bw_cfg cos2bw; - void *data; int rc, i; rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG); @@ -129,11 +128,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cpu_to_le32((ets->tc_tx_bw[i] * 100) | BW_VALUE_UNIT_PERCENT1_100); } - data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4); - memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); if (qidx == 0) { req->queue_id0 = cos2bw.queue_id; - req->unused_0 = 0; + req->queue_id0_min_bw = cos2bw.min_bw; + req->queue_id0_max_bw = cos2bw.max_bw; + req->queue_id0_tsa_assign = cos2bw.tsa; + req->queue_id0_pri_lvl = cos2bw.pri_lvl; + req->queue_id0_bw_weight = cos2bw.bw_weight; + } else { + memcpy(&req->cfg[i - 1], &cos2bw.cfg, sizeof(cos2bw.cfg)); } } return hwrm_req_send(bp, req); @@ -144,7 +147,6 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) struct hwrm_queue_cos2bw_qcfg_output *resp; struct hwrm_queue_cos2bw_qcfg_input *req; struct bnxt_cos2bw_cfg cos2bw; - void *data; int rc, i; rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG); @@ -158,13 +160,19 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) return rc; } - data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); - for (i = 0; i < bp->max_tc; i++, data += 
sizeof(cos2bw.cfg)) { + for (i = 0; i < bp->max_tc; i++) { int tc; - memcpy(&cos2bw.cfg, data, sizeof(cos2bw.cfg)); - if (i == 0) + if (i == 0) { cos2bw.queue_id = resp->queue_id0; + cos2bw.min_bw = resp->queue_id0_min_bw; + cos2bw.max_bw = resp->queue_id0_max_bw; + cos2bw.tsa = resp->queue_id0_tsa_assign; + cos2bw.pri_lvl = resp->queue_id0_pri_lvl; + cos2bw.bw_weight = resp->queue_id0_bw_weight; + } else { + memcpy(&cos2bw.cfg, &resp->cfg[i - 1], sizeof(cos2bw.cfg)); + } tc = bnxt_queue_to_tc(bp, cos2bw.queue_id); if (tc < 0) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h index 716742522161..5b2a6f678244 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h @@ -27,11 +27,12 @@ struct bnxt_cos2bw_cfg { u8 queue_id; __le32 min_bw; __le32 max_bw; -#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) u8 tsa; u8 pri_lvl; u8 bw_weight; ); +/* for min_bw / max_bw */ +#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) u8 unused; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8fd5071d8b09..547247d98eba 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -339,13 +339,16 @@ enum { RX_NETPOLL_DISCARDS, }; -static struct { - u64 counter; - char string[ETH_GSTRING_LEN]; -} bnxt_sw_func_stats[] = { - {0, "rx_total_discard_pkts"}, - {0, "tx_total_discard_pkts"}, - {0, "rx_total_netpoll_discards"}, +static const char *const bnxt_ring_err_stats_arr[] = { + "rx_total_l4_csum_errors", + "rx_total_resets", + "rx_total_buf_errors", + "rx_total_oom_discards", + "rx_total_netpoll_discards", + "rx_total_ring_discards", + "tx_total_resets", + "tx_total_ring_discards", + "total_missed_irqs", }; #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) @@ -495,7 +498,7 @@ static const struct { BNXT_TX_STATS_PRI_ENTRIES(tx_packets), }; -#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) +#define BNXT_NUM_RING_ERR_STATS ARRAY_SIZE(bnxt_ring_err_stats_arr) #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) #define BNXT_NUM_STATS_PRI \ (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \ @@ -532,7 +535,7 @@ static int bnxt_get_num_stats(struct bnxt *bp) { int num_stats = bnxt_get_num_ring_stats(bp); - num_stats += BNXT_NUM_SW_FUNC_STATS; + num_stats += BNXT_NUM_RING_ERR_STATS; if (bp->flags & BNXT_FLAG_PORT_STATS) num_stats += BNXT_NUM_PORT_STATS; @@ -583,18 +586,17 @@ static bool is_tx_ring(struct bnxt *bp, int ring_num) static void bnxt_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *buf) { - u32 i, j = 0; + struct bnxt_total_ring_err_stats ring_err_stats = {0}; struct bnxt *bp = netdev_priv(dev); + u64 *curr, *prev; u32 tpa_stats; + u32 i, j = 0; if (!bp->bnapi) { - j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS; + j += bnxt_get_num_ring_stats(bp); goto skip_ring_stats; } - for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) - bnxt_sw_func_stats[i].counter = 0; - tpa_stats = bnxt_get_num_tpa_ring_stats(bp); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; @@ -631,19 +633,16 @@ skip_tpa_ring_stats: sw = (u64 *)&cpr->sw_stats.cmn; for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++) buf[j] = sw[k]; - - bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter += - BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts); - bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += - BNXT_GET_RING_STATS64(sw_stats, 
tx_discard_pkts); - bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter += - cpr->sw_stats.rx.rx_netpoll_discards; } - for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) - buf[j] = bnxt_sw_func_stats[i].counter; + bnxt_get_ring_err_stats(bp, &ring_err_stats); skip_ring_stats: + curr = &ring_err_stats.rx_total_l4_csum_errors; + prev = &bp->ring_err_stats_prev.rx_total_l4_csum_errors; + for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++, j++, curr++, prev++) + buf[j] = *curr + *prev; + if (bp->flags & BNXT_FLAG_PORT_STATS) { u64 *port_stats = bp->port_stats.sw_stats; @@ -745,8 +744,8 @@ skip_tpa_stats: buf += ETH_GSTRING_LEN; } } - for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) { - strcpy(buf, bnxt_sw_func_stats[i].string); + for (i = 0; i < BNXT_NUM_RING_ERR_STATS; i++) { + strscpy(buf, bnxt_ring_err_stats_arr[i], ETH_GSTRING_LEN); buf += ETH_GSTRING_LEN; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index b31de4cf6534..f178ed9899a9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -5739,286 +5739,48 @@ struct hwrm_queue_cos2bw_qcfg_output { #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL u8 queue_id0_pri_lvl; u8 queue_id0_bw_weight; - u8 queue_id1; - __le32 queue_id1_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id1_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id1_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id1_pri_lvl; - u8 queue_id1_bw_weight; - u8 queue_id2; - __le32 queue_id2_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id2_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id2_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id2_pri_lvl; - u8 queue_id2_bw_weight; - u8 queue_id3; - __le32 queue_id3_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id3_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id3_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id3_pri_lvl; - u8 queue_id3_bw_weight; - u8 queue_id4; - __le32 queue_id4_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) - 
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id4_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id4_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id4_pri_lvl; - u8 queue_id4_bw_weight; - u8 queue_id5; - __le32 queue_id5_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) 
- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id5_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id5_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id5_pri_lvl; - u8 queue_id5_bw_weight; - u8 queue_id6; - __le32 queue_id6_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id6_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id6_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id6_pri_lvl; - u8 queue_id6_bw_weight; - u8 queue_id7; - __le32 queue_id7_min_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id7_max_bw; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define 
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id7_tsa_assign; - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id7_pri_lvl; - u8 queue_id7_bw_weight; + struct { + u8 queue_id; + __le32 queue_id_min_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id_max_bw; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id_tsa_assign; + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL + 
#define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id_pri_lvl; + u8 queue_id_bw_weight; + } __packed cfg[7]; u8 unused_2[4]; u8 valid; }; @@ -6082,286 +5844,48 @@ struct hwrm_queue_cos2bw_cfg_input { #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL u8 queue_id0_pri_lvl; u8 queue_id0_bw_weight; - u8 queue_id1; - __le32 queue_id1_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id1_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id1_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id1_pri_lvl; - u8 queue_id1_bw_weight; - u8 queue_id2; - __le32 queue_id2_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL - 
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id2_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id2_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id2_pri_lvl; - u8 queue_id2_bw_weight; - u8 queue_id3; - __le32 queue_id3_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id3_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id3_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id3_pri_lvl; - u8 queue_id3_bw_weight; - u8 queue_id4; - __le32 queue_id4_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id4_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 
28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id4_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id4_pri_lvl; - u8 queue_id4_bw_weight; - u8 queue_id5; - __le32 queue_id5_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id5_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id5_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id5_pri_lvl; - u8 queue_id5_bw_weight; - u8 queue_id6; - __le32 queue_id6_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id6_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id6_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id6_pri_lvl; - u8 queue_id6_bw_weight; - u8 queue_id7; - __le32 
queue_id7_min_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID - __le32 queue_id7_max_bw; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29 - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID - u8 queue_id7_tsa_assign; - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL - #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL - u8 queue_id7_pri_lvl; - u8 queue_id7_bw_weight; + struct { + u8 queue_id; + __le32 queue_id_min_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_SFT 29 + #define 
QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MIN_BW_BW_VALUE_UNIT_INVALID + __le32 queue_id_max_bw; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_MASK 0xfffffffUL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_SFT 0 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE 0x10000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BITS (0x0UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES (0x1UL << 28) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_SCALE_BYTES + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_SFT 29 + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29) + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID_MAX_BW_BW_VALUE_UNIT_INVALID + u8 queue_id_tsa_assign; + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_SP 0x0UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_ETS 0x1UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_FIRST 0x2UL + #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID_TSA_ASSIGN_RESERVED_LAST 0xffUL + u8 queue_id_pri_lvl; + u8 queue_id_bw_weight; + } __packed cfg[7]; u8 unused_1[5]; }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index d8afcf8d6b30..38d89d80b4a9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -373,9 +373,9 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, struct flow_dissector *dissector = rule->match.dissector; /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ - if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || - (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) { - netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n", + if ((dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || + (dissector->used_keys & BIT_ULL(FLOW_DISSECTOR_KEY_BASIC)) == 0) { + netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%llx\n", dissector->used_keys); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 4efa5fe6972b..96f5ca778c67 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -15,7 +15,7 @@ #include <linux/bpf.h> #include <linux/bpf_trace.h> #include <linux/filter.h> -#include <net/page_pool.h> +#include <net/page_pool/helpers.h> #include 
"bnxt_hsi.h" #include "bnxt.h" #include "bnxt_xdp.h" @@ -125,16 +125,20 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, dma_unmap_len_set(tx_buf, len, 0); } -void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring; struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; bool rx_doorbell_needed = false; + int nr_pkts = bnapi->tx_pkts; struct bnxt_sw_tx_bd *tx_buf; u16 tx_cons = txr->tx_cons; u16 last_tx_cons = tx_cons; int i, j, frags; + if (!budget) + return; + for (i = 0; i < nr_pkts; i++) { tx_buf = &txr->tx_buf_ring[tx_cons]; @@ -149,6 +153,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) tx_buf->action = 0; tx_buf->xdpf = NULL; } else if (tx_buf->action == XDP_TX) { + tx_buf->action = 0; rx_doorbell_needed = true; last_tx_cons = tx_cons; @@ -158,9 +163,14 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) tx_buf = &txr->tx_buf_ring[tx_cons]; page_pool_recycle_direct(rxr->page_pool, tx_buf->page); } + } else { + bnxt_sched_reset_txr(bp, txr, i); + return; } tx_cons = NEXT_TX(tx_cons); } + + bnapi->tx_pkts = 0; WRITE_ONCE(txr->tx_cons, tx_cons); if (rx_doorbell_needed) { tx_buf = &txr->tx_buf_ring[last_tx_cons]; @@ -180,8 +190,8 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, u8 *data_ptr, unsigned int len, struct xdp_buff *xdp) { + u32 buflen = BNXT_RX_PAGE_SIZE; struct bnxt_sw_rx_bd *rx_buf; - u32 buflen = PAGE_SIZE; struct pci_dev *pdev; dma_addr_t mapping; u32 offset; @@ -297,7 +307,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, rx_buf = &rxr->rx_buf_ring[cons]; mapping = rx_buf->mapping - bp->rx_dma_offset; dma_unmap_page_attrs(&pdev->dev, mapping, - PAGE_SIZE, bp->rx_dir, + BNXT_RX_PAGE_SIZE, bp->rx_dir, DMA_ATTR_WEAK_ORDERING); /* if we are unable to allocate a new buffer, abort and reuse */ @@ -480,7 +490,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, } xdp_update_skb_shared_info(skb, num_frags, sinfo->xdp_frags_size, - PAGE_SIZE * sinfo->nr_frags, + BNXT_RX_PAGE_SIZE * sinfo->nr_frags, xdp_buff_is_frag_pfmemalloc(xdp)); return skb; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index ea430d6961df..5e412c5655ba 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -16,7 +16,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp, struct bnxt_tx_ring_info *txr, dma_addr_t mapping, u32 len, struct xdp_buff *xdp); -void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts); +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget); bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, struct xdp_buff xdp, struct page *page, u8 **data_ptr, unsigned int *len, u8 *event); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 2b5761ad2f92..24bade875ca6 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2077,12 +2077,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock(&ring->lock); if (ring->free_bds <= (nr_frags + 1)) { - if (!netif_tx_queue_stopped(txq)) { + if (!netif_tx_queue_stopped(txq)) netif_tx_stop_queue(txq); - netdev_err(dev, - "%s: tx 
ring %d full when queue %d awake\n", - __func__, index, ring->queue); - } ret = NETDEV_TX_BUSY; goto out; } diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 0092e46c46f8..97ea76d443ab 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -617,9 +617,9 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) }; phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phydev || IS_ERR(phydev)) { + if (IS_ERR(phydev)) { dev_err(kdev, "failed to register fixed PHY device\n"); - return -ENODEV; + return PTR_ERR(phydev); } /* Make sure we initialize MoCA PHYs with a link down */ diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 5ef073a79ce9..14b311196b8f 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1539,8 +1539,7 @@ static int tg3_mdio_init(struct tg3 *tp) return -ENOMEM; tp->mdio_bus->name = "tg3 mdio bus"; - snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", - (tp->pdev->bus->number << 8) | tp->pdev->devfn); + snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev)); tp->mdio_bus->priv = tp; tp->mdio_bus->parent = &tp->pdev->dev; tp->mdio_bus->read = &tg3_mdio_read; @@ -6881,7 +6880,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) ri->data = NULL; - skb = build_skb(data, frag_size); + if (frag_size) + skb = build_skb(data, frag_size); + else + skb = slab_build_skb(data); if (!skb) { tg3_frag_free(frag_size != 0, data); goto drop_it_no_recycle; @@ -17792,10 +17794,7 @@ static int tg3_init_one(struct pci_dev *pdev, tnapi->tx_pending = TG3_DEF_TX_RING_PENDING; tnapi->int_mbox = intmbx; - if (i <= 4) - intmbx += 0x8; - else - intmbx += 0x4; + intmbx += 0x8; tnapi->consmbox = rcvmbx; tnapi->prodmbox = sndmbx;
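
The bnxt_hsi.h hunks above collapse the per-queue queue_id1_* through queue_id7_* fields of the COS2BW request and response into a packed cfg[7] array, so callers can index a queue's bandwidth block instead of open-coding seven copies. A minimal, self-contained sketch of that access pattern follows; the struct is a simplified stand-in for the real little-endian HWRM layout in bnxt_hsi.h, and the field set and values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the per-queue block the hunk folds into cfg[7];
 * the real structure lives in bnxt_hsi.h and uses little-endian __le32 fields. */
struct cos2bw_cfg {
    uint8_t  queue_id;
    uint32_t min_bw;
    uint32_t max_bw;
    uint8_t  tsa_assign;
    uint8_t  pri_lvl;
    uint8_t  bw_weight;
} __attribute__((packed));

int main(void)
{
    struct cos2bw_cfg cfg[7] = { { .queue_id = 1, .bw_weight = 25 } };

    /* With the array layout, a caller can index queues instead of touching
     * queue_id1_*, queue_id2_*, ... fields one by one. */
    for (int i = 0; i < 7; i++)
        printf("cos queue %d: id=%u weight=%u\n",
               i, (unsigned)cfg[i].queue_id, (unsigned)cfg[i].bw_weight);
    return 0;
}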
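
Each min_bw/max_bw word in those blocks packs three fields: bits 0..27 hold the bandwidth value, bit 28 selects bits-versus-bytes scaling, and bits 29..31 encode the unit (MEGA=0, PERCENT1_100=1, KILO=2, BASE=4, GIGA=6, INVALID=7). The sketch below decodes such a word with masks mirrored from the *_MIN_BW_* defines; the helper name and sample value are assumptions, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Masks and shifts mirrored from the QUEUE_COS2BW_*_MIN_BW_* defines above. */
#define BW_VALUE_MASK  0x0fffffffu  /* bits 0..27: bandwidth value               */
#define BW_SCALE_BYTES 0x10000000u  /* bit 28 set: byte scale, clear: bit scale  */
#define BW_UNIT_MASK   0xe0000000u  /* bits 29..31: unit encoding                */
#define BW_UNIT_SFT    29

/* Unit values per the hunk: MEGA=0, PERCENT1_100=1, KILO=2, BASE=4, GIGA=6. */
static const char *bw_unit_name(uint32_t unit)
{
    switch (unit) {
    case 0: return "mega";
    case 1: return "percent (1/100)";
    case 2: return "kilo";
    case 4: return "base";
    case 6: return "giga";
    default: return "invalid/reserved";
    }
}

int main(void)
{
    /* Hypothetical encoded word: value 100, byte scaling, "mega" unit. */
    uint32_t min_bw = 100u | BW_SCALE_BYTES | (0u << BW_UNIT_SFT);

    printf("value=%u scale=%s unit=%s\n",
           (unsigned)(min_bw & BW_VALUE_MASK),
           (min_bw & BW_SCALE_BYTES) ? "bytes" : "bits",
           bw_unit_name((min_bw & BW_UNIT_MASK) >> BW_UNIT_SFT));
    return 0;
}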
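
The bcmmii.c hunk drops the NULL test because fixed_phy_register() reports failure through an ERR_PTR()-encoded pointer, so the idiomatic check is IS_ERR() and the errno to propagate comes from PTR_ERR(). A small userspace illustration of that convention follows; the ERR_PTR/IS_ERR/PTR_ERR mock-ups stand in for <linux/err.h>, and register_phy() is a hypothetical placeholder for the real registration call.

#include <stdio.h>
#include <errno.h>

/* Userspace mock-ups of the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers, for
 * illustration only; in-tree code gets these from <linux/err.h>. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical registration helper that fails the way fixed_phy_register() does. */
static void *register_phy(int should_fail)
{
    return should_fail ? ERR_PTR(-ENODEV) : (void *)0x1000;
}

int main(void)
{
    void *phydev = register_phy(1);

    /* The pattern the hunk switches to: no NULL check, and the encoded errno
     * is propagated instead of a hard-coded -ENODEV. */
    if (IS_ERR(phydev)) {
        fprintf(stderr, "failed to register fixed PHY: %ld\n", PTR_ERR(phydev));
        return (int)-PTR_ERR(phydev);
    }
    return 0;
}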
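
In the tg3.c hunk, the open-coded "(bus->number << 8) | devfn" becomes pci_dev_id(), which yields the same 16-bit bus/devfn identifier used as the MDIO bus id. A short sketch of that arithmetic, with a made-up bus, device, and function, is below.

#include <stdio.h>
#include <stdint.h>

/* pci_dev_id(pdev) in the kernel expands to PCI_DEVID(bus->number, devfn),
 * i.e. the same (bus << 8) | devfn value tg3 used to compute by hand. */
static uint16_t pci_devid(uint8_t bus, uint8_t devfn)
{
    return (uint16_t)((bus << 8) | devfn);
}

int main(void)
{
    /* Hypothetical device at bus 0x03, device 0x1c, function 0x04:
     * devfn packs the device in bits 7..3 and the function in bits 2..0. */
    uint8_t devfn = (uint8_t)((0x1c << 3) | 0x04);

    printf("mdio bus id suffix: %x\n", (unsigned)pci_devid(0x03, devfn));
    return 0;
}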