107 files changed, 1368 insertions, 404 deletions
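The first hunk below documents a new optional reset-post-delay-us property for MDIO buses. As a rough sketch of how a bus driver might honor it — the property name comes from the binding, everything else here is hypothetical:

    #include <linux/delay.h>
    #include <linux/gpio/consumer.h>
    #include <linux/of.h>

    static void example_mdio_bus_reset(struct device_node *np,
    				       struct gpio_desc *reset_gpiod,
    				       u32 reset_pulse_us)
    {
    	u32 post_delay_us = 0;

    	/* Optional property; zero means no extra settle time is needed. */
    	of_property_read_u32(np, "reset-post-delay-us", &post_delay_us);

    	gpiod_set_value_cansleep(reset_gpiod, 1);	/* assert RESET */
    	usleep_range(reset_pulse_us, reset_pulse_us + 100);
    	gpiod_set_value_cansleep(reset_gpiod, 0);	/* deassert RESET */

    	/* Give the slowest device time to become ready for MDIO
    	 * traffic, e.g. before Ethernet PHY ID auto-detection.
    	 */
    	if (post_delay_us)
    		usleep_range(post_delay_us, post_delay_us + 100);
    }
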
diff --git a/Documentation/devicetree/bindings/net/mdio.yaml b/Documentation/devicetree/bindings/net/mdio.yaml index d6a3bf8550eb..26afb556dfae 100644 --- a/Documentation/devicetree/bindings/net/mdio.yaml +++ b/Documentation/devicetree/bindings/net/mdio.yaml @@ -39,6 +39,13 @@ properties: and must therefore be appropriately determined based on all devices requirements (maximum value of all per-device RESET pulse widths). + reset-post-delay-us: + description: + Delay after reset deassert in microseconds. It applies to all MDIO + devices and it's determined by how fast all devices are ready for + communication. This delay happens just before e.g. Ethernet PHY + type ID auto detection. + clock-frequency: description: Desired MDIO bus clock frequency in Hz. Values greater than IEEE 802.3 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b3d8e00e7671..39be444534d0 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -1034,6 +1034,7 @@ static enum enq_res do_tx(struct sk_buff *skb) u32 dma_rd,dma_wr; u32 size; /* in words */ int aal5,dma_size,i,j; + unsigned char skb_data3; DPRINTK(">do_tx\n"); NULLCHECK(skb); @@ -1108,6 +1109,7 @@ DPRINTK("iovcnt = %d\n",skb_shinfo(skb)->nr_frags); vcc->dev->number); return enq_jam; } + skb_data3 = skb->data[3]; paddr = dma_map_single(&eni_dev->pci_dev->dev,skb->data,skb->len, DMA_TO_DEVICE); ENI_PRV_PADDR(skb) = paddr; @@ -1150,7 +1152,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */ (size/(ATM_CELL_PAYLOAD/4)),tx->send+tx->tx_pos*4); /*printk("dsc = 0x%08lx\n",(unsigned long) readl(tx->send+tx->tx_pos*4));*/ writel((vcc->vci << MID_SEG_VCI_SHIFT) | - (aal5 ? 0 : (skb->data[3] & 0xf)) | + (aal5 ? 0 : (skb_data3 & 0xf)) | (ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? MID_SEG_CLP : 0), tx->send+((tx->tx_pos+1) & (tx->words-1))*4); DPRINTK("size: %d, len:%d\n",size,skb->len); diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index df51680e8931..65a3886f68c9 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -835,6 +835,7 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc, unsigned long flags; int error; int aal; + u32 word4; if (skb->len == 0) { printk("%s: invalid skb->len (%d)\n", card->name, skb->len); @@ -846,6 +847,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc, tbd = &IDT77252_PRV_TBD(skb); vcc = ATM_SKB(skb)->vcc; + word4 = (skb->data[0] << 24) | (skb->data[1] << 16) | + (skb->data[2] << 8) | (skb->data[3] << 0); IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data, skb->len, DMA_TO_DEVICE); @@ -859,8 +862,7 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc, tbd->word_1 = SAR_TBD_OAM | ATM_CELL_PAYLOAD | SAR_TBD_EPDU; tbd->word_2 = IDT77252_PRV_PADDR(skb) + 4; tbd->word_3 = 0x00000000; - tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) | - (skb->data[2] << 8) | (skb->data[3] << 0); + tbd->word_4 = word4; if (test_bit(VCF_RSV, &vc->flags)) vc = card->vcs[0]; @@ -890,8 +892,7 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc, tbd->word_2 = IDT77252_PRV_PADDR(skb) + 4; tbd->word_3 = 0x00000000; - tbd->word_4 = (skb->data[0] << 24) | (skb->data[1] << 16) | - (skb->data[2] << 8) | (skb->data[3] << 0); + tbd->word_4 = word4; break; case ATM_AAL5: diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index a5566de82853..f1e484477e35 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -408,6 +408,112 @@ qca8k_fdb_flush(struct qca8k_priv *priv) mutex_unlock(&priv->reg_mutex); } +static int 
+qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) +{ + u32 reg; + + /* Set the command and VLAN index */ + reg = QCA8K_VTU_FUNC1_BUSY; + reg |= cmd; + reg |= vid << QCA8K_VTU_FUNC1_VID_S; + + /* Write the function register triggering the table access */ + qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg); + + /* wait for completion */ + if (qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY)) + return -ETIMEDOUT; + + /* Check for table full violation when adding an entry */ + if (cmd == QCA8K_VLAN_LOAD) { + reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC1); + if (reg & QCA8K_VTU_FUNC1_FULL) + return -ENOMEM; + } + + return 0; +} + +static int +qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged) +{ + u32 reg; + int ret; + + /* + We do the right thing with VLAN 0 and treat it as untagged while + preserving the tag on egress. + */ + if (vid == 0) + return 0; + + mutex_lock(&priv->reg_mutex); + ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); + if (ret < 0) + goto out; + + reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0); + reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN; + reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port)); + if (untagged) + reg |= QCA8K_VTU_FUNC0_EG_MODE_UNTAG << + QCA8K_VTU_FUNC0_EG_MODE_S(port); + else + reg |= QCA8K_VTU_FUNC0_EG_MODE_TAG << + QCA8K_VTU_FUNC0_EG_MODE_S(port); + + qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); + +out: + mutex_unlock(&priv->reg_mutex); + + return ret; +} + +static int +qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid) +{ + u32 reg, mask; + int ret, i; + bool del; + + mutex_lock(&priv->reg_mutex); + ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); + if (ret < 0) + goto out; + + reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0); + reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port)); + reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT << + QCA8K_VTU_FUNC0_EG_MODE_S(port); + + /* Check if we're the last member to be removed */ + del = true; + for (i = 0; i < QCA8K_NUM_PORTS; i++) { + mask = QCA8K_VTU_FUNC0_EG_MODE_NOT; + mask <<= QCA8K_VTU_FUNC0_EG_MODE_S(i); + + if ((reg & mask) != mask) { + del = false; + break; + } + } + + if (del) { + ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid); + } else { + qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); + ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid); + } + +out: + mutex_unlock(&priv->reg_mutex); + + return ret; +} + static void qca8k_mib_init(struct qca8k_priv *priv) { @@ -663,10 +769,11 @@ qca8k_setup(struct dsa_switch *ds) * default egress vid */ qca8k_rmw(priv, QCA8K_EGRESS_VLAN(i), - 0xffff << shift, 1 << shift); + 0xfff << shift, + QCA8K_PORT_VID_DEF << shift); qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(i), - QCA8K_PORT_VLAN_CVID(1) | - QCA8K_PORT_VLAN_SVID(1)); + QCA8K_PORT_VLAN_CVID(QCA8K_PORT_VID_DEF) | + QCA8K_PORT_VLAN_SVID(QCA8K_PORT_VID_DEF)); } } @@ -1133,7 +1240,7 @@ qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, { /* Set the vid to the port vlan id if no vid is set */ if (!vid) - vid = 1; + vid = QCA8K_PORT_VID_DEF; return qca8k_fdb_add(priv, addr, port_mask, vid, QCA8K_ATU_STATUS_STATIC); @@ -1157,7 +1264,7 @@ qca8k_port_fdb_del(struct dsa_switch *ds, int port, u16 port_mask = BIT(port); if (!vid) - vid = 1; + vid = QCA8K_PORT_VID_DEF; return qca8k_fdb_del(priv, addr, port_mask, vid); } @@ -1186,6 +1293,76 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port, return 0; } +static int +qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, 
bool vlan_filtering) +{ + struct qca8k_priv *priv = ds->priv; + + if (vlan_filtering) { + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), + QCA8K_PORT_LOOKUP_VLAN_MODE, + QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE); + } else { + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), + QCA8K_PORT_LOOKUP_VLAN_MODE, + QCA8K_PORT_LOOKUP_VLAN_MODE_NONE); + } + + return 0; +} + +static int +qca8k_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + return 0; +} + +static void +qca8k_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct qca8k_priv *priv = ds->priv; + int ret = 0; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid) + ret = qca8k_vlan_add(priv, port, vid, untagged); + + if (ret) + dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret); + + if (pvid) { + int shift = 16 * (port % 2); + + qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port), + 0xfff << shift, + vlan->vid_end << shift); + qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port), + QCA8K_PORT_VLAN_CVID(vlan->vid_end) | + QCA8K_PORT_VLAN_SVID(vlan->vid_end)); + } +} + +static int +qca8k_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct qca8k_priv *priv = ds->priv; + int ret = 0; + u16 vid; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end && !ret; ++vid) + ret = qca8k_vlan_del(priv, port, vid); + + if (ret) + dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret); + + return ret; +} + static enum dsa_tag_protocol qca8k_get_tag_protocol(struct dsa_switch *ds, int port, enum dsa_tag_protocol mp) @@ -1211,6 +1388,10 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .port_fdb_add = qca8k_port_fdb_add, .port_fdb_del = qca8k_port_fdb_del, .port_fdb_dump = qca8k_port_fdb_dump, + .port_vlan_filtering = qca8k_port_vlan_filtering, + .port_vlan_prepare = qca8k_port_vlan_prepare, + .port_vlan_add = qca8k_port_vlan_add, + .port_vlan_del = qca8k_port_vlan_del, .phylink_validate = qca8k_phylink_validate, .phylink_mac_link_state = qca8k_phylink_mac_link_state, .phylink_mac_config = qca8k_phylink_mac_config, @@ -1261,6 +1442,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) priv->ds->dev = &mdiodev->dev; priv->ds->num_ports = QCA8K_NUM_PORTS; + priv->ds->configure_vlan_while_not_filtering = true; priv->ds->priv = priv; priv->ops = qca8k_switch_ops; priv->ds->ops = &priv->ops; diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 31439396401c..7ca4b93e0bb5 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -22,6 +22,8 @@ #define QCA8K_CPU_PORT 0 +#define QCA8K_PORT_VID_DEF 1 + /* Global control registers */ #define QCA8K_REG_MASK_CTRL 0x000 #define QCA8K_MASK_CTRL_ID_M 0xff @@ -126,6 +128,19 @@ #define QCA8K_ATU_FUNC_FULL BIT(12) #define QCA8K_ATU_FUNC_PORT_M 0xf #define QCA8K_ATU_FUNC_PORT_S 8 +#define QCA8K_REG_VTU_FUNC0 0x610 +#define QCA8K_VTU_FUNC0_VALID BIT(20) +#define QCA8K_VTU_FUNC0_IVL_EN BIT(19) +#define QCA8K_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2) +#define QCA8K_VTU_FUNC0_EG_MODE_MASK 3 +#define QCA8K_VTU_FUNC0_EG_MODE_UNMOD 0 +#define QCA8K_VTU_FUNC0_EG_MODE_UNTAG 1 +#define QCA8K_VTU_FUNC0_EG_MODE_TAG 2 +#define QCA8K_VTU_FUNC0_EG_MODE_NOT 3 +#define QCA8K_REG_VTU_FUNC1 0x614 +#define QCA8K_VTU_FUNC1_BUSY BIT(31) +#define QCA8K_VTU_FUNC1_VID_S 16 +#define QCA8K_VTU_FUNC1_FULL BIT(4) #define 
QCA8K_REG_GLOBAL_FW_CTRL0 0x620 #define QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN BIT(10) #define QCA8K_REG_GLOBAL_FW_CTRL1 0x624 @@ -135,6 +150,11 @@ #define QCA8K_GLOBAL_FW_CTRL1_UC_DP_S 0 #define QCA8K_PORT_LOOKUP_CTRL(_i) (0x660 + (_i) * 0xc) #define QCA8K_PORT_LOOKUP_MEMBER GENMASK(6, 0) +#define QCA8K_PORT_LOOKUP_VLAN_MODE GENMASK(9, 8) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_NONE (0 << 8) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_FALLBACK (1 << 8) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_CHECK (2 << 8) +#define QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE (3 << 8) #define QCA8K_PORT_LOOKUP_STATE_MASK GENMASK(18, 16) #define QCA8K_PORT_LOOKUP_STATE_DISABLED (0 << 16) #define QCA8K_PORT_LOOKUP_STATE_BLOCKING (1 << 16) @@ -178,6 +198,15 @@ enum qca8k_fdb_cmd { QCA8K_FDB_SEARCH = 7, }; +enum qca8k_vlan_cmd { + QCA8K_VLAN_FLUSH = 1, + QCA8K_VLAN_LOAD = 2, + QCA8K_VLAN_PURGE = 3, + QCA8K_VLAN_REMOVE_PORT = 4, + QCA8K_VLAN_NEXT = 5, + QCA8K_VLAN_READ = 6, +}; + struct ar8xxx_port_status { int enabled; }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index c38a4b8a14cb..611875ef2cd1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -786,7 +786,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, int err = 0; if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) { - err = EBADRQC; + err = -EBADRQC; goto err_exit; } for (cfg->mc_list_count = 0U; cfg->mc_list_count < count; ++cfg->mc_list_count) { diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 43d11c38b38a..4cddd628d41b 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1167,7 +1167,7 @@ static int cn23xx_get_pf_num(struct octeon_device *oct) oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & CN23XX_PCIE_SRIOV_FDL_MASK); } else { - ret = EINVAL; + ret = -EINVAL; /* Under some virtual environments, extended PCI regs are * inaccessible, in which case the above read will have failed. 
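
The atlantic and liquidio hunks above — and the enetc hunks that follow — fix the same class of bug: returning a positive error constant where callers expect a negative errno. A minimal illustration, with all names hypothetical:

    /* Kernel convention: 0 on success, -Exxx on failure.  Callers test
     * "if (err)" or "if (ret < 0)", so a bare EBADRQC/EINVAL (a positive
     * value) can slip through negative-errno checks and read as success.
     */
    static int example_set_mc_list(struct example_hw *hw, unsigned int count)
    {
    	if (count > EXAMPLE_MAC_MAX - EXAMPLE_MAC_MIN)
    		return -EBADRQC;	/* was: return EBADRQC; */

    	return 0;
    }
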
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 12ef9ddd1e54..9f3173f86eed 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -31,7 +31,7 @@ enum cxgb4_ethtool_tests { }; static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = { - "Loop back test", + "Loop back test (offline)", }; static const char * const flash_region_strings[] = { @@ -2095,12 +2095,13 @@ static void cxgb4_self_test(struct net_device *netdev, memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST); - if (!(adap->flags & CXGB4_FW_OK)) { + if (!(adap->flags & CXGB4_FULL_INIT_DONE) || + !(adap->flags & CXGB4_FW_OK)) { eth_test->flags |= ETH_TEST_FL_FAILED; return; } - if (eth_test->flags == ETH_TEST_FL_OFFLINE) + if (eth_test->flags & ETH_TEST_FL_OFFLINE) cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]); if (data[CXGB4_ETHTOOL_LB_TEST]) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 6251444ffa58..f642c1b475c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -80,6 +80,19 @@ static void cxgb4_process_flow_match(struct net_device *dev, struct flow_rule *rule, struct ch_filter_specification *fs) { + u16 addr_type = 0; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control match; + + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + } + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_match_basic match; u16 ethtype_key, ethtype_mask; @@ -102,7 +115,7 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->mask.proto = match.mask->ip_proto; } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match; flow_rule_match_ipv4_addrs(rule, &match); @@ -117,7 +130,7 @@ static void cxgb4_process_flow_match(struct net_device *dev, memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src)); } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { struct flow_match_ipv6_addrs match; flow_rule_match_ipv6_addrs(rule, &match); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c index a9939550fa8f..1c4a535890da 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c @@ -1130,7 +1130,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, !is_zero_ether_addr(match.mask->src)) { NL_SET_ERR_MSG_MOD(extack, "Cannot match on both source and destination MAC"); - err = EINVAL; + err = -EINVAL; goto free_filter; } @@ -1138,7 +1138,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, if (!is_broadcast_ether_addr(match.mask->dst)) { NL_SET_ERR_MSG_MOD(extack, "Masked matching on destination MAC not supported"); - err = EINVAL; + err = -EINVAL; goto free_filter; } ether_addr_copy(filter->sid.dst_mac, match.key->dst); @@ -1149,7 +1149,7 @@ static int enetc_psfp_parse_clsflower(struct 
enetc_ndev_priv *priv, if (!is_broadcast_ether_addr(match.mask->src)) { NL_SET_ERR_MSG_MOD(extack, "Masked matching on source MAC not supported"); - err = EINVAL; + err = -EINVAL; goto free_filter; } ether_addr_copy(filter->sid.src_mac, match.key->src); @@ -1157,7 +1157,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv, } } else { NL_SET_ERR_MSG_MOD(extack, "Unsupported, must include ETH_ADDRS"); - err = EINVAL; + err = -EINVAL; goto free_filter; } diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index f151d6e111dd..ef67e8599b39 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -1398,8 +1398,7 @@ static void enable_time_stamp(struct fman *fman) { struct fman_fpm_regs __iomem *fpm_rg = fman->fpm_regs; u16 fm_clk_freq = fman->state->fm_clk_freq; - u32 tmp, intgr, ts_freq; - u64 frac; + u32 tmp, intgr, ts_freq, frac; ts_freq = (u32)(1 << fman->state->count1_micro_bit); /* configure timestamp so that bit 8 will count 1 microsecond diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 004c266802a8..bce3c9398887 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -1200,7 +1200,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->multicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; @@ -1213,7 +1213,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr) list_for_each(pos, &dtsec->unicast_addr_hash->lsts[bucket]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h index dd6d0526f6c1..19f327efdaff 100644 --- a/drivers/net/ethernet/freescale/fman/fman_mac.h +++ b/drivers/net/ethernet/freescale/fman/fman_mac.h @@ -252,7 +252,7 @@ static inline struct eth_hash_t *alloc_hash_table(u16 size) struct eth_hash_t *hash; /* Allocate address hash table */ - hash = kmalloc_array(size, sizeof(struct eth_hash_t *), GFP_KERNEL); + hash = kmalloc(sizeof(*hash), GFP_KERNEL); if (!hash) return NULL; diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index a5500ede4070..645764abdaae 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -852,7 +852,6 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority, tmp = ioread32be(®s->command_config); tmp &= ~CMD_CFG_PFC_MODE; - priority = 0; iowrite32be(tmp, ®s->command_config); @@ -982,7 +981,7 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr) list_for_each(pos, &memac->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 87b26f063cc8..c27df153f895 100644 --- 
a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1767,6 +1767,7 @@ static int fman_port_probe(struct platform_device *of_dev) struct fman_port *port; struct fman *fman; struct device_node *fm_node, *port_node; + struct platform_device *fm_pdev; struct resource res; struct resource *dev_res; u32 val; @@ -1791,8 +1792,14 @@ static int fman_port_probe(struct platform_device *of_dev) goto return_err; } - fman = dev_get_drvdata(&of_find_device_by_node(fm_node)->dev); + fm_pdev = of_find_device_by_node(fm_node); of_node_put(fm_node); + if (!fm_pdev) { + err = -EINVAL; + goto return_err; + } + + fman = dev_get_drvdata(&fm_pdev->dev); if (!fman) { err = -EINVAL; goto return_err; diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index 8c7eb878d5b4..41946b16f6c7 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c @@ -626,7 +626,7 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr) list_for_each(pos, &tgec->multicast_addr_hash->lsts[hash]) { hash_entry = ETH_HASH_ENTRY_OBJ(pos); - if (hash_entry->addr == addr) { + if (hash_entry && hash_entry->addr == addr) { list_del_init(&hash_entry->node); kfree(hash_entry); break; diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 7d194d2a031a..fe140ff38f74 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -256,6 +256,7 @@ struct ice_vsi { u32 tx_busy; u32 rx_buf_failed; u32 rx_page_failed; + u32 rx_gro_dropped; u16 num_q_vectors; u16 base_vector; /* IRQ base for OS reserved vectors */ enum ice_vsi_type type; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 7fec9309d379..ba9375218fef 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -1581,7 +1581,7 @@ struct ice_aqc_get_set_rss_keys { struct ice_aqc_get_set_rss_lut { #define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15) #define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0 -#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) +#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) __le16 vsi_id; #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0 #define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index ad5941ec7b11..34abfcea9858 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1027,23 +1027,23 @@ void ice_deinit_hw(struct ice_hw *hw) */ enum ice_status ice_check_reset(struct ice_hw *hw) { - u32 cnt, reg = 0, grst_delay, uld_mask; + u32 cnt, reg = 0, grst_timeout, uld_mask; /* Poll for Device Active state in case a recent CORER, GLOBR, * or EMPR has occurred. The grst delay value is in 100ms units. * Add 1sec for outstanding AQ commands that can take a long time. 
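
The fman_port probe fix above unchains dev_get_drvdata(&of_find_device_by_node(fm_node)->dev) so the lookup result is NULL-checked before it is dereferenced. Distilled into a standalone sketch (error paths simplified; a fuller version would also drop the device reference with put_device() once done):

    	struct platform_device *fm_pdev;

    	fm_pdev = of_find_device_by_node(fm_node);	/* may be NULL */
    	of_node_put(fm_node);				/* node no longer needed */
    	if (!fm_pdev)
    		return -EINVAL;				/* fman not probed yet */

    	fman = dev_get_drvdata(&fm_pdev->dev);		/* now safe */
    	if (!fman)
    		return -EINVAL;
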
*/ - grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> - GLGEN_RSTCTL_GRSTDEL_S) + 10; + grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >> + GLGEN_RSTCTL_GRSTDEL_S) + 10; - for (cnt = 0; cnt < grst_delay; cnt++) { + for (cnt = 0; cnt < grst_timeout; cnt++) { mdelay(100); reg = rd32(hw, GLGEN_RSTAT); if (!(reg & GLGEN_RSTAT_DEVSTATE_M)) break; } - if (cnt == grst_delay) { + if (cnt == grst_timeout) { ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n"); return ICE_ERR_RESET_FAILED; @@ -1718,8 +1718,7 @@ ice_alloc_res_exit: * @num: number of resources * @res: pointer to array that contains the resources to free */ -enum ice_status -ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) +enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res) { struct ice_aqc_alloc_free_res_elem *buf; enum ice_status status; @@ -2121,7 +2120,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, * @cap_count: the number of capabilities * * Helper device to parse device (0x000B) capabilities list. For - * capabilities shared between device and device, this relies on + * capabilities shared between device and function, this relies on * ice_parse_common_caps. * * Loop through the list of provided capabilities and extract the relevant diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index dbbd8b6f9d1a..111d6bfe4222 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -357,7 +357,7 @@ void ice_devlink_unregister(struct ice_pf *pf) * * Create and register a devlink_port for this PF. Note that although each * physical function is connected to a separate devlink instance, the port - * will still be numbered according to the physical function id. + * will still be numbered according to the physical function ID. * * Return: zero on success or an error code on failure. 
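
The ice_check_reset() hunk just above renames grst_delay to grst_timeout because the value bounds a polling loop rather than encoding a sleep. The idiom, reduced to a sketch with hypothetical register helpers:

    static int example_wait_device_active(struct example_hw *hw)
    {
    	/* Delay field is in 100 ms units; add 10 ticks (1 s) of slack
    	 * for outstanding admin-queue commands.
    	 */
    	u32 timeout = example_read_grst_delay(hw) + 10;
    	u32 cnt;

    	for (cnt = 0; cnt < timeout; cnt++) {
    		mdelay(100);
    		if (!(example_read_rstat(hw) & EXAMPLE_DEVSTATE_M))
    			return 0;	/* device active */
    	}

    	return -ETIMEDOUT;	/* the driver's "cnt == grst_timeout" case */
    }
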
*/ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 06b93e97892d..9e8e9531cd87 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -59,8 +59,11 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = { ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed), ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), + ICE_VSI_STAT("rx_gro_dropped", rx_gro_dropped), ICE_VSI_STAT("tx_errors", eth_stats.tx_errors), ICE_VSI_STAT("tx_linearize", tx_linearize), + ICE_VSI_STAT("tx_busy", tx_busy), + ICE_VSI_STAT("tx_restart", tx_restart), }; enum ice_ethtool_test_id { @@ -100,6 +103,7 @@ static const struct ice_stats ice_gstrings_pf_stats[] = { ICE_PF_STAT("rx_broadcast.nic", stats.eth.rx_broadcast), ICE_PF_STAT("tx_broadcast.nic", stats.eth.tx_broadcast), ICE_PF_STAT("tx_errors.nic", stats.eth.tx_errors), + ICE_PF_STAT("tx_timeout.nic", tx_timeout_count), ICE_PF_STAT("rx_size_64.nic", stats.rx_size_64), ICE_PF_STAT("tx_size_64.nic", stats.tx_size_64), ICE_PF_STAT("rx_size_127.nic", stats.rx_size_127), diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 3c217e51b27e..b17ae3e20157 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -644,7 +644,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max) * This function generates a key from a value, a don't care mask and a never * match mask. * upd, dc, and nm are optional parameters, and can be NULL: - * upd == NULL --> udp mask is all 1's (update all bits) + * upd == NULL --> upd mask is all 1's (update all bits) * dc == NULL --> dc mask is all 0's (no don't care bits) * nm == NULL --> nm mask is all 0's (no never match bits) */ @@ -2921,6 +2921,8 @@ static void ice_free_flow_profs(struct ice_hw *hw, u8 blk_idx) ICE_FLOW_ENTRY_HNDL(e)); list_del(&p->l_entry); + + mutex_destroy(&p->entries_lock); devm_kfree(ice_hw_to_dev(hw), p); } mutex_unlock(&hw->fl_profs_locks[blk_idx]); @@ -3038,7 +3040,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw) memset(prof_redir->t, 0, prof_redir->count * sizeof(*prof_redir->t)); - memset(es->t, 0, es->count * sizeof(*es->t)); + memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw); memset(es->ref_count, 0, es->count * sizeof(*es->ref_count)); memset(es->written, 0, es->count * sizeof(*es->written)); } @@ -3149,10 +3151,12 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw) es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->ref_count), GFP_KERNEL); + if (!es->ref_count) + goto err; es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count, sizeof(*es->written), GFP_KERNEL); - if (!es->ref_count) + if (!es->written) goto err; } return 0; @@ -3874,16 +3878,16 @@ err_ice_add_prof: } /** - * ice_search_prof_id_low - Search for a profile tracking ID low level + * ice_search_prof_id - Search for a profile tracking ID * @hw: pointer to the HW struct * @blk: hardware block * @id: profile tracking ID * - * This will search for a profile tracking ID which was previously added. This - * version assumes that the caller has already acquired the prof map lock. + * This will search for a profile tracking ID which was previously added. + * The profile map lock should be held before calling this function. 
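
The reworked kernel-doc just above states that the profile map lock must be held before calling ice_search_prof_id(). One way to make such a contract self-checking — a suggestion, not something this patch does — is a lockdep assertion:

    static struct ice_prof_map *
    ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
    {
    	/* Free when lockdep is off; splats under CONFIG_PROVE_LOCKING
    	 * if a caller forgot to take the lock.
    	 */
    	lockdep_assert_held(&hw->blk[blk].es.prof_map_lock);

    	/* ... walk the profile map as before ... */
    	return NULL;	/* placeholder body for the sketch */
    }
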
*/ static struct ice_prof_map * -ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id) +ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) { struct ice_prof_map *entry = NULL; struct ice_prof_map *map; @@ -3898,26 +3902,6 @@ ice_search_prof_id_low(struct ice_hw *hw, enum ice_block blk, u64 id) } /** - * ice_search_prof_id - Search for a profile tracking ID - * @hw: pointer to the HW struct - * @blk: hardware block - * @id: profile tracking ID - * - * This will search for a profile tracking ID which was previously added. - */ -static struct ice_prof_map * -ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id) -{ - struct ice_prof_map *entry; - - mutex_lock(&hw->blk[blk].es.prof_map_lock); - entry = ice_search_prof_id_low(hw, blk, id); - mutex_unlock(&hw->blk[blk].es.prof_map_lock); - - return entry; -} - -/** * ice_vsig_prof_id_count - count profiles in a VSIG * @hw: pointer to the HW struct * @blk: hardware block @@ -4133,7 +4117,7 @@ enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id) mutex_lock(&hw->blk[blk].es.prof_map_lock); - pmap = ice_search_prof_id_low(hw, blk, id); + pmap = ice_search_prof_id(hw, blk, id); if (!pmap) { status = ICE_ERR_DOES_NOT_EXIST; goto err_ice_rem_prof; @@ -4166,22 +4150,28 @@ static enum ice_status ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, struct list_head *chg) { + enum ice_status status = 0; struct ice_prof_map *map; struct ice_chs_chg *p; u16 i; + mutex_lock(&hw->blk[blk].es.prof_map_lock); /* Get the details on the profile specified by the handle ID */ map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_get_prof; + } for (i = 0; i < map->ptg_cnt; i++) if (!hw->blk[blk].es.written[map->prof_id]) { /* add ES to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); - if (!p) + if (!p) { + status = ICE_ERR_NO_MEMORY; goto err_ice_get_prof; + } p->type = ICE_PTG_ES_ADD; p->ptype = 0; @@ -4196,11 +4186,10 @@ ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl, list_add(&p->list_entry, chg); } - return 0; - err_ice_get_prof: + mutex_unlock(&hw->blk[blk].es.prof_map_lock); /* let caller clean up the change list */ - return ICE_ERR_NO_MEMORY; + return status; } /** @@ -4254,17 +4243,23 @@ static enum ice_status ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, struct list_head *lst, u64 hdl) { + enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *p; u16 i; + mutex_lock(&hw->blk[blk].es.prof_map_lock); map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_add_prof_to_lst; + } p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); - if (!p) - return ICE_ERR_NO_MEMORY; + if (!p) { + status = ICE_ERR_NO_MEMORY; + goto err_ice_add_prof_to_lst; + } p->profile_cookie = map->profile_cookie; p->prof_id = map->prof_id; @@ -4278,7 +4273,9 @@ ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk, list_add(&p->list, lst); - return 0; +err_ice_add_prof_to_lst: + mutex_unlock(&hw->blk[blk].es.prof_map_lock); + return status; } /** @@ -4496,16 +4493,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 }; u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 }; + 
enum ice_status status = 0; struct ice_prof_map *map; struct ice_vsig_prof *t; struct ice_chs_chg *p; u16 vsig_idx, i; - /* Get the details on the profile specified by the handle ID */ - map = ice_search_prof_id(hw, blk, hdl); - if (!map) - return ICE_ERR_DOES_NOT_EXIST; - /* Error, if this VSIG already has this profile */ if (ice_has_prof_vsig(hw, blk, vsig, hdl)) return ICE_ERR_ALREADY_EXISTS; @@ -4515,19 +4508,28 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, if (!t) return ICE_ERR_NO_MEMORY; + mutex_lock(&hw->blk[blk].es.prof_map_lock); + /* Get the details on the profile specified by the handle ID */ + map = ice_search_prof_id(hw, blk, hdl); + if (!map) { + status = ICE_ERR_DOES_NOT_EXIST; + goto err_ice_add_prof_id_vsig; + } + t->profile_cookie = map->profile_cookie; t->prof_id = map->prof_id; t->tcam_count = map->ptg_cnt; /* create TCAM entries */ for (i = 0; i < map->ptg_cnt; i++) { - enum ice_status status; u16 tcam_idx; /* add TCAM to change list */ p = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*p), GFP_KERNEL); - if (!p) + if (!p) { + status = ICE_ERR_NO_MEMORY; goto err_ice_add_prof_id_vsig; + } /* allocate the TCAM entry index */ status = ice_alloc_tcam_ent(hw, blk, &tcam_idx); @@ -4571,12 +4573,14 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl, list_add(&t->list, &hw->blk[blk].xlt2.vsig_tbl[vsig_idx].prop_lst); - return 0; + mutex_unlock(&hw->blk[blk].es.prof_map_lock); + return status; err_ice_add_prof_id_vsig: + mutex_unlock(&hw->blk[blk].es.prof_map_lock); /* let caller clean up the change list */ devm_kfree(ice_hw_to_dev(hw), t); - return ICE_ERR_NO_MEMORY; + return status; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index d74e5290677f..fe677621dd51 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -1187,7 +1187,7 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) if (list_empty(&hw->fl_profs[blk])) return 0; - mutex_lock(&hw->fl_profs_locks[blk]); + mutex_lock(&hw->rss_locks); list_for_each_entry_safe(p, t, &hw->fl_profs[blk], l_entry) if (test_bit(vsi_handle, p->vsis)) { status = ice_flow_disassoc_prof(hw, blk, p, vsi_handle); @@ -1195,12 +1195,12 @@ enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle) break; if (bitmap_empty(p->vsis, ICE_MAX_VSI)) { - status = ice_flow_rem_prof_sync(hw, blk, p); + status = ice_flow_rem_prof(hw, blk, p->id); if (status) break; } } - mutex_unlock(&hw->fl_profs_locks[blk]); + mutex_unlock(&hw->rss_locks); return status; } @@ -1597,7 +1597,8 @@ enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle) */ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) { - struct ice_rss_cfg *r, *rss_cfg = NULL; + u64 rss_hash = ICE_HASH_INVALID; + struct ice_rss_cfg *r; /* verify if the protocol header is non zero and VSI is valid */ if (hdrs == ICE_FLOW_SEG_HDR_NONE || !ice_is_vsi_valid(hw, vsi_handle)) @@ -1607,10 +1608,10 @@ u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs) list_for_each_entry(r, &hw->rss_list_head, l_entry) if (test_bit(vsi_handle, r->vsis) && r->packet_hdr == hdrs) { - rss_cfg = r; + rss_hash = r->hashed_flds; break; } mutex_unlock(&hw->rss_locks); - return rss_cfg ? 
rss_cfg->hashed_flds : ICE_HASH_INVALID; + return rss_hash; } diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 92e4abca62a4..90abc8612a6a 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -57,7 +57,7 @@ #define PRTDCB_GENS 0x00083020 #define PRTDCB_GENS_DCBX_STATUS_S 0 #define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0) -#define PRTDCB_TUP2TC 0x001D26C0 /* Reset Source: CORER */ +#define PRTDCB_TUP2TC 0x001D26C0 #define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4)) #define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4)) @@ -362,6 +362,7 @@ #define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) #define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) #define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) +#define PRTRPB_RDPC 0x000AC260 #define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) #define VSIQF_FD_CNT_FD_GCNT_S 0 #define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0) @@ -378,6 +379,5 @@ #define PFPM_WUS_FW_RST_WK_M BIT(31) #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) -#define PRTRPB_RDPC 0x000AC260 #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 14dfbbc1b2cf..4ec24c3e813f 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -601,6 +601,7 @@ struct ice_tlan_ctx { /* shorter macros makes the table fit but are terse */ #define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG +#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG /* Lookup table mapping the HW PTYPE to the bit field for decoding */ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = { @@ -608,6 +609,319 @@ static const struct ice_rx_ptype_decoded ice_ptype_lkup[] = { ICE_PTT_UNUSED_ENTRY(0), ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), ICE_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(3), + ICE_PTT_UNUSED_ENTRY(4), + ICE_PTT_UNUSED_ENTRY(5), + ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(8), + ICE_PTT_UNUSED_ENTRY(9), + ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), + ICE_PTT_UNUSED_ENTRY(12), + ICE_PTT_UNUSED_ENTRY(13), + ICE_PTT_UNUSED_ENTRY(14), + ICE_PTT_UNUSED_ENTRY(15), + ICE_PTT_UNUSED_ENTRY(16), + ICE_PTT_UNUSED_ENTRY(17), + ICE_PTT_UNUSED_ENTRY(18), + ICE_PTT_UNUSED_ENTRY(19), + ICE_PTT_UNUSED_ENTRY(20), + ICE_PTT_UNUSED_ENTRY(21), + + /* Non Tunneled IPv4 */ + ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(25), + ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), + ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), + ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv4 --> IPv4 */ + ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(32), + ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> 
IPv6 */ + ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(39), + ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT */ + ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> IPv4 */ + ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(47), + ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> IPv6 */ + ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(54), + ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC */ + ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ + ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(62), + ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ + ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(69), + ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv4 --> GRE/NAT --> MAC/VLAN */ + ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ + ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), + ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(77), + ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ + ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(84), + ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, 
NOF, ICMP, PAY4), + + /* Non Tunneled IPv6 */ + ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), + ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3), + ICE_PTT_UNUSED_ENTRY(91), + ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), + ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), + ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), + + /* IPv6 --> IPv4 */ + ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), + ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), + ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(98), + ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), + ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), + ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> IPv6 */ + ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), + ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), + ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(105), + ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), + ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), + ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT */ + ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> IPv4 */ + ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), + ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), + ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(113), + ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), + ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), + ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> IPv6 */ + ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), + ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), + ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(120), + ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), + ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), + ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC */ + ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ + ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), + ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), + ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(128), + ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), + ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), + ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ + ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), + ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), + ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(135), + ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), + ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), + ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN */ + ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ + ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), 
+ ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), + ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(143), + ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), + ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), + ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), + + /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ + ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), + ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), + ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), + ICE_PTT_UNUSED_ENTRY(150), + ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), + ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), + ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), + + /* unused entries */ + ICE_PTT_UNUSED_ENTRY(154), + ICE_PTT_UNUSED_ENTRY(155), + ICE_PTT_UNUSED_ENTRY(156), + ICE_PTT_UNUSED_ENTRY(157), + ICE_PTT_UNUSED_ENTRY(158), + ICE_PTT_UNUSED_ENTRY(159), + + ICE_PTT_UNUSED_ENTRY(160), + ICE_PTT_UNUSED_ENTRY(161), + ICE_PTT_UNUSED_ENTRY(162), + ICE_PTT_UNUSED_ENTRY(163), + ICE_PTT_UNUSED_ENTRY(164), + ICE_PTT_UNUSED_ENTRY(165), + ICE_PTT_UNUSED_ENTRY(166), + ICE_PTT_UNUSED_ENTRY(167), + ICE_PTT_UNUSED_ENTRY(168), + ICE_PTT_UNUSED_ENTRY(169), + + ICE_PTT_UNUSED_ENTRY(170), + ICE_PTT_UNUSED_ENTRY(171), + ICE_PTT_UNUSED_ENTRY(172), + ICE_PTT_UNUSED_ENTRY(173), + ICE_PTT_UNUSED_ENTRY(174), + ICE_PTT_UNUSED_ENTRY(175), + ICE_PTT_UNUSED_ENTRY(176), + ICE_PTT_UNUSED_ENTRY(177), + ICE_PTT_UNUSED_ENTRY(178), + ICE_PTT_UNUSED_ENTRY(179), + + ICE_PTT_UNUSED_ENTRY(180), + ICE_PTT_UNUSED_ENTRY(181), + ICE_PTT_UNUSED_ENTRY(182), + ICE_PTT_UNUSED_ENTRY(183), + ICE_PTT_UNUSED_ENTRY(184), + ICE_PTT_UNUSED_ENTRY(185), + ICE_PTT_UNUSED_ENTRY(186), + ICE_PTT_UNUSED_ENTRY(187), + ICE_PTT_UNUSED_ENTRY(188), + ICE_PTT_UNUSED_ENTRY(189), + + ICE_PTT_UNUSED_ENTRY(190), + ICE_PTT_UNUSED_ENTRY(191), + ICE_PTT_UNUSED_ENTRY(192), + ICE_PTT_UNUSED_ENTRY(193), + ICE_PTT_UNUSED_ENTRY(194), + ICE_PTT_UNUSED_ENTRY(195), + ICE_PTT_UNUSED_ENTRY(196), + ICE_PTT_UNUSED_ENTRY(197), + ICE_PTT_UNUSED_ENTRY(198), + ICE_PTT_UNUSED_ENTRY(199), + + ICE_PTT_UNUSED_ENTRY(200), + ICE_PTT_UNUSED_ENTRY(201), + ICE_PTT_UNUSED_ENTRY(202), + ICE_PTT_UNUSED_ENTRY(203), + ICE_PTT_UNUSED_ENTRY(204), + ICE_PTT_UNUSED_ENTRY(205), + ICE_PTT_UNUSED_ENTRY(206), + ICE_PTT_UNUSED_ENTRY(207), + ICE_PTT_UNUSED_ENTRY(208), + ICE_PTT_UNUSED_ENTRY(209), + + ICE_PTT_UNUSED_ENTRY(210), + ICE_PTT_UNUSED_ENTRY(211), + ICE_PTT_UNUSED_ENTRY(212), + ICE_PTT_UNUSED_ENTRY(213), + ICE_PTT_UNUSED_ENTRY(214), + ICE_PTT_UNUSED_ENTRY(215), + ICE_PTT_UNUSED_ENTRY(216), + ICE_PTT_UNUSED_ENTRY(217), + ICE_PTT_UNUSED_ENTRY(218), + ICE_PTT_UNUSED_ENTRY(219), + + ICE_PTT_UNUSED_ENTRY(220), + ICE_PTT_UNUSED_ENTRY(221), + ICE_PTT_UNUSED_ENTRY(222), + ICE_PTT_UNUSED_ENTRY(223), + ICE_PTT_UNUSED_ENTRY(224), + ICE_PTT_UNUSED_ENTRY(225), + ICE_PTT_UNUSED_ENTRY(226), + ICE_PTT_UNUSED_ENTRY(227), + ICE_PTT_UNUSED_ENTRY(228), + ICE_PTT_UNUSED_ENTRY(229), + + ICE_PTT_UNUSED_ENTRY(230), + ICE_PTT_UNUSED_ENTRY(231), + ICE_PTT_UNUSED_ENTRY(232), + ICE_PTT_UNUSED_ENTRY(233), + ICE_PTT_UNUSED_ENTRY(234), + ICE_PTT_UNUSED_ENTRY(235), + ICE_PTT_UNUSED_ENTRY(236), + ICE_PTT_UNUSED_ENTRY(237), + ICE_PTT_UNUSED_ENTRY(238), + ICE_PTT_UNUSED_ENTRY(239), + + ICE_PTT_UNUSED_ENTRY(240), + ICE_PTT_UNUSED_ENTRY(241), + ICE_PTT_UNUSED_ENTRY(242), + 
ICE_PTT_UNUSED_ENTRY(243), + ICE_PTT_UNUSED_ENTRY(244), + ICE_PTT_UNUSED_ENTRY(245), + ICE_PTT_UNUSED_ENTRY(246), + ICE_PTT_UNUSED_ENTRY(247), + ICE_PTT_UNUSED_ENTRY(248), + ICE_PTT_UNUSED_ENTRY(249), + + ICE_PTT_UNUSED_ENTRY(250), + ICE_PTT_UNUSED_ENTRY(251), + ICE_PTT_UNUSED_ENTRY(252), + ICE_PTT_UNUSED_ENTRY(253), + ICE_PTT_UNUSED_ENTRY(254), + ICE_PTT_UNUSED_ENTRY(255), }; static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 84202c814c3b..f2682776f8c8 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2017,6 +2017,13 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc) if (!vsi) return -EINVAL; + /* Don't enable VLAN pruning if the netdev is currently in promiscuous + * mode. VLAN pruning will be enabled when the interface exits + * promiscuous mode if any VLAN filters are active. + */ + if (vsi->netdev && vsi->netdev->flags & IFF_PROMISC && ena) + return 0; + pf = vsi->back; ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); if (!ctxt) diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index c0bde24ab344..22e3d32463f1 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -369,6 +369,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) ~IFF_PROMISC; goto out_promisc; } + ice_cfg_vlan_pruning(vsi, false, false); } } else { /* Clear Rx filter to remove traffic from wire */ @@ -381,6 +382,8 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi) IFF_PROMISC; goto out_promisc; } + if (vsi->num_vlan > 1) + ice_cfg_vlan_pruning(vsi, true, false); } } } @@ -4467,7 +4470,7 @@ err_reinit: * Power Management callback to quiesce the device and prepare * for D3 transition. 
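
The next two hunks mark ice_suspend()/ice_resume() __maybe_unused. That is the usual pattern for PM callbacks referenced only through a dev_pm_ops table, which compiles to nothing when CONFIG_PM is disabled; a generic sketch, not the ice driver's actual table:

    #include <linux/pm.h>

    static int __maybe_unused example_suspend(struct device *dev)
    {
    	/* quiesce the device for D3 */
    	return 0;
    }

    static int __maybe_unused example_resume(struct device *dev)
    {
    	/* bring the device back up */
    	return 0;
    }

    /* With !CONFIG_PM_SLEEP this expands to an empty table, leaving the
     * two functions unreferenced; __maybe_unused avoids the resulting
     * compiler warning without wrapping them in #ifdefs.
     */
    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
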
*/ -static int ice_suspend(struct device *dev) +static int __maybe_unused ice_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ice_pf *pf; @@ -4531,7 +4534,7 @@ static int ice_suspend(struct device *dev) * ice_resume - PM callback for waking up from D3 * @dev: generic device information structure */ -static int ice_resume(struct device *dev) +static int __maybe_unused ice_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); enum ice_reset_req reset_type; @@ -5290,6 +5293,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) vsi->tx_linearize = 0; vsi->rx_buf_failed = 0; vsi->rx_page_failed = 0; + vsi->rx_gro_dropped = 0; rcu_read_lock(); @@ -5304,6 +5308,7 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi) vsi_stats->rx_bytes += bytes; vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed; vsi->rx_page_failed += ring->rx_stats.alloc_page_failed; + vsi->rx_gro_dropped += ring->rx_stats.gro_dropped; } /* update XDP Tx rings counters */ @@ -5335,7 +5340,7 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) ice_update_eth_stats(vsi); cur_ns->tx_errors = cur_es->tx_errors; - cur_ns->rx_dropped = cur_es->rx_discards; + cur_ns->rx_dropped = cur_es->rx_discards + vsi->rx_gro_dropped; cur_ns->tx_dropped = cur_es->tx_discards; cur_ns->multicast = cur_es->rx_multicast; diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 355f727563e4..44a228530253 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -170,7 +170,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, return ICE_ERR_PARAM; } - /* query the current node information from FW before additing it + /* query the current node information from FW before adding it * to the SW DB */ status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem); @@ -578,7 +578,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) /** * ice_aq_rl_profile - performs a rate limiting task * @hw: pointer to the HW struct - * @opcode:opcode for add, query, or remove profile(s) + * @opcode: opcode for add, query, or remove profile(s) * @num_profiles: the number of profiles * @buf: pointer to buffer * @buf_size: buffer size in bytes diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index a508d4f463e9..9d0d6b0025cf 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -631,10 +631,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi) dma_addr_t dma; /* since we are recycling buffers we should seldom need to alloc */ - if (likely(page)) { - rx_ring->rx_stats.page_reuse_count++; + if (likely(page)) return true; - } /* alloc new page for storage */ page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); @@ -1033,7 +1031,6 @@ static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf) if (ice_can_reuse_rx_page(rx_buf)) { /* hand second half of page back to the ring */ ice_reuse_rx_page(rx_ring, rx_buf); - rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, @@ -1254,12 +1251,12 @@ construct_skb: * @itr: ITR value to update * * Calculate how big of an increment should be applied to the ITR value passed - * in based on wmem_default, SKB overhead, Ethernet overhead, and the current + * in based on wmem_default, SKB overhead, ethernet overhead, and 
the current * link speed. * * The following is a calculation derived from: * wmem_default / (size + overhead) = desired_pkts_per_int - * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value * * Assuming wmem_default is 212992 and overhead is 640 bytes per @@ -2294,10 +2291,30 @@ static bool __ice_chk_linearize(struct sk_buff *skb) /* Walk through fragments adding latest fragment, testing it, and * then removing stale fragments from the sum. */ - stale = &skb_shinfo(skb)->frags[0]; - for (;;) { + for (stale = &skb_shinfo(skb)->frags[0];; stale++) { + int stale_size = skb_frag_size(stale); + sum += skb_frag_size(frag++); + /* The stale fragment may present us with a smaller + * descriptor than the actual fragment size. To account + * for that we need to remove all the data on the front and + * figure out what the remainder would be in the last + * descriptor associated with the fragment. + */ + if (stale_size > ICE_MAX_DATA_PER_TXD) { + int align_pad = -(skb_frag_off(stale)) & + (ICE_MAX_READ_REQ_SIZE - 1); + + sum -= align_pad; + stale_size -= align_pad; + + do { + sum -= ICE_MAX_DATA_PER_TXD_ALIGNED; + stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED; + } while (stale_size > ICE_MAX_DATA_PER_TXD); + } + /* if sum is negative we failed to make sufficient progress */ if (sum < 0) return true; @@ -2305,7 +2322,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb) if (!nr_frags--) break; - sum -= skb_frag_size(stale++); + sum -= stale_size; } return false; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index e70c4619edc3..51b4df7a59d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -193,7 +193,7 @@ struct ice_rxq_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buf_failed; - u64 page_reuse_count; + u64 gro_dropped; /* GRO returned dropped */ }; /* this enum matches hardware bits and is meant to be used by DYN_CTLN diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 02b12736ea80..bc2f4390b51d 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -191,7 +191,12 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag) if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && (vlan_tag & VLAN_VID_MASK)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - napi_gro_receive(&rx_ring->q_vector->napi, skb); + if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) { + /* this is tracked separately to help us debug stack drops */ + rx_ring->rx_stats.gro_dropped++; + netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n", + rx_ring->q_index); + } } /** diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 1eb83d9b0546..4cdccfadf274 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -321,7 +321,7 @@ struct ice_nvm_info { u32 flash_size; /* Size of available flash in bytes */ u8 major_ver; /* major version of NVM package */ u8 minor_ver; /* minor version of dev starter */ - u8 blank_nvm_mode; /* is NVM empty (no FW present) */ + u8 blank_nvm_mode; /* is NVM empty (no FW present) */ }; struct ice_link_default_override_tlv { diff --git 
a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index 7061730f0f37..71497776ac62 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -932,6 +932,8 @@ static int ice_set_per_vf_res(struct ice_pf *pf) num_msix_per_vf = ICE_NUM_VF_MSIX_MED; } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) { num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL; + } else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) { + num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN; } else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) { num_msix_per_vf = ICE_MIN_INTR_PER_VF; } else { @@ -2972,8 +2974,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->max_frame = qpi->rxq.max_pkt_size; } - /* VF can request to configure less than allocated queues - * or default allocated queues. So update the VSI with new number + /* VF can request to configure less than allocated queues or default + * allocated queues. So update the VSI with new number */ vsi->num_txq = num_txq; vsi->num_rxq = num_rxq; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 049e0b583383..0f519fba3770 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -32,6 +32,7 @@ #define ICE_MAX_RSS_QS_PER_VF 16 #define ICE_NUM_VF_MSIX_MED 17 #define ICE_NUM_VF_MSIX_SMALL 5 +#define ICE_NUM_VF_MSIX_MULTIQ_MIN 3 #define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1) #define ICE_MAX_VF_RESET_TRIES 40 #define ICE_MAX_VF_RESET_SLEEP_MS 20 diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 87862918bc7a..20ac5fca68c6 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -298,7 +298,6 @@ static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid) } } - /** * ice_xsk_umem_disable - disable a UMEM region * @vsi: Current VSI @@ -594,7 +593,6 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget) if (!size) break; - rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean]; rx_buf->xdp->data_end = rx_buf->xdp->data + size; xsk_buff_dma_sync_for_cpu(rx_buf->xdp); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 6a3f356640a0..4298a029be55 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -3508,6 +3508,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, err = mvpp2_rx_refill(port, bm_pool, pp, pool); if (err) { netdev_err(port->dev, "failed to refill BM pools\n"); + dev_kfree_skb_any(skb); goto err_drop_frame; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 095561924bdc..3c57c331729f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -403,8 +403,7 @@ static int ionic_get_coalesce(struct net_device *netdev, { struct ionic_lif *lif = netdev_priv(netdev); - /* Tx uses Rx interrupt */ - coalesce->tx_coalesce_usecs = lif->rx_coalesce_usecs; + coalesce->tx_coalesce_usecs = lif->tx_coalesce_usecs; coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs; return 0; @@ -417,7 +416,8 @@ static int ionic_set_coalesce(struct net_device *netdev, struct ionic_identity *ident; struct ionic_qcq *qcq; unsigned int i; - u32 coal; + u32 
rx_coal; + u32 tx_coal; ident = &lif->ionic->ident; if (ident->dev.intr_coal_div == 0) { @@ -426,26 +426,31 @@ static int ionic_set_coalesce(struct net_device *netdev, return -EIO; } - /* Tx uses Rx interrupt, so only change Rx */ - if (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) { + /* Tx normally shares Rx interrupt, so only change Rx */ + if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) && + coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) { netdev_warn(netdev, "only the rx-usecs can be changed\n"); return -EINVAL; } - /* Convert the usec request to a HW useable value. If they asked + /* Convert the usec request to a HW usable value. If they asked * for non-zero and it resolved to zero, bump it up */ - coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs); - if (!coal && coalesce->rx_coalesce_usecs) - coal = 1; - - if (coal > IONIC_INTR_CTRL_COAL_MAX) + rx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->rx_coalesce_usecs); + if (!rx_coal && coalesce->rx_coalesce_usecs) + rx_coal = 1; + tx_coal = ionic_coal_usec_to_hw(lif->ionic, coalesce->tx_coalesce_usecs); + if (!tx_coal && coalesce->tx_coalesce_usecs) + tx_coal = 1; + + if (rx_coal > IONIC_INTR_CTRL_COAL_MAX || + tx_coal > IONIC_INTR_CTRL_COAL_MAX) return -ERANGE; - /* Save the new value */ + /* Save the new values */ lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs; - if (coal != lif->rx_coalesce_hw) { - lif->rx_coalesce_hw = coal; + if (rx_coal != lif->rx_coalesce_hw) { + lif->rx_coalesce_hw = rx_coal; if (test_bit(IONIC_LIF_F_UP, lif->state)) { for (i = 0; i < lif->nxqs; i++) { @@ -457,6 +462,23 @@ static int ionic_set_coalesce(struct net_device *netdev, } } + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + lif->tx_coalesce_usecs = coalesce->tx_coalesce_usecs; + else + lif->tx_coalesce_usecs = coalesce->rx_coalesce_usecs; + if (tx_coal != lif->tx_coalesce_hw) { + lif->tx_coalesce_hw = tx_coal; + + if (test_bit(IONIC_LIF_F_UP, lif->state)) { + for (i = 0; i < lif->nxqs; i++) { + qcq = lif->txqcqs[i].qcq; + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + qcq->intr.index, + lif->tx_coalesce_hw); + } + } + } + return 0; } @@ -510,29 +532,63 @@ static void ionic_get_channels(struct net_device *netdev, /* report maximum channels */ ch->max_combined = lif->ionic->ntxqs_per_lif; + ch->max_rx = lif->ionic->ntxqs_per_lif / 2; + ch->max_tx = lif->ionic->ntxqs_per_lif / 2; /* report current channels */ - ch->combined_count = lif->nxqs; + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { + ch->rx_count = lif->nxqs; + ch->tx_count = lif->nxqs; + } else { + ch->combined_count = lif->nxqs; + } } static void ionic_set_queuecount(struct ionic_lif *lif, void *arg) { struct ethtool_channels *ch = arg; - lif->nxqs = ch->combined_count; + if (ch->combined_count) { + lif->nxqs = ch->combined_count; + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { + clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; + lif->tx_coalesce_hw = lif->rx_coalesce_hw; + netdev_info(lif->netdev, "Sharing queue interrupts\n"); + } + } else { + lif->nxqs = ch->rx_count; + if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { + set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state); + netdev_info(lif->netdev, "Splitting queue interrupts\n"); + } + } } static int ionic_set_channels(struct net_device *netdev, struct ethtool_channels *ch) { struct ionic_lif *lif = netdev_priv(netdev); + int new_cnt; - if (!ch->combined_count || ch->other_count || - ch->rx_count || ch->tx_count) + if (ch->rx_count 
!= ch->tx_count) { + netdev_info(netdev, "The rx and tx count must be equal\n"); return -EINVAL; + } - if (ch->combined_count == lif->nxqs) - return 0; + if (ch->combined_count && ch->rx_count) { + netdev_info(netdev, "Use either combined_count or rx/tx_count, not both\n"); + return -EINVAL; + } + + if (ch->combined_count) + new_cnt = ch->combined_count; + else + new_cnt = ch->rx_count; + + if (lif->nxqs != new_cnt) + netdev_info(netdev, "Changing queue count from %d to %d\n", + lif->nxqs, new_cnt); return ionic_reset_queues(lif, ionic_set_queuecount, ch); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 85e4cf229a96..1944bf5264db 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -616,7 +616,6 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) .index = cpu_to_le32(q->index), .flags = cpu_to_le16(IONIC_QINIT_F_IRQ | IONIC_QINIT_F_SG), - .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index), .pid = cpu_to_le16(q->pid), .ring_size = ilog2(q->num_descs), .ring_base = cpu_to_le64(q->base_pa), @@ -624,14 +623,22 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) .sg_ring_base = cpu_to_le64(q->sg_base_pa), }, }; + unsigned int intr_index; int err; + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + intr_index = qcq->intr.index; + else + intr_index = lif->rxqcqs[q->index].qcq->intr.index; + ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index); + dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid); dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index); dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base); dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags); dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver); + dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); q->tail = q->info; q->head = q->tail; @@ -648,6 +655,10 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) dev_dbg(dev, "txq->hw_type %d\n", q->hw_type); dev_dbg(dev, "txq->hw_index %d\n", q->hw_index); + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi, + NAPI_POLL_WEIGHT); + qcq->flags |= IONIC_QCQ_F_INITED; return 0; @@ -684,6 +695,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size); dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags); dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver); + dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index); q->tail = q->info; q->head = q->tail; @@ -700,8 +712,12 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type); dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index); - netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi, - NAPI_POLL_WEIGHT); + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi, + NAPI_POLL_WEIGHT); + else + netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi, + NAPI_POLL_WEIGHT); qcq->flags |= IONIC_QCQ_F_INITED; @@ -1537,6 +1553,8 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) sg_desc_sz = sizeof(struct ionic_txq_sg_desc); flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG; + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + flags |= 
IONIC_QCQ_F_INTR; for (i = 0; i < lif->nxqs; i++) { err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags, lif->ntxq_descs, @@ -1547,6 +1565,11 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) if (err) goto err_out; + if (flags & IONIC_QCQ_F_INTR) + ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, + lif->txqcqs[i].qcq->intr.index, + lif->tx_coalesce_hw); + lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats; ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq); } @@ -1562,13 +1585,15 @@ static int ionic_txrx_alloc(struct ionic_lif *lif) if (err) goto err_out; - lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats; - ionic_intr_coal_init(lif->ionic->idev.intr_ctrl, lif->rxqcqs[i].qcq->intr.index, lif->rx_coalesce_hw); - ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq, - lif->txqcqs[i].qcq); + + if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) + ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq, + lif->txqcqs[i].qcq); + + lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats; ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq); } @@ -2065,11 +2090,14 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index lif->index = index; lif->ntxq_descs = IONIC_DEF_TXRX_DESC; lif->nrxq_descs = IONIC_DEF_TXRX_DESC; + lif->tx_budget = IONIC_TX_BUDGET_DEFAULT; /* Convert the default coalesce value to actual hw resolution */ lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic, lif->rx_coalesce_usecs); + lif->tx_coalesce_usecs = lif->rx_coalesce_usecs; + lif->tx_coalesce_hw = lif->rx_coalesce_hw; snprintf(lif->name, sizeof(lif->name), "lif%u", index); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index edad17d7aeeb..1ee3b14c8d50 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -13,6 +13,7 @@ #define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1) #define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1) #define IONIC_RX_COPYBREAK_DEFAULT 256 +#define IONIC_TX_BUDGET_DEFAULT 256 struct ionic_tx_stats { u64 dma_map_err; @@ -136,6 +137,7 @@ enum ionic_lif_state_flags { IONIC_LIF_F_UP, IONIC_LIF_F_LINK_CHECK_REQUESTED, IONIC_LIF_F_FW_RESET, + IONIC_LIF_F_SPLIT_INTR, /* leave this as last */ IONIC_LIF_F_STATE_SIZE @@ -176,6 +178,7 @@ struct ionic_lif { unsigned int ntxq_descs; unsigned int nrxq_descs; u32 rx_copybreak; + u32 tx_budget; unsigned int rx_mode; u64 hw_features; bool mc_overflow; @@ -203,6 +206,8 @@ struct ionic_lif { struct dentry *dentry; u32 rx_coalesce_usecs; /* what the user asked for */ u32 rx_coalesce_hw; /* what the hw is using */ + u32 tx_coalesce_usecs; /* what the user asked for */ + u32 tx_coalesce_hw; /* what the hw is using */ struct work_struct tx_timeout_work; }; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 85eb8f276a37..8107d32c2767 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -15,6 +15,10 @@ static void ionic_rx_clean(struct ionic_queue *q, struct ionic_cq_info *cq_info, void *cb_arg); +static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); + +static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info); + static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell, ionic_desc_cb cb_func, void *cb_arg) { @@ -249,29 +253,13 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct 
ionic_cq_info *cq_info) return true; } -static u32 ionic_rx_walk_cq(struct ionic_cq *rxcq, u32 limit) -{ - u32 work_done = 0; - - while (ionic_rx_service(rxcq, rxcq->tail)) { - if (rxcq->tail->last) - rxcq->done_color = !rxcq->done_color; - rxcq->tail = rxcq->tail->next; - DEBUG_STATS_CQE_CNT(rxcq); - - if (++work_done >= limit) - break; - } - - return work_done; -} - void ionic_rx_flush(struct ionic_cq *cq) { struct ionic_dev *idev = &cq->lif->ionic->idev; u32 work_done; - work_done = ionic_rx_walk_cq(cq, cq->num_descs); + work_done = ionic_cq_service(cq, cq->num_descs, + ionic_rx_service, NULL, NULL); if (work_done) ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, @@ -331,9 +319,6 @@ static void ionic_rx_page_free(struct ionic_queue *q, struct page *page, __free_page(page); } -#define IONIC_RX_RING_DOORBELL_STRIDE ((1 << 5) - 1) -#define IONIC_RX_RING_HEAD_BUF_SZ 2048 - void ionic_rx_fill(struct ionic_queue *q) { struct net_device *netdev = q->lif->netdev; @@ -345,7 +330,6 @@ void ionic_rx_fill(struct ionic_queue *q) unsigned int remain_len; unsigned int seg_len; unsigned int nfrags; - bool ring_doorbell; unsigned int i, j; unsigned int len; @@ -360,9 +344,7 @@ void ionic_rx_fill(struct ionic_queue *q) page_info = &desc_info->pages[0]; if (page_info->page) { /* recycle the buffer */ - ring_doorbell = ((q->head->index + 1) & - IONIC_RX_RING_DOORBELL_STRIDE) == 0; - ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL); + ionic_rxq_post(q, false, ionic_rx_clean, NULL); continue; } @@ -401,10 +383,11 @@ void ionic_rx_fill(struct ionic_queue *q) page_info++; } - ring_doorbell = ((q->head->index + 1) & - IONIC_RX_RING_DOORBELL_STRIDE) == 0; - ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, NULL); + ionic_rxq_post(q, false, ionic_rx_clean, NULL); } + + ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type, + q->dbval | q->head->index); } static void ionic_rx_fill_cb(void *arg) @@ -436,40 +419,117 @@ void ionic_rx_empty(struct ionic_queue *q) } } +int ionic_tx_napi(struct napi_struct *napi, int budget) +{ + struct ionic_qcq *qcq = napi_to_qcq(napi); + struct ionic_cq *cq = napi_to_cq(napi); + struct ionic_dev *idev; + struct ionic_lif *lif; + u32 work_done = 0; + u32 flags = 0; + + lif = cq->bound_q->lif; + idev = &lif->ionic->idev; + + work_done = ionic_cq_service(cq, budget, + ionic_tx_service, NULL, NULL); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + flags |= IONIC_INTR_CRED_UNMASK; + DEBUG_STATS_INTR_REARM(cq->bound_intr); + } + + if (work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + ionic_intr_credits(idev->intr_ctrl, + cq->bound_intr->index, + work_done, flags); + } + + DEBUG_STATS_NAPI_POLL(qcq, work_done); + + return work_done; +} + int ionic_rx_napi(struct napi_struct *napi, int budget) { struct ionic_qcq *qcq = napi_to_qcq(napi); + struct ionic_cq *cq = napi_to_cq(napi); + struct ionic_dev *idev; + struct ionic_lif *lif; + u32 work_done = 0; + u32 flags = 0; + + lif = cq->bound_q->lif; + idev = &lif->ionic->idev; + + work_done = ionic_cq_service(cq, budget, + ionic_rx_service, NULL, NULL); + + if (work_done) + ionic_rx_fill(cq->bound_q); + + if (work_done < budget && napi_complete_done(napi, work_done)) { + flags |= IONIC_INTR_CRED_UNMASK; + DEBUG_STATS_INTR_REARM(cq->bound_intr); + } + + if (work_done || flags) { + flags |= IONIC_INTR_CRED_RESET_COALESCE; + ionic_intr_credits(idev->intr_ctrl, + cq->bound_intr->index, + work_done, flags); + } + + DEBUG_STATS_NAPI_POLL(qcq, work_done); + + return work_done; +} + +int 
ionic_txrx_napi(struct napi_struct *napi, int budget) +{ + struct ionic_qcq *qcq = napi_to_qcq(napi); struct ionic_cq *rxcq = napi_to_cq(napi); unsigned int qi = rxcq->bound_q->index; struct ionic_dev *idev; struct ionic_lif *lif; struct ionic_cq *txcq; + u32 rx_work_done = 0; + u32 tx_work_done = 0; u32 work_done = 0; u32 flags = 0; + bool unmask; lif = rxcq->bound_q->lif; idev = &lif->ionic->idev; txcq = &lif->txqcqs[qi].qcq->cq; - ionic_tx_flush(txcq); + tx_work_done = ionic_cq_service(txcq, lif->tx_budget, + ionic_tx_service, NULL, NULL); - work_done = ionic_rx_walk_cq(rxcq, budget); - - if (work_done) + rx_work_done = ionic_cq_service(rxcq, budget, + ionic_rx_service, NULL, NULL); + if (rx_work_done) ionic_rx_fill_cb(rxcq->bound_q); - if (work_done < budget && napi_complete_done(napi, work_done)) { + unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget); + + if (unmask && napi_complete_done(napi, rx_work_done)) { flags |= IONIC_INTR_CRED_UNMASK; DEBUG_STATS_INTR_REARM(rxcq->bound_intr); + work_done = rx_work_done; + } else { + work_done = budget; } if (work_done || flags) { flags |= IONIC_INTR_CRED_RESET_COALESCE; ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index, - work_done, flags); + tx_work_done + rx_work_done, flags); } - DEBUG_STATS_NAPI_POLL(qcq, work_done); + DEBUG_STATS_NAPI_POLL(qcq, rx_work_done); + DEBUG_STATS_NAPI_POLL(qcq, tx_work_done); return work_done; } @@ -557,43 +617,39 @@ static void ionic_tx_clean(struct ionic_queue *q, } } -void ionic_tx_flush(struct ionic_cq *cq) +static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info) { - struct ionic_txq_comp *comp = cq->tail->cq_desc; - struct ionic_dev *idev = &cq->lif->ionic->idev; + struct ionic_txq_comp *comp = cq_info->cq_desc; struct ionic_queue *q = cq->bound_q; struct ionic_desc_info *desc_info; - unsigned int work_done = 0; - - /* walk the completed cq entries */ - while (work_done < cq->num_descs && - color_match(comp->color, cq->done_color)) { - - /* clean the related q entries, there could be - * several q entries completed for each cq completion - */ - do { - desc_info = q->tail; - q->tail = desc_info->next; - ionic_tx_clean(q, desc_info, cq->tail, - desc_info->cb_arg); - desc_info->cb = NULL; - desc_info->cb_arg = NULL; - } while (desc_info->index != le16_to_cpu(comp->comp_index)); - - if (cq->tail->last) - cq->done_color = !cq->done_color; - - cq->tail = cq->tail->next; - comp = cq->tail->cq_desc; - DEBUG_STATS_CQE_CNT(cq); - - work_done++; - } + if (!color_match(comp->color, cq->done_color)) + return false; + + /* clean the related q entries, there could be + * several q entries completed for each cq completion + */ + do { + desc_info = q->tail; + q->tail = desc_info->next; + ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg); + desc_info->cb = NULL; + desc_info->cb_arg = NULL; + } while (desc_info->index != le16_to_cpu(comp->comp_index)); + + return true; +} + +void ionic_tx_flush(struct ionic_cq *cq) +{ + struct ionic_dev *idev = &cq->lif->ionic->idev; + u32 work_done; + + work_done = ionic_cq_service(cq, cq->num_descs, + ionic_tx_service, NULL, NULL); if (work_done) ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index, - work_done, 0); + work_done, IONIC_INTR_CRED_RESET_COALESCE); } void ionic_tx_empty(struct ionic_queue *q) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h index 71973e3c35a6..a5883be0413f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.h +++ 
b/drivers/net/ethernet/pensando/ionic/ionic_txrx.h @@ -11,6 +11,8 @@ void ionic_rx_fill(struct ionic_queue *q); void ionic_rx_empty(struct ionic_queue *q); void ionic_tx_empty(struct ionic_queue *q); int ionic_rx_napi(struct napi_struct *napi, int budget); +int ionic_tx_napi(struct napi_struct *napi, int budget); +int ionic_txrx_napi(struct napi_struct *napi, int budget); netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev); #endif /* _IONIC_TXRX_H_ */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 9489089706fe..f1f75b6d0421 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -5038,8 +5038,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (ether_addr_equal(vf->shadow_config.macs[i], vf_info->mac)) { - memset(vf->shadow_config.macs[i], 0, - ETH_ALEN); + eth_zero_addr(vf->shadow_config.macs[i]); DP_VERBOSE(hwfn, QED_MSG_IOV, "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n", vf_info->mac, vf_id); @@ -5048,7 +5047,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) } ether_addr_copy(vf_info->mac, force_mac); - memset(vf_info->forced_mac, 0, ETH_ALEN); + eth_zero_addr(vf_info->forced_mac); vf->bulletin.p_virt->valid_bitmap &= ~BIT(MAC_ADDR_FORCED); qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); @@ -5059,7 +5058,7 @@ static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) if (!vf_info->is_trusted_configured) { u8 empty_mac[ETH_ALEN]; - memset(empty_mac, 0, ETH_ALEN); + eth_zero_addr(empty_mac); for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { if (ether_addr_equal(vf->shadow_config.macs[i], empty_mac)) { diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 4250c17940c0..140a392a81bb 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2674,8 +2674,8 @@ static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) data->feat_flags |= QED_TLV_LSO; ether_addr_copy(data->mac[0], edev->ndev->dev_addr); - memset(data->mac[1], 0, ETH_ALEN); - memset(data->mac[2], 0, ETH_ALEN); + eth_zero_addr(data->mac[1]); + eth_zero_addr(data->mac[2]); /* Copy the first two UC macs */ netif_addr_lock_bh(edev->ndev); i = 1; diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 6646eba9f57f..6eef0f45b133 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -951,7 +951,7 @@ out_stop: dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma); if (ip->tx_ring) - dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, + dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma); out_free: free_netdev(dev); @@ -964,7 +964,7 @@ static int ioc3eth_remove(struct platform_device *pdev) struct ioc3_private *ip = netdev_priv(dev); dma_free_coherent(ip->dma_dev, RX_RING_SIZE, ip->rxr, ip->rxr_dma); - dma_free_coherent(ip->dma_dev, TX_RING_SIZE, ip->tx_ring, ip->txr_dma); + dma_free_coherent(ip->dma_dev, TX_RING_SIZE + SZ_16K - 1, ip->tx_ring, ip->txr_dma); unregister_netdev(dev); del_timer_sync(&ip->ioc3_timer); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 05d63963fdb7..ac5e8cc5fb9f 100644 
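The qed/qede cleanups above swap open-coded memset(mac, 0, ETH_ALEN) for eth_zero_addr(). The helper (from <linux/etherdevice.h>) is a trivial inline wrapper around memset(), so the conversion is behavior-preserving and simply makes the intent explicit. A minimal sketch of the pattern; the function name here is illustrative only:

#include <linux/etherdevice.h>	/* eth_zero_addr(), ETH_ALEN */

static void clear_shadow_mac(u8 mac[ETH_ALEN])
{
	/* equivalent to memset(mac, 0, ETH_ALEN), but self-describing */
	eth_zero_addr(mac);
}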
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -625,7 +625,7 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) int ret = phylink_ethtool_set_wol(priv->phylink, wol); if (!ret) - device_set_wakeup_enable(&dev->dev, !!wol->wolopts); + device_set_wakeup_enable(priv->device, !!wol->wolopts); return ret; } diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 3902b3aeb0c2..07389702a540 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -283,8 +283,8 @@ spider_net_free_chain(struct spider_net_card *card, descr = descr->next; } while (descr != chain->ring); - dma_free_coherent(&card->pdev->dev, chain->num_desc, - chain->hwring, chain->dma_addr); + dma_free_coherent(&card->pdev->dev, chain->num_desc * sizeof(struct spider_net_hw_descr), + chain->hwring, chain->dma_addr); } /** @@ -314,8 +314,6 @@ spider_net_init_chain(struct spider_net_card *card, if (!chain->hwring) return -ENOMEM; - memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr)); - /* Set up the hardware pointers in each descriptor */ descr = chain->ring; hwdescr = chain->hwring; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index dd20c2c27c2f..726e4b240e7e 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -194,6 +194,7 @@ config MDIO_MSCC_MIIM config MDIO_MVUSB tristate "Marvell USB to MDIO Adapter" depends on USB + select MDIO_DEVRES help A USB to MDIO converter present on development boards for Marvell's Link Street family of Ethernet switches. diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 46b33701ad4b..0af20faad69d 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -554,8 +554,10 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) bus->reset_gpiod = gpiod; gpiod_set_value_cansleep(gpiod, 1); - udelay(bus->reset_delay_us); + fsleep(bus->reset_delay_us); gpiod_set_value_cansleep(gpiod, 0); + if (bus->reset_post_delay_us > 0) + fsleep(bus->reset_post_delay_us); } if (bus->reset) { diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c index 0f625a1b1644..0837319a52d7 100644 --- a/drivers/net/phy/mdio_device.c +++ b/drivers/net/phy/mdio_device.c @@ -132,7 +132,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value) d = value ? 
mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay; if (d) - usleep_range(d, d + max_t(unsigned int, d / 10, 100)); + fsleep(d); } EXPORT_SYMBOL(mdio_device_reset); diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index eb84507de28a..cb32d7ef4938 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -268,6 +268,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) /* Get bus level PHY reset GPIO details */ mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY; of_property_read_u32(np, "reset-delay-us", &mdio->reset_delay_us); + mdio->reset_post_delay_us = 0; + of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us); /* Register the MDIO bus */ rc = mdiobus_register(mdio); diff --git a/include/linux/phy.h b/include/linux/phy.h index 0403eb799913..3a09d2bf69ea 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -293,6 +293,8 @@ struct mii_bus { /* GPIO reset pulse width in microseconds */ int reset_delay_us; + /* GPIO reset deassert delay in microseconds */ + int reset_post_delay_us; /* RESET GPIO descriptor pointer */ struct gpio_desc *reset_gpiod; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index fa817a105517..3ad65d4ce085 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -283,6 +283,7 @@ struct nf_bridge_info { */ struct tc_skb_ext { __u32 chain; + __u16 mru; }; #endif diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 011f407b76fe..9a59a33787cb 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -14,6 +14,7 @@ #include <linux/spinlock.h> /* for struct rwlock_t */ #include <linux/atomic.h> /* for struct atomic_t */ #include <linux/refcount.h> /* for struct refcount_t */ +#include <linux/workqueue.h> #include <linux/compiler.h> #include <linux/timer.h> @@ -886,6 +887,8 @@ struct netns_ipvs { atomic_t conn_out_counter; #ifdef CONFIG_SYSCTL + /* delayed work for expiring no dest connections */ + struct delayed_work expire_nodest_conn_work; /* 1/rate drop and drop-entry variables */ struct delayed_work defense_work; /* Work handler */ int drop_rate; @@ -1051,6 +1054,11 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) return ipvs->sysctl_conn_reuse_mode; } +static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_expire_nodest_conn; +} + static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs) { return ipvs->sysctl_schedule_icmp; @@ -1138,6 +1146,11 @@ static inline int sysctl_conn_reuse_mode(struct netns_ipvs *ipvs) return 1; } +static inline int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) +{ + return 0; +} + static inline int sysctl_schedule_icmp(struct netns_ipvs *ipvs) { return 0; @@ -1507,6 +1520,22 @@ static inline int ip_vs_todrop(struct netns_ipvs *ipvs) static inline int ip_vs_todrop(struct netns_ipvs *ipvs) { return 0; } #endif +#ifdef CONFIG_SYSCTL +/* Enqueue delayed work for expiring no dest connections + * Only run when sysctl_expire_nodest=1 + */ +static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) +{ + if (sysctl_expire_nodest_conn(ipvs)) + queue_delayed_work(system_long_wq, + &ipvs->expire_nodest_conn_work, 1); +} + +void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs); +#else +static inline void ip_vs_enqueue_expire_nodest_conns(struct netns_ipvs *ipvs) {} +#endif + #define IP_VS_DFWD_METHOD(dest) (atomic_read(&(dest)->conn_flags) & \ IP_VS_CONN_F_FWD_MASK) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 
c510b03b9751..d60e7c39d60c 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -384,6 +384,7 @@ struct qdisc_skb_cb { }; #define QDISC_CB_PRIV_LEN 20 unsigned char data[QDISC_CB_PRIV_LEN]; + u16 mru; }; typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv); @@ -463,7 +464,7 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) { struct qdisc_skb_cb *qcb; - BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz); + BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb)); BUILD_BUG_ON(sizeof(qcb->data) < sz); } diff --git a/include/uapi/linux/netfilter/xt_connmark.h b/include/uapi/linux/netfilter/xt_connmark.h index 1aa5c955ee1e..f01c19b83a2b 100644 --- a/include/uapi/linux/netfilter/xt_connmark.h +++ b/include/uapi/linux/netfilter/xt_connmark.h @@ -4,7 +4,7 @@ #include <linux/types.h> -/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com> +/* Copyright (C) 2002,2004 MARA Systems AB <https://www.marasystems.com> * by Henrik Nordstrom <hno@marasystems.com> * * This program is free software; you can redistribute it and/or modify diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 9b14519e74d9..8300cc29dec8 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -86,6 +86,7 @@ enum ovs_datapath_attr { OVS_DP_ATTR_MEGAFLOW_STATS, /* struct ovs_dp_megaflow_stats */ OVS_DP_ATTR_USER_FEATURES, /* OVS_DP_F_* */ OVS_DP_ATTR_PAD, + OVS_DP_ATTR_MASKS_CACHE_SIZE, __OVS_DP_ATTR_MAX }; @@ -102,8 +103,8 @@ struct ovs_dp_megaflow_stats { __u64 n_mask_hit; /* Number of masks used for flow lookups. */ __u32 n_masks; /* Number of masks for the datapath. */ __u32 pad0; /* Pad for future expension. */ + __u64 n_cache_hit; /* Number of cache matches for flow lookups. */ __u64 pad1; /* Pad for future expension. */ - __u64 pad2; /* Pad for future expension. 
*/ }; struct ovs_vport_stats { diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h index 09fb608a35ec..eb815e0d0ac3 100644 --- a/include/uapi/linux/seg6_iptunnel.h +++ b/include/uapi/linux/seg6_iptunnel.h @@ -37,25 +37,4 @@ enum { SEG6_IPTUN_MODE_L2ENCAP, }; -#ifdef __KERNEL__ - -static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo) -{ - int head = 0; - - switch (tuninfo->mode) { - case SEG6_IPTUN_MODE_INLINE: - break; - case SEG6_IPTUN_MODE_ENCAP: - head = sizeof(struct ipv6hdr); - break; - case SEG6_IPTUN_MODE_L2ENCAP: - return 0; - } - - return ((tuninfo->srh->hdrlen + 1) << 3) + head; -} - -#endif - #endif diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c index 550c6ca007cc..9c1241292d1d 100644 --- a/net/appletalk/atalk_proc.c +++ b/net/appletalk/atalk_proc.c @@ -229,6 +229,8 @@ int __init atalk_proc_init(void) sizeof(struct aarp_iter_state), NULL)) goto out; + return 0; + out: remove_proc_subtree("atalk", init_net.proc_net); return -ENOMEM; diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 8c7b78f8bc23..9a2fb4aa1a10 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -36,6 +36,8 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) const unsigned char *dest; u16 vid = 0; + memset(skb->cb, 0, sizeof(struct br_input_skb_cb)); + rcu_read_lock(); nf_ops = rcu_dereference(nf_br_ops); if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) { diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index d35173e803d3..fe66932f5abb 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1852,7 +1852,7 @@ static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt, size_kern = match_size; module_put(match->me); break; - case EBT_COMPAT_WATCHER: /* fallthrough */ + case EBT_COMPAT_WATCHER: case EBT_COMPAT_TARGET: wt = xt_request_find_target(NFPROTO_BRIDGE, name, mwt->u.revision); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4e2edfbe0e19..2828f6d5ba89 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3758,7 +3758,6 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, int err = -ENOMEM; int i = 0; int pos; - int dummy; if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) && (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) { @@ -3780,7 +3779,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, } __skb_push(head_skb, doffset); - proto = skb_network_protocol(head_skb, &dummy); + proto = skb_network_protocol(head_skb, NULL); if (unlikely(!proto)) return ERR_PTR(-EINVAL); @@ -4413,7 +4412,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) * at the moment even if they are anonymous). */ if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && - __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) + !__pskb_pull_tail(skb, __skb_pagelen(skb))) return -ENOMEM; /* Easy case. Most of packets will go this way. 
*/ diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index dc705769acc9..26a9193df783 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -6,7 +6,7 @@ * * DECnet Routing Message Grabulator * - * (C) 2000 ChyGwyn Limited - http://www.chygwyn.com/ + * (C) 2000 ChyGwyn Limited - https://www.chygwyn.com/ * * Author: Steven Whitehouse <steve@chygwyn.com> */ diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c index 317a93129551..7044a2853886 100644 --- a/net/ethtool/linkmodes.c +++ b/net/ethtool/linkmodes.c @@ -421,8 +421,7 @@ int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info) ret = __ethtool_get_link_ksettings(dev, &ksettings); if (ret < 0) { - if (info) - GENL_SET_ERR_MSG(info, "failed to retrieve link settings"); + GENL_SET_ERR_MSG(info, "failed to retrieve link settings"); goto out_ops; } diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 2e6d1b7a7bc9..e0a246575887 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -15,12 +15,12 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, netdev_features_t features) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); + bool need_csum, need_recompute_csum, gso_partial; struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; __be16 protocol = skb->protocol; u16 mac_len = skb->mac_len; int gre_offset, outer_hlen; - bool need_csum, gso_partial; if (!skb->encapsulation) goto out; @@ -41,6 +41,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, skb->protocol = skb->inner_protocol; need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); + need_recompute_csum = skb->csum_not_inet; skb->encap_hdr_csum = need_csum; features &= skb->dev->hw_enc_features; @@ -98,7 +99,15 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, } *(pcsum + 1) = 0; - *pcsum = gso_make_checksum(skb, 0); + if (need_recompute_csum && !skb_is_gso(skb)) { + __wsum csum; + + csum = skb_checksum(skb, gre_offset, + skb->len - gre_offset, 0); + *pcsum = csum_fold(csum); + } else { + *pcsum = gso_make_checksum(skb, 0); + } } while ((skb = skb->next)); out: return segs; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 739da25b0c23..184ea556f50e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2950,6 +2950,8 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag, u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) { + if (!delta) + delta = 1; seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ); ca_rtt_us = seq_rtt_us; } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 840bfdb3d7bd..0acf6a9796ca 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -163,7 +163,7 @@ static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); static void addrconf_type_change(struct net_device *dev, unsigned long event); -static int addrconf_ifdown(struct net_device *dev, int how); +static int addrconf_ifdown(struct net_device *dev, bool unregister); static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, int plen, @@ -3630,7 +3630,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, * an L3 master device (e.g., VRF) */ if (info->upper_dev && netif_is_l3_master(info->upper_dev)) - addrconf_ifdown(dev, 0); + addrconf_ifdown(dev, false); } return NOTIFY_OK; @@ -3663,9 +3663,9 @@ static bool addr_is_local(const struct in6_addr *addr) 
(IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); } -static int addrconf_ifdown(struct net_device *dev, int how) +static int addrconf_ifdown(struct net_device *dev, bool unregister) { - unsigned long event = how ? NETDEV_UNREGISTER : NETDEV_DOWN; + unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN; struct net *net = dev_net(dev); struct inet6_dev *idev; struct inet6_ifaddr *ifa, *tmp; @@ -3684,7 +3684,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) * Step 1: remove reference to ipv6 device from parent device. * Do not dev_put! */ - if (how) { + if (unregister) { idev->dead = 1; /* protected by rtnl_lock */ @@ -3698,7 +3698,7 @@ static int addrconf_ifdown(struct net_device *dev, int how) /* combine the user config with event to determine if permanent * addresses are to be removed from address hash table */ - if (!how && !idev->cnf.disable_ipv6) { + if (!unregister && !idev->cnf.disable_ipv6) { /* aggregate the system setting and interface setting */ int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down; @@ -3736,7 +3736,7 @@ restart: addrconf_del_rs_timer(idev); /* Step 2: clear flags for stateless addrconf */ - if (!how) + if (!unregister) idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); /* Step 3: clear tempaddr list */ @@ -3806,7 +3806,7 @@ restart: write_unlock_bh(&idev->lock); /* Step 5: Discard anycast and multicast list */ - if (how) { + if (unregister) { ipv6_ac_destroy_dev(idev); ipv6_mc_destroy_dev(idev); } else { @@ -3816,7 +3816,7 @@ restart: idev->tstamp = jiffies; /* Last: Shot the device (if unregistered) */ - if (how) { + if (unregister) { addrconf_sysctl_unregister(idev); neigh_parms_release(&nd_tbl, idev->nd_parms); neigh_ifdown(&nd_tbl, dev); @@ -4038,7 +4038,7 @@ static void addrconf_dad_work(struct work_struct *w) in6_ifa_hold(ifp); addrconf_dad_stop(ifp, 1); if (disable_ipv6) - addrconf_ifdown(idev->dev, 0); + addrconf_ifdown(idev->dev, false); goto out; } @@ -7187,9 +7187,9 @@ void addrconf_cleanup(void) for_each_netdev(&init_net, dev) { if (__in6_dev_get(dev) == NULL) continue; - addrconf_ifdown(dev, 1); + addrconf_ifdown(dev, true); } - addrconf_ifdown(init_net.loopback_dev, 2); + addrconf_ifdown(init_net.loopback_dev, true); /* * Check hash table. 
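The addrconf_ifdown() change above folds the old int "how" argument into a single bool: the function only ever tested it for truthiness, so the historical 1 and 2 values behaved identically. The win is call-site readability; a before/after sketch taken from the hunks above:

/* before: the magic number needs the callee to decode it */
addrconf_ifdown(init_net.loopback_dev, 2);

/* after: the intent is visible at the call site */
addrconf_ifdown(init_net.loopback_dev, true);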
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c index 4e15a14435e4..70da2f2ce064 100644 --- a/net/ipv6/netfilter/ip6t_ah.c +++ b/net/ipv6/netfilter/ip6t_ah.c @@ -74,8 +74,7 @@ static bool ah_mt6(const struct sk_buff *skb, struct xt_action_param *par) ahinfo->hdrres, ah->reserved, !(ahinfo->hdrres && ah->reserved)); - return (ah != NULL) && - spi_match(ahinfo->spis[0], ahinfo->spis[1], + return spi_match(ahinfo->spis[0], ahinfo->spis[1], ntohl(ah->spi), !!(ahinfo->invflags & IP6T_AH_INV_SPI)) && (!ahinfo->hdrlen || diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c index fb91eeee4a1e..3aad6439386b 100644 --- a/net/ipv6/netfilter/ip6t_frag.c +++ b/net/ipv6/netfilter/ip6t_frag.c @@ -85,8 +85,7 @@ frag_mt6(const struct sk_buff *skb, struct xt_action_param *par) !((fraginfo->flags & IP6T_FRAG_NMF) && (ntohs(fh->frag_off) & IP6_MF))); - return (fh != NULL) && - id_match(fraginfo->ids[0], fraginfo->ids[1], + return id_match(fraginfo->ids[0], fraginfo->ids[1], ntohl(fh->identification), !!(fraginfo->invflags & IP6T_FRAG_INV_IDS)) && !((fraginfo->flags & IP6T_FRAG_RES) && diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c index 467b2a86031b..e7a3fb9355ee 100644 --- a/net/ipv6/netfilter/ip6t_hbh.c +++ b/net/ipv6/netfilter/ip6t_hbh.c @@ -86,8 +86,7 @@ hbh_mt6(const struct sk_buff *skb, struct xt_action_param *par) ((optinfo->hdrlen == hdrlen) ^ !!(optinfo->invflags & IP6T_OPTS_INV_LEN)))); - ret = (oh != NULL) && - (!(optinfo->flags & IP6T_OPTS_LEN) || + ret = (!(optinfo->flags & IP6T_OPTS_LEN) || ((optinfo->hdrlen == hdrlen) ^ !!(optinfo->invflags & IP6T_OPTS_INV_LEN))); diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c index f633dc84ca3f..733c83d38b30 100644 --- a/net/ipv6/netfilter/ip6t_rt.c +++ b/net/ipv6/netfilter/ip6t_rt.c @@ -89,8 +89,7 @@ static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par) !((rtinfo->flags & IP6T_RT_RES) && (((const struct rt0_hdr *)rh)->reserved))); - ret = (rh != NULL) && - (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], + ret = (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1], rh->segments_left, !!(rtinfo->invflags & IP6T_RT_INV_SGS))) && (!(rtinfo->flags & IP6T_RT_LEN) || diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index e0e9f48ab14f..897fa59c47de 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -27,6 +27,23 @@ #include <net/seg6_hmac.h> #endif +static size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo) +{ + int head = 0; + + switch (tuninfo->mode) { + case SEG6_IPTUN_MODE_INLINE: + break; + case SEG6_IPTUN_MODE_ENCAP: + head = sizeof(struct ipv6hdr); + break; + case SEG6_IPTUN_MODE_L2ENCAP: + return 0; + } + + return ((tuninfo->srh->hdrlen + 1) << 3) + head; +} + struct seg6_lwt { struct dst_cache cache; struct seg6_iptunnel_encap tuninfo[]; diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 0ffe2b8723c4..25313c29d799 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -447,7 +447,7 @@ config NF_TABLES replace the existing {ip,ip6,arp,eb}_tables infrastructure. It provides a pseudo-state machine with an extensible instruction-set (also known as expressions) that the userspace 'nft' utility - (http://www.netfilter.org/projects/nftables) uses to build the + (https://www.netfilter.org/projects/nftables) uses to build the rule-set. 
It also comes with the generic set infrastructure that allows you to construct mappings between matchings and actions for performance lookups. diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 56621d6bfd29..920b7c4331f0 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -1644,7 +1644,7 @@ dump_last: goto next_set; if (set->variant->uref) set->variant->uref(set, cb, true); - /* fall through */ + fallthrough; default: ret = set->variant->list(set, skb, cb); if (!cb->args[IPSET_CB_ARG0]) diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index b3921ae92740..a90b8eac16ac 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1389,6 +1389,45 @@ flush_again: goto flush_again; } } + +#ifdef CONFIG_SYSCTL +void ip_vs_expire_nodest_conn_flush(struct netns_ipvs *ipvs) +{ + int idx; + struct ip_vs_conn *cp, *cp_c; + struct ip_vs_dest *dest; + + rcu_read_lock(); + for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { + hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) { + if (cp->ipvs != ipvs) + continue; + + dest = cp->dest; + if (!dest || (dest->flags & IP_VS_DEST_F_AVAILABLE)) + continue; + + if (atomic_read(&cp->n_control)) + continue; + + cp_c = cp->control; + IP_VS_DBG(4, "del connection\n"); + ip_vs_conn_del(cp); + if (cp_c && !atomic_read(&cp_c->n_control)) { + IP_VS_DBG(4, "del controlling connection\n"); + ip_vs_conn_del(cp_c); + } + } + cond_resched_rcu(); + + /* netns clean up started, abort delayed work */ + if (!ipvs->enable) + break; + } + rcu_read_unlock(); +} +#endif + /* * per netns init and exit */ diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index b4a6b7662f3f..e3668a6e54e4 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -694,16 +694,10 @@ static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) return ipvs->sysctl_nat_icmp_send; } -static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) -{ - return ipvs->sysctl_expire_nodest_conn; -} - #else static int sysctl_snat_reroute(struct netns_ipvs *ipvs) { return 0; } static int sysctl_nat_icmp_send(struct netns_ipvs *ipvs) { return 0; } -static int sysctl_expire_nodest_conn(struct netns_ipvs *ipvs) { return 0; } #endif @@ -2097,36 +2091,35 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int } } - if (unlikely(!cp)) { - int v; - - if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph)) - return v; - } - - IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet"); - /* Check the server status */ - if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { + if (cp && cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) { /* the destination server is not available */ + if (sysctl_expire_nodest_conn(ipvs)) { + bool old_ct = ip_vs_conn_uses_old_conntrack(cp, skb); - __u32 flags = cp->flags; - - /* when timer already started, silently drop the packet.*/ - if (timer_pending(&cp->timer)) - __ip_vs_conn_put(cp); - else - ip_vs_conn_put(cp); + if (!old_ct) + cp->flags &= ~IP_VS_CONN_F_NFCT; - if (sysctl_expire_nodest_conn(ipvs) && - !(flags & IP_VS_CONN_F_ONE_PACKET)) { - /* try to expire the connection immediately */ ip_vs_conn_expire_now(cp); + __ip_vs_conn_put(cp); + if (old_ct) + return NF_DROP; + cp = NULL; + } else { + __ip_vs_conn_put(cp); + return NF_DROP; } + } - return NF_DROP; + if (unlikely(!cp)) { + int v; + + if (!ip_vs_try_to_schedule(ipvs, af, skb, pd, &v, &cp, &iph)) 
+ return v; } + IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet"); + ip_vs_in_stats(cp, skb); ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); if (cp->packet_xmit) diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index bcac316addab..678c5b14841c 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -210,6 +210,17 @@ static void update_defense_level(struct netns_ipvs *ipvs) local_bh_enable(); } +/* Handler for delayed work for expiring no + * destination connections + */ +static void expire_nodest_conn_handler(struct work_struct *work) +{ + struct netns_ipvs *ipvs; + + ipvs = container_of(work, struct netns_ipvs, + expire_nodest_conn_work.work); + ip_vs_expire_nodest_conn_flush(ipvs); +} /* * Timer for checking the defense @@ -1164,6 +1175,12 @@ static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest, list_add(&dest->t_list, &ipvs->dest_trash); dest->idle_start = 0; spin_unlock_bh(&ipvs->dest_trash_lock); + + /* Queue up delayed work to expire all no destination connections. + * No-op when CONFIG_SYSCTL is disabled. + */ + if (!cleanup) + ip_vs_enqueue_expire_nodest_conns(ipvs); } @@ -4086,6 +4103,10 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) queue_delayed_work(system_long_wq, &ipvs->defense_work, DEFENSE_TIMER_PERIOD); + /* Init delayed work for expiring no dest conn */ + INIT_DELAYED_WORK(&ipvs->expire_nodest_conn_work, + expire_nodest_conn_handler); + return 0; } @@ -4093,6 +4114,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs) { struct net *net = ipvs->net; + cancel_delayed_work_sync(&ipvs->expire_nodest_conn_work); cancel_delayed_work_sync(&ipvs->defense_work); cancel_work_sync(&ipvs->defense_work.work); unregister_net_sysctl_table(ipvs->sysctl_hdr); diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index 573cb4481481..e697a824b001 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c @@ -257,15 +257,15 @@ static unsigned int get_uint(struct bitstr *bs, int b) case 4: v |= *bs->cur++; v <<= 8; - /* fall through */ + fallthrough; case 3: v |= *bs->cur++; v <<= 8; - /* fall through */ + fallthrough; case 2: v |= *bs->cur++; v <<= 8; - /* fall through */ + fallthrough; case 1: v |= *bs->cur++; break; diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index a0560d175a7f..95f79980348c 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -610,7 +610,7 @@ void nf_ct_netns_put(struct net *net, uint8_t nfproto) switch (nfproto) { case NFPROTO_BRIDGE: nf_ct_netns_do_put(net, NFPROTO_BRIDGE); - /* fall through */ + fallthrough; case NFPROTO_INET: nf_ct_netns_do_put(net, NFPROTO_IPV4); nf_ct_netns_do_put(net, NFPROTO_IPV6); diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 1926fd56df56..6892e497781c 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -900,7 +900,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct, return -NF_REPEAT; return NF_DROP; } - /* Fall through */ + fallthrough; case TCP_CONNTRACK_IGNORE: /* Ignored packets: * diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 6a26299cb064..a604f43e3e6b 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -60,7 +60,7 @@ 
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, ntohs(tuple->src.u.tcp.port), ntohs(tuple->dst.u.tcp.port)); break; - case IPPROTO_UDPLITE: /* fallthrough */ + case IPPROTO_UDPLITE: case IPPROTO_UDP: seq_printf(s, "sport=%hu dport=%hu ", ntohs(tuple->src.u.udp.port), diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index bfc555fcbc72..ea923f8cf9c4 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -408,7 +408,7 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple, static const unsigned int max_attempts = 128; switch (tuple->dst.protonum) { - case IPPROTO_ICMP: /* fallthrough */ + case IPPROTO_ICMP: case IPPROTO_ICMPV6: /* id is same for either direction... */ keyptr = &tuple->src.u.icmp.id; @@ -442,11 +442,11 @@ static void nf_nat_l4proto_unique_tuple(struct nf_conntrack_tuple *tuple, } goto find_free_id; #endif - case IPPROTO_UDP: /* fallthrough */ - case IPPROTO_UDPLITE: /* fallthrough */ - case IPPROTO_TCP: /* fallthrough */ - case IPPROTO_SCTP: /* fallthrough */ - case IPPROTO_DCCP: /* fallthrough */ + case IPPROTO_UDP: + case IPPROTO_UDPLITE: + case IPPROTO_TCP: + case IPPROTO_SCTP: + case IPPROTO_DCCP: if (maniptype == NF_NAT_MANIP_SRC) keyptr = &tuple->src.u.all; else diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index ebcdc8e54476..9cca35d22927 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -704,8 +704,7 @@ ipv4_synproxy_hook(void *priv, struct sk_buff *skb, nf_ct_seqadj_init(ct, ctinfo, 0); synproxy->tsoff = 0; this_cpu_inc(snet->stats->conn_reopened); - - /* fall through */ + fallthrough; case TCP_CONNTRACK_SYN_SENT: if (!synproxy_parse_options(skb, thoff, th, &opts)) return NF_DROP; @@ -1128,8 +1127,7 @@ ipv6_synproxy_hook(void *priv, struct sk_buff *skb, nf_ct_seqadj_init(ct, ctinfo, 0); synproxy->tsoff = 0; this_cpu_inc(snet->stats->conn_reopened); - - /* fall through */ + fallthrough; case TCP_CONNTRACK_SYN_SENT: if (!synproxy_parse_options(skb, thoff, th, &opts)) return NF_DROP; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index de70a7c4e769..dc0e4f5cb975 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2084,7 +2084,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, if (nla[NFTA_CHAIN_HOOK]) { if (!nft_is_base_chain(chain)) - return -EBUSY; + return -EEXIST; err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family, false); @@ -2094,21 +2094,21 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, basechain = nft_base_chain(chain); if (basechain->type != hook.type) { nft_chain_release_hook(&hook); - return -EBUSY; + return -EEXIST; } if (ctx->family == NFPROTO_NETDEV) { if (!nft_hook_list_equal(&basechain->hook_list, &hook.list)) { nft_chain_release_hook(&hook); - return -EBUSY; + return -EEXIST; } } else { ops = &basechain->ops; if (ops->hooknum != hook.num || ops->priority != hook.priority) { nft_chain_release_hook(&hook); - return -EBUSY; + return -EEXIST; } } nft_chain_release_hook(&hook); @@ -2496,6 +2496,7 @@ nla_put_failure: struct nft_expr_info { const struct nft_expr_ops *ops; + const struct nlattr *attr; struct nlattr *tb[NFT_EXPR_MAXATTR + 1]; }; @@ -2543,7 +2544,9 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, } else ops = type->ops; + info->attr = nla; info->ops = ops; + return 0; err1: @@ -3201,8 +3204,10 @@ static int nf_tables_newrule(struct net *net, struct 
sock *nlsk, expr = nft_expr_first(rule); for (i = 0; i < n; i++) { err = nf_tables_newexpr(&ctx, &info[i], expr); - if (err < 0) + if (err < 0) { + NL_SET_BAD_ATTR(extack, info[i].attr); goto err2; + } if (info[i].ops->validate) nft_validate_state_update(net, NFT_VALIDATE_NEED); @@ -4362,7 +4367,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, case NFT_TRANS_ABORT: case NFT_TRANS_RELEASE: set->use--; - /* fall through */ + fallthrough; default: nf_tables_unbind_set(ctx, set, binding, phase == NFT_TRANS_COMMIT); @@ -5244,10 +5249,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^ nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) || nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^ - nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) { - err = -EBUSY; + nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) goto err_element_clash; - } if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) && nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) && memcmp(nft_set_ext_data(ext), @@ -5255,7 +5258,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) && nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF) && *nft_set_ext_obj(ext) != *nft_set_ext_obj(ext2))) - err = -EBUSY; + goto err_element_clash; else if (!(nlmsg_flags & NLM_F_EXCL)) err = 0; } else if (err == -ENOTEMPTY) { @@ -6243,7 +6246,7 @@ void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx, case NFT_TRANS_ABORT: case NFT_TRANS_RELEASE: flowtable->use--; - /* fall through */ + fallthrough; default: return; } @@ -6405,7 +6408,7 @@ static int nft_register_flowtable_net_hooks(struct net *net, list_for_each_entry(hook2, &ft->hook_list, list) { if (hook->ops.dev == hook2->ops.dev && hook->ops.pf == hook2->ops.pf) { - err = -EBUSY; + err = -EEXIST; goto err_unregister_net_hooks; } } @@ -7249,7 +7252,7 @@ static int nf_tables_validate(struct net *net) break; case NFT_VALIDATE_NEED: nft_validate_state_update(net, NFT_VALIDATE_DO); - /* fall through */ + fallthrough; case NFT_VALIDATE_DO: list_for_each_entry(table, &net->nft.tables, list) { if (nft_table_validate(net, table) < 0) @@ -8323,7 +8326,7 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, default: return -EINVAL; } - /* fall through */ + fallthrough; case NFT_CONTINUE: case NFT_BREAK: case NFT_RETURN: diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 96c74c4c7176..587897a2498b 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -213,7 +213,7 @@ next_rule: jumpstack[stackptr].chain = chain; jumpstack[stackptr].rules = rules + 1; stackptr++; - /* fall through */ + fallthrough; case NFT_GOTO: nft_trace_packet(&info, chain, rule, NFT_TRACETYPE_RULE); diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 5827117f2635..5bfec829c12f 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org> - * (C) 2011 Intra2net AG <http://www.intra2net.com> + * (C) 2011 Intra2net AG <https://www.intra2net.com> */ #include <linux/init.h> #include <linux/module.h> diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index da915c224a82..89a381f7f945 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -451,7 +451,7 @@ static int 
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 96c74c4c7176..587897a2498b 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -213,7 +213,7 @@ next_rule:
 			jumpstack[stackptr].chain = chain;
 			jumpstack[stackptr].rules = rules + 1;
 			stackptr++;
-			/* fall through */
+			fallthrough;
 		case NFT_GOTO:
 			nft_trace_packet(&info, chain, rule,
 					 NFT_TRACETYPE_RULE);
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 5827117f2635..5bfec829c12f 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org>
- * (C) 2011 Intra2net AG <http://www.intra2net.com>
+ * (C) 2011 Intra2net AG <https://www.intra2net.com>
  */
 #include <linux/init.h>
 #include <linux/module.h>
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index da915c224a82..89a381f7f945 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -451,7 +451,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
 	case IPPROTO_TCP:
 		timeouts = nf_tcp_pernet(net)->timeouts;
 		break;
-	case IPPROTO_UDP: /* fallthrough */
+	case IPPROTO_UDP:
 	case IPPROTO_UDPLITE:
 		timeouts = nf_udp_pernet(net)->timeouts;
 		break;
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 8a28c127effc..16f4d84599ac 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -43,7 +43,7 @@ void nft_cmp_eval(const struct nft_expr *expr,
 	case NFT_CMP_LT:
 		if (d == 0)
 			goto mismatch;
-		/* fall through */
+		fallthrough;
 	case NFT_CMP_LTE:
 		if (d > 0)
 			goto mismatch;
@@ -51,7 +51,7 @@ void nft_cmp_eval(const struct nft_expr *expr,
 	case NFT_CMP_GT:
 		if (d == 0)
 			goto mismatch;
-		/* fall through */
+		fallthrough;
 	case NFT_CMP_GTE:
 		if (d < 0)
 			goto mismatch;
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 77258af1fce0..322bd674963e 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -129,7 +129,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
 		return;
 	}
 #endif
-	case NFT_CT_BYTES: /* fallthrough */
+	case NFT_CT_BYTES:
 	case NFT_CT_PKTS: {
 		const struct nf_conn_acct *acct = nf_conn_acct_find(ct);
 		u64 count = 0;
@@ -1013,8 +1013,8 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
 		help6 = nf_conntrack_helper_try_module_get(name, family,
 							   priv->l4proto);
 		break;
-	case NFPROTO_NETDEV: /* fallthrough */
-	case NFPROTO_BRIDGE: /* same */
+	case NFPROTO_NETDEV:
+	case NFPROTO_BRIDGE:
 	case NFPROTO_INET:
 		help4 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV4,
 							   priv->l4proto);
diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
index cfac0964f48d..4dfdaeaf09a5 100644
--- a/net/netfilter/nft_fib.c
+++ b/net/netfilter/nft_fib.c
@@ -32,7 +32,7 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
 	unsigned int hooks;
 
 	switch (priv->result) {
-	case NFT_FIB_RESULT_OIF: /* fallthrough */
+	case NFT_FIB_RESULT_OIF:
 	case NFT_FIB_RESULT_OIFNAME:
 		hooks = (1 << NF_INET_PRE_ROUTING);
 		break;
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 9e556638bb32..c63eb3b17178 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -103,9 +103,9 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
 {
 	const struct nft_immediate_expr *priv = nft_expr_priv(expr);
 	const struct nft_data *data = &priv->data;
+	struct nft_rule *rule, *n;
 	struct nft_ctx chain_ctx;
 	struct nft_chain *chain;
-	struct nft_rule *rule;
 
 	if (priv->dreg != NFT_REG_VERDICT)
 		return;
@@ -121,7 +121,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
 	chain_ctx = *ctx;
 	chain_ctx.chain = chain;
 
-	list_for_each_entry(rule, &chain->rules, list)
+	list_for_each_entry_safe(rule, n, &chain->rules, list)
 		nf_tables_rule_release(&chain_ctx, rule);
 
 	nf_tables_chain_destroy(&chain_ctx);
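The nft_immediate.c hunk fixes a use-after-free: nf_tables_rule_release() frees the rule being visited, so the loop must use the _safe variant, which caches the next pointer before the body runs. A minimal sketch of the idiom with a hypothetical item type:

/* Sketch, assuming <linux/list.h> and <linux/slab.h>.
 * list_for_each_entry_safe() reads "n = next entry" before the body
 * executes, so freeing "it" cannot poison the iteration the way plain
 * list_for_each_entry() would.
 */
struct item {
	struct list_head list;
};

static void release_all(struct list_head *head)
{
	struct item *it, *n;

	list_for_each_entry_safe(it, n, head, list) {
		list_del(&it->list);
		kfree(it);	/* safe: "n" was saved before this */
	}
}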
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index a7de3a58f553..ed7cb9f747f6 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -467,7 +467,7 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
 	case IPPROTO_UDP:
 		if (!nft_payload_udp_checksum(skb, pkt->xt.thoff))
 			return -1;
-		/* Fall through. */
+		fallthrough;
 	case IPPROTO_UDPLITE:
 		*l4csum_offset = offsetof(struct udphdr, check);
 		break;
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index cc6082a5f7ad..9944523f5c2c 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -312,7 +312,7 @@
  *      Jay Ligatti, Josh Kuhn, and Chris Gage.
  *      Proceedings of the IEEE International Conference on Computer
  *      Communication Networks (ICCCN), August 2010.
- *      http://www.cse.usf.edu/~ligatti/papers/grouper-conf.pdf
+ *      https://www.cse.usf.edu/~ligatti/papers/grouper-conf.pdf
  *
  * [Rottenstreich 2010]
  *      Worst-Case TCAM Rule Expansion
@@ -325,7 +325,7 @@
  *      Kirill Kogan, Sergey Nikolenko, Ori Rottenstreich, William Culhane,
  *      and Patrick Eugster.
  *      Proceedings of the 2014 ACM conference on SIGCOMM, August 2014.
- *      http://www.sigcomm.org/sites/default/files/ccr/papers/2014/August/2619239-2626294.pdf
+ *      https://www.sigcomm.org/sites/default/files/ccr/papers/2014/August/2619239-2626294.pdf
  */
 
 #include <linux/kernel.h>
diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
index 51b454d8fa9c..cedf47ab3c6f 100644
--- a/net/netfilter/utils.c
+++ b/net/netfilter/utils.c
@@ -25,7 +25,7 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			break;
 		}
-		/* fall through */
+		fallthrough;
 	case CHECKSUM_NONE:
 		if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP)
 			skb->csum = 0;
@@ -51,7 +51,7 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
 	case CHECKSUM_COMPLETE:
 		if (len == skb->len - dataoff)
 			return nf_ip_checksum(skb, hook, dataoff, protocol);
-		/* fall through */
+		fallthrough;
 	case CHECKSUM_NONE:
 		skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
 					       skb->len - dataoff, 0);
@@ -79,7 +79,7 @@ __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 			break;
 		}
-		/* fall through */
+		fallthrough;
 	case CHECKSUM_NONE:
 		skb->csum = ~csum_unfold(
 				csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
@@ -106,7 +106,7 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
 	case CHECKSUM_COMPLETE:
 		if (len == skb->len - dataoff)
 			return nf_ip6_checksum(skb, hook, dataoff, protocol);
-		/* fall through */
+		fallthrough;
 	case CHECKSUM_NONE:
 		hsum = skb_checksum(skb, 0, dataoff, 0);
 		skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 91bf6635ea9e..73d343abd115 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1571,7 +1571,7 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
 		trav->curr = trav->curr->next;
 		if (trav->curr != trav->head)
 			break;
-		/* fall through */
+		fallthrough;
 	default:
 		return NULL;
 	}
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index a5c8b653476a..76acecf3e757 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -6,7 +6,7 @@
  * with the SECMARK target and state match.
  *
  * Based somewhat on CONNMARK:
- *   Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
+ *   Copyright (C) 2002,2004 MARA Systems AB <https://www.marasystems.com>
  *    by Henrik Nordstrom <hno@marasystems.com>
  *
  * (C) 2006,2008 Red Hat, Inc., James Morris <jmorris@redhat.com>
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index eec2f3a88d73..e5ebc0810675 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -2,7 +2,7 @@
 /*
  *	xt_connmark - Netfilter module to operate on connection marks
  *
- *	Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
+ *	Copyright (C) 2002,2004 MARA Systems AB <https://www.marasystems.com>
  *	by Henrik Nordstrom <hno@marasystems.com>
  *	Copyright © CC Computer Consultants GmbH, 2007 - 2008
  *	Jan Engelhardt <jengelh@medozas.de>
diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
index 5aab6df74e0f..a97c2259bbc8 100644
--- a/net/netfilter/xt_nfacct.c
+++ b/net/netfilter/xt_nfacct.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * (C) 2011 Pablo Neira Ayuso <pablo@netfilter.org>
- * (C) 2011 Intra2net AG <http://www.intra2net.com>
+ * (C) 2011 Intra2net AG <https://www.intra2net.com>
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index 67cb98489415..6aa12d0f54e2 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -5,7 +5,7 @@
  *	based on ipt_time by Fabrice MARIE <fabrice@netfilter.org>
  *	This is a module which is used for time matching
  *	It is using some modified code from dietlibc (localtime() function)
- *	that you can find at http://www.fefe.de/dietlibc/
+ *	that you can find at https://www.fefe.de/dietlibc/
  *	This file is distributed under the terms of the GNU General Public
  *	License (GPL). Copies of the GPL can be obtained from gnu.org/gpl.
  */
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 4340f25fe390..98d393e70de3 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -276,10 +276,6 @@ void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
 	ovs_ct_update_key(skb, NULL, key, false, false);
 }
 
-#define IN6_ADDR_INITIALIZER(ADDR) \
-	{ (ADDR).s6_addr32[0], (ADDR).s6_addr32[1], \
-	  (ADDR).s6_addr32[2], (ADDR).s6_addr32[3] }
-
 int ovs_ct_put_key(const struct sw_flow_key *swkey,
 		   const struct sw_flow_key *output, struct sk_buff *skb)
 {
@@ -301,24 +297,30 @@ int ovs_ct_put_key(const struct sw_flow_key *swkey,
 
 	if (swkey->ct_orig_proto) {
 		if (swkey->eth.type == htons(ETH_P_IP)) {
-			struct ovs_key_ct_tuple_ipv4 orig = {
-				output->ipv4.ct_orig.src,
-				output->ipv4.ct_orig.dst,
-				output->ct.orig_tp.src,
-				output->ct.orig_tp.dst,
-				output->ct_orig_proto,
-			};
+			struct ovs_key_ct_tuple_ipv4 orig;
+
+			memset(&orig, 0, sizeof(orig));
+			orig.ipv4_src = output->ipv4.ct_orig.src;
+			orig.ipv4_dst = output->ipv4.ct_orig.dst;
+			orig.src_port = output->ct.orig_tp.src;
+			orig.dst_port = output->ct.orig_tp.dst;
+			orig.ipv4_proto = output->ct_orig_proto;
+
 			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV4,
 				    sizeof(orig), &orig))
 				return -EMSGSIZE;
 		} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
-			struct ovs_key_ct_tuple_ipv6 orig = {
-				IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.src),
-				IN6_ADDR_INITIALIZER(output->ipv6.ct_orig.dst),
-				output->ct.orig_tp.src,
-				output->ct.orig_tp.dst,
-				output->ct_orig_proto,
-			};
+			struct ovs_key_ct_tuple_ipv6 orig;
+
+			memset(&orig, 0, sizeof(orig));
+			memcpy(orig.ipv6_src, output->ipv6.ct_orig.src.s6_addr32,
+			       sizeof(orig.ipv6_src));
+			memcpy(orig.ipv6_dst, output->ipv6.ct_orig.dst.s6_addr32,
+			       sizeof(orig.ipv6_dst));
+			orig.src_port = output->ct.orig_tp.src;
+			orig.dst_port = output->ct.orig_tp.dst;
+			orig.ipv6_proto = output->ct_orig_proto;
+
 			if (nla_put(skb, OVS_KEY_ATTR_CT_ORIG_TUPLE_IPV6,
 				    sizeof(orig), &orig))
 				return -EMSGSIZE;
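The conntrack.c change replaces positional struct initializers with memset() plus named assignments. Plausibly the motivation, as in similar netlink fixes, is twofold: the initializer neither names the fields it fills nor guarantees padding bytes are zeroed, and these structs are copied verbatim to userspace via nla_put(), so uninitialized padding could leak kernel stack. A sketch of the pattern with a hypothetical wire-format struct:

/* Sketch with a hypothetical struct wire_tuple. The u8 proto member
 * leaves 3 bytes of tail padding; "= { ... }" zeroes members but makes
 * no promise about padding, while memset() clears the whole object
 * before it is handed to nla_put().
 */
struct wire_tuple {
	__be32	src;
	__be32	dst;
	__be16	sport;
	__be16	dport;
	__u8	proto;		/* 3 bytes of tail padding follow */
};

static int put_tuple(struct sk_buff *skb, int attr, __be32 src, __be32 dst)
{
	struct wire_tuple t;

	memset(&t, 0, sizeof(t));	/* padding is now zero, too */
	t.src = src;
	t.dst = dst;

	return nla_put(skb, attr, sizeof(t), &t);
}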
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 6b6822f82f70..42f8cc70bb2c 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -225,13 +225,14 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
 	struct dp_stats_percpu *stats;
 	u64 *stats_counter;
 	u32 n_mask_hit;
+	u32 n_cache_hit;
 	int error;
 
 	stats = this_cpu_ptr(dp->stats_percpu);
 
 	/* Look up flow. */
 	flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
-					 &n_mask_hit);
+					 &n_mask_hit, &n_cache_hit);
 	if (unlikely(!flow)) {
 		struct dp_upcall_info upcall;
 
@@ -262,6 +263,7 @@ out:
 	u64_stats_update_begin(&stats->syncp);
 	(*stats_counter)++;
 	stats->n_mask_hit += n_mask_hit;
+	stats->n_cache_hit += n_cache_hit;
 	u64_stats_update_end(&stats->syncp);
 }
 
@@ -699,6 +701,7 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
 		stats->n_missed += local_stats.n_missed;
 		stats->n_lost += local_stats.n_lost;
 		mega_stats->n_mask_hit += local_stats.n_mask_hit;
+		mega_stats->n_cache_hit += local_stats.n_cache_hit;
 	}
 }
 
@@ -1495,6 +1498,7 @@ static size_t ovs_dp_cmd_msg_size(void)
 	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
 	msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
 	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
+	msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_MASKS_CACHE_SIZE */
 
 	return msgsize;
 }
@@ -1532,6 +1536,10 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
 	if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
 		goto nla_put_failure;
 
+	if (nla_put_u32(skb, OVS_DP_ATTR_MASKS_CACHE_SIZE,
+			ovs_flow_tbl_masks_cache_size(&dp->table)))
+		goto nla_put_failure;
+
 	genlmsg_end(skb, ovs_header);
 	return 0;
 
@@ -1596,6 +1604,16 @@ static int ovs_dp_change(struct datapath *dp, struct nlattr *a[])
 #endif
 	}
 
+	if (a[OVS_DP_ATTR_MASKS_CACHE_SIZE]) {
+		int err;
+		u32 cache_size;
+
+		cache_size = nla_get_u32(a[OVS_DP_ATTR_MASKS_CACHE_SIZE]);
+		err = ovs_flow_tbl_masks_cache_resize(&dp->table, cache_size);
+		if (err)
+			return err;
+	}
+
 	dp->user_features = user_features;
 
 	if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING)
@@ -1884,6 +1902,8 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
 	[OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
 	[OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
 	[OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
+	[OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0,
+		PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
 };
 
 static const struct genl_ops dp_datapath_genl_ops[] = {
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 24fcec22fde2..38f7d3e66ca6 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -38,12 +38,15 @@
  * @n_mask_hit: Number of masks looked up for flow match.
  *   @n_mask_hit / (@n_hit + @n_missed) will be the average masks looked
  *   up per packet.
+ * @n_cache_hit: The number of received packets that had their mask found using
+ *   the mask cache.
  */
 struct dp_stats_percpu {
 	u64 n_hit;
 	u64 n_missed;
 	u64 n_lost;
 	u64 n_mask_hit;
+	u64 n_cache_hit;
 	struct u64_stats_sync syncp;
 };
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 9d375e74b607..03942c30d83e 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -890,6 +890,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 	if (static_branch_unlikely(&tc_recirc_sharing_support)) {
 		tc_ext = skb_ext_find(skb, TC_SKB_EXT);
 		key->recirc_id = tc_ext ? tc_ext->chain : 0;
+		OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
 	} else {
 		key->recirc_id = 0;
 	}
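The datapath now exposes the flow-mask cache size through a new OVS_DP_ATTR_MASKS_CACHE_SIZE attribute, readable via ovs_dp_cmd_fill_info() and writable via ovs_dp_change(), with NLA_POLICY_RANGE rejecting sizes that could not fit a per-CPU allocation before the handler runs. A sketch of such a policy-validated u32 knob, with hypothetical attribute and helper names (not the OVS ones):

/* Sketch only; MY_ATTR_CACHE_SIZE, MAX_ENTRIES and my_resize() are
 * hypothetical. The genetlink core applies the policy before the
 * handler runs, so my_cmd() only ever sees values in 0..MAX_ENTRIES.
 */
#define MAX_ENTRIES 4096		/* assumed bound for the sketch */

enum { MY_ATTR_UNSPEC, MY_ATTR_CACHE_SIZE, __MY_ATTR_MAX };

static int my_resize(u32 size);		/* hypothetical helper */

static const struct nla_policy my_policy[__MY_ATTR_MAX] = {
	[MY_ATTR_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0, MAX_ENTRIES),
};

static int my_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[MY_ATTR_CACHE_SIZE])
		return my_resize(nla_get_u32(info->attrs[MY_ATTR_CACHE_SIZE]));
	return 0;
}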
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index af22c9ee28dd..6527d84c3ea6 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -38,8 +38,8 @@
 #define MASK_ARRAY_SIZE_MIN	16
 #define REHASH_INTERVAL		(10 * 60 * HZ)
 
+#define MC_DEFAULT_HASH_ENTRIES	256
 #define MC_HASH_SHIFT		8
-#define MC_HASH_ENTRIES		(1u << MC_HASH_SHIFT)
 #define MC_HASH_SEGS		((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
 
 static struct kmem_cache *flow_cache;
@@ -341,15 +341,79 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
 	}
 }
 
+static void __mask_cache_destroy(struct mask_cache *mc)
+{
+	free_percpu(mc->mask_cache);
+	kfree(mc);
+}
+
+static void mask_cache_rcu_cb(struct rcu_head *rcu)
+{
+	struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);
+
+	__mask_cache_destroy(mc);
+}
+
+static struct mask_cache *tbl_mask_cache_alloc(u32 size)
+{
+	struct mask_cache_entry __percpu *cache = NULL;
+	struct mask_cache *new;
+
+	/* Only allow size to be 0, or a power of 2, and does not exceed
+	 * percpu allocation size.
+	 */
+	if ((!is_power_of_2(size) && size != 0) ||
+	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	new->cache_size = size;
+	if (new->cache_size > 0) {
+		cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
+						  new->cache_size),
+				       __alignof__(struct mask_cache_entry));
+		if (!cache) {
+			kfree(new);
+			return NULL;
+		}
+	}
+
+	new->mask_cache = cache;
+	return new;
+}
+
+int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
+{
+	struct mask_cache *mc = rcu_dereference(table->mask_cache);
+	struct mask_cache *new;
+
+	if (size == mc->cache_size)
+		return 0;
+
+	if ((!is_power_of_2(size) && size != 0) ||
+	    (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
+		return -EINVAL;
+
+	new = tbl_mask_cache_alloc(size);
+	if (!new)
+		return -ENOMEM;
+
+	rcu_assign_pointer(table->mask_cache, new);
+	call_rcu(&mc->rcu, mask_cache_rcu_cb);
+
+	return 0;
+}
+
 int ovs_flow_tbl_init(struct flow_table *table)
 {
 	struct table_instance *ti, *ufid_ti;
+	struct mask_cache *mc;
 	struct mask_array *ma;
 
-	table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
-					   MC_HASH_ENTRIES,
-					   __alignof__(struct mask_cache_entry));
-	if (!table->mask_cache)
+	mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
+	if (!mc)
 		return -ENOMEM;
 
 	ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
@@ -367,6 +431,7 @@ int ovs_flow_tbl_init(struct flow_table *table)
 	rcu_assign_pointer(table->ti, ti);
 	rcu_assign_pointer(table->ufid_ti, ufid_ti);
 	rcu_assign_pointer(table->mask_array, ma);
+	rcu_assign_pointer(table->mask_cache, mc);
 	table->last_rehash = jiffies;
 	table->count = 0;
 	table->ufid_count = 0;
@@ -377,7 +442,7 @@ free_ti:
 free_mask_array:
 	__mask_array_destroy(ma);
 free_mask_cache:
-	free_percpu(table->mask_cache);
+	__mask_cache_destroy(mc);
 	return -ENOMEM;
 }
 
@@ -453,9 +518,11 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
 {
 	struct table_instance *ti = rcu_dereference_raw(table->ti);
 	struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
+	struct mask_cache *mc = rcu_dereference(table->mask_cache);
+	struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
 
-	free_percpu(table->mask_cache);
-	call_rcu(&table->mask_array->rcu, mask_array_rcu_cb);
+	call_rcu(&mc->rcu, mask_cache_rcu_cb);
+	call_rcu(&ma->rcu, mask_array_rcu_cb);
 	table_instance_destroy(table, ti, ufid_ti, false);
 }
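ovs_flow_tbl_masks_cache_resize() is the classic RCU publish/retire sequence: build the replacement object, publish it with rcu_assign_pointer(), and retire the old one with call_rcu() so readers still holding the stale pointer finish before it is freed. A condensed sketch of the pattern with a hypothetical cfg object (locking on the update side elided for brevity):

/* Sketch of RCU pointer replacement with a hypothetical struct cfg.
 * Readers use rcu_read_lock()/rcu_dereference(); the updater never
 * frees the old object directly, it defers to call_rcu().
 */
struct cfg {
	struct rcu_head rcu;
	u32 size;
};

static struct cfg __rcu *active_cfg;

static void cfg_rcu_free(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct cfg, rcu));
}

static int cfg_resize(u32 size)
{
	struct cfg *old = rcu_dereference_protected(active_cfg, 1);
	struct cfg *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;
	new->size = size;

	rcu_assign_pointer(active_cfg, new);	/* publish */
	call_rcu(&old->rcu, cfg_rcu_free);	/* retire after grace period */
	return 0;
}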
@@ -667,6 +734,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 				   struct mask_array *ma,
 				   const struct sw_flow_key *key,
 				   u32 *n_mask_hit,
+				   u32 *n_cache_hit,
 				   u32 *index)
 {
 	u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
@@ -682,6 +750,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 			u64_stats_update_begin(&ma->syncp);
 			usage_counters[*index]++;
 			u64_stats_update_end(&ma->syncp);
+			(*n_cache_hit)++;
 			return flow;
 		}
 	}
@@ -719,8 +788,10 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 					  const struct sw_flow_key *key,
 					  u32 skb_hash,
-					  u32 *n_mask_hit)
+					  u32 *n_mask_hit,
+					  u32 *n_cache_hit)
 {
+	struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
 	struct mask_array *ma = rcu_dereference(tbl->mask_array);
 	struct table_instance *ti = rcu_dereference(tbl->ti);
 	struct mask_cache_entry *entries, *ce;
@@ -729,10 +800,13 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	int seg;
 
 	*n_mask_hit = 0;
-	if (unlikely(!skb_hash)) {
+	*n_cache_hit = 0;
+	if (unlikely(!skb_hash || mc->cache_size == 0)) {
 		u32 mask_index = 0;
+		u32 cache = 0;
 
-		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
+		return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
+				   &mask_index);
 	}
 
 	/* Pre and post recirculation flows usually have the same skb_hash
@@ -743,17 +817,17 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	ce = NULL;
 	hash = skb_hash;
-	entries = this_cpu_ptr(tbl->mask_cache);
+	entries = this_cpu_ptr(mc->mask_cache);
 
 	/* Find the cache entry 'ce' to operate on. */
 	for (seg = 0; seg < MC_HASH_SEGS; seg++) {
-		int index = hash & (MC_HASH_ENTRIES - 1);
+		int index = hash & (mc->cache_size - 1);
 		struct mask_cache_entry *e;
 
 		e = &entries[index];
 		if (e->skb_hash == skb_hash) {
 			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
-					   &e->mask_index);
+					   n_cache_hit, &e->mask_index);
 			if (!flow)
 				e->skb_hash = 0;
 
 			return flow;
@@ -766,10 +840,12 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
 	}
 
 	/* Cache miss, do full lookup. */
-	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, &ce->mask_index);
+	flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
+			   &ce->mask_index);
 	if (flow)
 		ce->skb_hash = skb_hash;
 
+	*n_cache_hit = 0;
 	return flow;
 }
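ovs_flow_tbl_lookup_stats() probes the per-CPU cache MC_HASH_SEGS times, consuming MC_HASH_SHIFT bits of skb_hash per probe; because cache_size is 0 or a power of two, "hash & (cache_size - 1)" is a cheap modulo. A rough standalone sketch of the probing scheme, assuming kernel u32 types and illustrative constants:

/* Sketch of the segmented probe, assuming cache_size is a power of 2.
 * Each probe uses the low SHIFT bits of a rolling hash, then shifts,
 * so one 32-bit skb_hash yields 32/SHIFT distinct candidate slots.
 */
#define SHIFT	8
#define SEGS	((sizeof(u32) * 8) / SHIFT)	/* 4 probes */

static int find_slot(u32 skb_hash, u32 cache_size,
		     const u32 *slot_hashes /* per-slot stored hashes */)
{
	u32 hash = skb_hash;
	int seg;

	for (seg = 0; seg < SEGS; seg++) {
		u32 index = hash & (cache_size - 1);	/* cheap modulo */

		if (slot_hashes[index] == skb_hash)
			return index;			/* cache hit */
		hash >>= SHIFT;
	}
	return -1;					/* cache miss */
}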
@@ -779,9 +855,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
 	struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
 	struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
 	u32 __always_unused n_mask_hit;
+	u32 __always_unused n_cache_hit;
 	u32 index = 0;
 
-	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
+	return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
 }
 
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
@@ -858,6 +935,13 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table)
 	return READ_ONCE(ma->count);
 }
 
+u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
+{
+	struct mask_cache *mc = rcu_dereference(table->mask_cache);
+
+	return READ_ONCE(mc->cache_size);
+}
+
 static struct table_instance *table_instance_expand(struct table_instance *ti,
 						    bool ufid)
 {
@@ -1086,8 +1170,8 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
 	for (i = 0; i < masks_entries; i++) {
 		int index = masks_and_count[i].index;
 
-		new->masks[new->count++] =
-			rcu_dereference_ovsl(ma->masks[index]);
+		if (ovsl_dereference(ma->masks[index]))
+			new->masks[new->count++] = ma->masks[index];
 	}
 
 	rcu_assign_pointer(table->mask_array, new);
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 1f664b050e3b..74ce48fecba9 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -27,6 +27,12 @@ struct mask_cache_entry {
 	u32 mask_index;
 };
 
+struct mask_cache {
+	struct rcu_head rcu;
+	u32 cache_size;  /* Must be ^2 value. */
+	struct mask_cache_entry __percpu *mask_cache;
+};
+
 struct mask_count {
 	int index;
 	u64 counter;
@@ -53,7 +59,7 @@ struct table_instance {
 struct flow_table {
 	struct table_instance __rcu *ti;
 	struct table_instance __rcu *ufid_ti;
-	struct mask_cache_entry __percpu *mask_cache;
+	struct mask_cache __rcu *mask_cache;
 	struct mask_array __rcu *mask_array;
 	unsigned long last_rehash;
 	unsigned int count;
@@ -77,12 +83,15 @@ int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
 			const struct sw_flow_mask *mask);
 void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
 int ovs_flow_tbl_num_masks(const struct flow_table *table);
+u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table);
+int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size);
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
 				       u32 *bucket, u32 *idx);
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
 					  const struct sw_flow_key *,
 					  u32 skb_hash,
-					  u32 *n_mask_hit);
+					  u32 *n_mask_hit,
+					  u32 *n_cache_hit);
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
 				    const struct sw_flow_key *);
 struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
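The ovs_flow_masks_rebalance() hunk fixes the compaction pass: it previously copied every slot up to masks_entries, including slots that had been cleared to NULL, so the new array's count could include holes. The fix tests the slot before copying. A sketch of the shape of the fix, with illustrative types rather than the OVS ones:

/* Sketch: when compacting an array of RCU-protected slots, test each
 * source slot and copy only live entries, so the destination count
 * never includes NULL holes.
 */
struct slot_array {
	int count;
	void __rcu *slots[];
};

static void compact(struct slot_array *dst, const struct slot_array *src,
		    const int *order, int n)
{
	int i;

	dst->count = 0;
	for (i = 0; i < n; i++) {
		int index = order[i];

		if (rcu_access_pointer(src->slots[index]))
			dst->slots[dst->count++] = src->slots[index];
	}
}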
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 97e27946897f..e6ad42b11835 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -706,8 +706,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 		if (err && err != -EINPROGRESS)
 			goto out_free;
 
-		if (!err)
+		if (!err) {
 			*defrag = true;
+			cb.mru = IPCB(skb)->frag_max_size;
+		}
 	} else { /* NFPROTO_IPV6 */
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
@@ -717,8 +719,10 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
 		if (err && err != -EINPROGRESS)
 			goto out_free;
 
-		if (!err)
+		if (!err) {
 			*defrag = true;
+			cb.mru = IP6CB(skb)->frag_max_size;
+		}
 #else
 		err = -EOPNOTSUPP;
 		goto out_free;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0b8623b3b24f..41a55c6cbeb8 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1629,6 +1629,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
 		if (WARN_ON_ONCE(!ext))
 			return TC_ACT_SHOT;
 		ext->chain = last_executed_chain;
+		ext->mru = qdisc_skb_cb(skb)->mru;
 	}
 
 	return ret;
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 8b0bb600602d..c68019697cfe 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -62,12 +62,10 @@ static int tipc_eth_raw2addr(struct tipc_bearer *b,
 			     struct tipc_media_addr *addr,
 			     char *msg)
 {
-	char bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
 	memset(addr, 0, sizeof(*addr));
 	ether_addr_copy(addr->value, msg);
 	addr->media_id = TIPC_MEDIA_TYPE_ETH;
-	addr->broadcast = !memcmp(addr->value, bcast_mac, ETH_ALEN);
+	addr->broadcast = is_broadcast_ether_addr(addr->value);
 	return 0;
 }
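The tipc change swaps a stack-allocated all-ones MAC plus memcmp() for etherdevice.h's is_broadcast_ether_addr(), which is both clearer and cheaper: it reduces to a handful of byte loads and ANDs instead of a byte-wise memcmp against a temporary. A sketch of what the helper boils down to (see include/linux/etherdevice.h), shown only to contrast with the removed open-coded form:

/* Sketch: a broadcast MAC is all-ones, so AND-ing the six bytes
 * together yields 0xff exactly when every byte is 0xff.
 */
static inline bool my_is_broadcast_ether_addr(const u8 *addr)
{
	return (addr[0] & addr[1] & addr[2] &
		addr[3] & addr[4] & addr[5]) == 0xff;
}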