29 files changed, 313 insertions, 116 deletions
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index 8edaa339d590..39b0b5277b11 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -343,21 +343,19 @@ static void tcan4x5x_get_dt_data(struct m_can_classdev *cdev)
 		of_property_read_bool(cdev->dev->of_node, "ti,nwkrq-voltage-vio");
 }
 
-static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
-			      const struct tcan4x5x_version_info *version_info)
+static int tcan4x5x_get_gpios(struct m_can_classdev *cdev)
 {
 	struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
 	int ret;
 
-	if (version_info->has_wake_pin) {
-		tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake",
-							    GPIOD_OUT_HIGH);
-		if (IS_ERR(tcan4x5x->device_wake_gpio)) {
-			if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
-				return -EPROBE_DEFER;
+	tcan4x5x->device_wake_gpio = devm_gpiod_get_optional(cdev->dev,
+							     "device-wake",
+							     GPIOD_OUT_HIGH);
+	if (IS_ERR(tcan4x5x->device_wake_gpio)) {
+		if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
 
-			tcan4x5x_disable_wake(cdev);
-		}
+		tcan4x5x->device_wake_gpio = NULL;
 	}
 
 	tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
@@ -369,14 +367,31 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
 	if (ret)
 		return ret;
 
-	if (version_info->has_state_pin) {
-		tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
-								      "device-state",
-								      GPIOD_IN);
-		if (IS_ERR(tcan4x5x->device_state_gpio)) {
-			tcan4x5x->device_state_gpio = NULL;
-			tcan4x5x_disable_state(cdev);
-		}
+	tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
+							      "device-state",
+							      GPIOD_IN);
+	if (IS_ERR(tcan4x5x->device_state_gpio))
+		tcan4x5x->device_state_gpio = NULL;
+
+	return 0;
+}
+
+static int tcan4x5x_check_gpios(struct m_can_classdev *cdev,
+				const struct tcan4x5x_version_info *version_info)
+{
+	struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
+	int ret;
+
+	if (version_info->has_wake_pin && !tcan4x5x->device_wake_gpio) {
+		ret = tcan4x5x_disable_wake(cdev);
+		if (ret)
+			return ret;
+	}
+
+	if (version_info->has_state_pin && !tcan4x5x->device_state_gpio) {
+		ret = tcan4x5x_disable_state(cdev);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
@@ -468,15 +483,21 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
 		goto out_m_can_class_free_dev;
 	}
 
+	ret = tcan4x5x_get_gpios(mcan_class);
+	if (ret) {
+		dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
+		goto out_power;
+	}
+
 	version_info = tcan4x5x_find_version(priv);
 	if (IS_ERR(version_info)) {
 		ret = PTR_ERR(version_info);
 		goto out_power;
 	}
 
-	ret = tcan4x5x_get_gpios(mcan_class, version_info);
+	ret = tcan4x5x_check_gpios(mcan_class, version_info);
 	if (ret) {
-		dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
+		dev_err(&spi->dev, "Checking gpios failed %pe\n", ERR_PTR(ret));
 		goto out_power;
 	}
diff --git a/drivers/net/ethernet/airoha/airoha_npu.c b/drivers/net/ethernet/airoha/airoha_npu.c
index 0e5b8c21b9aa..1e58a4aeb9a0 100644
--- a/drivers/net/ethernet/airoha/airoha_npu.c
+++ b/drivers/net/ethernet/airoha/airoha_npu.c
@@ -401,12 +401,13 @@ struct airoha_npu *airoha_npu_get(struct device *dev, dma_addr_t *stats_addr)
 		return ERR_PTR(-ENODEV);
 
 	pdev = of_find_device_by_node(np);
-	of_node_put(np);
 
 	if (!pdev) {
 		dev_err(dev, "cannot find device node %s\n", np->name);
+		of_node_put(np);
 		return ERR_PTR(-ENODEV);
 	}
+	of_node_put(np);
 
 	if (!try_module_get(THIS_MODULE)) {
 		dev_err(dev, "failed to get the device driver module\n");
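The airoha_npu hunk above is a use-after-free fix: the error path printed np->name after of_node_put(np) had already dropped the reference, so the put now happens only after the last use on both paths. A minimal userspace sketch of the same ordering rule, using a hypothetical toy refcount type rather than the kernel's of_node API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for a refcounted object such as a struct device_node. */
struct node {
	int refs;
	char name[16];
};

static struct node *node_get(const char *name)
{
	struct node *n = calloc(1, sizeof(*n));

	if (!n)
		return NULL;
	n->refs = 1;
	strncpy(n->name, name, sizeof(n->name) - 1);
	return n;
}

static void node_put(struct node *n)
{
	if (--n->refs == 0)
		free(n);	/* any later n->name access is a use-after-free */
}

static int lookup(struct node *n)
{
	(void)n;
	return -1;		/* force the error path for the demo */
}

int main(void)
{
	struct node *np = node_get("npu");
	int err;

	if (!np)
		return 1;
	err = lookup(np);
	if (err) {
		/* last use of np comes first ... */
		fprintf(stderr, "cannot find device node %s\n", np->name);
		/* ... and only then is the reference dropped */
		node_put(np);
		return 1;
	}
	node_put(np);
	return 0;
}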
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index 6119a4108838..65a2816142d9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -189,13 +189,14 @@ struct fm10k_q_vector {
 	struct fm10k_ring_container rx, tx;
 
 	struct napi_struct napi;
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
+
 	cpumask_t affinity_mask;
 	char name[IFNAMSIZ + 9];
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *dbg_q_vector;
 #endif /* CONFIG_DEBUG_FS */
-	struct rcu_head rcu;	/* to avoid race with update stats on free */
 
 	/* for dynamic allocation of rings associated with this q_vector */
 	struct fm10k_ring ring[] ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index c67963bfe14e..7c600d6e66ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -945,6 +945,7 @@ struct i40e_q_vector {
 	u16 reg_idx;		/* register index of the interrupt */
 
 	struct napi_struct napi;
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
 
 	struct i40e_ring_container rx;
 	struct i40e_ring_container tx;
@@ -955,7 +956,6 @@ struct i40e_q_vector {
 	cpumask_t affinity_mask;
 	struct irq_affinity_notify affinity_notify;
 
-	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[I40E_INT_NAME_STR_LEN];
 	bool arm_wb_state;
 	bool in_busy_poll;
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index 9fc0fd95a13d..cb71eca6a85b 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -606,7 +606,7 @@ void ice_debugfs_fwlog_init(struct ice_pf *pf)
 
 	pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
 						      pf->ice_debugfs_pf);
-	if (IS_ERR(pf->ice_debugfs_pf))
+	if (IS_ERR(pf->ice_debugfs_pf_fwlog))
 		goto err_create_module_files;
 
 	fw_modules_dir = debugfs_create_dir("modules",
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 2410aee59fb2..d132eb477551 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -2226,7 +2226,8 @@ bool ice_lag_is_switchdev_running(struct ice_pf *pf)
 	struct ice_lag *lag = pf->lag;
 	struct net_device *tmp_nd;
 
-	if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
+	if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) ||
+	    !lag || !lag->upper_netdev)
 		return false;
 
 	rcu_read_lock();
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 47311b134a7a..e6acd791bf64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -507,9 +507,10 @@ struct ixgbe_q_vector {
 	struct ixgbe_ring_container rx, tx;
 
 	struct napi_struct napi;
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
+
 	cpumask_t affinity_mask;
 	int numa_node;
-	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[IFNAMSIZ + 9];
 
 	/* for dynamic allocation of rings associated with this q_vector */
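The three q_vector hunks above are layout-only moves of the rcu_head member; the comment carried along with it names the idiom these drivers rely on: the vector is freed via kfree_rcu() so that a stats reader walking the structure under rcu_read_lock() cannot race with the free. A condensed, hypothetical kernel-side sketch of that idiom (demo_vector and its helpers are illustrative, not driver code):

#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Hypothetical object mirroring the q_vector layout above: readers walk it
 * under rcu_read_lock() while teardown frees it with kfree_rcu().
 */
struct demo_vector {
	struct rcu_head rcu;	/* to avoid race with readers on free */
	u64 packets;
};

static int demo_publish(struct demo_vector __rcu **slot)
{
	struct demo_vector *v = kzalloc(sizeof(*v), GFP_KERNEL);

	if (!v)
		return -ENOMEM;
	rcu_assign_pointer(*slot, v);
	return 0;
}

/* Reader side: stats collection may run concurrently with teardown. */
static u64 demo_read_stats(struct demo_vector __rcu **slot)
{
	struct demo_vector *v;
	u64 packets = 0;

	rcu_read_lock();
	v = rcu_dereference(*slot);
	if (v)
		packets = v->packets;
	rcu_read_unlock();
	return packets;
}

/* Writer side: unpublish, then let the grace period expire before freeing. */
static void demo_free_vector(struct demo_vector __rcu **slot)
{
	struct demo_vector *v = rcu_dereference_protected(*slot, true);

	RCU_INIT_POINTER(*slot, NULL);
	kfree_rcu(v, rcu);	/* frees only after all readers are done */
}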
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 84b1ab8233b8..7462514c7f3d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1154,8 +1154,9 @@ static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
 	}
 }
 
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
-				 u32 cqe_bcnt)
+static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
+					 struct mlx5_cqe64 *cqe,
+					 u32 cqe_bcnt)
 {
 	struct ethhdr *eth = (struct ethhdr *)(skb->data);
 	struct tcphdr *tcp;
@@ -1205,6 +1206,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
 		tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
 					  &ipv6->daddr, check);
 	}
+
+	return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
 }
 
 static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
@@ -1561,8 +1564,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 		mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
 
 	if (lro_num_seg > 1) {
-		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
-		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+		unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
+
+		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
 		/* Subtract one since we already counted this as one
 		 * "regular" packet in mlx5e_complete_rx_cqe()
 		 */
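The mlx5 change makes mlx5e_lro_update_hdr() return the parsed header length so that gso_size is computed from payload bytes only; cqe_bcnt also covers the Ethernet/IP/TCP headers, so the old division overstated the MSS. A standalone check of the arithmetic (the 54-byte header and 1448-byte MSS are illustrative numbers, not taken from the patch):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Assumed example: 3 coalesced segments of MSS 1448 behind one
	 * 54-byte Ethernet + IPv4 + TCP header (14 + 20 + 20).
	 */
	unsigned int hdrlen = 54, mss = 1448, lro_num_seg = 3;
	unsigned int cqe_bcnt = hdrlen + lro_num_seg * mss;	/* 4398 */

	/* old: headers are smeared across the segments -> 1466 > real MSS */
	printf("old gso_size: %u\n", DIV_ROUND_UP(cqe_bcnt, lro_num_seg));
	/* new: payload only -> exactly the original MSS, 1448 */
	printf("new gso_size: %u\n", DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg));
	return 0;
}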
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 9a47015254bb..ea33ae39be6b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -433,6 +433,12 @@ static int intel_crosststamp(ktime_t *device,
 		return -ETIMEDOUT;
 	}
 
+	*system = (struct system_counterval_t) {
+		.cycles = 0,
+		.cs_id = CSID_X86_ART,
+		.use_nsecs = false,
+	};
+
 	num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
 			GMAC_TIMESTAMP_ATSNS_MASK) >>
 			GMAC_TIMESTAMP_ATSNS_SHIFT;
@@ -448,7 +454,7 @@ static int intel_crosststamp(ktime_t *device,
 	}
 
 	system->cycles *= intel_priv->crossts_adj;
-	system->cs_id = CSID_X86_ART;
+
 	priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
 
 	return 0;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 0f4be72116b8..f0823aa1ede6 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1912,7 +1912,6 @@ static void wx_configure_rx_ring(struct wx *wx,
 				 struct wx_ring *ring)
 {
 	u16 reg_idx = ring->reg_idx;
-	union wx_rx_desc *rx_desc;
 	u64 rdba = ring->dma;
 	u32 rxdctl;
 
@@ -1942,9 +1941,9 @@ static void wx_configure_rx_ring(struct wx *wx,
 	memset(ring->rx_buffer_info, 0,
 	       sizeof(struct wx_rx_buffer) * ring->count);
 
-	/* initialize Rx descriptor 0 */
-	rx_desc = WX_RX_DESC(ring, 0);
-	rx_desc->wb.upper.length = 0;
+	/* reset ntu and ntc to place SW in sync with hardware */
+	ring->next_to_clean = 0;
+	ring->next_to_use = 0;
 
 	/* enable receive descriptor ring */
 	wr32m(wx, WX_PX_RR_CFG(reg_idx),
@@ -2778,6 +2777,8 @@ void wx_update_stats(struct wx *wx)
 		hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
 	}
 
+	/* qmprc is not cleared on read, manual reset it */
+	hwstats->qmprc = 0;
 	for (i = wx->num_vfs * wx->num_rx_queues_per_pool;
 	     i < wx->mac.max_rx_queues; i++)
 		hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
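In the wx_update_stats() hunk above, the comment notes that the per-queue WX_PX_MPRC registers are not clear-on-read, so the software accumulator must be zeroed before the += loop or every poll re-adds the full register values. A tiny standalone illustration of why the reset turns a drifting sum into a stable snapshot (the register values are made up):

#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 4

/* Hypothetical per-queue packet counters as the hardware exposes them:
 * reading does not clear them, they keep their accumulated values.
 */
static uint32_t hw_mprc[NUM_QUEUES] = { 10, 20, 30, 40 };

static uint64_t qmprc;

static void update_stats(void)
{
	qmprc = 0;	/* counterpart of "hwstats->qmprc = 0;" above */
	for (int i = 0; i < NUM_QUEUES; i++)
		qmprc += hw_mprc[i];
}

int main(void)
{
	update_stats();
	update_stats();	/* second poll: still 100; without the reset, 200 */
	printf("qmprc = %llu\n", (unsigned long long)qmprc);
	return 0;
}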
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 55e252789db3..0213ad5a6ceb 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -174,10 +174,6 @@ static void wx_dma_sync_frag(struct wx_ring *rx_ring,
 				      skb_frag_off(frag),
 				      skb_frag_size(frag),
 				      DMA_FROM_DEVICE);
-
-	/* If the page was released, just unmap it. */
-	if (unlikely(WX_CB(skb)->page_released))
-		page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
 }
 
 static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
@@ -227,10 +223,6 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring,
 			     struct sk_buff *skb,
 			     int rx_buffer_pgcnt)
 {
-	if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
-		/* the page has been released from the ring */
-		WX_CB(skb)->page_released = true;
-
 	/* clear contents of rx_buffer */
 	rx_buffer->page = NULL;
 	rx_buffer->skb = NULL;
@@ -315,7 +307,7 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
 		return false;
 	dma = page_pool_get_dma_addr(page);
 
-	bi->page_dma = dma;
+	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
 
@@ -352,7 +344,7 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
 						 DMA_FROM_DEVICE);
 
 		rx_desc->read.pkt_addr =
-			cpu_to_le64(bi->page_dma + bi->page_offset);
+			cpu_to_le64(bi->dma + bi->page_offset);
 
 		rx_desc++;
 		bi++;
@@ -365,6 +357,8 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
 
 		/* clear the status bits for the next_to_use descriptor */
 		rx_desc->wb.upper.status_error = 0;
+		/* clear the length for the next_to_use descriptor */
+		rx_desc->wb.upper.length = 0;
 
 		cleaned_count--;
 	} while (cleaned_count);
@@ -2423,9 +2417,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
 		if (rx_buffer->skb) {
 			struct sk_buff *skb = rx_buffer->skb;
 
-			if (WX_CB(skb)->page_released)
-				page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
-
 			dev_kfree_skb(skb);
 		}
 
@@ -2449,6 +2440,9 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring)
 		}
 	}
 
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
 	rx_ring->next_to_alloc = 0;
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index c363379126c0..1a90fcede86b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -909,7 +909,6 @@ enum wx_reset_type {
 struct wx_cb {
 	dma_addr_t dma;
 	u16 append_cnt;	/* number of skb's appended */
-	bool page_released;
 	bool dma_released;
 };
 
@@ -998,7 +997,6 @@ struct wx_tx_buffer {
 struct wx_rx_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
-	dma_addr_t page_dma;
 	struct page *page;
 	unsigned int page_offset;
 };
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 73f9cb2e2844..f76ee8489504 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3416,7 +3416,8 @@ static int phy_probe(struct device *dev)
 	/* Get the LEDs from the device tree, and instantiate standard
 	 * LEDs for them.
 	 */
-	if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
+	if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev) &&
+	    !phy_driver_is_genphy_10g(phydev))
 		err = of_phy_leds(phydev);
 
 out:
@@ -3433,7 +3434,8 @@ static int phy_remove(struct device *dev)
 
 	cancel_delayed_work_sync(&phydev->state_queue);
 
-	if (IS_ENABLED(CONFIG_PHYLIB_LEDS))
+	if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev) &&
+	    !phy_driver_is_genphy_10g(phydev))
 		phy_leds_unregister(phydev);
 
 	phydev->state = PHY_DOWN;
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index c30ca415d1d3..36c73db44f77 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -689,6 +689,10 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
 			status);
 		return -ENODEV;
 	}
+	if (!dev->status) {
+		dev_err(&dev->udev->dev, "No status endpoint found");
+		return -ENODEV;
+	}
 	/* Initialize sierra private data */
 	priv = kzalloc(sizeof *priv, GFP_KERNEL);
 	if (!priv)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index d293087b426d..be5c2294610e 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -359,6 +359,7 @@ struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
 	flush |= skb->ip_summed != p->ip_summed;
 	flush |= skb->csum_level != p->csum_level;
 	flush |= NAPI_GRO_CB(p)->count >= 64;
+	skb_set_network_header(skb, skb_gro_receive_network_offset(skb));
 
 	if (flush || skb_gro_receive_list(p, skb))
 		mss = 1;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 85b5aa82d7d7..e0a6bfa95118 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -767,6 +767,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
 			NAPI_GRO_CB(skb)->flush = 1;
 			return NULL;
 		}
+		skb_set_network_header(skb, skb_gro_receive_network_offset(skb));
 		ret = skb_gro_receive_list(p, skb);
 	} else {
 		skb_gro_postpull_rcsum(skb, uh,
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 65831b4fee1f..616bf4c0c8fd 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -807,8 +807,8 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
 		} else {
 			im->mca_crcount = idev->mc_qrv;
 		}
-		in6_dev_put(pmc->idev);
 		ip6_mc_clear_src(pmc);
+		in6_dev_put(pmc->idev);
 		kfree_rcu(pmc, rcu);
 	}
 }
diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c
index 7c05ac846646..eccfa4203e96 100644
--- a/net/ipv6/rpl_iptunnel.c
+++ b/net/ipv6/rpl_iptunnel.c
@@ -129,13 +129,13 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
 			     struct dst_entry *cache_dst)
 {
 	struct ipv6_rpl_sr_hdr *isrh, *csrh;
-	const struct ipv6hdr *oldhdr;
+	struct ipv6hdr oldhdr;
 	struct ipv6hdr *hdr;
 	unsigned char *buf;
 	size_t hdrlen;
 	int err;
 
-	oldhdr = ipv6_hdr(skb);
+	memcpy(&oldhdr, ipv6_hdr(skb), sizeof(oldhdr));
 
 	buf = kcalloc(struct_size(srh, segments.addr, srh->segments_left), 2, GFP_ATOMIC);
 	if (!buf)
@@ -147,7 +147,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
 	memcpy(isrh, srh, sizeof(*isrh));
 	memcpy(isrh->rpl_segaddr, &srh->rpl_segaddr[1],
 	       (srh->segments_left - 1) * 16);
-	isrh->rpl_segaddr[srh->segments_left - 1] = oldhdr->daddr;
+	isrh->rpl_segaddr[srh->segments_left - 1] = oldhdr.daddr;
 
 	ipv6_rpl_srh_compress(csrh, isrh, &srh->rpl_segaddr[0],
 			      isrh->segments_left - 1);
@@ -169,7 +169,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
 	skb_mac_header_rebuild(skb);
 
 	hdr = ipv6_hdr(skb);
-	memmove(hdr, oldhdr, sizeof(*hdr));
+	memmove(hdr, &oldhdr, sizeof(*hdr));
 
 	isrh = (void *)hdr + sizeof(*hdr);
 	memcpy(isrh, csrh, hdrlen);
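The rpl_iptunnel change replaces a cached `const struct ipv6hdr *` with a stack copy: the old pointer aimed into skb data that later header expansion can reallocate, leaving it dangling by the time the daddr and memmove() uses run. A toy userspace model of that rule, with realloc() standing in for skb head reallocation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy header; the real code copies a struct ipv6hdr. */
struct hdr {
	char daddr[16];
};

int main(void)
{
	char *data = calloc(1, sizeof(struct hdr));
	struct hdr oldhdr;
	char *grown;

	if (!data)
		return 1;
	memcpy(data, "2001:db8::1", 12);

	/* Copy by value, like memcpy(&oldhdr, ipv6_hdr(skb), ...) above */
	memcpy(&oldhdr, data, sizeof(oldhdr));

	/* Equivalent of expanding skb headroom: the old storage may move,
	 * invalidating any pointer previously taken into it.
	 */
	grown = realloc(data, 4096);
	if (!grown) {
		free(data);
		return 1;
	}
	data = grown;

	/* Safe: reads the stack copy, not a possibly-stale pointer */
	printf("daddr: %s\n", oldhdr.daddr);
	free(data);
	return 0;
}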
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 421ced031289..1f898888b223 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -978,8 +978,9 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
 		if (subflow->mp_join)
 			goto reset;
 		subflow->mp_capable = 0;
+		if (!mptcp_try_fallback(ssk))
+			goto reset;
 		pr_fallback(msk);
-		mptcp_do_fallback(ssk);
 		return false;
 	}
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index feb01747d7d8..420d416e2603 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -765,8 +765,14 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
 
 	pr_debug("fail_seq=%llu\n", fail_seq);
 
-	if (!READ_ONCE(msk->allow_infinite_fallback))
+	/* After accepting the fail, we can't create any other subflows */
+	spin_lock_bh(&msk->fallback_lock);
+	if (!msk->allow_infinite_fallback) {
+		spin_unlock_bh(&msk->fallback_lock);
 		return;
+	}
+	msk->allow_subflows = false;
+	spin_unlock_bh(&msk->fallback_lock);
 
 	if (!subflow->fail_tout) {
 		pr_debug("send MP_FAIL response and infinite map\n");
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index edf14c2c2062..6a817a13b154 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -560,10 +560,9 @@ static bool mptcp_check_data_fin(struct sock *sk)
 
 static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk)
 {
-	if (READ_ONCE(msk->allow_infinite_fallback)) {
+	if (mptcp_try_fallback(ssk)) {
 		MPTCP_INC_STATS(sock_net(ssk),
 				MPTCP_MIB_DSSCORRUPTIONFALLBACK);
-		mptcp_do_fallback(ssk);
 	} else {
 		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET);
 		mptcp_subflow_reset(ssk);
@@ -792,7 +791,7 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
 {
 	mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
-	WRITE_ONCE(msk->allow_infinite_fallback, false);
+	msk->allow_infinite_fallback = false;
 	mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
 }
 
@@ -803,6 +802,14 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
 	if (sk->sk_state != TCP_ESTABLISHED)
 		return false;
 
+	spin_lock_bh(&msk->fallback_lock);
+	if (!msk->allow_subflows) {
+		spin_unlock_bh(&msk->fallback_lock);
+		return false;
+	}
+	mptcp_subflow_joined(msk, ssk);
+	spin_unlock_bh(&msk->fallback_lock);
+
 	/* attach to msk socket only after we are sure we will deal with it
 	 * at close time
 	 */
@@ -811,7 +818,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
 	mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
 	mptcp_sockopt_sync_locked(msk, ssk);
-	mptcp_subflow_joined(msk, ssk);
 	mptcp_stop_tout_timer(sk);
 	__mptcp_propagate_sndbuf(sk, ssk);
 	return true;
@@ -1136,10 +1142,14 @@ static void mptcp_update_infinite_map(struct mptcp_sock *msk,
 	mpext->infinite_map = 1;
 	mpext->data_len = 0;
 
+	if (!mptcp_try_fallback(ssk)) {
+		mptcp_subflow_reset(ssk);
+		return;
+	}
+
 	MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
 	mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
 	pr_fallback(msk);
-	mptcp_do_fallback(ssk);
 }
 
 #define MPTCP_MAX_GSO_SIZE	(GSO_LEGACY_MAX_SIZE - (MAX_TCP_HEADER + 1))
@@ -2543,9 +2553,9 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
 
 static void __mptcp_retrans(struct sock *sk)
 {
+	struct mptcp_sendmsg_info info = { .data_lock_held = true, };
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct mptcp_subflow_context *subflow;
-	struct mptcp_sendmsg_info info = {};
 	struct mptcp_data_frag *dfrag;
 	struct sock *ssk;
 	int ret, err;
@@ -2590,6 +2600,18 @@ static void __mptcp_retrans(struct sock *sk)
 		info.sent = 0;
 		info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len :
 						dfrag->already_sent;
+
+		/*
+		 * make the whole retrans decision, xmit, disallow
+		 * fallback atomic
+		 */
+		spin_lock_bh(&msk->fallback_lock);
+		if (__mptcp_check_fallback(msk)) {
+			spin_unlock_bh(&msk->fallback_lock);
+			release_sock(ssk);
+			return;
+		}
+
 		while (info.sent < info.limit) {
 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
 			if (ret <= 0)
@@ -2603,8 +2625,9 @@ static void __mptcp_retrans(struct sock *sk)
 			len = max(copied, len);
 			tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
 				 info.size_goal);
-			WRITE_ONCE(msk->allow_infinite_fallback, false);
+			msk->allow_infinite_fallback = false;
 		}
+		spin_unlock_bh(&msk->fallback_lock);
 
 		release_sock(ssk);
 	}
@@ -2730,7 +2753,8 @@ static void __mptcp_init_sock(struct sock *sk)
 	WRITE_ONCE(msk->first, NULL);
 	inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
 	WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
-	WRITE_ONCE(msk->allow_infinite_fallback, true);
+	msk->allow_infinite_fallback = true;
+	msk->allow_subflows = true;
 	msk->recovery = false;
 	msk->subflow_id = 1;
 	msk->last_data_sent = tcp_jiffies32;
@@ -2738,6 +2762,7 @@ static void __mptcp_init_sock(struct sock *sk)
 	msk->last_ack_recv = tcp_jiffies32;
 
 	mptcp_pm_data_init(msk);
+	spin_lock_init(&msk->fallback_lock);
 
 	/* re-use the csk retrans timer for MPTCP-level retrans */
 	timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
@@ -3117,7 +3142,16 @@ static int mptcp_disconnect(struct sock *sk, int flags)
 	 * subflow
 	 */
 	mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
+
+	/* The first subflow is already in TCP_CLOSE status, the following
+	 * can't overlap with a fallback anymore
+	 */
+	spin_lock_bh(&msk->fallback_lock);
+	msk->allow_subflows = true;
+	msk->allow_infinite_fallback = true;
 	WRITE_ONCE(msk->flags, 0);
+	spin_unlock_bh(&msk->fallback_lock);
+
 	msk->cb_flags = 0;
 	msk->recovery = false;
 	WRITE_ONCE(msk->can_ack, false);
@@ -3524,7 +3558,13 @@ bool mptcp_finish_join(struct sock *ssk)
 
 	/* active subflow, already present inside the conn_list */
 	if (!list_empty(&subflow->node)) {
+		spin_lock_bh(&msk->fallback_lock);
+		if (!msk->allow_subflows) {
+			spin_unlock_bh(&msk->fallback_lock);
+			return false;
+		}
 		mptcp_subflow_joined(msk, ssk);
+		spin_unlock_bh(&msk->fallback_lock);
 		mptcp_propagate_sndbuf(parent, ssk);
 		return true;
 	}
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 3dd11dd3ba16..6ec245fd2778 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -346,10 +346,16 @@ struct mptcp_sock {
 		u64	rtt_us; /* last maximum rtt of subflows */
 	} rcvq_space;
 	u8		scaling_ratio;
+	bool		allow_subflows;
 
 	u32		subflow_id;
 	u32		setsockopt_seq;
 	char		ca_name[TCP_CA_NAME_MAX];
+
+	spinlock_t	fallback_lock;	/* protects fallback,
+					 * allow_infinite_fallback and
+					 * allow_join
+					 */
 };
 
 #define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
@@ -1216,15 +1222,22 @@ static inline bool mptcp_check_fallback(const struct sock *sk)
 	return __mptcp_check_fallback(msk);
 }
 
-static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
+static inline bool __mptcp_try_fallback(struct mptcp_sock *msk)
 {
 	if (__mptcp_check_fallback(msk)) {
 		pr_debug("TCP fallback already done (msk=%p)\n", msk);
-		return;
+		return true;
 	}
-	if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback)))
-		return;
+	spin_lock_bh(&msk->fallback_lock);
+	if (!msk->allow_infinite_fallback) {
+		spin_unlock_bh(&msk->fallback_lock);
+		return false;
+	}
+
+	msk->allow_subflows = false;
 	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
+	spin_unlock_bh(&msk->fallback_lock);
+	return true;
 }
 
 static inline bool __mptcp_has_initial_subflow(const struct mptcp_sock *msk)
@@ -1236,14 +1249,15 @@ static inline bool __mptcp_has_initial_subflow(const struct mptcp_sock *msk)
 			TCPF_SYN_RECV | TCPF_LISTEN));
 }
 
-static inline void mptcp_do_fallback(struct sock *ssk)
+static inline bool mptcp_try_fallback(struct sock *ssk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	struct sock *sk = subflow->conn;
 	struct mptcp_sock *msk;
 
 	msk = mptcp_sk(sk);
-	__mptcp_do_fallback(msk);
+	if (!__mptcp_try_fallback(msk))
+		return false;
 	if (READ_ONCE(msk->snd_data_fin_enable) && !(ssk->sk_shutdown & SEND_SHUTDOWN)) {
 		gfp_t saved_allocation = ssk->sk_allocation;
 
@@ -1255,6 +1269,7 @@ static inline void mptcp_do_fallback(struct sock *ssk)
 		tcp_shutdown(ssk, SEND_SHUTDOWN);
 		ssk->sk_allocation = saved_allocation;
 	}
+	return true;
 }
 
 #define pr_fallback(a)	pr_debug("%s:fallback to TCP (msk=%p)\n", __func__, a)
@@ -1264,7 +1279,7 @@ static inline void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
 {
 	pr_fallback(msk);
 	subflow->request_mptcp = 0;
-	__mptcp_do_fallback(msk);
+	WARN_ON_ONCE(!__mptcp_try_fallback(msk));
 }
 
 static inline bool mptcp_check_infinite_map(struct sk_buff *skb)
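Taken together, the protocol.h hunks above encode the rule the whole MPTCP series enforces: falling back to plain TCP and admitting another subflow are mutually exclusive transitions, so both are attempted under msk->fallback_lock and may now fail instead of racing. A small userspace model of the same protocol, with a pthread mutex standing in for the kernel spinlock (names mirror the patch, but the program itself is illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t fallback_lock = PTHREAD_MUTEX_INITIALIZER;
static bool allow_infinite_fallback = true;
static bool allow_subflows = true;
static bool fallback_done;

/* Counterpart of __mptcp_try_fallback(): may fail, caller resets on false. */
static bool try_fallback(void)
{
	bool ok;

	pthread_mutex_lock(&fallback_lock);
	ok = allow_infinite_fallback;
	if (ok) {
		allow_subflows = false;	/* no joins after falling back */
		fallback_done = true;
	}
	pthread_mutex_unlock(&fallback_lock);
	return ok;
}

/* Counterpart of the join paths: joining forbids any later fallback. */
static bool try_join(void)
{
	bool ok;

	pthread_mutex_lock(&fallback_lock);
	ok = allow_subflows;
	if (ok)
		allow_infinite_fallback = false;
	pthread_mutex_unlock(&fallback_lock);
	return ok;
}

int main(void)
{
	printf("join: %d\n", try_join());		/* 1 */
	printf("fallback: %d\n", try_fallback());	/* 0: a join happened first */
	printf("fallback_done: %d\n", fallback_done);	/* 0 */
	return 0;
}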
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 15613d691bfe..1802bc5435a1 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -544,9 +544,11 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
 	mptcp_get_options(skb, &mp_opt);
 	if (subflow->request_mptcp) {
 		if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
+			if (!mptcp_try_fallback(sk))
+				goto do_reset;
+
 			MPTCP_INC_STATS(sock_net(sk),
 					MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
-			mptcp_do_fallback(sk);
 			pr_fallback(msk);
 			goto fallback;
 		}
@@ -1300,20 +1302,29 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 		mptcp_schedule_work(sk);
 }
 
-static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
+static bool mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
 {
 	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
 	unsigned long fail_tout;
 
+	/* we are really failing, prevent any later subflow join */
+	spin_lock_bh(&msk->fallback_lock);
+	if (!msk->allow_infinite_fallback) {
+		spin_unlock_bh(&msk->fallback_lock);
+		return false;
+	}
+	msk->allow_subflows = false;
+	spin_unlock_bh(&msk->fallback_lock);
+
 	/* graceful failure can happen only on the MPC subflow */
 	if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
-		return;
+		return false;
 
 	/* since the close timeout take precedence on the fail one,
 	 * no need to start the latter when the first is already set
 	 */
 	if (sock_flag((struct sock *)msk, SOCK_DEAD))
-		return;
+		return true;
 
 	/* we don't need extreme accuracy here, use a zero fail_tout as special
 	 * value meaning no fail timeout at all;
@@ -1325,6 +1336,7 @@ static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
 	tcp_send_ack(ssk);
 
 	mptcp_reset_tout_timer(msk, subflow->fail_tout);
+	return true;
 }
 
 static bool subflow_check_data_avail(struct sock *ssk)
@@ -1385,17 +1397,16 @@ fallback:
 		    (subflow->mp_join || subflow->valid_csum_seen)) {
 			subflow->send_mp_fail = 1;
 
-			if (!READ_ONCE(msk->allow_infinite_fallback)) {
+			if (!mptcp_subflow_fail(msk, ssk)) {
 				subflow->reset_transient = 0;
 				subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
 				goto reset;
 			}
-			mptcp_subflow_fail(msk, ssk);
 			WRITE_ONCE(subflow->data_avail, true);
 			return true;
 		}
 
-		if (!READ_ONCE(msk->allow_infinite_fallback)) {
+		if (!mptcp_try_fallback(ssk)) {
 			/* fatal protocol error, close the socket.
 			 * subflow_error_report() will introduce the appropriate barriers
 			 */
@@ -1413,8 +1424,6 @@ reset:
 			WRITE_ONCE(subflow->data_avail, false);
 			return false;
 		}
-
-		mptcp_do_fallback(ssk);
 	}
 
 	skb = skb_peek(&ssk->sk_receive_queue);
@@ -1679,7 +1688,6 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_pm_local *local,
 	/* discard the subflow socket */
 	mptcp_sock_graft(ssk, sk->sk_socket);
 	iput(SOCK_INODE(sf));
-	WRITE_ONCE(msk->allow_infinite_fallback, false);
 	mptcp_stop_tout_timer(sk);
 	return 0;
 
@@ -1851,7 +1859,7 @@ static void subflow_state_change(struct sock *sk)
 	msk = mptcp_sk(parent);
 	if (subflow_simultaneous_connect(sk)) {
-		mptcp_do_fallback(sk);
+		WARN_ON_ONCE(!mptcp_try_fallback(sk));
 		pr_fallback(msk);
 		subflow->conn_finished = 1;
 		mptcp_propagate_state(parent, sk, subflow, NULL);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3d43f3eae759..be608f07441f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2785,7 +2785,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 	int len_sum = 0;
 	int status = TP_STATUS_AVAILABLE;
 	int hlen, tlen, copylen = 0;
-	long timeo = 0;
+	long timeo;
 
 	mutex_lock(&po->pg_vec_lock);
 
@@ -2839,22 +2839,28 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz)
 		size_max = dev->mtu + reserve + VLAN_HLEN;
 
+	timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
 	reinit_completion(&po->skb_completion);
 
 	do {
 		ph = packet_current_frame(po, &po->tx_ring,
 					  TP_STATUS_SEND_REQUEST);
 		if (unlikely(ph == NULL)) {
-			if (need_wait && skb) {
-				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
+			/* Note: packet_read_pending() might be slow if we
+			 * have to call it as it's per_cpu variable, but in
+			 * fast-path we don't have to call it, only when ph
+			 * is NULL, we need to check the pending_refcnt.
+			 */
+			if (need_wait && packet_read_pending(&po->tx_ring)) {
 				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
 				if (timeo <= 0) {
 					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
 					goto out_put;
 				}
-			}
-			/* check for additional frames */
-			continue;
+				/* check for additional frames */
+				continue;
+			} else
+				break;
 		}
 
 		skb = NULL;
@@ -2943,14 +2949,7 @@ tpacket_error:
 		}
 		packet_increment_head(&po->tx_ring);
 		len_sum += tp_len;
-	} while (likely((ph != NULL) ||
-		 /* Note: packet_read_pending() might be slow if we have
-		  * to call it as it's per_cpu variable, but in fast-path
-		  * we already short-circuit the loop with the first
-		  * condition, and luckily don't have to go that path
-		  * anyway.
-		  */
-		 (need_wait && packet_read_pending(&po->tx_ring))));
+	} while (1);
 
 	err = len_sum;
 	goto out_put;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index bcce36608871..f0eb70353744 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -412,7 +412,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	bool existing = false;
 	struct nlattr *tb[TCA_QFQ_MAX + 1];
 	struct qfq_aggregate *new_agg = NULL;
-	u32 weight, lmax, inv_w;
+	u32 weight, lmax, inv_w, old_weight, old_lmax;
 	int err;
 	int delta_w;
 
@@ -443,12 +443,16 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	inv_w = ONE_FP / weight;
 	weight = ONE_FP / inv_w;
 
-	if (cl != NULL &&
-	    lmax == cl->agg->lmax &&
-	    weight == cl->agg->class_weight)
-		return 0; /* nothing to change */
+	if (cl != NULL) {
+		sch_tree_lock(sch);
+		old_weight = cl->agg->class_weight;
+		old_lmax = cl->agg->lmax;
+		sch_tree_unlock(sch);
+		if (lmax == old_lmax && weight == old_weight)
+			return 0; /* nothing to change */
+	}
 
-	delta_w = weight - (cl ? cl->agg->class_weight : 0);
+	delta_w = weight - (cl ? old_weight : 0);
 
 	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
 		NL_SET_ERR_MSG_FMT_MOD(extack,
@@ -555,10 +559,10 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg,
 
 	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
+	qfq_destroy_class(sch, cl);
 
 	sch_tree_unlock(sch);
 
-	qfq_destroy_class(sch, cl);
 	return 0;
 }
 
@@ -625,6 +629,7 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
 {
 	struct qfq_class *cl = (struct qfq_class *)arg;
 	struct nlattr *nest;
+	u32 class_weight, lmax;
 
 	tcm->tcm_parent	= TC_H_ROOT;
 	tcm->tcm_handle	= cl->common.classid;
@@ -633,8 +638,13 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
-	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
-	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
+
+	sch_tree_lock(sch);
+	class_weight = cl->agg->class_weight;
+	lmax = cl->agg->lmax;
+	sch_tree_unlock(sch);
+	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, class_weight) ||
+	    nla_put_u32(skb, TCA_QFQ_LMAX, lmax))
 		goto nla_put_failure;
 
 	return nla_nest_end(skb, nest);
@@ -651,8 +661,10 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 
 	memset(&xstats, 0, sizeof(xstats));
 
+	sch_tree_lock(sch);
 	xstats.weight = cl->agg->class_weight;
 	xstats.lmax = cl->agg->lmax;
+	sch_tree_unlock(sch);
 
 	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
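The sch_qfq hunks follow one pattern: every path that reports class parameters now copies cl->agg->class_weight and cl->agg->lmax under sch_tree_lock() instead of dereferencing the aggregate while a concurrent change or delete can replace or free it. A minimal userspace model of that snapshot-under-lock rule, with a pthread mutex standing in for the qdisc tree lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

struct agg {
	unsigned int class_weight;
	unsigned int lmax;
};

/* May be swapped or freed by a concurrent writer holding tree_lock. */
static struct agg *cl_agg;

static void dump_class(void)
{
	unsigned int weight, lmax;

	/* take a consistent snapshot under the lock ... */
	pthread_mutex_lock(&tree_lock);
	weight = cl_agg->class_weight;
	lmax = cl_agg->lmax;
	pthread_mutex_unlock(&tree_lock);

	/* ... and only use the copies afterwards */
	printf("weight=%u lmax=%u\n", weight, lmax);
}

int main(void)
{
	struct agg a = { .class_weight = 1, .lmax = 2048 };

	cl_agg = &a;
	dump_class();
	return 0;
}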
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 3760131f1484..1882bab8e00e 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -30,6 +30,10 @@
 #include <linux/splice.h>
 #include <net/sock.h>
+#include <net/inet_common.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#endif
 #include <net/tcp.h>
 #include <net/smc.h>
 #include <asm/ioctls.h>
@@ -360,6 +364,16 @@ static void smc_destruct(struct sock *sk)
 		return;
 	if (!sock_flag(sk, SOCK_DEAD))
 		return;
+	switch (sk->sk_family) {
+	case AF_INET:
+		inet_sock_destruct(sk);
+		break;
+#if IS_ENABLED(CONFIG_IPV6)
+	case AF_INET6:
+		inet6_sock_destruct(sk);
+		break;
+#endif
+	}
 }
 
 static struct lock_class_key smc_key;
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 78ae10d06ed2..2c9084963739 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -283,10 +283,10 @@ struct smc_connection {
 };
 
 struct smc_sock {				/* smc sock container */
-	struct sock		sk;
-#if IS_ENABLED(CONFIG_IPV6)
-	struct ipv6_pinfo	*pinet6;
-#endif
+	union {
+		struct sock		sk;
+		struct inet_sock	icsk_inet;
+	};
 	struct socket		*clcsock;	/* internal tcp socket */
 	void			(*clcsk_state_change)(struct sock *sk);
 						/* original stat_change fct. */
diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
index 1dc337c709f8..b17e032a6d75 100755
--- a/tools/testing/selftests/net/udpgro.sh
+++ b/tools/testing/selftests/net/udpgro.sh
@@ -48,7 +48,7 @@ run_one() {
 
 	cfg_veth
 
-	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} &
+	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${rx_args} &
 	local PID1=$!
 
 	wait_local_port_listen ${PEER_NS} 8000 udp
@@ -95,7 +95,7 @@ run_one_nat() {
 	# will land on the 'plain' one
 	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -G ${family} -b ${addr1} -n 0 &
 	local PID1=$!
-	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${family} -b ${addr2%/*} ${rx_args} &
+	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${family} -b ${addr2%/*} ${rx_args} &
 	local PID2=$!
 
 	wait_local_port_listen "${PEER_NS}" 8000 udp
@@ -117,9 +117,9 @@ run_one_2sock() {
 
 	cfg_veth
 
-	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 10 ${rx_args} -p 12345 &
+	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 1000 -R 100 ${rx_args} -p 12345 &
 	local PID1=$!
-	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 10 ${rx_args} &
+	ip netns exec "${PEER_NS}" ./udpgso_bench_rx -C 2000 -R 100 ${rx_args} &
 	local PID2=$!
 
 	wait_local_port_listen "${PEER_NS}" 12345 udp
diff --git a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
index 5c6851e8d311..b344570e7f40 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
@@ -672,5 +672,71 @@
         "teardown": [
             "$TC qdisc del dev $DUMMY root handle 1: drr"
         ]
+    },
+    {
+        "id": "be28",
+        "name": "Try to add fq_codel qdisc as a child of an hhf qdisc",
+        "category": [
+            "qdisc",
+            "fq_codel",
+            "hhf"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DUMMY root handle a: hhf"
+        ],
+        "cmdUnderTest": "$TC qdisc add dev $DUMMY parent a: handle b: fq_codel",
+        "expExitCode": "2",
+        "verifyCmd": "$TC -j qdisc ls dev $DUMMY handle b:",
+        "matchJSON": [],
+        "teardown": [
+            "$TC qdisc del dev $DUMMY root"
+        ]
+    },
+    {
+        "id": "fcb5",
+        "name": "Try to add pie qdisc as a child of a drr qdisc",
+        "category": [
+            "qdisc",
+            "pie",
+            "drr"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DUMMY root handle a: drr"
+        ],
+        "cmdUnderTest": "$TC qdisc add dev $DUMMY parent a: handle b: pie",
+        "expExitCode": "2",
+        "verifyCmd": "$TC -j qdisc ls dev $DUMMY handle b:",
+        "matchJSON": [],
+        "teardown": [
+            "$TC qdisc del dev $DUMMY root"
+        ]
+    },
+    {
+        "id": "7801",
+        "name": "Try to add fq qdisc as a child of an inexistent hfsc class",
+        "category": [
+            "qdisc",
+            "sfq",
+            "hfsc"
+        ],
+        "plugins": {
+            "requires": "nsPlugin"
+        },
+        "setup": [
+            "$TC qdisc add dev $DUMMY root handle a: hfsc"
+        ],
+        "cmdUnderTest": "$TC qdisc add dev $DUMMY parent a:fff2 sfq limit 4",
+        "expExitCode": "2",
+        "verifyCmd": "$TC -j qdisc ls dev $DUMMY handle b:",
+        "matchJSON": [],
+        "teardown": [
+            "$TC qdisc del dev $DUMMY root"
+        ]
     }
 ]