author    | David S. Miller <davem@davemloft.net> | 2020-03-31 06:48:43 +0300
committer | David S. Miller <davem@davemloft.net> | 2020-03-31 06:48:43 +0300
commit    | 5a470b1a63ac211e01a93de9d913753d64a21d9a (patch)
tree      | c7b1c6914425e6e73fe25c282d610a34bdba7a24
parent    | 3902baf9abfa320b21e38fe206d66d5e6688e721 (diff)
parent    | 0141317611ab913d6c227b82f3dd02ff3e6fc5d8 (diff)
download  | linux-5a470b1a63ac211e01a93de9d913753d64a21d9a.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
-rw-r--r-- | drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1
-rw-r--r-- | drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 18
-rw-r--r-- | drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 10
-rw-r--r-- | drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 54
-rw-r--r-- | net/core/skbuff.c | 1
-rw-r--r-- | net/ipv4/fib_trie.c | 3
-rw-r--r-- | net/ipv4/ip_tunnel.c | 6
-rw-r--r-- | net/ipv4/udp_offload.c | 1
-rw-r--r-- | net/mac80211/tx.c | 3
-rw-r--r-- | net/sctp/ipv6.c | 20
-rw-r--r-- | net/sctp/protocol.c | 28
-rw-r--r-- | net/sctp/socket.c | 31
12 files changed, 115 insertions, 61 deletions
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a3e4081b84ba..5587605d6deb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -78,6 +78,7 @@ enum hns_desc_type {
 	DESC_TYPE_SKB,
+	DESC_TYPE_FRAGLIST_SKB,
 	DESC_TYPE_PAGE,
 };
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 8e04d3909321..da98fd7c8eca 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1107,6 +1107,10 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 			return ret;
 
 		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+	} else if (type == DESC_TYPE_FRAGLIST_SKB) {
+		struct sk_buff *skb = (struct sk_buff *)priv;
+
+		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
 	} else {
 		frag = (skb_frag_t *)priv;
 		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
@@ -1144,8 +1148,9 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
 		desc_cb->priv = priv;
 		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
-		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
-				DESC_TYPE_SKB : DESC_TYPE_PAGE;
+		desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB ||
+				  type == DESC_TYPE_SKB) && !k) ?
+				type : DESC_TYPE_PAGE;
 
 		/* now, fill the descriptor */
 		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
@@ -1354,7 +1359,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
 		ring_ptr_move_bw(ring, next_to_use);
 
 		/* unmap the descriptor dma address */
-		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
+		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
+		    ring->desc_cb[ring->next_to_use].type ==
+		    DESC_TYPE_FRAGLIST_SKB)
 			dma_unmap_single(dev,
 					 ring->desc_cb[ring->next_to_use].dma,
 					 ring->desc_cb[ring->next_to_use].length,
@@ -1447,7 +1454,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 		goto out;
 
 	skb_walk_frags(skb, frag_skb) {
-		ret = hns3_fill_skb_to_desc(ring, frag_skb, DESC_TYPE_PAGE);
+		ret = hns3_fill_skb_to_desc(ring, frag_skb,
+					    DESC_TYPE_FRAGLIST_SKB);
 		if (unlikely(ret < 0))
 			goto fill_err;
 
@@ -2356,7 +2364,7 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
 			      struct hns3_desc_cb *cb)
 {
-	if (cb->type == DESC_TYPE_SKB)
+	if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB)
 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
 				 ring_to_dma_dir(ring));
 	else if (cb->length)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 75d0d0fcd69b..a758f9ae32be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -6768,7 +6768,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
 	struct hclge_dev *hdev = vport->back;
 
 	if (enable) {
-		hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+		hclge_task_schedule(hdev, 0);
 	} else {
 		/* Set the DOWN flag here to disable link updating */
 		set_bit(HCLGE_STATE_DOWN, &hdev->state);
@@ -8986,6 +8986,12 @@ static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
 
+	/* When nic is down, the service task is not running, doesn't update
+	 * the port information per second. Query the port information before
+	 * return the media type, ensure getting the correct media information.
+	 */
+	hclge_update_port_info(hdev);
+
 	if (media_type)
 		*media_type = hdev->hw.mac.media_type;
@@ -10674,7 +10680,7 @@ static int hclge_init(void)
 {
 	pr_info("%s is initializing\n", HCLGE_NAME);
 
-	hclge_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGE_NAME);
+	hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
 	if (!hclge_wq) {
 		pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 2c1d01fe05a4..e02d427131ee 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2149,50 +2149,51 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
 	return ret;
 }
 
-static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
 {
 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	int ret;
+	struct hclgevf_rss_tuple_cfg *tuple_sets;
 	u32 i;
 
+	rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
 	rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
-
+	tuple_sets = &rss_cfg->rss_tuple_sets;
 	if (hdev->pdev->revision >= 0x21) {
 		rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
 		memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
 		       HCLGEVF_RSS_KEY_SIZE);
 
+		tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+		tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+		tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+		tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+	}
+
+	/* Initialize RSS indirect table */
+	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	int ret;
+
+	if (hdev->pdev->revision >= 0x21) {
 		ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
 					       rss_cfg->rss_hash_key);
 		if (ret)
 			return ret;
 
-		rss_cfg->rss_tuple_sets.ipv4_tcp_en =
-			HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv4_udp_en =
-			HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv4_sctp_en =
-			HCLGEVF_RSS_INPUT_TUPLE_SCTP;
-		rss_cfg->rss_tuple_sets.ipv4_fragment_en =
-			HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv6_tcp_en =
-			HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv6_udp_en =
-			HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-		rss_cfg->rss_tuple_sets.ipv6_sctp_en =
-			HCLGEVF_RSS_INPUT_TUPLE_SCTP;
-		rss_cfg->rss_tuple_sets.ipv6_fragment_en =
-			HCLGEVF_RSS_INPUT_TUPLE_OTHER;
-
 		ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
 		if (ret)
 			return ret;
 	}
 
-	/* Initialize RSS indirect table */
-	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
-		rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
-
 	ret = hclgevf_set_rss_indir_table(hdev);
 	if (ret)
 		return ret;
@@ -2793,6 +2794,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		goto err_config;
 
 	/* Initialize RSS for this VF */
+	hclgevf_rss_init_cfg(hdev);
 	ret = hclgevf_rss_init_hw(hdev);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
@@ -2967,6 +2969,8 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
 	for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
 		rss_indir[i] = i % kinfo->rss_size;
 
+	hdev->rss_cfg.rss_size = kinfo->rss_size;
+
 	ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
 	if (ret)
 		dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
@@ -3220,7 +3224,7 @@ static int hclgevf_init(void)
 {
 	pr_info("%s is initializing\n", HCLGEVF_NAME);
 
-	hclgevf_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, HCLGEVF_NAME);
+	hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
 	if (!hclgevf_wq) {
 		pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
 		return -ENOMEM;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 621b4479fee1..7e29590482ce 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3668,6 +3668,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
 		skb_push(nskb, -skb_network_offset(nskb) + offset);
 
+		skb_release_head_state(nskb);
 		__copy_skb_header(nskb, skb);
 
 		skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index b01b748df9dd..4f334b425538 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2572,6 +2572,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
 		   " %zd bytes, size of tnode: %zd bytes.\n",
 		   LEAF_SIZE, TNODE_SIZE(0));
 
+	rcu_read_lock();
 	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
 		struct hlist_head *head = &net->ipv4.fib_table_hash[h];
 		struct fib_table *tb;
@@ -2591,7 +2592,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v)
 			trie_show_usage(seq, t->stats);
 #endif
 		}
+		cond_resched_rcu();
 	}
+	rcu_read_unlock();
 
 	return 0;
 }
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 74e1d964a615..cd4b84310d92 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -142,11 +142,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 			cand = t;
 	}
 
-	if (flags & TUNNEL_NO_KEY)
-		goto skip_key_lookup;
-
 	hlist_for_each_entry_rcu(t, head, hash_node) {
-		if (t->parms.i_key != key ||
+		if ((!(flags & TUNNEL_NO_KEY) && t->parms.i_key != key) ||
 		    t->parms.iph.saddr != 0 ||
 		    t->parms.iph.daddr != 0 ||
 		    !(t->dev->flags & IFF_UP))
@@ -158,7 +155,6 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 			cand = t;
 	}
 
-skip_key_lookup:
 	if (cand)
 		return cand;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 1a98583a79f4..e67a66fbf27b 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -453,6 +453,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 	unsigned int off = skb_gro_offset(skb);
 	int flush = 1;
 
+	NAPI_GRO_CB(skb)->is_flist = 0;
 	if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
 		NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled: 1;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index b65284f210c3..82846aca86d9 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3611,7 +3611,8 @@ begin:
 	 * Drop unicast frames to unauthorised stations unless they are
 	 * EAPOL frames from the local station.
 	 */
-	if (unlikely(!ieee80211_vif_is_mesh(&tx.sdata->vif) &&
+	if (unlikely(ieee80211_is_data(hdr->frame_control) &&
+		     !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
 		     tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
 		     !is_multicast_ether_addr(hdr->addr1) &&
 		     !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index bc734cfaa29e..c87af430107a 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -228,7 +228,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 {
 	struct sctp_association *asoc = t->asoc;
 	struct dst_entry *dst = NULL;
-	struct flowi6 *fl6 = &fl->u.ip6;
+	struct flowi _fl;
+	struct flowi6 *fl6 = &_fl.u.ip6;
 	struct sctp_bind_addr *bp;
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct sctp_sockaddr_entry *laddr;
@@ -238,7 +239,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	enum sctp_scope scope;
 	__u8 matchlen = 0;
 
-	memset(fl6, 0, sizeof(struct flowi6));
+	memset(&_fl, 0, sizeof(_fl));
 	fl6->daddr = daddr->v6.sin6_addr;
 	fl6->fl6_dport = daddr->v6.sin6_port;
 	fl6->flowi6_proto = IPPROTO_SCTP;
@@ -276,8 +277,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	rcu_read_unlock();
 
 	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p);
-	if (!asoc || saddr)
+	if (!asoc || saddr) {
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 		goto out;
+	}
 
 	bp = &asoc->base.bind_addr;
 	scope = sctp_scope(daddr);
@@ -300,6 +304,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		if ((laddr->a.sa.sa_family == AF_INET6) &&
 		    (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
 			rcu_read_unlock();
+			t->dst = dst;
+			memcpy(fl, &_fl, sizeof(_fl));
 			goto out;
 		}
 	}
@@ -338,6 +344,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 			if (!IS_ERR_OR_NULL(dst))
 				dst_release(dst);
 			dst = bdst;
+			t->dst = dst;
+			memcpy(fl, &_fl, sizeof(_fl));
 			break;
 		}
@@ -351,6 +359,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 			dst_release(dst);
 		dst = bdst;
 		matchlen = bmatchlen;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 	}
 	rcu_read_unlock();
@@ -359,14 +369,12 @@ out:
 		struct rt6_info *rt;
 
 		rt = (struct rt6_info *)dst;
-		t->dst = dst;
 		t->dst_cookie = rt6_get_cookie(rt);
 		pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n",
 			 &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
-			 &fl6->saddr);
+			 &fl->u.ip6.saddr);
 	} else {
 		t->dst = NULL;
-
 		pr_debug("no route\n");
 	}
 }
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 78af2fcf90cc..092d1afdee0d 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -409,7 +409,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 {
 	struct sctp_association *asoc = t->asoc;
 	struct rtable *rt;
-	struct flowi4 *fl4 = &fl->u.ip4;
+	struct flowi _fl;
+	struct flowi4 *fl4 = &_fl.u.ip4;
 	struct sctp_bind_addr *bp;
 	struct sctp_sockaddr_entry *laddr;
 	struct dst_entry *dst = NULL;
@@ -419,7 +420,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 	if (t->dscp & SCTP_DSCP_SET_MASK)
 		tos = t->dscp & SCTP_DSCP_VAL_MASK;
 
-	memset(fl4, 0x0, sizeof(struct flowi4));
+	memset(&_fl, 0x0, sizeof(_fl));
 	fl4->daddr = daddr->v4.sin_addr.s_addr;
 	fl4->fl4_dport = daddr->v4.sin_port;
 	fl4->flowi4_proto = IPPROTO_SCTP;
@@ -438,8 +439,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		 &fl4->saddr);
 
 	rt = ip_route_output_key(sock_net(sk), fl4);
-	if (!IS_ERR(rt))
+	if (!IS_ERR(rt)) {
 		dst = &rt->dst;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
+	}
 
 	/* If there is no association or if a source address is passed, no
 	 * more validation is required.
@@ -502,27 +506,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
				     false);
 		if (!odev || odev->ifindex != fl4->flowi4_oif) {
-			if (!dst)
+			if (!dst) {
 				dst = &rt->dst;
-			else
+				t->dst = dst;
+				memcpy(fl, &_fl, sizeof(_fl));
+			} else {
 				dst_release(&rt->dst);
+			}
 			continue;
 		}
 
 		dst_release(dst);
 		dst = &rt->dst;
+		t->dst = dst;
+		memcpy(fl, &_fl, sizeof(_fl));
 		break;
 	}
 
 out_unlock:
 	rcu_read_unlock();
 out:
-	t->dst = dst;
-	if (dst)
+	if (dst) {
 		pr_debug("rt_dst:%pI4, rt_src:%pI4\n",
-			 &fl4->daddr, &fl4->saddr);
-	else
+			 &fl->u.ip4.daddr, &fl->u.ip4.saddr);
+	} else {
+		t->dst = NULL;
 		pr_debug("no route\n");
+	}
 }
 
 /* For v4, the source address is cached in the route entry(dst). So no need
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index fed26a1e9518..827a9903ee28 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -147,29 +147,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk)
 	skb_orphan(chunk->skb);
 }
 
+#define traverse_and_process()	\
+do {				\
+	msg = chunk->msg;	\
+	if (msg == prev_msg)	\
+		continue;	\
+	list_for_each_entry(c, &msg->chunks, frag_list) {	\
+		if ((clear && asoc->base.sk == c->skb->sk) ||	\
+		    (!clear && asoc->base.sk != c->skb->sk))	\
+			cb(c);	\
+	}			\
+	prev_msg = msg;		\
+} while (0)
+
 static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+				       bool clear,
 				       void (*cb)(struct sctp_chunk *))
 {
+	struct sctp_datamsg *msg, *prev_msg = NULL;
 	struct sctp_outq *q = &asoc->outqueue;
+	struct sctp_chunk *chunk, *c;
 	struct sctp_transport *t;
-	struct sctp_chunk *chunk;
 
 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
 		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
-			cb(chunk);
+			traverse_and_process();
 
 	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->sacked, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
-		cb(chunk);
+		traverse_and_process();
 
 	list_for_each_entry(chunk, &q->out_chunk_list, list)
-		cb(chunk);
+		traverse_and_process();
 }
 
 static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk,
@@ -9574,9 +9589,9 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
 	 * paths won't try to lock it and then oldsk.
 	 */
 	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
-	sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
+	sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w);
 	sctp_assoc_migrate(assoc, newsk);
-	sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
+	sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w);
 
 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.