author    David S. Miller <davem@davemloft.net>  2020-05-17 22:35:35 +0300
committer David S. Miller <davem@davemloft.net>  2020-05-17 22:35:35 +0300
commit    9740a7ae6d5208897bf3ef49e8595dc4cfd323ee (patch)
tree      11c8520bff5181c4954df61a7082f12f441f0867 /net/core/skbuff.c
parent    eb682677f59e809d8e06c218b565aeb9723a4ad3 (diff)
parent    4930f4831b1547b52c5968e9307fe3d840d7fba0 (diff)
download  linux-9740a7ae6d5208897bf3ef49e8595dc4cfd323ee.tar.xz
Merge branch 'mptcp-do-not-block-on-subflow-socket'
Florian Westphal says:

====================
mptcp: do not block on subflow socket

This series reworks the mptcp_sendmsg logic to avoid blocking on the subflow socket. It does so by removing the wait loop from the mptcp_sendmsg_frag helper. To allow that, it moves the prerequisites that are currently handled in mptcp_sendmsg_frag (and cause it to wait until they are met, e.g. frag cache refill) into the callers.

The worker can simply reschedule in case no subflow socket is ready, since it can't wait: doing so would block other work items, and it doesn't make sense anyway because we should not (re)send data when resources are already low. The sendmsg path can use the existing wait logic until memory becomes available.

Because large send requests can result in multiple mptcp_sendmsg_frag calls from mptcp_sendmsg, we may need to restart the socket lookup when the subflow can't accept more data or memory is low. Doing so blocks on the mptcp socket, and the existing wait handling releases the msk lock while blocking.

Lastly, there is no need to use GFP_ATOMIC for the extension allocation: extend __skb_ext_alloc() with a gfp_t argument instead of the hard-coded GFP_ATOMIC, then relax the allocation constraints for the mptcp case, since those requests occur in process context.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
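To illustrate the last point of the cover letter: once __skb_ext_alloc() takes a gfp_t argument, a caller running in process context can pass a sleeping allocation mode instead of the previously hard-coded GFP_ATOMIC. A minimal sketch (the calling context and the error handling here are illustrative, not taken from the series):

	/* Process context: sleeping is allowed, so GFP_KERNEL can be
	 * used here instead of GFP_ATOMIC. Atomic contexts (softirq,
	 * hard IRQ) must still pass GFP_ATOMIC.
	 */
	struct skb_ext *ext = __skb_ext_alloc(GFP_KERNEL);

	if (!ext)
		return -ENOMEM;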
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c  8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 1bf0c3d278e7..35a133c6d13b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6087,13 +6087,15 @@ static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
 /**
  * __skb_ext_alloc - allocate a new skb extensions storage
  *
+ * @flags: See kmalloc().
+ *
  * Returns the newly allocated pointer. The pointer can later be attached to a
  * skb via __skb_ext_set().
  * Note: caller must handle the skb_ext as opaque data.
  */
-struct skb_ext *__skb_ext_alloc(void)
+struct skb_ext *__skb_ext_alloc(gfp_t flags)
 {
-	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
+	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
 
 	if (new) {
 		memset(new->offset, 0, sizeof(new->offset));
@@ -6188,7 +6190,7 @@ void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
 	} else {
 		newoff = SKB_EXT_CHUNKSIZEOF(*new);
 
-		new = __skb_ext_alloc();
+		new = __skb_ext_alloc(GFP_ATOMIC);
 		if (!new)
 			return NULL;
 	}
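For context, the mptcp side of this series pairs the relaxed allocator with __skb_ext_set(), the attach helper referenced in the kernel-doc above. A hedged sketch of that pattern, assuming a process-context caller with sk and skb in scope (this is not the exact mptcp hunk, which is outside this diff):

	/* Sketch only: allocate with the socket's allocation mode
	 * (GFP_KERNEL for blocking sockets), attach the extension, and
	 * zero the payload; __skb_ext_set() returns a pointer to the
	 * extension data for the given id.
	 */
	struct skb_ext *ext = __skb_ext_alloc(sk->sk_allocation);
	struct mptcp_ext *mpext;

	if (!ext)
		return -ENOMEM;
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, ext);
	memset(mpext, 0, sizeof(*mpext));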