author		Igor Druzhinin <igor.druzhinin@citrix.com>	2019-02-28 15:48:03 +0300
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2019-03-10 09:17:19 +0300
commit		947fc52b6bf470fda416fac80051c1b9d401ac59 (patch)
tree		02355e44be694167e730b2ba3b509ffa4c465dce /drivers/net
parent		e5e5840183deac7b135b35c199968b3e61949851 (diff)
download	linux-947fc52b6bf470fda416fac80051c1b9d401ac59.tar.xz
xen-netback: fix occasional leak of grant ref mappings under memory pressure
[ Upstream commit 99e87f56b48f490fb16b6e0f74691c1e664dea95 ]

Zero-copy callback flag is not yet set on frag list skb at the moment
xenvif_handle_frag_list() returns -ENOMEM. This eventually results in
leaking grant ref mappings since xenvif_zerocopy_callback() is never
called for these fragments. Those eventually build up and cause Xen to
kill Dom0 as the slots get reused for new mappings:

"d0v0 Attempt to implicitly unmap a granted PTE c010000329fce005"

That behavior is observed under certain workloads where sudden spikes of
page cache writes coexist with active atomic skb allocations from
network traffic. Additionally, rework the logic to deal with frag_list
deallocation in a single place.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Igor Druzhinin <igor.druzhinin@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
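For reference, a minimal sketch of the reworked frag_list path in xenvif_tx_submit() once both hunks below are applied. The error-path details that are not visible in the hunks (the netdev_err() message text and the zerocopy preparation of the parent skb before it is dropped) are recalled from the surrounding kernel source, not quoted from this diff, and may differ slightly:

	/* Inside the tx-submit loop, after xenvif_fill_frags(queue, skb). */
	if (unlikely(skb_has_frag_list(skb))) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		/* Flag the frag list skb for the zerocopy callback *before*
		 * trying to consolidate it, so its grant ref mappings are
		 * released even if xenvif_handle_frag_list() fails.
		 */
		xenvif_skb_zerocopy_prepare(queue, nskb);

		if (xenvif_handle_frag_list(queue, skb)) {
			/* -ENOMEM: drop the whole skb. nskb is still linked
			 * on the frag list at this point, so it is freed (and
			 * its callback fires) together with skb.
			 */
			if (net_ratelimit())
				netdev_err(queue->vif->dev,
					   "Not enough memory to consolidate frag_list!\n");
			xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		/* Copied all the bits from the frag list -- free it. */
		skb_frag_list_init(skb);
		kfree_skb(nskb);
	}

Freeing nskb only in the caller, after a successful consolidation, is what keeps frag_list deallocation in a single place: on failure the frag list is left attached so kfree_skb(skb) cleans it up.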
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/xen-netback/netback.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3621e05a7494..d5081ffdc8f0 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1072,11 +1072,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 		skb_frag_size_set(&frags[i], len);
 	}
 
-	/* Copied all the bits from the frag list -- free it. */
-	skb_frag_list_init(skb);
-	xenvif_skb_zerocopy_prepare(queue, nskb);
-	kfree_skb(nskb);
-
 	/* Release all the original (foreign) frags. */
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 		skb_frag_unref(skb, f);
@@ -1145,6 +1140,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 		xenvif_fill_frags(queue, skb);
 
 		if (unlikely(skb_has_frag_list(skb))) {
+			struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+			xenvif_skb_zerocopy_prepare(queue, nskb);
 			if (xenvif_handle_frag_list(queue, skb)) {
 				if (net_ratelimit())
 					netdev_err(queue->vif->dev,
@@ -1153,6 +1150,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 				kfree_skb(skb);
 				continue;
 			}
+			/* Copied all the bits from the frag list -- free it. */
+			skb_frag_list_init(skb);
+			kfree_skb(nskb);
 		}
 
 		skb->dev = queue->vif->dev;