author | Shreyas Bhatewara <sbhatewara@vmware.com> | 2015-06-19 23:37:03 +0300
committer | David S. Miller <davem@davemloft.net> | 2015-06-23 16:26:00 +0300
commit | c41fcce997d2caa039a46495d40423348c51ad61 (patch)
tree | 35437d5c46e22fdc34f12ca2627bf0f4a1e6ad6a
parent | e9ba47bfe381888d8dc79123a20b2ec8b6751a47 (diff)
download | linux-c41fcce997d2caa039a46495d40423348c51ad61.tar.xz
vmxnet3: Fix memory leaks in rx path (fwd)
If the rcd length was zero, the page used for the frag was not being released;
it was simply replaced with a newly allocated page. This change takes care of
that memory leak.
Signed-off-by: Guolin Yang <gyang@vmware.com>
Signed-off-by: Shreyas N Bhatewara <sbhatewara@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | drivers/net/vmxnet3/vmxnet3_drv.c | 39
1 file changed, 21 insertions, 18 deletions
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index bb352106fc91..ab539758bec1 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -861,6 +861,9 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 				       , skb_headlen(skb));
 	}
 
+	if (skb->len <= VMXNET3_HDR_COPY_SIZE)
+		ctx->copy_size = skb->len;
+
 	/* make sure headers are accessible directly */
 	if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
 		goto err;
@@ -1273,36 +1276,36 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			if (skip_page_frags)
 				goto rcd_done;
 
-			new_page = alloc_page(GFP_ATOMIC);
-			if (unlikely(new_page == NULL)) {
+			if (rcd->len) {
+				new_page = alloc_page(GFP_ATOMIC);
 				/* Replacement page frag could not be allocated.
 				 * Reuse this page. Drop the pkt and free the
 				 * skb which contained this page as a frag. Skip
 				 * processing all the following non-sop frags.
 				 */
-				rq->stats.rx_buf_alloc_failure++;
-				dev_kfree_skb(ctx->skb);
-				ctx->skb = NULL;
-				skip_page_frags = true;
-				goto rcd_done;
-			}
+				if (unlikely(!new_page)) {
+					rq->stats.rx_buf_alloc_failure++;
+					dev_kfree_skb(ctx->skb);
+					ctx->skb = NULL;
+					skip_page_frags = true;
+					goto rcd_done;
+				}
 
-			if (rcd->len) {
 				dma_unmap_page(&adapter->pdev->dev,
 					       rbi->dma_addr, rbi->len,
 					       PCI_DMA_FROMDEVICE);
 
 				vmxnet3_append_frag(ctx->skb, rcd, rbi);
-			}
 
-			/* Immediate refill */
-			rbi->page = new_page;
-			rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
-						     rbi->page,
-						     0, PAGE_SIZE,
-						     PCI_DMA_FROMDEVICE);
-			rxd->addr = cpu_to_le64(rbi->dma_addr);
-			rxd->len = rbi->len;
+				/* Immediate refill */
+				rbi->page = new_page;
+				rbi->dma_addr = dma_map_page(&adapter->pdev->dev
+						, rbi->page,
+						0, PAGE_SIZE,
+						PCI_DMA_FROMDEVICE);
+				rxd->addr = cpu_to_le64(rbi->dma_addr);
+				rxd->len = rbi->len;
+			}
 		}
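As an illustration of the corrected logic (not the driver code itself), the minimal user-space sketch below models the refill decision: a replacement page is allocated only when the completed descriptor actually carried data, so a zero-length completion no longer overwrites, and thereby leaks, the page already sitting in the rx ring. The names rx_buf and refill_rx_frag, and the 4096-byte malloc standing in for alloc_page(GFP_ATOMIC), are hypothetical.

/* Simplified, user-space model of the fixed rx-frag refill path.
 * All names here are hypothetical; see the real change in
 * vmxnet3_rq_rx_complete() above.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rx_buf {
	void *page;	/* stands in for the struct page * held by the rx ring */
};

/* Returns false when the packet must be dropped (allocation failure). */
static bool refill_rx_frag(struct rx_buf *rbi, size_t rcd_len)
{
	if (rcd_len == 0)
		/* Nothing was written into this buffer: keep the current page
		 * for reuse instead of overwriting (and leaking) it with a
		 * freshly allocated one, which is what the old code did.
		 */
		return true;

	void *new_page = malloc(4096);	/* analogue of alloc_page(GFP_ATOMIC) */
	if (!new_page)
		return false;	/* drop the packet; the old page stays in the ring */

	/* The old page now belongs to the skb frag; hand the fresh one to the
	 * ring for the next packet ("immediate refill").
	 */
	rbi->page = new_page;
	return true;
}

int main(void)
{
	struct rx_buf rbi = { .page = malloc(4096) };
	void *orig = rbi.page;

	refill_rx_frag(&rbi, 0);	/* zero-length completion: page is reused */
	printf("page reused:   %s\n", rbi.page == orig ? "yes" : "no");

	refill_rx_frag(&rbi, 1500);	/* normal completion: page is replaced */
	printf("page replaced: %s\n", rbi.page != orig ? "yes" : "no");

	if (rbi.page != orig)
		free(orig);	/* in the driver this page would be owned by the skb */
	free(rbi.page);
	return 0;
}

Run in user space, this prints that the zero-length completion reuses the existing page while the normal completion swaps in a new one, which is the behaviour the patch introduces for the rcd->len == 0 case.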