author    Linus Torvalds <torvalds@linux-foundation.org>  2015-06-25 02:49:49 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-06-25 02:49:49 +0300
commit    e0456717e483bb8a9431b80a5bdc99a928b9b003
tree      5eb5add2bafd1f20326d70f5cb3b711d00a40b10 /drivers/net/ethernet/broadcom/bnx2x
parent    98ec21a01896751b673b6c731ca8881daa8b2c6d
parent    1ea2d020ba477cb7011a7174e8501a9e04a325d4
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Add TX fast path in mac80211, from Johannes Berg.

 2) Add TSO/GRO support to ibmveth, from Thomas Falcon.

 3) Move away from cached routes in ipv6, just like ipv4, from Martin
    KaFai Lau.

 4) Lots of new rhashtable tests, from Thomas Graf.

 5) Run ingress qdisc lockless, from Alexei Starovoitov.

 6) Allow servers to fetch TCP packet headers for SYN packets of new
    connections, for fingerprinting. From Eric Dumazet.

 7) Add mode parameter to pktgen, for testing receive. From Alexei
    Starovoitov.

 8) Cache access optimizations via simplifications of build_skb(), from
    Alexander Duyck.

 9) Move page frag allocator under mm/, also from Alexander.

10) Add xmit_more support to hv_netvsc, from KY Srinivasan.

11) Add a counter guard in case we try to perform endless reclassify
    loops in the packet scheduler.

12) Extend flow dissector to be programmable and use it in new "Flower"
    classifier. From Jiri Pirko.

13) AF_PACKET fanout rollover fixes, performance improvements, and new
    statistics. From Willem de Bruijn.

14) Add netdev driver for GENEVE tunnels, from John W Linville.

15) Add ingress netfilter hooks and filtering, from Pablo Neira Ayuso.

16) Fix handling of epoll edge triggers in TCP, from Eric Dumazet.

17) Add an ECN retry fallback for the initial TCP handshake, from
    Daniel Borkmann.

18) Add tail call support to BPF, from Alexei Starovoitov.

19) Add several pktgen helper scripts, from Jesper Dangaard Brouer.

20) Add zerocopy support to AF_UNIX, from Hannes Frederic Sowa.

21) Favor even port numbers for allocation to connect() requests, and
    odd port numbers for bind(0), in an effort to help avoid
    ip_local_port_range exhaustion. From Eric Dumazet.

22) Add Cavium ThunderX driver, from Sunil Goutham.

23) Allow bpf programs to access skb_iif and dev->ifindex SKB metadata,
    from Alexei Starovoitov.

24) Add support for T6 chips in cxgb4vf driver, from Hariprasad Shenai.

25) Double TCP Small Queues default to 256K to accommodate situations
    like the Xen driver and wireless aggregation. From Wei Liu.

26) Add more entropy inputs to flow dissector, from Tom Herbert.

27) Add CDG congestion control algorithm to TCP, from Kenneth Klette
    Jonassen.

28) Convert ipset over to RCU locking, from Jozsef Kadlecsik.

29) Track and act upon link status of ipv4 route nexthops, from Andy
    Gospodarek.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1670 commits)
  bridge: vlan: flush the dynamically learned entries on port vlan delete
  bridge: multicast: add a comment to br_port_state_selection about blocking state
  net: inet_diag: export IPV6_V6ONLY sockopt
  stmmac: troubleshoot unexpected bits in des0 & des1
  net: ipv4 sysctl option to ignore routes when nexthop link is down
  net: track link-status of ipv4 nexthops
  net: switchdev: ignore unsupported bridge flags
  net: Cavium: Fix MAC address setting in shutdown state
  drivers: net: xgene: fix for ACPI support without ACPI
  ip: report the original address of ICMP messages
  net/mlx5e: Prefetch skb data on RX
  net/mlx5e: Pop cq outside mlx5e_get_cqe
  net/mlx5e: Remove mlx5e_cq.sqrq back-pointer
  net/mlx5e: Remove extra spaces
  net/mlx5e: Avoid TX CQE generation if more xmit packets expected
  net/mlx5e: Avoid redundant dev_kfree_skb() upon NOP completion
  net/mlx5e: Remove re-assignment of wq type in mlx5e_enable_rq()
  net/mlx5e: Use skb_shinfo(skb)->gso_segs rather than counting them
  net/mlx5e: Static mapping of netdev priv resources to/from netdev TX queues
  net/mlx4_en: Use HW counters for rx/tx bytes/packets in PF device
  ...
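[Editor's note] Item 6 above is exposed through a pair of TCP socket options: TCP_SAVE_SYN asks the kernel to retain the headers of incoming SYNs, and TCP_SAVED_SYN reads them back on the accepted socket. A minimal sketch of server-side usage; the numeric fallbacks are copied from include/uapi/linux/tcp.h, the port and buffer size are arbitrary, and error handling is trimmed:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_SAVE_SYN
#define TCP_SAVE_SYN  27	/* values from include/uapi/linux/tcp.h */
#define TCP_SAVED_SYN 28
#endif

int main(void)
{
	int one = 1;
	int lfd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(8080);		/* port chosen arbitrarily */

	/* Ask the kernel to keep the IP+TCP headers of incoming SYNs. */
	setsockopt(lfd, IPPROTO_TCP, TCP_SAVE_SYN, &one, sizeof(one));
	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
	listen(lfd, 16);

	int cfd = accept(lfd, NULL, NULL);
	unsigned char syn[512];
	socklen_t len = sizeof(syn);

	/* Read back the saved SYN headers of this connection. */
	if (cfd >= 0 &&
	    getsockopt(cfd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len) == 0)
		printf("saved SYN: %u header bytes\n", (unsigned)len);
	return 0;
}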
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x')

-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h      | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  | 59
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h  | 31

3 files changed, 81 insertions(+), 25 deletions(-)
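[Editor's note] The core of the bnx2x patch below: instead of allocating and DMA-mapping a buffer per RX SGE, each fastpath keeps a small pool that maps one page and carves it into fixed 4K fragments, taking one page reference per fragment in flight and dropping the pool's own reference when the page is exhausted. A toy, self-contained C model of that bookkeeping; all names here are invented stand-ins (the real code uses struct page, alloc_pages(), dma_map_page(), get_page()/put_page()):

#include <stdlib.h>

#define POOL_PAGE_SIZE 65536	/* stands in for PAGE_SIZE */
#define FRAG_SIZE	4096	/* stands in for SGE_PAGE_SIZE */

/* Toy page with an embedded refcount, standing in for struct page. */
struct toy_page {
	unsigned int refs;
	unsigned char data[POOL_PAGE_SIZE];
};

static void toy_put(struct toy_page *p)
{
	if (--p->refs == 0)
		free(p);		/* last reference frees the page */
}

/* Mirrors struct bnx2x_alloc_pool from the bnx2x.h hunk below. */
struct frag_pool {
	struct toy_page *page;
	unsigned int offset;		/* next unused byte in the page */
};

/* Hand out one FRAG_SIZE fragment, replenishing the backing page when
 * it has no room left - the shape of bnx2x_alloc_rx_sge() below. */
static unsigned char *pool_alloc_frag(struct frag_pool *pool,
				      struct toy_page **owner)
{
	if (!pool->page || POOL_PAGE_SIZE - pool->offset < FRAG_SIZE) {
		if (pool->page)
			toy_put(pool->page);	/* drop the pool's own ref;
						 * outstanding fragments still
						 * pin the old page */
		pool->page = malloc(sizeof(*pool->page));
		if (!pool->page)
			return NULL;
		pool->page->refs = 1;		/* the pool's reference */
		pool->offset = 0;
	}

	pool->page->refs++;			/* get_page() per fragment */
	*owner = pool->page;
	unsigned char *frag = pool->page->data + pool->offset;
	pool->offset += FRAG_SIZE;
	return frag;
}

int main(void)
{
	struct frag_pool pool = { 0 };
	struct toy_page *owner;

	for (int i = 0; i < 20; i++) {		/* spans two backing pages */
		unsigned char *frag = pool_alloc_frag(&pool, &owner);
		if (frag)
			toy_put(owner);		/* consumer done with frag */
	}
	if (pool.page)
		toy_put(pool.page);	/* teardown, like bnx2x_free_rx_mem_pool() */
	return 0;
}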
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 1f82a04ce01a..7a4aaa3c01b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -357,6 +357,7 @@ struct sw_tx_bd {
struct sw_rx_page {
struct page *page;
DEFINE_DMA_UNMAP_ADDR(mapping);
+ unsigned int offset;
};
union db_prod {
@@ -381,9 +382,10 @@ union db_prod {
#define PAGES_PER_SGE_SHIFT 0
#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
-#define SGE_PAGE_SIZE PAGE_SIZE
-#define SGE_PAGE_SHIFT PAGE_SHIFT
-#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGE_SHIFT 12
+#define SGE_PAGE_SIZE (1 << SGE_PAGE_SHIFT)
+#define SGE_PAGE_MASK (~(SGE_PAGE_SIZE - 1))
+#define SGE_PAGE_ALIGN(addr) (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)
#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
SGE_PAGES), 0xffff)
@@ -526,6 +528,12 @@ enum bnx2x_tpa_mode_t {
TPA_MODE_GRO
};
+struct bnx2x_alloc_pool {
+ struct page *page;
+ dma_addr_t dma;
+ unsigned int offset;
+};
+
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
@@ -599,6 +607,8 @@ struct bnx2x_fastpath {
4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[FP_NAME_SIZE];
+
+ struct bnx2x_alloc_pool page_pool;
};
#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
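[Editor's note] The replacement macros above pin the SGE granularity at 4K instead of tracking the architecture's PAGE_SIZE; the round-up is the standard add-then-mask trick for power-of-two alignment. A standalone compile-time check of the arithmetic (C11 _Static_assert; macro bodies copied from the hunk above, test values invented):

/* Verifies SGE_PAGE_ALIGN rounds up to the next 4K boundary. */
#define SGE_PAGE_SHIFT 12
#define SGE_PAGE_SIZE (1 << SGE_PAGE_SHIFT)
#define SGE_PAGE_MASK (~(SGE_PAGE_SIZE - 1))
#define SGE_PAGE_ALIGN(addr) (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)

_Static_assert(SGE_PAGE_ALIGN(0) == 0, "zero stays zero");
_Static_assert(SGE_PAGE_ALIGN(1) == 4096, "rounds up");
_Static_assert(SGE_PAGE_ALIGN(4096) == 4096, "exact multiple unchanged");
_Static_assert(SGE_PAGE_ALIGN(4097) == 8192, "rounds past a boundary");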
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ec56a9b65dc3..e2a65334708d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -544,30 +544,49 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
u16 index, gfp_t gfp_mask)
{
- struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+ struct bnx2x_alloc_pool *pool = &fp->page_pool;
dma_addr_t mapping;
- if (unlikely(page == NULL)) {
- BNX2X_ERR("Can't alloc sge\n");
- return -ENOMEM;
- }
+ if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
- mapping = dma_map_page(&bp->pdev->dev, page, 0,
- SGE_PAGES, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- __free_pages(page, PAGES_PER_SGE_SHIFT);
- BNX2X_ERR("Can't map sge\n");
- return -ENOMEM;
+ /* put page reference used by the memory pool, since we
+ * won't be using this page as the mempool anymore.
+ */
+ if (pool->page)
+ put_page(pool->page);
+
+ pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+ if (unlikely(!pool->page)) {
+ BNX2X_ERR("Can't alloc sge\n");
+ return -ENOMEM;
+ }
+
+ pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev,
+ pool->dma))) {
+ __free_pages(pool->page, PAGES_PER_SGE_SHIFT);
+ pool->page = NULL;
+ BNX2X_ERR("Can't map sge\n");
+ return -ENOMEM;
+ }
+ pool->offset = 0;
}
- sw_buf->page = page;
+ get_page(pool->page);
+ sw_buf->page = pool->page;
+ sw_buf->offset = pool->offset;
+
+ mapping = pool->dma + sw_buf->offset;
dma_unmap_addr_set(sw_buf, mapping, mapping);
sge->addr_hi = cpu_to_le32(U64_HI(mapping));
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+ pool->offset += SGE_PAGE_SIZE;
+
return 0;
}
@@ -629,20 +648,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return err;
}
- /* Unmap the page as we're going to pass it to the stack */
- dma_unmap_page(&bp->pdev->dev,
- dma_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGES, DMA_FROM_DEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(&old_rx_pg, mapping),
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
if (fp->mode == TPA_MODE_LRO)
- skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ skb_fill_page_desc(skb, j, old_rx_pg.page,
+ old_rx_pg.offset, frag_len);
else { /* GRO */
int rem;
int offset = 0;
for (rem = frag_len; rem > 0; rem -= gro_size) {
int len = rem > gro_size ? gro_size : rem;
skb_fill_page_desc(skb, frag_id++,
- old_rx_pg.page, offset, len);
+ old_rx_pg.page,
+ old_rx_pg.offset + offset,
+ len);
if (offset)
get_page(old_rx_pg.page);
offset += len;
@@ -662,7 +683,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
{
if (fp->rx_frag_size)
- put_page(virt_to_head_page(data));
+ skb_free_frag(data);
else
kfree(data);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index d7a71758e876..2b30081ec26d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -804,9 +804,13 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
if (!page)
return;
- dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
- SGE_PAGES, DMA_FROM_DEVICE);
- __free_pages(page, PAGES_PER_SGE_SHIFT);
+ /* Since many fragments can share the same page, make sure to
+ * only unmap and free the page once.
+ */
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ put_page(page);
sw_buf->page = NULL;
sge->addr_hi = 0;
@@ -964,6 +968,25 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
((u8 *)fw_lo)[1] = mac[4];
}
+static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
+ struct bnx2x_alloc_pool *pool)
+{
+ if (!pool->page)
+ return;
+
+ /* Page was not fully fragmented. Unmap unused space */
+ if (pool->offset < PAGE_SIZE) {
+ dma_addr_t dma = pool->dma + pool->offset;
+ int size = PAGE_SIZE - pool->offset;
+
+ dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE);
+ }
+
+ put_page(pool->page);
+
+ pool->page = NULL;
+}
+
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
struct bnx2x_fastpath *fp, int last)
{
@@ -974,6 +997,8 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
for (i = 0; i < last; i++)
bnx2x_free_rx_sge(bp, fp, i);
+
+ bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
}
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
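[Editor's note] One invariant worth spelling out from bnx2x_free_rx_sge() and bnx2x_free_rx_mem_pool() above: each handed-out fragment is unmapped individually at SGE_PAGE_SIZE granularity at its own offset, and teardown unmaps only the never-used tail, so every mapped byte of the pool page is unmapped exactly once. A standalone, simplified sanity check of that arithmetic (plain C; the array is a stand-in for IOMMU mapping state, sizes invented):

#include <assert.h>

#define PAGE_SZ	65536
#define SGE_SZ	 4096

int main(void)
{
	static unsigned char unmapped[PAGE_SZ];	/* zero-initialized */
	unsigned int offset = 11 * SGE_SZ;	/* pool stopped mid-page */

	/* bnx2x_free_rx_sge(): one SGE_PAGE_SIZE unmap per fragment... */
	for (unsigned int off = 0; off < offset; off += SGE_SZ)
		for (unsigned int i = 0; i < SGE_SZ; i++)
			unmapped[off + i]++;

	/* ...bnx2x_free_rx_mem_pool(): unmap only the unused tail. */
	if (offset < PAGE_SZ)
		for (unsigned int i = offset; i < PAGE_SZ; i++)
			unmapped[i]++;

	for (unsigned int i = 0; i < PAGE_SZ; i++)
		assert(unmapped[i] == 1);	/* exactly once, no overlap */
	return 0;
}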