author      Jakub Kicinski <kuba@kernel.org>    2023-04-25 04:08:54 +0300
committer   Jakub Kicinski <kuba@kernel.org>    2023-04-25 04:08:54 +0300
commit      086c161678b8cc291aa21ff8ef2b53df83ee44aa (patch)
tree        b2ce11edb5f102ae29e5c918fac15c6a43cb3623
parent      8e8e47d9e84ff9a6f1072177bd7ec1d2f12f4d5c (diff)
parent      df18f2da302f169e1a29098c6ca3b474f1b0269e (diff)
Merge branch 'update-coding-style-and-check-alloc_frag'
Haiyang Zhang says:

====================
Update coding style and check alloc_frag

Follow up patches for the jumbo frame support. As suggested by Jakub
Kicinski, update coding style, and check napi_alloc_frag for possible
fallback to single pages.
====================

Link: https://lore.kernel.org/r/1682096818-30056-1-git-send-email-haiyangz@microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 24
1 file changed, 18 insertions(+), 6 deletions(-)
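The core of the change is the single-page fallback check: netdev_alloc_frag() and
napi_alloc_frag() can return a plain single page when memory is tight, even though
more than PAGE_SIZE was requested, so the driver now compares the head page's
compound order with the order needed for the requested size and gives the page back
if it is too small. A minimal standalone sketch of that pattern follows; the helper
name and its use outside the driver are illustrative, not part of the patch.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

/* Hypothetical helper, not from the patch: allocate a frag and reject
 * allocations where the frag allocator fell back to a single page that
 * is smaller than the requested size.
 */
static void *alloc_frag_checked(unsigned int alloc_size)
{
        struct page *page;
        void *va;

        va = netdev_alloc_frag(alloc_size);
        if (!va)
                return NULL;

        page = virt_to_head_page(va);
        if (compound_order(page) < get_order(alloc_size)) {
                /* Fallback page is too small for alloc_size; release it. */
                put_page(page);
                return NULL;
        }

        return va;
}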
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index cabecbfa1102..06d6292e09b3 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -553,6 +553,14 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
 			va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
 			if (!va)
 				goto error;
+
+			page = virt_to_head_page(va);
+			/* Check if the frag falls back to single page */
+			if (compound_order(page) <
+			    get_order(mpc->rxbpre_alloc_size)) {
+				put_page(page);
+				goto error;
+			}
 		} else {
 			page = dev_alloc_page();
 			if (!page)
@@ -563,7 +571,6 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
 
 		da = dma_map_single(dev, va + mpc->rxbpre_headroom,
 				    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
-
 		if (dma_mapping_error(dev, da)) {
 			put_page(virt_to_head_page(va));
 			goto error;
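Right after allocation, both functions map the data area and unwind on a mapping
failure; the style change above only removes the blank line between the
dma_map_single() call and its error check. A minimal sketch of that map-and-unwind
step, with a hypothetical helper name and parameters:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* Hypothetical helper: map the RX data area and drop the backing page
 * if the mapping fails, mirroring the error handling shown above.
 */
static int map_rx_frag(struct device *dev, void *va, unsigned int headroom,
                       unsigned int datasize, dma_addr_t *da)
{
        *da = dma_map_single(dev, va + headroom, datasize, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *da)) {
                put_page(virt_to_head_page(va));
                return -ENOMEM;
        }

        return 0;
}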
@@ -1505,6 +1512,13 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 
 		if (!va)
 			return NULL;
+
+		page = virt_to_head_page(va);
+		/* Check if the frag falls back to single page */
+		if (compound_order(page) < get_order(rxq->alloc_size)) {
+			put_page(page);
+			return NULL;
+		}
 	} else {
 		page = dev_alloc_page();
 		if (!page)
@@ -1515,7 +1529,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 
 	*da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
 			     DMA_FROM_DEVICE);
-
 	if (dma_mapping_error(dev, *da)) {
 		put_page(virt_to_head_page(va));
 		return NULL;
@@ -1525,14 +1538,13 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 }
 
 /* Allocate frag for rx buffer, and save the old buf */
-static void mana_refill_rxoob(struct device *dev, struct mana_rxq *rxq,
-			      struct mana_recv_buf_oob *rxoob, void **old_buf)
+static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
+			       struct mana_recv_buf_oob *rxoob, void **old_buf)
 {
 	dma_addr_t da;
 	void *va;
 
 	va = mana_get_rxfrag(rxq, dev, &da, true);
-
 	if (!va)
 		return;
 
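The body of the renamed helper is not part of this hunk; as a rough sketch of the
refill pattern its comment describes (struct, field, and function names below are
illustrative, not the driver's), a replacement buffer is allocated and mapped first,
and only then is the old buffer unmapped and handed back to the caller:

#include <linux/dma-mapping.h>

struct rx_slot {
        void *buf_va;
        dma_addr_t dma;
};

/* Illustrative helper: swap in a freshly allocated, already-mapped
 * buffer and return the previous one through *old_buf; when new_va is
 * NULL the slot keeps its current buffer and *old_buf stays untouched.
 */
static void refill_rx_slot(struct device *dev, struct rx_slot *slot,
                           unsigned int datasize, void *new_va,
                           dma_addr_t new_da, void **old_buf)
{
        if (!new_va)
                return;

        dma_unmap_single(dev, slot->dma, datasize, DMA_FROM_DEVICE);
        *old_buf = slot->buf_va;

        slot->buf_va = new_va;
        slot->dma = new_da;
}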
@@ -1597,7 +1609,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 	rxbuf_oob = &rxq->rx_oobs[curr];
 	WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
 
-	mana_refill_rxoob(dev, rxq, rxbuf_oob, &old_buf);
+	mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf);
 
 	/* Unsuccessful refill will have old_buf == NULL.
 	 * In this case, mana_rx_skb() will drop the packet.
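A sketch of the caller-side contract that comment describes, using an illustrative
wrapper rather than the driver's real completion path: old_buf starts as NULL, stays
NULL when the refill could not allocate a replacement, and in that case the
already-posted buffer is kept and the packet is dropped.

/* Illustrative wrapper (not a real driver function); it would have to
 * live in mana_en.c next to the static mana_refill_rx_oob(). It returns
 * true when the refill produced a replacement buffer, in which case
 * *old_buf holds the data of the completed packet; false means the
 * posted buffer was kept and this packet should be dropped.
 */
static bool rx_refill_and_check(struct device *dev, struct mana_rxq *rxq,
                                struct mana_recv_buf_oob *rxbuf_oob,
                                void **old_buf)
{
        *old_buf = NULL;

        mana_refill_rx_oob(dev, rxq, rxbuf_oob, old_buf);

        return *old_buf != NULL;
}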