author	Brett Creeley <brett.creeley@amd.com>	2024-05-29 03:02:56 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2024-05-31 04:10:34 +0300
commit	d9c04209990be6c03a354b124e012293273e05d5 (patch)
tree	e10054d1f374fa30c33ba0631f107c84ec831ea1	/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
parent	4dde9588c54d1d86a415a5ed7dc6d8a605fe11ce (diff)
ionic: Mark error paths in the data path as unlikely
As the title states, mark unlikely error paths in the data path as unlikely.

Signed-off-by: Brett Creeley <brett.creeley@amd.com>
Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Link: https://lore.kernel.org/r/20240529000259.25775-5-shannon.nelson@amd.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers/net/ethernet/pensando/ionic/ionic_txrx.c')
-rw-r--r--	drivers/net/ethernet/pensando/ionic/ionic_txrx.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
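For readers not steeped in kernel style: unlikely() is the branch-prediction hint from include/linux/compiler.h, a thin wrapper around GCC/Clang's __builtin_expect() that tells the compiler the condition is almost never true, so the error block can be laid out away from the hot path. The following is a minimal userspace sketch of the pattern this patch applies; the hint macros mirror the kernel's definitions, while map_buffer(), tx_one() and their failure mode are invented stand-ins for illustration, not ionic driver code.

/* Minimal userspace sketch of the unlikely() annotation pattern.
 * The hint macros mirror include/linux/compiler.h; map_buffer() and
 * tx_one() are hypothetical stand-ins, not ionic driver code. */
#include <stdio.h>
#include <stdlib.h>

#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Hypothetical mapping helper that fails only in rare circumstances. */
static int map_buffer(const void *buf)
{
	return buf ? 0 : -1;
}

static int tx_one(const void *buf)
{
	int err;

	err = map_buffer(buf);
	if (unlikely(err)) {		/* rare error path, kept out of line */
		fprintf(stderr, "map failed: %d\n", err);
		return err;
	}

	return 0;			/* hot (success) path stays fall-through */
}

int main(void)
{
	char frame[64] = { 0 };

	return tx_one(frame) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The annotation has no functional effect; it only nudges code layout and static branch prediction, which is why a change like this one can be applied to the rarely taken error branches of the data path without altering behaviour.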
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index c6aa8fb743be..14aa3844b699 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -582,7 +582,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 					   buf_info->page_offset,
 					   true);
 		__netif_tx_unlock(nq);
-		if (err) {
+		if (unlikely(err)) {
 			netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
 			goto out_xdp_abort;
 		}
@@ -597,7 +597,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
 					   IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
 
 		err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
-		if (err) {
+		if (unlikely(err)) {
 			netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
 			goto out_xdp_abort;
 		}
@@ -1058,7 +1058,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
 	dma_addr_t dma_addr;
 
 	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, dma_addr)) {
+	if (unlikely(dma_mapping_error(dev, dma_addr))) {
 		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
 				     dev_name(dev), q->name);
 		q_to_tx_stats(q)->dma_map_err++;
@@ -1075,7 +1075,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
 	dma_addr_t dma_addr;
 
 	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, dma_addr)) {
+	if (unlikely(dma_mapping_error(dev, dma_addr))) {
 		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
 				     dev_name(dev), q->name);
 		q_to_tx_stats(q)->dma_map_err++;
@@ -1316,7 +1316,7 @@ static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
 	int err;
 
 	err = skb_cow_head(skb, 0);
-	if (err)
+	if (unlikely(err))
 		return err;
 
 	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
@@ -1340,7 +1340,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
 	int err;
 
 	err = skb_cow_head(skb, 0);
-	if (err)
+	if (unlikely(err))
 		return err;
 
 	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
@@ -1444,7 +1444,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
 		err = ionic_tx_tcp_inner_pseudo_csum(skb);
 	else
 		err = ionic_tx_tcp_pseudo_csum(skb);
-	if (err) {
+	if (unlikely(err)) {
 		/* clean up mapping from ionic_tx_map_skb */
 		ionic_tx_desc_unmap_bufs(q, desc_info);
 		return err;
@@ -1729,7 +1729,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 linearize:
 	if (too_many_frags) {
 		err = skb_linearize(skb);
-		if (err)
+		if (unlikely(err))
 			return err;
 		q_to_tx_stats(q)->linearize++;
 	}
@@ -1763,7 +1763,7 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
 	else
 		err = ionic_tx(netdev, q, skb);
 
-	if (err)
+	if (unlikely(err))
 		goto err_out_drop;
 
 	return NETDEV_TX_OK;
@@ -1809,7 +1809,7 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	else
 		err = ionic_tx(netdev, q, skb);
 
-	if (err)
+	if (unlikely(err))
 		goto err_out_drop;
 
 	return NETDEV_TX_OK;