author     Eric Dumazet <edumazet@google.com>     2020-05-07 19:32:20 +0300
committer  David S. Miller <davem@davemloft.net>  2020-05-08 04:11:07 +0300
commit     1ddabdfaf70c202b88925edd74c66f4707dbd92e (patch)
tree       054aa6cba9525f277fcc333a4cacfaa8abc0418e
parent     fb1eee476b0d3be3e58dac1a3a96f726c6278bed (diff)
netpoll: netpoll_send_skb() returns transmit status
Some callers want to know if the packet has been sent or
dropped, to inform upper stacks.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/linux/netpoll.h |  2 +-
-rw-r--r--  net/core/netpoll.c      | 11 +++++++----
2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e466ddffef61..f47af135bd56 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -63,7 +63,7 @@ int netpoll_setup(struct netpoll *np);
 void __netpoll_cleanup(struct netpoll *np);
 void __netpoll_free(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
-void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
 
 #ifdef CONFIG_NETPOLL
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 34cd34f24423..40d2753aa47d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -305,7 +305,7 @@ static int netpoll_owner_active(struct net_device *dev)
 }
 
 /* call with IRQ disabled */
-static void __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
 	netdev_tx_t status = NETDEV_TX_BUSY;
 	struct net_device *dev;
@@ -320,7 +320,7 @@ static void __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
 		dev_kfree_skb_irq(skb);
-		return;
+		return NET_XMIT_DROP;
 	}
 
 	/* don't get messages out of order, and no recursion */
@@ -359,15 +359,18 @@ static void __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 		skb_queue_tail(&npinfo->txq, skb);
 		schedule_delayed_work(&npinfo->tx_work,0);
 	}
+	return NETDEV_TX_OK;
 }
 
-void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
 	unsigned long flags;
+	netdev_tx_t ret;
 
 	local_irq_save(flags);
-	__netpoll_send_skb(np, skb);
+	ret = __netpoll_send_skb(np, skb);
 	local_irq_restore(flags);
+	return ret;
 }
 EXPORT_SYMBOL(netpoll_send_skb);
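With netpoll_send_skb() now returning a netdev_tx_t, a wrapper can propagate the transmit status to whatever stacked device sits above it instead of assuming the packet went out. The sketch below is illustrative only and not part of this patch; example_netpoll_forward() is a made-up name standing in for whatever driver path wraps netpoll_send_skb():

#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>

/* Illustrative caller, not from the kernel tree: forward a packet via
 * netpoll and report the transmit status to the layer above.
 */
static netdev_tx_t example_netpoll_forward(struct netpoll *np,
					    struct sk_buff *skb)
{
	netdev_tx_t status;

	/* netpoll_send_skb() consumes the skb in every case (transmitted,
	 * queued for later xmit, or freed on drop), so the caller only
	 * inspects the status and must not free the skb itself.
	 */
	status = netpoll_send_skb(np, skb);
	if (status != NETDEV_TX_OK)
		np->dev->stats.tx_dropped++;	/* surface the drop in stats */

	return status;
}

Note that the drop path in __netpoll_send_skb() returns NET_XMIT_DROP rather than a NETDEV_TX_* code, so a caller is better off comparing against NETDEV_TX_OK than enumerating the failure values.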