author		Edward Cree <ecree@solarflare.com>	2020-07-27 14:55:55 +0300
committer	David S. Miller <davem@davemloft.net>	2020-07-27 22:26:55 +0300
commit		51b35a454efdcd86f578e61ec8bf7596299c5f80 (patch)
tree		601ce4c9a7d89a13d07c194331434b6ea9a7eaba /drivers/net/ethernet/sfc/efx.h
parent		61060c5dc5c5734942528f31c094606539fffb8b (diff)
sfc: skeleton EF100 PF driver
No TX or RX path, no MCDI, not even an ifup/down handler. Besides stubs, the bulk of the patch deals with reading the Xilinx extended PCIe capability, which tells us where to find our BAR.

Although it lives in the same module, EF100 has its own struct pci_driver, named sfc_ef100.

A small number of additional nic_type methods are added; those in the TX (tx_enqueue) and RX (rx_packet) paths are called through indirect call wrappers to minimise the performance impact.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
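The BAR-discovery logic the message refers to lives in the EF100-specific sources added elsewhere in this patch, not in efx.h, so it does not appear in the diff below. The general pattern is to walk the PCIe extended capability list for a vendor-specific capability and read a register that names the BAR. The sketch below illustrates only that pattern; EF100_XIL_CAP_ID and EF100_XIL_BAR_REG are hypothetical placeholders, not the register layout the real driver parses.

/* Sketch only: locate a vendor-specific PCIe extended capability and
 * read out which BAR the function should map.  EF100_XIL_CAP_ID and
 * EF100_XIL_BAR_REG are hypothetical placeholders, not the real EF100
 * layout.
 */
#include <linux/errno.h>
#include <linux/pci.h>

#define EF100_XIL_CAP_ID	0x20	/* hypothetical vendor cap ID */
#define EF100_XIL_BAR_REG	0x8	/* hypothetical offset of BAR word */

static int ef100_find_bar(struct pci_dev *pci_dev, unsigned int *bar)
{
	u32 hdr, bar_word;
	u16 pos = 0;

	while ((pos = pci_find_next_ext_capability(pci_dev, pos,
						   PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_dword(pci_dev, pos + PCI_VNDR_HEADER, &hdr);
		if (PCI_VNDR_HEADER_ID(hdr) != EF100_XIL_CAP_ID)
			continue;
		pci_read_config_dword(pci_dev, pos + EF100_XIL_BAR_REG,
				      &bar_word);
		*bar = bar_word & 0x7;	/* low bits select the BAR */
		return 0;
	}
	return -ENODEV;
}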
Diffstat (limited to 'drivers/net/ethernet/sfc/efx.h')
-rw-r--r--	drivers/net/ethernet/sfc/efx.h | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index e7e7d8d1a07b..a9808e86068d 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -8,7 +8,10 @@
#ifndef EFX_EFX_H
#define EFX_EFX_H

+#include <linux/indirect_call_wrapper.h>
#include "net_driver.h"
+#include "ef100_rx.h"
+#include "ef100_tx.h"
#include "filter.h"
int efx_net_open(struct net_device *net_dev);
@@ -18,13 +21,18 @@ int efx_net_stop(struct net_device *net_dev);
void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev);
-netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+	return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
+			       ef100_enqueue_skb, __efx_enqueue_skb,
+			       tx_queue, skb);
+}
void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data);
extern unsigned int efx_piobuf_size;
-extern bool efx_separate_tx_channels;

/* RX */
void __efx_rx_packet(struct efx_channel *channel);
@@ -33,7 +41,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
	if (channel->rx_pkt_n_frags)
-		__efx_rx_packet(channel);
+		INDIRECT_CALL_2(channel->efx->type->rx_packet,
+				__ef100_rx_packet, __efx_rx_packet,
+				channel);
}

/* Maximum number of TCP segments we support for soft-TSO */
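For readers who have not met the wrapper used in the hunks above: INDIRECT_CALL_2(), from <linux/indirect_call_wrapper.h>, compares a function pointer against two known candidates and calls the matching one directly, only falling back to a true indirect call (and hence a retpoline, on kernels built with retpoline mitigations) when neither matches. A simplified sketch of the idea, not the kernel's exact macro:

/* Simplified illustration of the indirect-call-wrapper idea: turn an
 * indirect call through "f" into a direct call when "f" is one of the
 * two expected implementations.  The real macros live in
 * include/linux/indirect_call_wrapper.h.
 */
#include <linux/compiler.h>

#define MY_INDIRECT_CALL_2(f, f2, f1, ...)				\
	(likely((f) == (f2)) ? (f2)(__VA_ARGS__) :			\
	 likely((f) == (f1)) ? (f1)(__VA_ARGS__) :			\
			       (f)(__VA_ARGS__))

With just two candidates per hook (the EF100 implementation and the existing common one), the TX and RX hot paths avoid most of the cost the new nic_type indirection would otherwise add on retpoline-enabled kernels.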