Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--  drivers/net/ethernet/freescale/fec.h                  |  264
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c             | 1254
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c              |  277
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c |  211
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet.h      |    9
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fcc.c      |   29
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fec.c      |   32
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-scc.c      |   32
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c          |   56
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c              |   70
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h              |   31
11 files changed, 1671 insertions(+), 594 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index ee41d98b44b6..9af296a1ca99 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -27,8 +27,8 @@
*/
#define FEC_IEVENT 0x004 /* Interrupt event reg */
#define FEC_IMASK 0x008 /* Interrupt mask reg */
-#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
-#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
+#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */
#define FEC_ECNTRL 0x024 /* Ethernet control reg */
#define FEC_MII_DATA 0x040 /* MII manage frame reg */
#define FEC_MII_SPEED 0x044 /* MII speed control reg */
@@ -38,6 +38,12 @@
#define FEC_ADDR_LOW 0x0e4 /* Low 32bits MAC address */
#define FEC_ADDR_HIGH 0x0e8 /* High 16bits MAC address */
#define FEC_OPD 0x0ec /* Opcode + Pause duration */
+#define FEC_TXIC0 0xF0 /* Tx Interrupt Coalescing for ring 0 */
+#define FEC_TXIC1 0xF4 /* Tx Interrupt Coalescing for ring 1 */
+#define FEC_TXIC2 0xF8 /* Tx Interrupt Coalescing for ring 2 */
+#define FEC_RXIC0 0x100 /* Rx Interrupt Coalescing for ring 0 */
+#define FEC_RXIC1 0x104 /* Rx Interrupt Coalescing for ring 1 */
+#define FEC_RXIC2 0x108 /* Rx Interrupt Coalescing for ring 2 */
#define FEC_HASH_TABLE_HIGH 0x118 /* High 32bits hash table */
#define FEC_HASH_TABLE_LOW 0x11c /* Low 32bits hash table */
#define FEC_GRP_HASH_TABLE_HIGH 0x120 /* High 32bits hash table */
@@ -45,14 +51,27 @@
#define FEC_X_WMRK 0x144 /* FIFO transmit water mark */
#define FEC_R_BOUND 0x14c /* FIFO receive bound reg */
#define FEC_R_FSTART 0x150 /* FIFO receive start reg */
-#define FEC_R_DES_START 0x180 /* Receive descriptor ring */
-#define FEC_X_DES_START 0x184 /* Transmit descriptor ring */
+#define FEC_R_DES_START_1 0x160 /* Receive descriptor ring 1 */
+#define FEC_X_DES_START_1 0x164 /* Transmit descriptor ring 1 */
+#define FEC_R_DES_START_2 0x16c /* Receive descriptor ring 2 */
+#define FEC_X_DES_START_2 0x170 /* Transmit descriptor ring 2 */
+#define FEC_R_DES_START_0 0x180 /* Receive descriptor ring */
+#define FEC_X_DES_START_0 0x184 /* Transmit descriptor ring */
#define FEC_R_BUFF_SIZE 0x188 /* Maximum receive buff size */
#define FEC_R_FIFO_RSFL 0x190 /* Receive FIFO section full threshold */
#define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */
#define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */
#define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */
#define FEC_RACC 0x1C4 /* Receive Accelerator function */
+#define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */
+#define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */
+#define FEC_DMA_CFG_1 0x1d8 /* DMA class configuration for ring 1 */
+#define FEC_DMA_CFG_2 0x1dc /* DMA class Configuration for ring 2 */
+#define FEC_R_DES_ACTIVE_1 0x1e0 /* Rx descriptor active for ring 1 */
+#define FEC_X_DES_ACTIVE_1 0x1e4 /* Tx descriptor active for ring 1 */
+#define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */
+#define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */
+#define FEC_QOS_SCHEME	0x1f0 /* Set multi-queue QoS scheme */
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
@@ -121,8 +140,12 @@
#define FEC_IEVENT	0x004 /* Interrupt event reg */
#define FEC_IMASK 0x008 /* Interrupt mask reg */
#define FEC_IVEC 0x00c /* Interrupt vec status reg */
-#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
-#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
+#define FEC_R_DES_ACTIVE_0 0x010 /* Receive descriptor reg */
+#define FEC_R_DES_ACTIVE_1 FEC_R_DES_ACTIVE_0
+#define FEC_R_DES_ACTIVE_2 FEC_R_DES_ACTIVE_0
+#define FEC_X_DES_ACTIVE_0 0x014 /* Transmit descriptor reg */
+#define FEC_X_DES_ACTIVE_1 FEC_X_DES_ACTIVE_0
+#define FEC_X_DES_ACTIVE_2 FEC_X_DES_ACTIVE_0
#define FEC_MII_DATA 0x040 /* MII manage frame reg */
#define FEC_MII_SPEED 0x044 /* MII speed control reg */
#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */
@@ -136,11 +159,27 @@
#define FEC_ADDR_HIGH 0x3c4 /* High 16bits MAC address */
#define FEC_GRP_HASH_TABLE_HIGH 0x3c8 /* High 32bits hash table */
#define FEC_GRP_HASH_TABLE_LOW 0x3cc /* Low 32bits hash table */
-#define FEC_R_DES_START 0x3d0 /* Receive descriptor ring */
-#define FEC_X_DES_START 0x3d4 /* Transmit descriptor ring */
+#define FEC_R_DES_START_0 0x3d0 /* Receive descriptor ring */
+#define FEC_R_DES_START_1 FEC_R_DES_START_0
+#define FEC_R_DES_START_2 FEC_R_DES_START_0
+#define FEC_X_DES_START_0 0x3d4 /* Transmit descriptor ring */
+#define FEC_X_DES_START_1 FEC_X_DES_START_0
+#define FEC_X_DES_START_2 FEC_X_DES_START_0
#define FEC_R_BUFF_SIZE 0x3d8 /* Maximum receive buff size */
#define FEC_FIFO_RAM 0x400 /* FIFO RAM buffer */
-
+/* These registers do not exist on this chip;
+ * they are defined only so the code builds.
+ */
+#define FEC_RCMR_1 0xFFF
+#define FEC_RCMR_2 0xFFF
+#define FEC_DMA_CFG_1 0xFFF
+#define FEC_DMA_CFG_2 0xFFF
+#define FEC_TXIC0 0xFFF
+#define FEC_TXIC1 0xFFF
+#define FEC_TXIC2 0xFFF
+#define FEC_RXIC0 0xFFF
+#define FEC_RXIC1 0xFFF
+#define FEC_RXIC2 0xFFF
#endif /* CONFIG_M5272 */
@@ -233,6 +272,44 @@ struct bufdesc_ex {
/* This device has up to three irqs on some platforms */
#define FEC_IRQ_NUM 3
+/* Maximum number of queues supported
+ * ENET with AVB IP can support up to 3 independent tx queues and rx queues.
+ * The user may select any queue count less than or equal to 3.
+ */
+#define FEC_ENET_MAX_TX_QS 3
+#define FEC_ENET_MAX_RX_QS 3
+
+#define FEC_R_DES_START(X) ((X == 1) ? FEC_R_DES_START_1 : \
+ ((X == 2) ? \
+ FEC_R_DES_START_2 : FEC_R_DES_START_0))
+#define FEC_X_DES_START(X) ((X == 1) ? FEC_X_DES_START_1 : \
+ ((X == 2) ? \
+ FEC_X_DES_START_2 : FEC_X_DES_START_0))
+#define FEC_R_DES_ACTIVE(X) ((X == 1) ? FEC_R_DES_ACTIVE_1 : \
+ ((X == 2) ? \
+ FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0))
+#define FEC_X_DES_ACTIVE(X) ((X == 1) ? FEC_X_DES_ACTIVE_1 : \
+ ((X == 2) ? \
+ FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0))
+
+#define FEC_DMA_CFG(X) ((X == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
+
+#define DMA_CLASS_EN (1 << 16)
+#define FEC_RCMR(X) ((X == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
+#define IDLE_SLOPE_MASK 0xFFFF
+#define IDLE_SLOPE_1 0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE_2 0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE(X) ((X == 1) ? (IDLE_SLOPE_1 & IDLE_SLOPE_MASK) : \
+ (IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
+#define RCMR_MATCHEN (0x1 << 16)
+#define RCMR_CMP_CFG(v, n) ((v & 0x7) << (n << 2))
+#define RCMR_CMP_1 (RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
+ RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
+#define RCMR_CMP_2 (RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
+ RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
+#define RCMR_CMP(X) ((X == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
+#define FEC_TX_BD_FTYPE(X) ((X & 0xF) << 20)
+
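To make the classification constants above concrete, here is a standalone
sketch (illustrative only, not part of the patch): RCMR_CMP_CFG() places one
3-bit VLAN priority value into each 4-bit nibble of the compare field, so
ring 1 claims priorities 0-3 and ring 2 claims priorities 4-7.

#include <stdio.h>

#define RCMR_CMP_CFG(v, n)	(((v) & 0x7) << ((n) << 2))

int main(void)
{
	unsigned int cmp1 = RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) |
			    RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3);
	unsigned int cmp2 = RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) |
			    RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3);

	/* one nibble per compare slot: priorities 0-3 vs 4-7 */
	printf("RCMR_CMP_1 = 0x%04x\n", cmp1);	/* 0x3210 */
	printf("RCMR_CMP_2 = 0x%04x\n", cmp2);	/* 0x7654 */
	return 0;
}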
/* The number of Tx and Rx buffers. These are allocated from the page
 * pool. The code may assume these are a power of two, so it is best
 * to keep them that size.
@@ -240,7 +317,7 @@ struct bufdesc_ex {
* the skbuffer directly.
*/
-#define FEC_ENET_RX_PAGES 8
+#define FEC_ENET_RX_PAGES 256
#define FEC_ENET_RX_FRSIZE 2048
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
@@ -256,6 +333,119 @@ struct bufdesc_ex {
#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
#define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+/* Interrupt events/masks. */
+#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
+#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
+#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
+#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
+#define FEC_ENET_TXF_0 ((uint)0x08000000) /* Full frame transmitted */
+#define FEC_ENET_TXF_1 ((uint)0x00000008) /* Full frame transmitted */
+#define FEC_ENET_TXF_2 ((uint)0x00000080) /* Full frame transmitted */
+#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
+#define FEC_ENET_RXF_0 ((uint)0x02000000) /* Full frame received */
+#define FEC_ENET_RXF_1 ((uint)0x00000002) /* Full frame received */
+#define FEC_ENET_RXF_2 ((uint)0x00000020) /* Full frame received */
+#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
+#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
+#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
+#define FEC_ENET_TXF (FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
+#define FEC_ENET_RXF (FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
+#define FEC_ENET_TS_TIMER ((uint)0x00008000)
+
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
+#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
+
+/* ENET interrupt coalescing macro define */
+#define FEC_ITR_CLK_SEL (0x1 << 30)
+#define FEC_ITR_EN (0x1 << 31)
+#define FEC_ITR_ICFT(X) ((X & 0xFF) << 20)
+#define FEC_ITR_ICTT(X) ((X) & 0xFFFF)
+#define FEC_ITR_ICFT_DEFAULT 200 /* Set 200 frame count threshold */
+#define FEC_ITR_ICTT_DEFAULT 1000 /* Set 1000us timer threshold */
+
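The register layout implied by the coalescing macros above can be sketched as
follows (illustrative; it mirrors the value that fec_enet_itr_coal_set(),
added later in this patch, writes, assuming the default thresholds and a tick
count already converted from microseconds):

#include <stdio.h>

#define FEC_ITR_CLK_SEL	(0x1u << 30)		/* enet system clock */
#define FEC_ITR_EN	(0x1u << 31)
#define FEC_ITR_ICFT(x)	(((x) & 0xFF) << 20)	/* frame count threshold */
#define FEC_ITR_ICTT(x)	((x) & 0xFFFF)		/* timer threshold, ticks */

int main(void)
{
	/* 200 frames, 1031 ticks (1000 us at a 66 MHz clock) */
	unsigned int itr = FEC_ITR_EN | FEC_ITR_CLK_SEL |
			   FEC_ITR_ICFT(200) | FEC_ITR_ICTT(1031);

	printf("ITR = 0x%08x\n", itr);	/* 0xcc800407 */
	return 0;
}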
+#define FEC_VLAN_TAG_LEN 0x04
+#define FEC_ETHTYPE_LEN 0x02
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC (1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME (1 << 1)
+/* Controller uses gasket */
+#define FEC_QUIRK_USE_GASKET (1 << 2)
+/* Controller has GBIT support */
+#define FEC_QUIRK_HAS_GBIT (1 << 3)
+/* Controller has extend desc buffer */
+#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
+/* Controller has hardware checksum support */
+#define FEC_QUIRK_HAS_CSUM (1 << 5)
+/* Controller has hardware vlan support */
+#define FEC_QUIRK_HAS_VLAN (1 << 6)
+/* ENET IP errata ERR006358
+ *
+ * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
+ * detected as not set during a prior frame transmission, then the
+ * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
+ * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
+ * frames not being transmitted until there is a 0-to-1 transition on
+ * ENET_TDAR[TDAR].
+ */
+#define FEC_QUIRK_ERR006358 (1 << 7)
+/* ENET IP hw AVB
+ *
+ * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support.
+ * - Two class indicators on receive with configurable priority
+ * - Two class indicators and line speed timer on transmit allowing
+ * implementation class credit based shapers externally
+ * - Additional DMA registers provisioned to allow managing up to 3
+ * independent rings
+ */
+#define FEC_QUIRK_HAS_AVB (1 << 8)
+/* There is a TDAR race condition for multi-queue when the software sets TDAR
+ * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles).
+ * This will cause the udma_tx and udma_tx_arbiter state machines to hang.
+ * This issue exists in the i.MX6SX ENET IP.
+ */
+#define FEC_QUIRK_ERR007885 (1 << 9)
+/* The ENET block guide for the i.MX6SX (PELE) describes one issue:
+ * after setting ENET_ATCR[Capture], some time is needed before the counter
+ * value is captured into the register clock domain.
+ * The wait time is at least 6 clock cycles of the slower of the register
+ * clock and the 1588 clock. The 1588 ts_clk is fixed at 25 MHz and the
+ * register clock is 66 MHz, so the wait time must be greater than 240 ns
+ * (6 * 40 ns).
+ */
+#define FEC_QUIRK_BUG_CAPTURE (1 << 10)
+
+struct fec_enet_priv_tx_q {
+ int index;
+ unsigned char *tx_bounce[TX_RING_SIZE];
+ struct sk_buff *tx_skbuff[TX_RING_SIZE];
+
+ dma_addr_t bd_dma;
+ struct bufdesc *tx_bd_base;
+ uint tx_ring_size;
+
+ unsigned short tx_stop_threshold;
+ unsigned short tx_wake_threshold;
+
+ struct bufdesc *cur_tx;
+ struct bufdesc *dirty_tx;
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_dma;
+};
+
+struct fec_enet_priv_rx_q {
+ int index;
+ struct sk_buff *rx_skbuff[RX_RING_SIZE];
+
+ dma_addr_t bd_dma;
+ struct bufdesc *rx_bd_base;
+ uint rx_ring_size;
+
+ struct bufdesc *cur_rx;
+};
+
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
* tx_bd_base always point to the base of the buffer descriptors. The
* cur_rx and cur_tx point to the currently available buffer.
@@ -272,36 +462,28 @@ struct fec_enet_private {
struct clk *clk_ipg;
struct clk *clk_ahb;
+ struct clk *clk_ref;
struct clk *clk_enet_out;
struct clk *clk_ptp;
bool ptp_clk_on;
struct mutex ptp_clk_mutex;
+ unsigned int num_tx_queues;
+ unsigned int num_rx_queues;
	/* The saved address of a sent-in-place packet/buffer, for skb freeing. */
- unsigned char *tx_bounce[TX_RING_SIZE];
- struct sk_buff *tx_skbuff[TX_RING_SIZE];
- struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
+ struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
- /* CPM dual port RAM relative addresses */
- dma_addr_t bd_dma;
- /* Address of Rx and Tx buffers */
- struct bufdesc *rx_bd_base;
- struct bufdesc *tx_bd_base;
- /* The next free ring entry */
- struct bufdesc *cur_rx, *cur_tx;
- /* The ring entries to be free()ed */
- struct bufdesc *dirty_tx;
+ unsigned int total_tx_ring_size;
+ unsigned int total_rx_ring_size;
- unsigned short bufdesc_size;
- unsigned short tx_ring_size;
- unsigned short rx_ring_size;
- unsigned short tx_stop_threshold;
- unsigned short tx_wake_threshold;
+ unsigned long work_tx;
+ unsigned long work_rx;
+ unsigned long work_ts;
+ unsigned long work_mdio;
- /* Software TSO */
- char *tso_hdrs;
- dma_addr_t tso_hdrs_dma;
+ unsigned short bufdesc_size;
struct platform_device *pdev;
@@ -340,12 +522,34 @@ struct fec_enet_private {
int hwts_tx_en;
struct delayed_work time_keep;
struct regulator *reg_phy;
+
+ unsigned int tx_align;
+ unsigned int rx_align;
+
+ /* hw interrupt coalesce */
+ unsigned int rx_pkts_itr;
+ unsigned int rx_time_itr;
+ unsigned int tx_pkts_itr;
+ unsigned int tx_time_itr;
+ unsigned int itr_clk_rate;
+
+ u32 rx_copybreak;
+
+	/* ptp clock period in ns */
+ unsigned int ptp_inc;
+
+ /* pps */
+ int pps_channel;
+ unsigned int reload_period;
+ int pps_enable;
+ unsigned int next_counter;
};
void fec_ptp_init(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
+uint fec_ptp_check_pps_event(struct fec_enet_private *fep);
/****************************************************************************/
#endif /* FEC_H */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 89355a719625..50a851db2852 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -57,21 +57,19 @@
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/prefetch.h>
#include <asm/cacheflush.h>
#include "fec.h"
static void set_multicast_list(struct net_device *ndev);
-
-#if defined(CONFIG_ARM)
-#define FEC_ALIGNMENT 0xf
-#else
-#define FEC_ALIGNMENT 0x3
-#endif
+static void fec_enet_itr_coal_init(struct net_device *ndev);
#define DRIVER_NAME "fec"
+#define FEC_ENET_GET_QUEUE(_x)	((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
+
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
@@ -80,31 +78,6 @@ static void set_multicast_list(struct net_device *ndev);
#define FEC_ENET_RAFL_V 0x8
#define FEC_ENET_OPD_V 0xFFF0
-/* Controller is ENET-MAC */
-#define FEC_QUIRK_ENET_MAC (1 << 0)
-/* Controller needs driver to swap frame */
-#define FEC_QUIRK_SWAP_FRAME (1 << 1)
-/* Controller uses gasket */
-#define FEC_QUIRK_USE_GASKET (1 << 2)
-/* Controller has GBIT support */
-#define FEC_QUIRK_HAS_GBIT (1 << 3)
-/* Controller has extend desc buffer */
-#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
-/* Controller has hardware checksum support */
-#define FEC_QUIRK_HAS_CSUM (1 << 5)
-/* Controller has hardware vlan support */
-#define FEC_QUIRK_HAS_VLAN (1 << 6)
-/* ENET IP errata ERR006358
- *
- * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
- * detected as not set during a prior frame transmission, then the
- * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
- * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
- * frames not being transmitted until there is a 0-to-1 transition on
- * ENET_TDAR[TDAR].
- */
-#define FEC_QUIRK_ERR006358 (1 << 7)
-
static struct platform_device_id fec_devtype[] = {
{
/* keep it for coldfire */
@@ -128,6 +101,12 @@ static struct platform_device_id fec_devtype[] = {
.name = "mvf600-fec",
.driver_data = FEC_QUIRK_ENET_MAC,
}, {
+ .name = "imx6sx-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+ }, {
/* sentinel */
}
};
@@ -139,6 +118,7 @@ enum imx_fec_type {
IMX28_FEC,
IMX6Q_FEC,
MVF600_FEC,
+ IMX6SX_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -147,6 +127,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
+ { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -175,21 +156,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif
#endif /* CONFIG_M5272 */
-/* Interrupt events/masks. */
-#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
-#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
-#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
-#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
-#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
-#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
-#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
-#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
-#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
-#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
-
-#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
-#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
-
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
*/
#define PKT_MAXBUF_SIZE 1522
@@ -230,6 +196,8 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_PAUSE_FLAG_AUTONEG 0x1
#define FEC_PAUSE_FLAG_ENABLE 0x2
+#define COPYBREAK_DEFAULT 256
+
#define TSO_HEADER_SIZE 128
/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS 100
@@ -242,22 +210,26 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
static int mii_cnt;
static inline
-struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
+ struct fec_enet_private *fep,
+ int queue_id)
{
struct bufdesc *new_bd = bdp + 1;
struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
struct bufdesc_ex *ex_base;
struct bufdesc *base;
int ring_size;
- if (bdp >= fep->tx_bd_base) {
- base = fep->tx_bd_base;
- ring_size = fep->tx_ring_size;
- ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+ if (bdp >= txq->tx_bd_base) {
+ base = txq->tx_bd_base;
+ ring_size = txq->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
} else {
- base = fep->rx_bd_base;
- ring_size = fep->rx_ring_size;
- ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+ base = rxq->rx_bd_base;
+ ring_size = rxq->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
}
if (fep->bufdesc_ex)
@@ -269,22 +241,26 @@ struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_priva
}
static inline
-struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
+ struct fec_enet_private *fep,
+ int queue_id)
{
struct bufdesc *new_bd = bdp - 1;
struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+ struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id];
+ struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id];
struct bufdesc_ex *ex_base;
struct bufdesc *base;
int ring_size;
- if (bdp >= fep->tx_bd_base) {
- base = fep->tx_bd_base;
- ring_size = fep->tx_ring_size;
- ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+ if (bdp >= txq->tx_bd_base) {
+ base = txq->tx_bd_base;
+ ring_size = txq->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)txq->tx_bd_base;
} else {
- base = fep->rx_bd_base;
- ring_size = fep->rx_ring_size;
- ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+ base = rxq->rx_bd_base;
+ ring_size = rxq->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)rxq->rx_bd_base;
}
if (fep->bufdesc_ex)
@@ -300,14 +276,15 @@ static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
}
-static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
+static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep,
+ struct fec_enet_priv_tx_q *txq)
{
int entries;
- entries = ((const char *)fep->dirty_tx -
- (const char *)fep->cur_tx) / fep->bufdesc_size - 1;
+ entries = ((const char *)txq->dirty_tx -
+ (const char *)txq->cur_tx) / fep->bufdesc_size - 1;
- return entries > 0 ? entries : entries + fep->tx_ring_size;
+ return entries > 0 ? entries : entries + txq->tx_ring_size;
}
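The byte-offset arithmetic above implements standard ring accounting: the
free count is the gap from cur_tx forward to dirty_tx, minus one reserved
slot, wrapping when dirty_tx is behind. A standalone index-based sketch
(illustrative):

#include <stdio.h>

static int free_txdesc(int dirty, int cur, int ring_size)
{
	int entries = dirty - cur - 1;	/* one slot is kept unused */

	return entries > 0 ? entries : entries + ring_size;
}

int main(void)
{
	printf("%d\n", free_txdesc(9, 3, 16));	/* 5 */
	printf("%d\n", free_txdesc(0, 5, 16));	/* 10, wrapped */
	return 0;
}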
static void *swap_buffer(void *bufaddr, int len)
@@ -324,22 +301,26 @@ static void *swap_buffer(void *bufaddr, int len)
static void fec_dump(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct bufdesc *bdp = fep->tx_bd_base;
- unsigned int index = 0;
+ struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+ int index = 0;
netdev_info(ndev, "TX ring dump\n");
pr_info("Nr SC addr len SKB\n");
+ txq = fep->tx_queue[0];
+ bdp = txq->tx_bd_base;
+
do {
pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
index,
- bdp == fep->cur_tx ? 'S' : ' ',
- bdp == fep->dirty_tx ? 'H' : ' ',
+ bdp == txq->cur_tx ? 'S' : ' ',
+ bdp == txq->dirty_tx ? 'H' : ' ',
bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
- fep->tx_skbuff[index]);
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ txq->tx_skbuff[index]);
+ bdp = fec_enet_get_nextdesc(bdp, fep, 0);
index++;
- } while (bdp != fep->tx_bd_base);
+ } while (bdp != txq->tx_bd_base);
}
static inline bool is_ipv4_pkt(struct sk_buff *skb)
@@ -365,14 +346,17 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
}
static int
-fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
+fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb,
+ struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- struct bufdesc *bdp = fep->cur_tx;
+ struct bufdesc *bdp = txq->cur_tx;
struct bufdesc_ex *ebdp;
int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned short queue = skb_get_queue_mapping(skb);
int frag, frag_len;
unsigned short status;
unsigned int estatus = 0;
@@ -384,7 +368,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
for (frag = 0; frag < nr_frags; frag++) {
this_frag = &skb_shinfo(skb)->frags[frag];
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
ebdp = (struct bufdesc_ex *)bdp;
status = bdp->cbd_sc;
@@ -404,6 +388,8 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
}
if (fep->bufdesc_ex) {
+ if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(queue);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
@@ -412,11 +398,11 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
- index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
- if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & fep->tx_align ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
- memcpy(fep->tx_bounce[index], bufaddr, frag_len);
- bufaddr = fep->tx_bounce[index];
+ memcpy(txq->tx_bounce[index], bufaddr, frag_len);
+ bufaddr = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, frag_len);
@@ -436,21 +422,22 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
bdp->cbd_sc = status;
}
- fep->cur_tx = bdp;
+ txq->cur_tx = bdp;
return 0;
dma_mapping_error:
- bdp = fep->cur_tx;
+ bdp = txq->cur_tx;
for (i = 0; i < frag; i++) {
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
bdp->cbd_datlen, DMA_TO_DEVICE);
}
return NETDEV_TX_OK;
}
-static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
@@ -461,12 +448,13 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
dma_addr_t addr;
unsigned short status;
unsigned short buflen;
+ unsigned short queue;
unsigned int estatus = 0;
unsigned int index;
int entries_free;
int ret;
- entries_free = fec_enet_get_free_txdesc_num(fep);
+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
if (entries_free < MAX_SKB_FRAGS + 1) {
dev_kfree_skb_any(skb);
if (net_ratelimit())
@@ -481,7 +469,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
}
/* Fill in a Tx ring entry */
- bdp = fep->cur_tx;
+ bdp = txq->cur_tx;
status = bdp->cbd_sc;
status &= ~BD_ENET_TX_STATS;
@@ -489,11 +477,12 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
bufaddr = skb->data;
buflen = skb_headlen(skb);
- index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
- if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ queue = skb_get_queue_mapping(skb);
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & fep->tx_align ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
- memcpy(fep->tx_bounce[index], skb->data, buflen);
- bufaddr = fep->tx_bounce[index];
+ memcpy(txq->tx_bounce[index], skb->data, buflen);
+ bufaddr = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, buflen);
@@ -509,7 +498,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
}
if (nr_frags) {
- ret = fec_enet_txq_submit_frag_skb(skb, ndev);
+ ret = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
if (ret)
return ret;
} else {
@@ -530,6 +519,9 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
fep->hwts_tx_en))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(queue);
+
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -537,10 +529,10 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
ebdp->cbd_esc = estatus;
}
- last_bdp = fep->cur_tx;
- index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
+ last_bdp = txq->cur_tx;
+ index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
/* Save skb pointer */
- fep->tx_skbuff[index] = skb;
+ txq->tx_skbuff[index] = skb;
bdp->cbd_datlen = buflen;
bdp->cbd_bufaddr = addr;
@@ -552,27 +544,29 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
bdp->cbd_sc = status;
/* If this was the last BD in the ring, start at the beginning again. */
- bdp = fec_enet_get_nextdesc(last_bdp, fep);
+ bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
skb_tx_timestamp(skb);
- fep->cur_tx = bdp;
+ txq->cur_tx = bdp;
/* Trigger transmission start */
- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
return 0;
}
static int
-fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
- struct bufdesc *bdp, int index, char *data,
- int size, bool last_tcp, bool is_last)
+fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
+ struct net_device *ndev,
+ struct bufdesc *bdp, int index, char *data,
+ int size, bool last_tcp, bool is_last)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
+ unsigned short queue = skb_get_queue_mapping(skb);
unsigned short status;
unsigned int estatus = 0;
dma_addr_t addr;
@@ -582,10 +576,10 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
- if (((unsigned long) data) & FEC_ALIGNMENT ||
+ if (((unsigned long) data) & fep->tx_align ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
- memcpy(fep->tx_bounce[index], data, size);
- data = fep->tx_bounce[index];
+ memcpy(txq->tx_bounce[index], data, size);
+ data = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(data, size);
@@ -603,6 +597,8 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
bdp->cbd_bufaddr = addr;
if (fep->bufdesc_ex) {
+ if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(queue);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
@@ -624,14 +620,16 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
}
static int
-fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
- struct bufdesc *bdp, int index)
+fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb, struct net_device *ndev,
+ struct bufdesc *bdp, int index)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
+ unsigned short queue = skb_get_queue_mapping(skb);
void *bufaddr;
unsigned long dmabuf;
unsigned short status;
@@ -641,12 +639,12 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
status &= ~BD_ENET_TX_STATS;
status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
- bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
- dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
- if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
+ dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
+ if (((unsigned long)bufaddr) & fep->tx_align ||
id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
- memcpy(fep->tx_bounce[index], skb->data, hdr_len);
- bufaddr = fep->tx_bounce[index];
+ memcpy(txq->tx_bounce[index], skb->data, hdr_len);
+ bufaddr = txq->tx_bounce[index];
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(bufaddr, hdr_len);
@@ -665,6 +663,8 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
bdp->cbd_datlen = hdr_len;
if (fep->bufdesc_ex) {
+ if (id_entry->driver_data & FEC_QUIRK_HAS_AVB)
+ estatus |= FEC_TX_BD_FTYPE(queue);
if (skb->ip_summed == CHECKSUM_PARTIAL)
estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
ebdp->cbd_bdu = 0;
@@ -676,17 +676,22 @@ fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
return 0;
}
-static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
+static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
+ struct sk_buff *skb,
+ struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
int total_len, data_left;
- struct bufdesc *bdp = fep->cur_tx;
+ struct bufdesc *bdp = txq->cur_tx;
+ unsigned short queue = skb_get_queue_mapping(skb);
struct tso_t tso;
unsigned int index = 0;
int ret;
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
- if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
+ if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) {
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "NOT enough BD for TSO!\n");
@@ -706,14 +711,14 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
while (total_len > 0) {
char *hdr;
- index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
total_len -= data_left;
/* prepare packet headers: MAC + IP + TCP */
- hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
+ hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
- ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
+ ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
if (ret)
goto err_release;
@@ -721,10 +726,13 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
int size;
size = min_t(int, tso.size, data_left);
- bdp = fec_enet_get_nextdesc(bdp, fep);
- index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
- ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
- size, size == data_left,
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
+ index = fec_enet_get_bd_index(txq->tx_bd_base,
+ bdp, fep);
+ ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
+ bdp, index,
+ tso.data, size,
+ size == data_left,
total_len == 0);
if (ret)
goto err_release;
@@ -733,17 +741,22 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
tso_build_data(skb, &tso, size);
}
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
}
/* Save skb pointer */
- fep->tx_skbuff[index] = skb;
+ txq->tx_skbuff[index] = skb;
skb_tx_timestamp(skb);
- fep->cur_tx = bdp;
+ txq->cur_tx = bdp;
/* Trigger transmission start */
- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
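+	/* ERR007885 workaround: on affected silicon, write TDAR only
+	 * after sampling it as zero (up to four reads) to stay out of
+	 * the set/clear race window described in fec.h.
+	 */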
+ if (!(id_entry->driver_data & FEC_QUIRK_ERR007885) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
+ !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
return 0;
@@ -757,18 +770,25 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
int entries_free;
+ unsigned short queue;
+ struct fec_enet_priv_tx_q *txq;
+ struct netdev_queue *nq;
int ret;
+ queue = skb_get_queue_mapping(skb);
+ txq = fep->tx_queue[queue];
+ nq = netdev_get_tx_queue(ndev, queue);
+
if (skb_is_gso(skb))
- ret = fec_enet_txq_submit_tso(skb, ndev);
+ ret = fec_enet_txq_submit_tso(txq, skb, ndev);
else
- ret = fec_enet_txq_submit_skb(skb, ndev);
+ ret = fec_enet_txq_submit_skb(txq, skb, ndev);
if (ret)
return ret;
- entries_free = fec_enet_get_free_txdesc_num(fep);
- if (entries_free <= fep->tx_stop_threshold)
- netif_stop_queue(ndev);
+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+ if (entries_free <= txq->tx_stop_threshold)
+ netif_tx_stop_queue(nq);
return NETDEV_TX_OK;
}
@@ -778,46 +798,111 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static void fec_enet_bd_init(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned int i;
+ unsigned int q;
- /* Initialize the receive buffer descriptors. */
- bdp = fep->rx_bd_base;
- for (i = 0; i < fep->rx_ring_size; i++) {
+ for (q = 0; q < fep->num_rx_queues; q++) {
+ /* Initialize the receive buffer descriptors. */
+ rxq = fep->rx_queue[q];
+ bdp = rxq->rx_bd_base;
- /* Initialize the BD for every fragment in the page. */
- if (bdp->cbd_bufaddr)
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
- else
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ if (bdp->cbd_bufaddr)
+ bdp->cbd_sc = BD_ENET_RX_EMPTY;
+ else
+ bdp->cbd_sc = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep, q);
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ rxq->cur_rx = rxq->rx_bd_base;
+ }
+
+ for (q = 0; q < fep->num_tx_queues; q++) {
+ /* ...and the same for transmit */
+ txq = fep->tx_queue[q];
+ bdp = txq->tx_bd_base;
+ txq->cur_tx = bdp;
+
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ /* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ if (txq->tx_skbuff[i]) {
+ dev_kfree_skb_any(txq->tx_skbuff[i]);
+ txq->tx_skbuff[i] = NULL;
+ }
+ bdp->cbd_bufaddr = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep, q);
+ bdp->cbd_sc |= BD_SC_WRAP;
+ txq->dirty_tx = bdp;
}
+}
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep);
- bdp->cbd_sc |= BD_SC_WRAP;
+static void fec_enet_active_rxring(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
- fep->cur_rx = fep->rx_bd_base;
+ for (i = 0; i < fep->num_rx_queues; i++)
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
+}
- /* ...and the same for transmit */
- bdp = fep->tx_bd_base;
- fep->cur_tx = bdp;
- for (i = 0; i < fep->tx_ring_size; i++) {
+static void fec_enet_enable_ring(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ int i;
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- if (fep->tx_skbuff[i]) {
- dev_kfree_skb_any(fep->tx_skbuff[i]);
- fep->tx_skbuff[i] = NULL;
- }
- bdp->cbd_bufaddr = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ rxq = fep->rx_queue[i];
+ writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i));
+
+		/* Enable receive classification match so frames whose
+		 * VLAN priority matches are steered to ring 1/2.
+		 */
+ if (i)
+ writel(RCMR_MATCHEN | RCMR_CMP(i),
+ fep->hwp + FEC_RCMR(i));
}
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep);
- bdp->cbd_sc |= BD_SC_WRAP;
- fep->dirty_tx = bdp;
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+ writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i));
+
+		/* Enable the class DMA and program the credit-based
+		 * shaper idle slope for ring 1/2.
+		 */
+ if (i)
+ writel(DMA_CLASS_EN | IDLE_SLOPE(i),
+ fep->hwp + FEC_DMA_CFG(i));
+ }
+}
+
+static void fec_enet_reset_skb(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct fec_enet_priv_tx_q *txq;
+ int i, j;
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+
+ for (j = 0; j < txq->tx_ring_size; j++) {
+ if (txq->tx_skbuff[j]) {
+ dev_kfree_skb_any(txq->tx_skbuff[j]);
+ txq->tx_skbuff[j] = NULL;
+ }
+ }
+ }
}
/*
@@ -831,15 +916,21 @@ fec_restart(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- int i;
u32 val;
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
- /* Whack a reset. We should wait for this. */
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
+	/* Whack a reset. We should wait for this.
+	 * On the i.MX6SX SoC the ENET block is on the AXI bus, so we
+	 * disable the MAC instead of resetting it.
+	 */
+ if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
/*
* enet-mac reset will reset mac address registers too,
@@ -859,22 +950,10 @@ fec_restart(struct net_device *ndev)
fec_enet_bd_init(ndev);
- /* Set receive and transmit descriptor base. */
- writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
- if (fep->bufdesc_ex)
- writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
- * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
- else
- writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
- * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
-
+ fec_enet_enable_ring(ndev);
- for (i = 0; i <= TX_RING_MOD_MASK; i++) {
- if (fep->tx_skbuff[i]) {
- dev_kfree_skb_any(fep->tx_skbuff[i]);
- fep->tx_skbuff[i] = NULL;
- }
- }
+ /* Reset tx SKB buffers. */
+ fec_enet_reset_skb(ndev);
/* Enable MII mode */
if (fep->full_duplex == DUPLEX_FULL) {
@@ -996,13 +1075,17 @@ fec_restart(struct net_device *ndev)
/* And last, enable the transmit and receive processing */
writel(ecntl, fep->hwp + FEC_ECNTRL);
- writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+ fec_enet_active_rxring(ndev);
if (fep->bufdesc_ex)
fec_ptp_start_cyclecounter(ndev);
/* Enable interrupts we wish to service */
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+
+ /* Init the interrupt coalescing */
+ fec_enet_itr_coal_init(ndev);
+
}
static void
@@ -1021,9 +1104,16 @@ fec_stop(struct net_device *ndev)
netdev_err(ndev, "Graceful transmit stop did not complete!\n");
}
- /* Whack a reset. We should wait for this. */
- writel(1, fep->hwp + FEC_ECNTRL);
- udelay(10);
+	/* Whack a reset. We should wait for this.
+	 * On the i.MX6SX SoC the ENET block is on the AXI bus, so we
+	 * disable the MAC instead of resetting it.
+	 */
+ if (id_entry && id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ writel(0, fep->hwp + FEC_ECNTRL);
+ } else {
+ writel(1, fep->hwp + FEC_ECNTRL);
+ udelay(10);
+ }
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
@@ -1081,37 +1171,45 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
}
static void
-fec_enet_tx(struct net_device *ndev)
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
struct fec_enet_private *fep;
struct bufdesc *bdp;
unsigned short status;
struct sk_buff *skb;
+ struct fec_enet_priv_tx_q *txq;
+ struct netdev_queue *nq;
int index = 0;
int entries_free;
fep = netdev_priv(ndev);
- bdp = fep->dirty_tx;
+
+	queue_id = FEC_ENET_GET_QUEUE(queue_id);
+
+ txq = fep->tx_queue[queue_id];
+	nq = netdev_get_tx_queue(ndev, queue_id);
+ bdp = txq->dirty_tx;
/* get next bdp of dirty_tx */
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
/* current queue is empty */
- if (bdp == fep->cur_tx)
+ if (bdp == txq->cur_tx)
break;
- index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
- skb = fep->tx_skbuff[index];
- fep->tx_skbuff[index] = NULL;
- if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
+ skb = txq->tx_skbuff[index];
+ txq->tx_skbuff[index] = NULL;
+ if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
bdp->cbd_datlen, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
if (!skb) {
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
continue;
}
@@ -1153,23 +1251,81 @@ fec_enet_tx(struct net_device *ndev)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
- fep->dirty_tx = bdp;
+ txq->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
/* Since we have freed up a buffer, the ring is no longer full
*/
if (netif_queue_stopped(ndev)) {
- entries_free = fec_enet_get_free_txdesc_num(fep);
- if (entries_free >= fep->tx_wake_threshold)
- netif_wake_queue(ndev);
+ entries_free = fec_enet_get_free_txdesc_num(fep, txq);
+ if (entries_free >= txq->tx_wake_threshold)
+ netif_tx_wake_queue(nq);
}
}
	/* ERR006358: Keep the transmitter going */
- if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
- writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+ if (bdp != txq->cur_tx &&
+ readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
+}
+
+static void
+fec_enet_tx(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ u16 queue_id;
+	/* First process the class A queue, then class B, then the best-effort queue */
+ for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
+ clear_bit(queue_id, &fep->work_tx);
+ fec_enet_tx_queue(ndev, queue_id);
+ }
+ return;
+}
+
+static int
+fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int off;
+
+ off = ((unsigned long)skb->data) & fep->rx_align;
+ if (off)
+ skb_reserve(skb, fep->rx_align + 1 - off);
+
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
+ FEC_ENET_RX_FRSIZE - fep->rx_align,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ if (net_ratelimit())
+ netdev_err(ndev, "Rx DMA memory map failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
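The skb_reserve() arithmetic above advances skb->data to the next aligned
boundary before the buffer is DMA-mapped. A standalone sketch (illustrative,
assuming rx_align is an alignment mask such as 0x3f for 64-byte alignment):

#include <stdio.h>

int main(void)
{
	unsigned long rx_align = 0x3f;	/* assumed 64-byte alignment mask */
	unsigned long data = 0x1000d;	/* misaligned buffer address */
	unsigned long off = data & rx_align;

	if (off)
		data += rx_align + 1 - off;	/* what skb_reserve() does */

	printf("0x%lx\n", data);	/* 0x10040, 64-byte aligned */
	return 0;
}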
+
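+/* For frames shorter than rx_copybreak it is cheaper to copy the payload
+ * into a small freshly allocated skb and recycle the mapped DMA buffer in
+ * place than to unmap the buffer and allocate a replacement.
+ */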
+static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+ struct bufdesc *bdp, u32 length)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct sk_buff *new_skb;
+
+ if (length > fep->rx_copybreak)
+ return false;
+
+ new_skb = netdev_alloc_skb(ndev, length);
+ if (!new_skb)
+ return false;
+
+ dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE - fep->rx_align,
+ DMA_FROM_DEVICE);
+ memcpy(new_skb->data, (*skb)->data, length);
+ *skb = new_skb;
+
+ return true;
}
/* During a receive, the cur_rx points to the current incoming buffer.
@@ -1178,14 +1334,16 @@ fec_enet_tx(struct net_device *ndev)
* effectively tossing the packet.
*/
static int
-fec_enet_rx(struct net_device *ndev, int budget)
+fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
+ struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned short status;
- struct sk_buff *skb;
+ struct sk_buff *skb_new = NULL;
+ struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
int pkt_received = 0;
@@ -1193,15 +1351,18 @@ fec_enet_rx(struct net_device *ndev, int budget)
bool vlan_packet_rcvd = false;
u16 vlan_tag;
int index = 0;
+ bool is_copybreak;
#ifdef CONFIG_M532x
flush_cache_all();
#endif
+	queue_id = FEC_ENET_GET_QUEUE(queue_id);
+ rxq = fep->rx_queue[queue_id];
/* First, grab all of the stats for the incoming packet.
* These get messed up if we get called due to a busy condition.
*/
- bdp = fep->cur_rx;
+ bdp = rxq->cur_rx;
while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
@@ -1215,7 +1376,6 @@ fec_enet_rx(struct net_device *ndev, int budget)
if ((status & BD_ENET_RX_LAST) == 0)
netdev_err(ndev, "rcv is not +last\n");
- writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
/* Check for errors. */
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
@@ -1248,11 +1408,28 @@ fec_enet_rx(struct net_device *ndev, int budget)
pkt_len = bdp->cbd_datlen;
ndev->stats.rx_bytes += pkt_len;
- index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
- data = fep->rx_skbuff[index]->data;
- dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
- FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+ index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
+ skb = rxq->rx_skbuff[index];
+
+ /* The packet length includes FCS, but we don't want to
+ * include that when passing upstream as it messes up
+ * bridging applications.
+ */
+ is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4);
+ if (!is_copybreak) {
+ skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
+ if (unlikely(!skb_new)) {
+ ndev->stats.rx_dropped++;
+ goto rx_processing_done;
+ }
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE - fep->rx_align,
+ DMA_FROM_DEVICE);
+ }
+ prefetch(skb->data - NET_IP_ALIGN);
+ skb_put(skb, pkt_len - 4);
+ data = skb->data;
if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
swap_buffer(data, pkt_len);
@@ -1264,66 +1441,53 @@ fec_enet_rx(struct net_device *ndev, int budget)
/* If this is a VLAN packet remove the VLAN Tag */
vlan_packet_rcvd = false;
if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
+ fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
/* Push and remove the vlan tag */
struct vlan_hdr *vlan_header =
(struct vlan_hdr *) (data + ETH_HLEN);
vlan_tag = ntohs(vlan_header->h_vlan_TCI);
- pkt_len -= VLAN_HLEN;
vlan_packet_rcvd = true;
+
+ skb_copy_to_linear_data_offset(skb, VLAN_HLEN,
+ data, (2 * ETH_ALEN));
+ skb_pull(skb, VLAN_HLEN);
}
- /* This does 16 byte alignment, exactly what we need.
- * The packet length includes FCS, but we don't want to
- * include that when passing upstream as it messes up
- * bridging applications.
- */
- skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
+ skb->protocol = eth_type_trans(skb, ndev);
- if (unlikely(!skb)) {
- ndev->stats.rx_dropped++;
- } else {
- int payload_offset = (2 * ETH_ALEN);
- skb_reserve(skb, NET_IP_ALIGN);
- skb_put(skb, pkt_len - 4); /* Make room */
-
- /* Extract the frame data without the VLAN header. */
- skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN));
- if (vlan_packet_rcvd)
- payload_offset = (2 * ETH_ALEN) + VLAN_HLEN;
- skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN),
- data + payload_offset,
- pkt_len - 4 - (2 * ETH_ALEN));
-
- skb->protocol = eth_type_trans(skb, ndev);
-
- /* Get receive timestamp from the skb */
- if (fep->hwts_rx_en && fep->bufdesc_ex)
- fec_enet_hwtstamp(fep, ebdp->ts,
- skb_hwtstamps(skb));
-
- if (fep->bufdesc_ex &&
- (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
- if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
- /* don't check it */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- skb_checksum_none_assert(skb);
- }
+ /* Get receive timestamp from the skb */
+ if (fep->hwts_rx_en && fep->bufdesc_ex)
+ fec_enet_hwtstamp(fep, ebdp->ts,
+ skb_hwtstamps(skb));
+
+ if (fep->bufdesc_ex &&
+ (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
+ if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+ /* don't check it */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
}
+ }
+
+ /* Handle received VLAN packets */
+ if (vlan_packet_rcvd)
+ __vlan_hwaccel_put_tag(skb,
+ htons(ETH_P_8021Q),
+ vlan_tag);
- /* Handle received VLAN packets */
- if (vlan_packet_rcvd)
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
+ napi_gro_receive(&fep->napi, skb);
- napi_gro_receive(&fep->napi, skb);
+ if (is_copybreak) {
+ dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE - fep->rx_align,
+ DMA_FROM_DEVICE);
+ } else {
+ rxq->rx_skbuff[index] = skb_new;
+ fec_enet_new_rxbdp(ndev, bdp, skb_new);
}
- dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
- FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
@@ -1341,19 +1505,56 @@ rx_processing_done:
}
/* Update BD pointer to next entry */
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
/* Doing this here will keep the FEC running while we process
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
*/
- writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+ writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
}
- fep->cur_rx = bdp;
+ rxq->cur_rx = bdp;
+ return pkt_received;
+}
+static int
+fec_enet_rx(struct net_device *ndev, int budget)
+{
+ int pkt_received = 0;
+ u16 queue_id;
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
+ clear_bit(queue_id, &fep->work_rx);
+ pkt_received += fec_enet_rx_queue(ndev,
+ budget - pkt_received, queue_id);
+ }
return pkt_received;
}
+static bool
+fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
+{
+ if (int_events == 0)
+ return false;
+
+ if (int_events & FEC_ENET_RXF)
+ fep->work_rx |= (1 << 2);
+ if (int_events & FEC_ENET_RXF_1)
+ fep->work_rx |= (1 << 0);
+ if (int_events & FEC_ENET_RXF_2)
+ fep->work_rx |= (1 << 1);
+
+ if (int_events & FEC_ENET_TXF)
+ fep->work_tx |= (1 << 2);
+ if (int_events & FEC_ENET_TXF_1)
+ fep->work_tx |= (1 << 0);
+ if (int_events & FEC_ENET_TXF_2)
+ fep->work_tx |= (1 << 1);
+
+ return true;
+}
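The bit positions chosen above deliberately invert the ring numbering: ring 1
(class A) sits at bit 0 and ring 2 (class B) at bit 1, so a lowest-bit-first
walk services the AVB rings before the best-effort ring, and
FEC_ENET_GET_QUEUE() undoes the mapping. A standalone sketch (illustrative)
of the resulting service order:

#include <stdio.h>

#define FEC_ENET_GET_QUEUE(x)	((x) == 0 ? 1 : ((x) == 1 ? 2 : 0))

int main(void)
{
	unsigned long work = 0x7;	/* all three queues pending */
	int bit;

	for (bit = 0; bit < 3; bit++)	/* lowest bit first */
		if (work & (1UL << bit))
			printf("service ring %d\n", FEC_ENET_GET_QUEUE(bit));

	/* prints: ring 1, then ring 2, then ring 0 */
	return 0;
}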
+
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
@@ -1365,6 +1566,7 @@ fec_enet_interrupt(int irq, void *dev_id)
int_events = readl(fep->hwp + FEC_IEVENT);
writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
+ fec_enet_collect_events(fep, int_events);
if (int_events & napi_mask) {
ret = IRQ_HANDLED;
@@ -1379,6 +1581,9 @@ fec_enet_interrupt(int irq, void *dev_id)
complete(&fep->mdio_done);
}
+ if (fep->ptp_clock)
+ fec_ptp_check_pps_event(fep);
+
return ret;
}
@@ -1621,6 +1826,11 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
}
mutex_unlock(&fep->ptp_clk_mutex);
}
+ if (fep->clk_ref) {
+ ret = clk_prepare_enable(fep->clk_ref);
+ if (ret)
+ goto failed_clk_ref;
+ }
} else {
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
@@ -1632,9 +1842,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
fep->ptp_clk_on = false;
mutex_unlock(&fep->ptp_clk_mutex);
}
+ if (fep->clk_ref)
+ clk_disable_unprepare(fep->clk_ref);
}
return 0;
+
+failed_clk_ref:
+ if (fep->clk_ref)
+ clk_disable_unprepare(fep->clk_ref);
failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
@@ -1674,13 +1890,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
continue;
if (dev_id--)
continue;
- strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+ strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
break;
}
if (phy_id >= PHY_MAX_ADDR) {
netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
- strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+ strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
phy_id = 0;
}
@@ -2062,12 +2278,179 @@ static int fec_enet_nway_reset(struct net_device *dev)
return genphy_restart_aneg(phydev);
}
+/* The ITR clock source is the enet system clock (clk_ahb).
+ * The TCTT unit is cycle_ns * 64 cycles,
+ * so the ICTT value = X us / (cycle_ns * 64).
+ */
+static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ return us * (fep->itr_clk_rate / 64000) / 1000;
+}
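A worked example of this conversion (illustrative, assuming itr_clk_rate is
the 66 MHz register clock mentioned in the FEC_QUIRK_BUG_CAPTURE comment):
the default 1000 us timer threshold becomes 1031 ITR ticks, which fits the
16-bit ICTT field.

#include <stdio.h>

static int us_to_itr_clock(unsigned int itr_clk_rate, int us)
{
	return us * (itr_clk_rate / 64000) / 1000;
}

int main(void)
{
	/* 66000000 / 64000 = 1031 ticks per millisecond; 1000 us -> 1031 */
	printf("%d\n", us_to_itr_clock(66000000, 1000));
	return 0;
}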
+
+/* Set threshold for interrupt coalescing */
+static void fec_enet_itr_coal_set(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int rx_itr, tx_itr;
+
+ if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ return;
+
+ /* Must be greater than zero to avoid unpredictable behavior */
+ if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
+ !fep->tx_time_itr || !fep->tx_pkts_itr)
+ return;
+
+ /* Select enet system clock as Interrupt Coalescing
+ * timer Clock Source
+ */
+ rx_itr = FEC_ITR_CLK_SEL;
+ tx_itr = FEC_ITR_CLK_SEL;
+
+ /* set ICFT and ICTT */
+ rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+ rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
+ tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+ tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+
+ rx_itr |= FEC_ITR_EN;
+ tx_itr |= FEC_ITR_EN;
+
+ writel(tx_itr, fep->hwp + FEC_TXIC0);
+ writel(rx_itr, fep->hwp + FEC_RXIC0);
+ writel(tx_itr, fep->hwp + FEC_TXIC1);
+ writel(rx_itr, fep->hwp + FEC_RXIC1);
+ writel(tx_itr, fep->hwp + FEC_TXIC2);
+ writel(rx_itr, fep->hwp + FEC_RXIC2);
+}
+
+static int
+fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+
+ if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ return -EOPNOTSUPP;
+
+ ec->rx_coalesce_usecs = fep->rx_time_itr;
+ ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
+
+ ec->tx_coalesce_usecs = fep->tx_time_itr;
+ ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
+
+ return 0;
+}
+
+static int
+fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+
+ unsigned int cycle;
+
+ if (!(id_entry->driver_data & FEC_QUIRK_HAS_AVB))
+ return -EOPNOTSUPP;
+
+ if (ec->rx_max_coalesced_frames > 255) {
+ pr_err("Rx coalesced frames exceed hardware limitation\n");
+ return -EINVAL;
+ }
+
+ if (ec->tx_max_coalesced_frames > 255) {
+ pr_err("Tx coalesced frames exceed hardware limitation\n");
+ return -EINVAL;
+ }
+
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
+ if (cycle > 0xFFFF) {
+ pr_err("Rx coalesced usecs exceed hardware limitation\n");
+ return -EINVAL;
+ }
+
+ cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
+ if (cycle > 0xFFFF) {
+ pr_err("Tx coalesced usecs exceed hardware limitation\n");
+ return -EINVAL;
+ }
+
+ fep->rx_time_itr = ec->rx_coalesce_usecs;
+ fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
+
+ fep->tx_time_itr = ec->tx_coalesce_usecs;
+ fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
+
+ fec_enet_itr_coal_set(ndev);
+
+ return 0;
+}
+
+static void fec_enet_itr_coal_init(struct net_device *ndev)
+{
+ struct ethtool_coalesce ec;
+
+ ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+ ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+ ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+ ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+ fec_enet_set_coalesce(ndev, &ec);
+}
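+
+/* These defaults can later be changed from userspace with the standard
+ * ethtool coalescing controls, e.g. (illustrative values):
+ *	ethtool -C eth0 rx-usecs 1000 rx-frames 200
+ * subject to the 255-frame and 0xFFFF-tick limits checked in
+ * fec_enet_set_coalesce().
+ */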
+
+static int fec_enet_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = fep->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int fec_enet_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ fep->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
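+
+/* Assuming an ethtool build with tunable support, rx_copybreak can be
+ * inspected and tuned from userspace, e.g.:
+ *	ethtool --get-tunable eth0 rx-copybreak
+ *	ethtool --set-tunable eth0 rx-copybreak 256
+ */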
+
static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
.nway_reset = fec_enet_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_coalesce = fec_enet_get_coalesce,
+ .set_coalesce = fec_enet_set_coalesce,
#ifndef CONFIG_M5272
.get_pauseparam = fec_enet_get_pauseparam,
.set_pauseparam = fec_enet_set_pauseparam,
@@ -2076,6 +2459,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_sset_count = fec_enet_get_sset_count,
#endif
.get_ts_info = fec_enet_get_ts_info,
+ .get_tunable = fec_enet_get_tunable,
+ .set_tunable = fec_enet_set_tunable,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2105,55 +2490,136 @@ static void fec_enet_free_buffers(struct net_device *ndev)
unsigned int i;
struct sk_buff *skb;
struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
+ unsigned int q;
+
+ for (q = 0; q < fep->num_rx_queues; q++) {
+ rxq = fep->rx_queue[q];
+ bdp = rxq->rx_bd_base;
+ for (i = 0; i < rxq->rx_ring_size; i++) {
+ skb = rxq->rx_skbuff[i];
+ rxq->rx_skbuff[i] = NULL;
+ if (skb) {
+ dma_unmap_single(&fep->pdev->dev,
+ bdp->cbd_bufaddr,
+ FEC_ENET_RX_FRSIZE - fep->rx_align,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+ bdp = fec_enet_get_nextdesc(bdp, fep, q);
+ }
+ }
- bdp = fep->rx_bd_base;
- for (i = 0; i < fep->rx_ring_size; i++) {
- skb = fep->rx_skbuff[i];
- fep->rx_skbuff[i] = NULL;
- if (skb) {
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
+ for (q = 0; q < fep->num_tx_queues; q++) {
+ txq = fep->tx_queue[q];
+ bdp = txq->tx_bd_base;
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ kfree(txq->tx_bounce[i]);
+ txq->tx_bounce[i] = NULL;
+ skb = txq->tx_skbuff[i];
+ txq->tx_skbuff[i] = NULL;
dev_kfree_skb(skb);
}
- bdp = fec_enet_get_nextdesc(bdp, fep);
}
+}
+
+static void fec_enet_free_queue(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ struct fec_enet_priv_tx_q *txq;
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
+ txq = fep->tx_queue[i];
+ dma_free_coherent(NULL,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ txq->tso_hdrs,
+ txq->tso_hdrs_dma);
+ }
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ if (fep->rx_queue[i])
+ kfree(fep->rx_queue[i]);
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fep->tx_queue[i])
+ kfree(fep->tx_queue[i]);
+}
+
+static int fec_enet_alloc_queue(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int i;
+ int ret = 0;
+ struct fec_enet_priv_tx_q *txq;
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+
+ fep->tx_queue[i] = txq;
+ txq->tx_ring_size = TX_RING_SIZE;
+ fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size;
+
+ txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
+ txq->tx_wake_threshold =
+ (txq->tx_ring_size - txq->tx_stop_threshold) / 2;
+
+ txq->tso_hdrs = dma_alloc_coherent(NULL,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_dma,
+ GFP_KERNEL);
+ if (!txq->tso_hdrs) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
+ }
+
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
+ GFP_KERNEL);
+ if (!fep->rx_queue[i]) {
+ ret = -ENOMEM;
+ goto alloc_failed;
+ }
- bdp = fep->tx_bd_base;
- for (i = 0; i < fep->tx_ring_size; i++) {
- kfree(fep->tx_bounce[i]);
- fep->tx_bounce[i] = NULL;
- skb = fep->tx_skbuff[i];
- fep->tx_skbuff[i] = NULL;
- dev_kfree_skb(skb);
+ fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE;
+ fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size;
}
+ return ret;
+
+alloc_failed:
+ fec_enet_free_queue(ndev);
+ return ret;
}
-static int fec_enet_alloc_buffers(struct net_device *ndev)
+static int
+fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
struct sk_buff *skb;
struct bufdesc *bdp;
+ struct fec_enet_priv_rx_q *rxq;
- bdp = fep->rx_bd_base;
- for (i = 0; i < fep->rx_ring_size; i++) {
- dma_addr_t addr;
-
+ rxq = fep->rx_queue[queue];
+ bdp = rxq->rx_bd_base;
+ for (i = 0; i < rxq->rx_ring_size; i++) {
skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
if (!skb)
goto err_alloc;
- addr = dma_map_single(&fep->pdev->dev, skb->data,
- FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&fep->pdev->dev, addr)) {
+ if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
dev_kfree_skb(skb);
- if (net_ratelimit())
- netdev_err(ndev, "Rx DMA memory map failed\n");
goto err_alloc;
}
- fep->rx_skbuff[i] = skb;
- bdp->cbd_bufaddr = addr;
+ rxq->rx_skbuff[i] = skb;
bdp->cbd_sc = BD_ENET_RX_EMPTY;
if (fep->bufdesc_ex) {
@@ -2161,17 +2627,32 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_RX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep);
+ bdp = fec_enet_get_prevdesc(bdp, fep, queue);
bdp->cbd_sc |= BD_SC_WRAP;
+ return 0;
- bdp = fep->tx_bd_base;
- for (i = 0; i < fep->tx_ring_size; i++) {
- fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
- if (!fep->tx_bounce[i])
+ err_alloc:
+ fec_enet_free_buffers(ndev);
+ return -ENOMEM;
+}
+
+static int
+fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+ struct bufdesc *bdp;
+ struct fec_enet_priv_tx_q *txq;
+
+ txq = fep->tx_queue[queue];
+ bdp = txq->tx_bd_base;
+ for (i = 0; i < txq->tx_ring_size; i++) {
+ txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+ if (!txq->tx_bounce[i])
goto err_alloc;
bdp->cbd_sc = 0;
@@ -2182,11 +2663,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_TX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(bdp, fep, queue);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep);
+ bdp = fec_enet_get_prevdesc(bdp, fep, queue);
bdp->cbd_sc |= BD_SC_WRAP;
return 0;
@@ -2196,6 +2677,21 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
return -ENOMEM;
}
+static int fec_enet_alloc_buffers(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned int i;
+
+ for (i = 0; i < fep->num_rx_queues; i++)
+ if (fec_enet_alloc_rxq_buffers(ndev, i))
+ return -ENOMEM;
+
+ for (i = 0; i < fep->num_tx_queues; i++)
+ if (fec_enet_alloc_txq_buffers(ndev, i))
+ return -ENOMEM;
+ return 0;
+}
+
static int
fec_enet_open(struct net_device *ndev)
{
@@ -2213,20 +2709,26 @@ fec_enet_open(struct net_device *ndev)
ret = fec_enet_alloc_buffers(ndev);
if (ret)
- return ret;
+ goto err_enet_alloc;
/* Probe and connect to PHY when open the interface */
ret = fec_enet_mii_probe(ndev);
- if (ret) {
- fec_enet_free_buffers(ndev);
- return ret;
- }
+ if (ret)
+ goto err_enet_mii_probe;
fec_restart(ndev);
napi_enable(&fep->napi);
phy_start(fep->phy_dev);
- netif_start_queue(ndev);
+ netif_tx_start_all_queues(ndev);
+
return 0;
+
+err_enet_mii_probe:
+ fec_enet_free_buffers(ndev);
+err_enet_alloc:
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+ return ret;
}
static int
@@ -2372,20 +2874,12 @@ static void fec_poll_controller(struct net_device *dev)
#endif
#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
-
-static int fec_set_features(struct net_device *netdev,
+static inline void fec_enet_set_netdev_features(struct net_device *netdev,
netdev_features_t features)
{
struct fec_enet_private *fep = netdev_priv(netdev);
netdev_features_t changed = features ^ netdev->features;
- /* Quiesce the device if necessary */
- if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
- napi_disable(&fep->napi);
- netif_tx_lock_bh(netdev);
- fec_stop(netdev);
- }
-
netdev->features = features;
/* Receive checksum has been changed */
@@ -2395,13 +2889,25 @@ static int fec_set_features(struct net_device *netdev,
else
fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
}
+}
+
+static int fec_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct fec_enet_private *fep = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
- /* Resume the device after updates */
if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
+ napi_disable(&fep->napi);
+ netif_tx_lock_bh(netdev);
+ fec_stop(netdev);
+ fec_enet_set_netdev_features(netdev, features);
fec_restart(netdev);
- netif_wake_queue(netdev);
+ netif_tx_wake_all_queues(netdev);
netif_tx_unlock_bh(netdev);
napi_enable(&fep->napi);
+ } else {
+ fec_enet_set_netdev_features(netdev, features);
}
return 0;
@@ -2432,39 +2938,38 @@ static int fec_enet_init(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
+ struct fec_enet_priv_tx_q *txq;
+ struct fec_enet_priv_rx_q *rxq;
struct bufdesc *cbd_base;
+ dma_addr_t bd_dma;
int bd_size;
+ unsigned int i;
- /* init the tx & rx ring size */
- fep->tx_ring_size = TX_RING_SIZE;
- fep->rx_ring_size = RX_RING_SIZE;
+#if defined(CONFIG_ARM)
+ fep->rx_align = 0xf;
+ fep->tx_align = 0xf;
+#else
+ fep->rx_align = 0x3;
+ fep->tx_align = 0x3;
+#endif
- fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
- fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
+ fec_enet_alloc_queue(ndev);
if (fep->bufdesc_ex)
fep->bufdesc_size = sizeof(struct bufdesc_ex);
else
fep->bufdesc_size = sizeof(struct bufdesc);
- bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
+ bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) *
fep->bufdesc_size;
/* Allocate memory for buffer descriptors. */
- cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
+ cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma,
GFP_KERNEL);
- if (!cbd_base)
- return -ENOMEM;
-
- fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
- &fep->tso_hdrs_dma, GFP_KERNEL);
- if (!fep->tso_hdrs) {
- dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
+ if (!cbd_base) {
return -ENOMEM;
}
- memset(cbd_base, 0, PAGE_SIZE);
-
- fep->netdev = ndev;
+ memset(cbd_base, 0, bd_size);
/* Get the Ethernet address */
fec_get_mac(ndev);
@@ -2472,12 +2977,36 @@ static int fec_enet_init(struct net_device *ndev)
fec_set_mac_address(ndev, NULL);
/* Set receive and transmit descriptor base. */
- fep->rx_bd_base = cbd_base;
- if (fep->bufdesc_ex)
- fep->tx_bd_base = (struct bufdesc *)
- (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
- else
- fep->tx_bd_base = cbd_base + fep->rx_ring_size;
+ for (i = 0; i < fep->num_rx_queues; i++) {
+ rxq = fep->rx_queue[i];
+ rxq->index = i;
+ rxq->rx_bd_base = (struct bufdesc *)cbd_base;
+ rxq->bd_dma = bd_dma;
+ if (fep->bufdesc_ex) {
+ bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size;
+ cbd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size);
+ } else {
+ bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size;
+ cbd_base += rxq->rx_ring_size;
+ }
+ }
+
+ for (i = 0; i < fep->num_tx_queues; i++) {
+ txq = fep->tx_queue[i];
+ txq->index = i;
+ txq->tx_bd_base = (struct bufdesc *)cbd_base;
+ txq->bd_dma = bd_dma;
+ if (fep->bufdesc_ex) {
+ bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size;
+ cbd_base = (struct bufdesc *)
+ (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size);
+ } else {
+ bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size;
+ cbd_base += txq->tx_ring_size;
+ }
+ }
+
/* The FEC Ethernet specific entries in the device structure */
ndev->watchdog_timeo = TX_TIMEOUT;
@@ -2500,6 +3029,11 @@ static int fec_enet_init(struct net_device *ndev)
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
+ if (id_entry->driver_data & FEC_QUIRK_HAS_AVB) {
+ fep->tx_align = 0;
+ fep->rx_align = 0x3f;
+ }
+
ndev->hw_features = ndev->features;
fec_restart(ndev);
@@ -2545,6 +3079,42 @@ static void fec_reset_phy(struct platform_device *pdev)
}
#endif /* CONFIG_OF */
+static void
+fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int err;
+
+ *num_tx = *num_rx = 1;
+
+ if (!np || !of_device_is_available(np))
+ return;
+
+ /* parse the num of tx and rx queues */
+ err = of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
+ if (err)
+ *num_tx = 1;
+
+ err = of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
+ if (err)
+ *num_rx = 1;
+
+ if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
+ dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
+ *num_tx);
+ *num_tx = 1;
+ return;
+ }
+
+ if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
+ dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
+ *num_rx);
+ *num_rx = 1;
+ return;
+ }
+}
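+
+/* A matching device-tree fragment would look like this (queue counts are
+ * board-specific, capped at FEC_ENET_MAX_TX_QS/FEC_ENET_MAX_RX_QS):
+ *	&fec {
+ *		fsl,num-tx-queues = <3>;
+ *		fsl,num-rx-queues = <3>;
+ *	};
+ */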
+
static int
fec_probe(struct platform_device *pdev)
{
@@ -2556,13 +3126,18 @@ fec_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
static int dev_id;
struct device_node *np = pdev->dev.of_node, *phy_node;
+ int num_tx_qs;
+ int num_rx_qs;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
pdev->id_entry = of_id->data;
+ fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
+
/* Init network device */
- ndev = alloc_etherdev(sizeof(struct fec_enet_private));
+ ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private),
+ num_tx_qs, num_rx_qs);
if (!ndev)
return -ENOMEM;
@@ -2571,6 +3146,9 @@ fec_probe(struct platform_device *pdev)
/* setup board info structure */
fep = netdev_priv(ndev);
+ fep->num_rx_queues = num_rx_qs;
+ fep->num_tx_queues = num_tx_qs;
+
#if !defined(CONFIG_M5272)
/* default enable pause frame auto negotiation */
if (pdev->id_entry &&
@@ -2630,6 +3208,8 @@ fec_probe(struct platform_device *pdev)
goto failed_clk;
}
+ fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
+
/* enet_out is optional, depends on board */
fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
if (IS_ERR(fep->clk_enet_out))
@@ -2637,6 +3217,12 @@ fec_probe(struct platform_device *pdev)
fep->ptp_clk_on = false;
mutex_init(&fep->ptp_clk_mutex);
+
+ /* clk_ref is optional, depends on board */
+ fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
+ if (IS_ERR(fep->clk_ref))
+ fep->clk_ref = NULL;
+
fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
fep->bufdesc_ex =
pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
@@ -2684,6 +3270,7 @@ fec_probe(struct platform_device *pdev)
goto failed_irq;
}
+ init_completion(&fep->mdio_done);
ret = fec_enet_mii_init(pdev);
if (ret)
goto failed_mii_init;
@@ -2700,6 +3287,7 @@ fec_probe(struct platform_device *pdev)
if (fep->bufdesc_ex && fep->ptp_clock)
netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
+ fep->rx_copybreak = COPYBREAK_DEFAULT;
INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
return 0;
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index cca3617a2321..992c8c3db553 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -61,6 +61,24 @@
#define FEC_T_INC_CORR_MASK 0x00007f00
#define FEC_T_INC_CORR_OFFSET 8
+#define FEC_T_CTRL_PINPER 0x00000080
+#define FEC_T_TF0_MASK 0x00000001
+#define FEC_T_TF0_OFFSET 0
+#define FEC_T_TF1_MASK 0x00000002
+#define FEC_T_TF1_OFFSET 1
+#define FEC_T_TF2_MASK 0x00000004
+#define FEC_T_TF2_OFFSET 2
+#define FEC_T_TF3_MASK 0x00000008
+#define FEC_T_TF3_OFFSET 3
+#define FEC_T_TDRE_MASK 0x00000001
+#define FEC_T_TDRE_OFFSET 0
+#define FEC_T_TMODE_MASK 0x0000003C
+#define FEC_T_TMODE_OFFSET 2
+#define FEC_T_TIE_MASK 0x00000040
+#define FEC_T_TIE_OFFSET 6
+#define FEC_T_TF_MASK 0x00000080
+#define FEC_T_TF_OFFSET 7
+
#define FEC_ATIME_CTRL 0x400
#define FEC_ATIME 0x404
#define FEC_ATIME_EVT_OFFSET 0x408
@@ -69,7 +87,143 @@
#define FEC_ATIME_INC 0x414
#define FEC_TS_TIMESTAMP 0x418
+#define FEC_TGSR 0x604
+#define FEC_TCSR(n) (0x608 + n * 0x08)
+#define FEC_TCCR(n) (0x60C + n * 0x08)
+#define MAX_TIMER_CHANNEL 3
+#define FEC_TMODE_TOGGLE 0x05
+#define FEC_HIGH_PULSE 0x0F
+
#define FEC_CC_MULT (1 << 31)
+#define FEC_COUNTER_PERIOD (1 << 31)
+#define PPS_OUTPUT_RELOAD_PERIOD NSEC_PER_SEC
+#define FEC_CHANNEL_0 0
+#define DEFAULT_PPS_CHANNEL FEC_CHANNEL_0
+
+/**
+ * fec_ptp_enable_pps
+ * @fep: the fec_enet_private structure handle
+ * @enable: enable the channel pps output
+ *
+ * This function enables the PPS output on the timer channel.
+ */
+static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
+{
+ unsigned long flags;
+ u32 val, tempval;
+ int inc;
+ struct timespec ts;
+ u64 ns;
+ u32 remainder;
+ val = 0;
+
+ if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
+ dev_err(&fep->pdev->dev, "No ptp stack is running\n");
+ return -EINVAL;
+ }
+
+ if (fep->pps_enable == enable)
+ return 0;
+
+ fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ fep->reload_period = PPS_OUTPUT_RELOAD_PERIOD;
+ inc = fep->ptp_inc;
+
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+ if (enable) {
+ /* clear capture or output compare interrupt status, if any */
+ writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+ /* It is recommended to double-check that the TMODE field in
+ * the TCSR register is cleared before the first compare counter
+ * is written into the TCCR register, so do exactly that here.
+ */
+ val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ do {
+ val &= ~(FEC_T_TMODE_MASK);
+ writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
+ val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ } while (val & FEC_T_TMODE_MASK);
+
+ /* Dummy read to update the timecounter */
+ timecounter_read(&fep->tc);
+ /* We want to find the first compare event at the next
+ * second boundary. So we need to know the current ptp time
+ * and how many nanoseconds remain before the next second:
+ * NSEC_PER_SEC - ts.tv_nsec. Adding that remainder to the
+ * current timer value lands exactly on the next second.
+ */
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+ tempval = readl(fep->hwp + FEC_ATIME);
+ /* Convert the ptp local counter to 1588 timestamp */
+ ns = timecounter_cyc2time(&fep->tc, tempval);
+ ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
+ ts.tv_nsec = remainder;
+
+ /* tempval is less than 3 seconds, so val is less than
+ * 4 seconds; no overflow in the 32-bit calculation.
+ */
+ val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;
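+ /* For example (illustrative numbers only): if tempval is
+ * 1500000000 and ts.tv_nsec is 600000000, then
+ * val = 1000000000 - 600000000 + 1500000000 = 1900000000,
+ * the counter value at the next one-second boundary.
+ */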
+
+ /* Consider the situation where the current time is very
+ * close to the second boundary, so NSEC_PER_SEC - ts.tv_nsec
+ * is close to zero (for example, 20 ns). Since the timer keeps
+ * running while we calculate the first compare event, the
+ * remaining nanoseconds may run out before the compare counter
+ * is calculated and written into the TCCR register. To avoid
+ * this, set the compare event to the second after next. The
+ * timer is 31 bits wide and wraps in just over 2 seconds, so
+ * the second after next still fits.
+ */
+ val += NSEC_PER_SEC;
+
+ /* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
+ * ptp counter, which may cause a 32-bit wrap. Since
+ * (NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 seconds,
+ * the wrap is harmless; an offset bigger than fep->cc.mask
+ * would be an error.
+ */
+ val &= fep->cc.mask;
+ writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));
+
+ /* Calculate the second compare event timestamp */
+ fep->next_counter = (val + fep->reload_period) & fep->cc.mask;
+
+ /* Enable the compare event when the counter overflows */
+ val = readl(fep->hwp + FEC_ATIME_CTRL);
+ val |= FEC_T_CTRL_PINPER;
+ writel(val, fep->hwp + FEC_ATIME_CTRL);
+
+ /* Compare channel setting. */
+ val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+ val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
+ val &= ~(1 << FEC_T_TDRE_OFFSET);
+ val &= ~(FEC_T_TMODE_MASK);
+ val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
+ writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+ /* Write the second compare event timestamp and calculate
+ * the third timestamp. Refer to the TCCR register details
+ * in the spec.
+ */
+ writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
+ fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
+ } else {
+ writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
+ }
+
+ fep->pps_enable = enable;
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+ return 0;
+}
+
/**
* fec_ptp_read - read raw cycle counter (to be used by time counter)
* @cc: the cyclecounter structure
@@ -82,12 +236,17 @@ static cycle_t fec_ptp_read(const struct cyclecounter *cc)
{
struct fec_enet_private *fep =
container_of(cc, struct fec_enet_private, cc);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
u32 tempval;
tempval = readl(fep->hwp + FEC_ATIME_CTRL);
tempval |= FEC_T_CTRL_CAPTURE;
writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+ if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
+ udelay(1);
+
return readl(fep->hwp + FEC_ATIME);
}
@@ -113,14 +272,15 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
/* 1ns counter */
writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
- /* use free running count */
- writel(0, fep->hwp + FEC_ATIME_EVT_PERIOD);
+ /* use 31-bit timer counter */
+ writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);
- writel(FEC_T_CTRL_ENABLE, fep->hwp + FEC_ATIME_CTRL);
+ writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
+ fep->hwp + FEC_ATIME_CTRL);
memset(&fep->cc, 0, sizeof(fep->cc));
fep->cc.read = fec_ptp_read;
- fep->cc.mask = CLOCKSOURCE_MASK(32);
+ fep->cc.mask = CLOCKSOURCE_MASK(31);
fep->cc.shift = 31;
fep->cc.mult = FEC_CC_MULT;
@@ -143,32 +303,59 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
*/
static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
- u64 diff;
unsigned long flags;
int neg_adj = 0;
- u32 mult = FEC_CC_MULT;
+ u32 i, tmp;
+ u32 corr_inc, corr_period;
+ u32 corr_ns;
+ u64 lhs, rhs;
struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
+ if (ppb == 0)
+ return 0;
+
if (ppb < 0) {
ppb = -ppb;
neg_adj = 1;
}
- diff = mult;
- diff *= ppb;
- diff = div_u64(diff, 1000000000ULL);
+ /* In theory, corr_inc/corr_period = ppb/NSEC_PER_SEC;
+ * Try to find the corr_inc between 1 to fep->ptp_inc to
+ * meet adjustment requirement.
+ */
+ lhs = NSEC_PER_SEC;
+ rhs = (u64)ppb * (u64)fep->ptp_inc;
+ for (i = 1; i <= fep->ptp_inc; i++) {
+ if (lhs >= rhs) {
+ corr_inc = i;
+ corr_period = div_u64(lhs, rhs);
+ break;
+ }
+ lhs += NSEC_PER_SEC;
+ }
+ /* Not found? Set it to high value - double speed
+ * correct in every clock step.
+ */
+ if (i > fep->ptp_inc) {
+ corr_inc = fep->ptp_inc;
+ corr_period = 1;
+ }
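+ /* Example, assuming a 25 MHz ptp clock (fep->ptp_inc = 40 ns)
+ * and ppb = 100: rhs = 100 * 40 = 4000, so the first iteration
+ * already gives corr_inc = 1 and corr_period = 1000000000 / 4000
+ * = 250000, i.e. one extra nanosecond every 250000 increments
+ * (every 10 ms), which is exactly 100 ppb.
+ */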
+
+ if (neg_adj)
+ corr_ns = fep->ptp_inc - corr_inc;
+ else
+ corr_ns = fep->ptp_inc + corr_inc;
spin_lock_irqsave(&fep->tmreg_lock, flags);
- /*
- * dummy read to set cycle_last in tc to now.
- * So use adjusted mult to calculate when next call
- * timercounter_read.
- */
- timecounter_read(&fep->tc);
- fep->cc.mult = neg_adj ? mult - diff : mult + diff;
+ tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
+ tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
+ writel(tmp, fep->hwp + FEC_ATIME_INC);
+ writel(corr_period, fep->hwp + FEC_ATIME_CORR);
+ /* dummy read to update the timer. */
+ timecounter_read(&fep->tc);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
@@ -188,12 +375,19 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
container_of(ptp, struct fec_enet_private, ptp_caps);
unsigned long flags;
u64 now;
+ u32 counter;
spin_lock_irqsave(&fep->tmreg_lock, flags);
now = timecounter_read(&fep->tc);
now += delta;
+ /* Get the timer value based on adjusted timestamp.
+ * Update the counter with the masked value.
+ */
+ counter = now & fep->cc.mask;
+ writel(counter, fep->hwp + FEC_ATIME);
+
/* reset the timecounter */
timecounter_init(&fep->tc, &fep->cc, now);
@@ -244,6 +438,7 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
u64 ns;
unsigned long flags;
+ u32 counter;
mutex_lock(&fep->ptp_clk_mutex);
/* Check the ptp clock */
@@ -254,8 +449,13 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
ns = ts->tv_sec * 1000000000ULL;
ns += ts->tv_nsec;
+ /* Get the timer value based on timestamp.
+ * Update the counter with the masked value.
+ */
+ counter = ns & fep->cc.mask;
spin_lock_irqsave(&fep->tmreg_lock, flags);
+ writel(counter, fep->hwp + FEC_ATIME);
timecounter_init(&fep->tc, &fep->cc, ns);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
mutex_unlock(&fep->ptp_clk_mutex);
@@ -272,6 +472,15 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
static int fec_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
+ struct fec_enet_private *fep =
+ container_of(ptp, struct fec_enet_private, ptp_caps);
+ int ret = 0;
+
+ if (rq->type == PTP_CLK_REQ_PPS) {
+ ret = fec_ptp_enable_pps(fep, on);
+
+ return ret;
+ }
return -EOPNOTSUPP;
}
@@ -386,7 +595,7 @@ void fec_ptp_init(struct platform_device *pdev)
fep->ptp_caps.n_ext_ts = 0;
fep->ptp_caps.n_per_out = 0;
fep->ptp_caps.n_pins = 0;
- fep->ptp_caps.pps = 0;
+ fep->ptp_caps.pps = 1;
fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
fep->ptp_caps.adjtime = fec_ptp_adjtime;
fep->ptp_caps.gettime = fec_ptp_gettime;
@@ -394,6 +603,7 @@ void fec_ptp_init(struct platform_device *pdev)
fep->ptp_caps.enable = fec_ptp_enable;
fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+ fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
spin_lock_init(&fep->tmreg_lock);
@@ -409,3 +619,36 @@ void fec_ptp_init(struct platform_device *pdev)
schedule_delayed_work(&fep->time_keep, HZ);
}
+
+/**
+ * fec_ptp_check_pps_event
+ * @fep: the fec_enet_private structure handle
+ *
+ * This function checks the PPS event and reloads the timer compare counter.
+ */
+uint fec_ptp_check_pps_event(struct fec_enet_private *fep)
+{
+ u32 val;
+ u8 channel = fep->pps_channel;
+ struct ptp_clock_event event;
+
+ val = readl(fep->hwp + FEC_TCSR(channel));
+ if (val & FEC_T_TF_MASK) {
+ /* Write the next-next compare value (not the next one,
+ * according to the spec) to the register.
+ */
+ writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
+ do {
+ writel(val, fep->hwp + FEC_TCSR(channel));
+ } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);
+
+ /* Update the counter */
+ fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
+
+ event.type = PTP_CLOCK_PPS;
+ ptp_clock_event(fep->ptp_clock, &event);
+ return 1;
+ }
+
+ return 0;
+}
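+
+/* With pps enabled above, userspace can request PPS event delivery via
+ * the PTP_ENABLE_PPS ioctl, e.g. with the kernel's testptp helper
+ * (assuming the usual option names):
+ *	testptp -d /dev/ptp0 -P 1
+ */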
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 748fd24d3d9e..c92c3b7876ca 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -215,139 +215,23 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
return received;
}
-/* non NAPI receive function */
-static int fs_enet_rx_non_napi(struct net_device *dev)
+static int fs_enet_tx_napi(struct napi_struct *napi, int budget)
{
- struct fs_enet_private *fep = netdev_priv(dev);
- const struct fs_platform_info *fpi = fep->fpi;
- cbd_t __iomem *bdp;
- struct sk_buff *skb, *skbn, *skbt;
- int received = 0;
- u16 pkt_len, sc;
- int curidx;
- /*
- * First, grab all of the stats for the incoming packet.
- * These get messed up if we get called due to a busy condition.
- */
- bdp = fep->cur_rx;
-
- while (((sc = CBDR_SC(bdp)) & BD_ENET_RX_EMPTY) == 0) {
-
- curidx = bdp - fep->rx_bd_base;
-
- /*
- * Since we have allocated space to hold a complete frame,
- * the last indicator should be set.
- */
- if ((sc & BD_ENET_RX_LAST) == 0)
- dev_warn(fep->dev, "rcv is not +last\n");
-
- /*
- * Check for errors.
- */
- if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_CL |
- BD_ENET_RX_NO | BD_ENET_RX_CR | BD_ENET_RX_OV)) {
- fep->stats.rx_errors++;
- /* Frame too long or too short. */
- if (sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
- fep->stats.rx_length_errors++;
- /* Frame alignment */
- if (sc & (BD_ENET_RX_NO | BD_ENET_RX_CL))
- fep->stats.rx_frame_errors++;
- /* CRC Error */
- if (sc & BD_ENET_RX_CR)
- fep->stats.rx_crc_errors++;
- /* FIFO overrun */
- if (sc & BD_ENET_RX_OV)
- fep->stats.rx_crc_errors++;
-
- skb = fep->rx_skbuff[curidx];
-
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
-
- skbn = skb;
-
- } else {
-
- skb = fep->rx_skbuff[curidx];
-
- dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE);
-
- /*
- * Process the incoming frame.
- */
- fep->stats.rx_packets++;
- pkt_len = CBDR_DATLEN(bdp) - 4; /* remove CRC */
- fep->stats.rx_bytes += pkt_len + 4;
-
- if (pkt_len <= fpi->rx_copybreak) {
- /* +2 to make IP header L1 cache aligned */
- skbn = netdev_alloc_skb(dev, pkt_len + 2);
- if (skbn != NULL) {
- skb_reserve(skbn, 2); /* align IP header */
- skb_copy_from_linear_data(skb,
- skbn->data, pkt_len);
- /* swap */
- skbt = skb;
- skb = skbn;
- skbn = skbt;
- }
- } else {
- skbn = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
-
- if (skbn)
- skb_align(skbn, ENET_RX_ALIGN);
- }
-
- if (skbn != NULL) {
- skb_put(skb, pkt_len); /* Make room */
- skb->protocol = eth_type_trans(skb, dev);
- received++;
- netif_rx(skb);
- } else {
- fep->stats.rx_dropped++;
- skbn = skb;
- }
- }
-
- fep->rx_skbuff[curidx] = skbn;
- CBDW_BUFADDR(bdp, dma_map_single(fep->dev, skbn->data,
- L1_CACHE_ALIGN(PKT_MAXBUF_SIZE),
- DMA_FROM_DEVICE));
- CBDW_DATLEN(bdp, 0);
- CBDW_SC(bdp, (sc & ~BD_ENET_RX_STATS) | BD_ENET_RX_EMPTY);
-
- /*
- * Update BD pointer to next entry.
- */
- if ((sc & BD_ENET_RX_WRAP) == 0)
- bdp++;
- else
- bdp = fep->rx_bd_base;
-
- (*fep->ops->rx_bd_done)(dev);
- }
-
- fep->cur_rx = bdp;
-
- return 0;
-}
-
-static void fs_enet_tx(struct net_device *dev)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
+ struct fs_enet_private *fep = container_of(napi, struct fs_enet_private,
+ napi_tx);
+ struct net_device *dev = fep->ndev;
cbd_t __iomem *bdp;
struct sk_buff *skb;
int dirtyidx, do_wake, do_restart;
u16 sc;
+ int has_tx_work = 0;
spin_lock(&fep->tx_lock);
bdp = fep->dirty_tx;
+ /* clear TX status bits for NAPI */
+ (*fep->ops->napi_clear_tx_event)(dev);
+
do_wake = do_restart = 0;
while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {
dirtyidx = bdp - fep->tx_bd_base;
@@ -400,7 +284,7 @@ static void fs_enet_tx(struct net_device *dev)
/*
* Free the sk buffer associated with this last transmit.
*/
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb(skb);
fep->tx_skbuff[dirtyidx] = NULL;
/*
@@ -417,6 +301,7 @@ static void fs_enet_tx(struct net_device *dev)
*/
if (!fep->tx_free++)
do_wake = 1;
+ has_tx_work = 1;
}
fep->dirty_tx = bdp;
@@ -424,10 +309,19 @@ static void fs_enet_tx(struct net_device *dev)
if (do_restart)
(*fep->ops->tx_restart)(dev);
+ if (!has_tx_work) {
+ napi_complete(napi);
+ (*fep->ops->napi_enable_tx)(dev);
+ }
+
spin_unlock(&fep->tx_lock);
if (do_wake)
netif_wake_queue(dev);
+
+ if (has_tx_work)
+ return budget;
+ return 0;
}
/*
@@ -453,8 +347,7 @@ fs_enet_interrupt(int irq, void *dev_id)
nr++;
int_clr_events = int_events;
- if (fpi->use_napi)
- int_clr_events &= ~fep->ev_napi_rx;
+ int_clr_events &= ~fep->ev_napi_rx;
(*fep->ops->clear_int_events)(dev, int_clr_events);
@@ -462,23 +355,28 @@ fs_enet_interrupt(int irq, void *dev_id)
(*fep->ops->ev_error)(dev, int_events);
if (int_events & fep->ev_rx) {
- if (!fpi->use_napi)
- fs_enet_rx_non_napi(dev);
- else {
- napi_ok = napi_schedule_prep(&fep->napi);
-
- (*fep->ops->napi_disable_rx)(dev);
- (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
-
- /* NOTE: it is possible for FCCs in NAPI mode */
- /* to submit a spurious interrupt while in poll */
- if (napi_ok)
- __napi_schedule(&fep->napi);
- }
+ napi_ok = napi_schedule_prep(&fep->napi);
+
+ (*fep->ops->napi_disable_rx)(dev);
+ (*fep->ops->clear_int_events)(dev, fep->ev_napi_rx);
+
+ /* NOTE: it is possible for FCCs in NAPI mode */
+ /* to submit a spurious interrupt while in poll */
+ if (napi_ok)
+ __napi_schedule(&fep->napi);
}
- if (int_events & fep->ev_tx)
- fs_enet_tx(dev);
+ if (int_events & fep->ev_tx) {
+ napi_ok = napi_schedule_prep(&fep->napi_tx);
+
+ (*fep->ops->napi_disable_tx)(dev);
+ (*fep->ops->clear_int_events)(dev, fep->ev_napi_tx);
+
+ /* NOTE: it is possible for FCCs in NAPI mode */
+ /* to submit a spurious interrupt while in poll */
+ if (napi_ok)
+ __napi_schedule(&fep->napi_tx);
+ }
}
handled = nr > 0;
@@ -611,7 +509,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
cbd_t __iomem *bdp;
int curidx;
u16 sc;
- unsigned long flags;
#ifdef CONFIG_FS_ENET_MPC5121_FEC
if (((unsigned long)skb->data) & 0x3) {
@@ -626,7 +523,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#endif
- spin_lock_irqsave(&fep->tx_lock, flags);
+ spin_lock(&fep->tx_lock);
/*
* Fill in a Tx ring entry
@@ -635,7 +532,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) {
netif_stop_queue(dev);
- spin_unlock_irqrestore(&fep->tx_lock, flags);
+ spin_unlock(&fep->tx_lock);
/*
* Ooops. All transmit buffers are full. Bail out.
@@ -691,7 +588,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
(*fep->ops->tx_kickstart)(dev);
- spin_unlock_irqrestore(&fep->tx_lock, flags);
+ spin_unlock(&fep->tx_lock);
return NETDEV_TX_OK;
}
@@ -811,24 +708,24 @@ static int fs_enet_open(struct net_device *dev)
/* not doing this, will cause a crash in fs_enet_rx_napi */
fs_init_bds(fep->ndev);
- if (fep->fpi->use_napi)
- napi_enable(&fep->napi);
+ napi_enable(&fep->napi);
+ napi_enable(&fep->napi_tx);
/* Install our interrupt handler. */
r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
"fs_enet-mac", dev);
if (r != 0) {
dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
- if (fep->fpi->use_napi)
- napi_disable(&fep->napi);
+ napi_disable(&fep->napi);
+ napi_disable(&fep->napi_tx);
return -EINVAL;
}
err = fs_init_phy(dev);
if (err) {
free_irq(fep->interrupt, dev);
- if (fep->fpi->use_napi)
- napi_disable(&fep->napi);
+ napi_disable(&fep->napi);
+ napi_disable(&fep->napi_tx);
return err;
}
phy_start(fep->phydev);
@@ -845,8 +742,8 @@ static int fs_enet_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
- if (fep->fpi->use_napi)
- napi_disable(&fep->napi);
+ napi_disable(&fep->napi);
+ napi_disable(&fep->napi_tx);
phy_stop(fep->phydev);
spin_lock_irqsave(&fep->lock, flags);
@@ -1022,7 +919,6 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->rx_ring = 32;
fpi->tx_ring = 32;
fpi->rx_copybreak = 240;
- fpi->use_napi = 1;
fpi->napi_weight = 17;
fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
@@ -1102,9 +998,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
ndev->netdev_ops = &fs_enet_netdev_ops;
ndev->watchdog_timeo = 2 * HZ;
- if (fpi->use_napi)
- netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
- fpi->napi_weight);
+ netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
+ netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
ndev->ethtool_ops = &fs_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index 1ece4b1a689e..3a4b49e0e717 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -84,6 +84,9 @@ struct fs_ops {
void (*napi_clear_rx_event)(struct net_device *dev);
void (*napi_enable_rx)(struct net_device *dev);
void (*napi_disable_rx)(struct net_device *dev);
+ void (*napi_clear_tx_event)(struct net_device *dev);
+ void (*napi_enable_tx)(struct net_device *dev);
+ void (*napi_disable_tx)(struct net_device *dev);
void (*rx_bd_done)(struct net_device *dev);
void (*tx_kickstart)(struct net_device *dev);
u32 (*get_int_events)(struct net_device *dev);
@@ -119,6 +122,7 @@ struct phy_info {
struct fs_enet_private {
struct napi_struct napi;
+ struct napi_struct napi_tx;
struct device *dev; /* pointer back to the device (must be initialized first) */
struct net_device *ndev;
spinlock_t lock; /* during all ops except TX pckt processing */
@@ -149,6 +153,7 @@ struct fs_enet_private {
/* event masks */
u32 ev_napi_rx; /* mask of NAPI rx events */
+ u32 ev_napi_tx; /* mask of NAPI tx events */
u32 ev_rx; /* rx event mask */
u32 ev_tx; /* tx event mask */
u32 ev_err; /* error event mask */
@@ -191,8 +196,8 @@ void fs_cleanup_bds(struct net_device *dev);
#define DRV_MODULE_NAME "fs_enet"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "1.0"
-#define DRV_MODULE_RELDATE "Aug 8, 2005"
+#define DRV_MODULE_VERSION "1.1"
+#define DRV_MODULE_RELDATE "Sep 22, 2014"
/***************************************************************************/
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index f5383abbf399..08f5b911d96b 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -125,6 +125,7 @@ out:
}
#define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB)
+#define FCC_NAPI_TX_EVENT_MSK (FCC_ENET_TXB)
#define FCC_RX_EVENT (FCC_ENET_RXF)
#define FCC_TX_EVENT (FCC_ENET_TXB)
#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE)
@@ -137,6 +138,7 @@ static int setup_data(struct net_device *dev)
return -EINVAL;
fep->ev_napi_rx = FCC_NAPI_RX_EVENT_MSK;
+ fep->ev_napi_tx = FCC_NAPI_TX_EVENT_MSK;
fep->ev_rx = FCC_RX_EVENT;
fep->ev_tx = FCC_TX_EVENT;
fep->ev_err = FCC_ERR_EVENT_MSK;
@@ -446,6 +448,30 @@ static void napi_disable_rx(struct net_device *dev)
C16(fccp, fcc_fccm, FCC_NAPI_RX_EVENT_MSK);
}
+static void napi_clear_tx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ W16(fccp, fcc_fcce, FCC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_enable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ S16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_disable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ fcc_t __iomem *fccp = fep->fcc.fccp;
+
+ C16(fccp, fcc_fccm, FCC_NAPI_TX_EVENT_MSK);
+}
+
static void rx_bd_done(struct net_device *dev)
{
/* nothing */
@@ -572,6 +598,9 @@ const struct fs_ops fs_fcc_ops = {
.napi_clear_rx_event = napi_clear_rx_event,
.napi_enable_rx = napi_enable_rx,
.napi_disable_rx = napi_disable_rx,
+ .napi_clear_tx_event = napi_clear_tx_event,
+ .napi_enable_tx = napi_enable_tx,
+ .napi_disable_tx = napi_disable_tx,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 1eedfba2ad3c..b34214e2df5f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,6 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
}
#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
+#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB)
#define FEC_RX_EVENT (FEC_ENET_RXF)
#define FEC_TX_EVENT (FEC_ENET_TXF)
#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
@@ -126,6 +127,7 @@ static int setup_data(struct net_device *dev)
fep->fec.htlo = 0;
fep->ev_napi_rx = FEC_NAPI_RX_EVENT_MSK;
+ fep->ev_napi_tx = FEC_NAPI_TX_EVENT_MSK;
fep->ev_rx = FEC_RX_EVENT;
fep->ev_tx = FEC_TX_EVENT;
fep->ev_err = FEC_ERR_EVENT_MSK;
@@ -339,6 +341,9 @@ static void restart(struct net_device *dev)
FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
}
+ /* Restore multicast and promiscuous settings */
+ set_multicast_list(dev);
+
/*
* Enable interrupts we wish to service.
*/
@@ -415,6 +420,30 @@ static void napi_disable_rx(struct net_device *dev)
FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
}
+static void napi_clear_tx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FW(fecp, ievent, FEC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_enable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FS(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_disable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ struct fec __iomem *fecp = fep->fec.fecp;
+
+ FC(fecp, imask, FEC_NAPI_TX_EVENT_MSK);
+}
+
static void rx_bd_done(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
@@ -487,6 +516,9 @@ const struct fs_ops fs_fec_ops = {
.napi_clear_rx_event = napi_clear_rx_event,
.napi_enable_rx = napi_enable_rx,
.napi_disable_rx = napi_disable_rx,
+ .napi_clear_tx_event = napi_clear_tx_event,
+ .napi_enable_tx = napi_enable_tx,
+ .napi_disable_tx = napi_disable_tx,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 90b3b19b7cd3..7a184e8816a4 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -116,6 +116,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
}
#define SCC_NAPI_RX_EVENT_MSK (SCCE_ENET_RXF | SCCE_ENET_RXB)
+#define SCC_NAPI_TX_EVENT_MSK (SCCE_ENET_TXB)
#define SCC_RX_EVENT (SCCE_ENET_RXF)
#define SCC_TX_EVENT (SCCE_ENET_TXB)
#define SCC_ERR_EVENT_MSK (SCCE_ENET_TXE | SCCE_ENET_BSY)
@@ -130,6 +131,7 @@ static int setup_data(struct net_device *dev)
fep->scc.htlo = 0;
fep->ev_napi_rx = SCC_NAPI_RX_EVENT_MSK;
+ fep->ev_napi_tx = SCC_NAPI_TX_EVENT_MSK;
fep->ev_rx = SCC_RX_EVENT;
fep->ev_tx = SCC_TX_EVENT | SCCE_ENET_TXE;
fep->ev_err = SCC_ERR_EVENT_MSK;
@@ -353,6 +355,9 @@ static void restart(struct net_device *dev)
if (fep->phydev->duplex)
S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
+ /* Restore multicast and promiscuous settings */
+ set_multicast_list(dev);
+
S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
}
@@ -398,6 +403,30 @@ static void napi_disable_rx(struct net_device *dev)
C16(sccp, scc_sccm, SCC_NAPI_RX_EVENT_MSK);
}
+static void napi_clear_tx_event(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ W16(sccp, scc_scce, SCC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_enable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ S16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
+}
+
+static void napi_disable_tx(struct net_device *dev)
+{
+ struct fs_enet_private *fep = netdev_priv(dev);
+ scc_t __iomem *sccp = fep->scc.sccp;
+
+ C16(sccp, scc_sccm, SCC_NAPI_TX_EVENT_MSK);
+}
+
static void rx_bd_done(struct net_device *dev)
{
/* nothing */
@@ -471,6 +500,9 @@ const struct fs_ops fs_scc_ops = {
.napi_clear_rx_event = napi_clear_rx_event,
.napi_enable_rx = napi_enable_rx,
.napi_disable_rx = napi_disable_rx,
+ .napi_clear_tx_event = napi_clear_tx_event,
+ .napi_enable_tx = napi_enable_tx,
+ .napi_disable_tx = napi_disable_tx,
.rx_bd_done = rx_bd_done,
.tx_kickstart = tx_kickstart,
.get_int_events = get_int_events,
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 583e71ab7f51..964c6bf37710 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -28,7 +28,9 @@
#include <linux/of_device.h>
#include <asm/io.h>
+#if IS_ENABLED(CONFIG_UCC_GETH)
#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */
+#endif
#include "gianfar.h"
@@ -102,19 +104,22 @@ static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
{
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
- u32 status;
+ unsigned int timeout;
/* Set the PHY address and the register address we want to write */
- out_be32(&regs->miimadd, (mii_id << 8) | regnum);
+ iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
/* Write out the value we want */
- out_be32(&regs->miimcon, value);
+ iowrite32be(value, &regs->miimcon);
/* Wait for the transaction to finish */
- status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
- MII_TIMEOUT, 0);
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
- return status ? 0 : -ETIMEDOUT;
+ return timeout ? 0 : -ETIMEDOUT;
}
/*
@@ -131,25 +136,29 @@ static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
- u32 status;
+ unsigned int timeout;
u16 value;
/* Set the PHY address and the register address we want to read */
- out_be32(&regs->miimadd, (mii_id << 8) | regnum);
+ iowrite32be((mii_id << 8) | regnum, &regs->miimadd);
/* Clear miimcom, and then initiate a read */
- out_be32(&regs->miimcom, 0);
- out_be32(&regs->miimcom, MII_READ_COMMAND);
+ iowrite32be(0, &regs->miimcom);
+ iowrite32be(MII_READ_COMMAND, &regs->miimcom);
/* Wait for the transaction to finish, normally less than 100us */
- status = spin_event_timeout(!(in_be32(&regs->miimind) &
- (MIIMIND_NOTVALID | MIIMIND_BUSY)),
- MII_TIMEOUT, 0);
- if (!status)
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) &
+ (MIIMIND_NOTVALID | MIIMIND_BUSY)) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
+
+ if (!timeout)
return -ETIMEDOUT;
/* Grab the value of the register from miimstat */
- value = in_be32(&regs->miimstat);
+ value = ioread32be(&regs->miimstat);
dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
return value;
@@ -160,23 +169,26 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
{
struct fsl_pq_mdio_priv *priv = bus->priv;
struct fsl_pq_mii __iomem *regs = priv->regs;
- u32 status;
+ unsigned int timeout;
mutex_lock(&bus->mdio_lock);
/* Reset the management interface */
- out_be32(&regs->miimcfg, MIIMCFG_RESET);
+ iowrite32be(MIIMCFG_RESET, &regs->miimcfg);
/* Setup the MII Mgmt clock speed */
- out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
+ iowrite32be(MIIMCFG_INIT_VALUE, &regs->miimcfg);
/* Wait until the bus is free */
- status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
- MII_TIMEOUT, 0);
+ timeout = MII_TIMEOUT;
+ while ((ioread32be(&regs->miimind) & MIIMIND_BUSY) && timeout) {
+ cpu_relax();
+ timeout--;
+ }
mutex_unlock(&bus->mdio_lock);
- if (!status) {
+ if (!timeout) {
dev_err(&bus->dev, "timeout waiting for MII bus\n");
return -EBUSY;
}
@@ -433,7 +445,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
tbipa = data->get_tbipa(priv->map);
- out_be32(tbipa, be32_to_cpup(prop));
+ iowrite32be(be32_to_cpup(prop), tbipa);
}
}
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index fb29d049f4e1..4fdf0aa16978 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -88,8 +88,10 @@
#include <linux/net_tstamp.h>
#include <asm/io.h>
+#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
+#endif
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
@@ -100,6 +102,8 @@
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include "gianfar.h"
@@ -161,7 +165,7 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
lstatus |= BD_LFLAG(RXBD_WRAP);
- eieio();
+ gfar_wmb();
bdp->lstatus = lstatus;
}
@@ -334,7 +338,7 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
- int frame_size = priv->ndev->mtu + ETH_HLEN;
+ int frame_size = priv->ndev->mtu + ETH_HLEN + ETH_FCS_LEN;
/* set this when rx hw offload (TOE) functions are being used */
priv->uses_rxfcb = 0;
@@ -1061,6 +1065,7 @@ static void gfar_init_filer_table(struct gfar_private *priv)
}
}
+#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
unsigned int pvr = mfspr(SPRN_PVR);
@@ -1093,6 +1098,7 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv)
((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)))
priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
+#endif
static void gfar_detect_errata(struct gfar_private *priv)
{
@@ -1101,10 +1107,12 @@ static void gfar_detect_errata(struct gfar_private *priv)
/* no plans to fix */
priv->errata |= GFAR_ERRATA_A002;
+#ifdef CONFIG_PPC
if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
__gfar_detect_errata_85xx(priv);
else /* non-mpc85xx parts, i.e. e300 core based */
__gfar_detect_errata_83xx(priv);
+#endif
if (priv->errata)
dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
@@ -1754,26 +1762,32 @@ static void gfar_halt_nodisable(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
+ unsigned int timeout;
+ int stopped;
gfar_ints_disable(priv);
+ if (gfar_is_dma_stopped(priv))
+ return;
+
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
- if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
- (DMACTRL_GRS | DMACTRL_GTS)) {
- int ret;
-
- tempval |= (DMACTRL_GRS | DMACTRL_GTS);
- gfar_write(&regs->dmactrl, tempval);
+ tempval |= (DMACTRL_GRS | DMACTRL_GTS);
+ gfar_write(&regs->dmactrl, tempval);
- do {
- ret = spin_event_timeout(((gfar_read(&regs->ievent) &
- (IEVENT_GRSC | IEVENT_GTSC)) ==
- (IEVENT_GRSC | IEVENT_GTSC)), 1000000, 0);
- if (!ret && !(gfar_read(&regs->ievent) & IEVENT_GRSC))
- ret = __gfar_is_rx_idle(priv);
- } while (!ret);
+retry:
+ timeout = 1000;
+ while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
+ cpu_relax();
+ timeout--;
}
+
+ if (!timeout)
+ stopped = gfar_is_dma_stopped(priv);
+
+ if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
+ !__gfar_is_rx_idle(priv))
+ goto retry;
}
/* Halt the receive and transmit queues */
@@ -2357,18 +2371,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
spin_lock_irqsave(&tx_queue->txlock, flags);
- /* The powerpc-specific eieio() is used, as wmb() has too strong
- * semantics (it requires synchronization between cacheable and
- * uncacheable mappings, which eieio doesn't provide and which we
- * don't need), thus requiring a more expensive sync instruction. At
- * some point, the set of architecture-independent barrier functions
- * should be expanded to include weaker barriers.
- */
- eieio();
+ gfar_wmb();
txbdp_start->lstatus = lstatus;
- eieio(); /* force lstatus write before tx_skbuff */
+ gfar_wmb(); /* force lstatus write before tx_skbuff */
tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
@@ -3240,22 +3247,21 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- int idx;
- char tmpbuf[ETH_ALEN];
u32 tempval;
u32 __iomem *macptr = &regs->macstnaddr1;
macptr += num*2;
- /* Now copy it into the mac registers backwards, cuz
- * little endian is silly
+ /* For a station address of 0x12345678ABCD in transmission
+ * order (BE), MACnADDR1 is set to 0xCDAB7856 and
+ * MACnADDR2 is set to 0x34120000.
*/
- for (idx = 0; idx < ETH_ALEN; idx++)
- tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
+ tempval = (addr[5] << 24) | (addr[4] << 16) |
+ (addr[3] << 8) | addr[2];
- gfar_write(macptr, *((u32 *) (tmpbuf)));
+ gfar_write(macptr, tempval);
- tempval = *((u32 *) (tmpbuf + 4));
+ tempval = (addr[1] << 24) | (addr[0] << 16);
gfar_write(macptr+1, tempval);
}
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 84632c569f2c..2805cfbf1765 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1226,6 +1226,37 @@ static inline void gfar_write_isrg(struct gfar_private *priv)
}
}
+static inline int gfar_is_dma_stopped(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ return ((gfar_read(&regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)) ==
+ (IEVENT_GRSC | IEVENT_GTSC));
+}
+
+static inline int gfar_is_rx_dma_stopped(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+
+ return gfar_read(&regs->ievent) & IEVENT_GRSC;
+}
+
+static inline void gfar_wmb(void)
+{
+#if defined(CONFIG_PPC)
+ /* The powerpc-specific eieio() is used, as wmb() has too strong
+ * semantics (it requires synchronization between cacheable and
+ * uncacheable mappings, which eieio() doesn't provide and which we
+ * don't need), thus requiring a more expensive sync instruction. At
+ * some point, the set of architecture-independent barrier functions
+ * should be expanded to include weaker barriers.
+ */
+ eieio();
+#else
+ wmb(); /* order write accesses for BD (or FCB) fields */
+#endif
+}
+
irqreturn_t gfar_receive(int irq, void *dev_id);
int startup_gfar(struct net_device *dev);
void stop_gfar(struct net_device *dev);