author		Claudiu Manoil <claudiu.manoil@freescale.com>	2014-03-07 16:42:46 +0400
committer	David S. Miller <davem@davemloft.net>	2014-03-10 21:17:22 +0400
commit		71ff9e3df7e1c5d3293af6b595309124e8c97412
tree		4c68117fd4bda6b8ac7e51eca82e5c59a24c1116 /drivers/net/ethernet/freescale
parent		aeb12c5ef7cb08d879af22fc0a56cab9e70689ea
download	linux-71ff9e3df7e1c5d3293af6b595309124e8c97412.tar.xz
gianfar: Use Single-Queue polling for "fsl,etsec2"
For the "fsl,etsec2" compatible models the driver currently
supports 8 Tx and Rx DMA rings (aka HW queues). However, there
are only 2 pairs of Rx/Tx interrupt lines, as these controllers
are integrated in low power SoCs with 2 CPUs at most. As a result,
there are at most 2 NAPI instances that have to service multiple
Tx and Rx queues for these devices. This complicates the NAPI
polling routine having to iterate over the mutiple Rx/Tx queues
hooked to the same interrupt lines. And there's also an overhead
at HW level, as the controller needs to service all the 8 Tx rings
in a round robin manner. The combined overhead shows up for multi
parallel Tx flows transmitted by the kernel stack, when the driver
usually starts returning NETDEV_TX_BUSY leading to NETDEV WATCHDOG
Tx timeout triggering if the Tx path is congested for too long.
As an alternative, this patch makes the driver support only one
Tx/Rx DMA ring per NAPI instance (per interrupt group, i.e. pair
of Tx/Rx interrupt lines) by default. The simplified single-queue
polling routine (gfar_poll_sq) becomes the default NAPI poll
routine for the etsec2 devices too. Some adjustments were needed
to link the Tx/Rx HW queues to each NAPI instance (2 in this
case). gfar_poll_sq() is already used successfully by the older
SQ_SG_MODE (single interrupt group) controllers.
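To make the two schemes concrete, here is a minimal userspace
sketch (hypothetical names and data, not the driver's code) of
what each per-group poll has to do: the multi-queue poll walks a
bit map of HW queues hooked to the group, while the single-queue
poll services exactly one queue per group.

#include <stdio.h>

#define NUM_QUEUES 8

struct rx_queue {
	int pending;			/* packets waiting to be serviced */
};

struct int_group {
	unsigned long rx_bit_map;	/* MQ mode: queues owned by this group */
	struct rx_queue *rx_queue;	/* SQ mode: the group's single queue */
};

static struct rx_queue queues[NUM_QUEUES];

/* MQ polling: each poll walks the group's bit map and services
 * every HW queue hooked to the same interrupt line. */
static int poll_mq(struct int_group *grp, int budget)
{
	int i, work = 0;

	for (i = 0; i < NUM_QUEUES; i++) {
		if (!(grp->rx_bit_map & (1UL << i)))
			continue;
		while (queues[i].pending && work < budget) {
			queues[i].pending--;
			work++;
		}
	}
	return work;
}

/* SQ polling: one queue per group, no per-poll iteration. */
static int poll_sq(struct int_group *grp, int budget)
{
	int work = 0;

	while (grp->rx_queue->pending && work < budget) {
		grp->rx_queue->pending--;
		work++;
	}
	return work;
}

int main(void)
{
	struct int_group g_mq = { .rx_bit_map = 0x0F };		/* owns queues 0..3 */
	struct int_group g_sq = { .rx_queue = &queues[0] };	/* owns queue 0 */
	int i;

	for (i = 0; i < NUM_QUEUES; i++)
		queues[i].pending = 4;

	printf("MQ poll serviced %d packets\n", poll_mq(&g_mq, 16));
	printf("SQ poll serviced %d packets\n", poll_sq(&g_sq, 16));
	return 0;
}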
This patch fixes the Tx timeout triggering under heavy Tx traffic
load (i.e. iperf -c -P 8) for the "fsl,etsec2" devices (currently
the only MQ_MG_MODE devices). There is also a significant memory
footprint reduction from supporting at most 2 Rx/Tx DMA rings
instead of 8 for these devices.
Signed-off-by: Claudiu Manoil <claudiu.manoil@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/freescale')
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.c	70
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.h	41
2 files changed, 79 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 1aa2d55aa014..28effbecdab6 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -363,7 +363,10 @@ static void gfar_mac_rx_config(struct gfar_private *priv)
 	if (priv->rx_filer_enable) {
 		rctrl |= RCTRL_FILREN;
 		/* Program the RIR0 reg with the required distribution */
-		gfar_write(&regs->rir0, DEFAULT_RIR0);
+		if (priv->poll_mode == GFAR_SQ_POLLING)
+			gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
+		else /* GFAR_MQ_POLLING */
+			gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
 	}
 
 	/* Restore PROMISC mode */
@@ -636,7 +639,6 @@ static int gfar_parse_group(struct device_node *np,
 			    struct gfar_private *priv, const char *model)
 {
 	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
-	u32 *queue_mask;
 	int i;
 
 	for (i = 0; i < GFAR_NUM_IRQS; i++) {
@@ -665,12 +667,20 @@ static int gfar_parse_group(struct device_node *np,
 	grp->priv = priv;
 	spin_lock_init(&grp->grplock);
 	if (priv->mode == MQ_MG_MODE) {
-		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
-		grp->rx_bit_map = queue_mask ?
-			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
-		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
-		grp->tx_bit_map = queue_mask ?
-			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+		u32 *rxq_mask, *txq_mask;
+		rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+		txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+
+		if (priv->poll_mode == GFAR_SQ_POLLING) {
+			/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
+			grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+			grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
+		} else { /* GFAR_MQ_POLLING */
+			grp->rx_bit_map = rxq_mask ?
+				*rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+			grp->tx_bit_map = txq_mask ?
+				*txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
+		}
 	} else {
 		grp->rx_bit_map = 0xFF;
 		grp->tx_bit_map = 0xFF;
@@ -686,6 +696,8 @@ static int gfar_parse_group(struct device_node *np,
 	 * also assign queues to groups
 	 */
 	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+		if (!grp->rx_queue)
+			grp->rx_queue = priv->rx_queue[i];
 		grp->num_rx_queues++;
 		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
 		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
@@ -693,6 +705,8 @@ static int gfar_parse_group(struct device_node *np,
 	}
 
 	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+		if (!grp->tx_queue)
+			grp->tx_queue = priv->tx_queue[i];
 		grp->num_tx_queues++;
 		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
 		priv->tqueue |= (TQUEUE_EN0 >> i);
@@ -723,9 +737,22 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev
 	if (!np || !of_device_is_available(np))
 		return -ENODEV;
 
-	/* parse the num of tx and rx queues */
+	/* parse the num of HW tx and rx queues */
 	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
-	num_tx_qs = tx_queues ? *tx_queues : 1;
+	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
+
+	if (priv->mode == SQ_SG_MODE) {
+		num_tx_qs = 1;
+		num_rx_qs = 1;
+	} else { /* MQ_MG_MODE */
+		if (priv->poll_mode == GFAR_SQ_POLLING) {
+			num_tx_qs = 2; /* one q per int group */
+			num_rx_qs = 2; /* one q per int group */
+		} else { /* GFAR_MQ_POLLING */
+			num_tx_qs = tx_queues ? *tx_queues : 1;
+			num_rx_qs = rx_queues ? *rx_queues : 1;
+		}
+	}
 
 	if (num_tx_qs > MAX_TX_QS) {
 		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
@@ -734,9 +761,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev
 		return -EINVAL;
 	}
 
-	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
-	num_rx_qs = rx_queues ? *rx_queues : 1;
-
 	if (num_rx_qs > MAX_RX_QS) {
 		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
 		       num_rx_qs, MAX_RX_QS);
@@ -777,6 +801,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev
 	/* Parse and initialize group specific information */
 	if (of_device_is_compatible(np, "fsl,etsec2")) {
 		priv->mode = MQ_MG_MODE;
+		priv->poll_mode = GFAR_SQ_POLLING;
 		for_each_child_of_node(np, child) {
 			err = gfar_parse_group(child, priv, model);
 			if (err)
@@ -784,6 +809,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev
 		}
 	} else {
 		priv->mode = SQ_SG_MODE;
+		priv->poll_mode = GFAR_SQ_POLLING;
 		err = gfar_parse_group(np, priv, model);
 		if (err)
 			goto err_grp_init;
@@ -1263,13 +1289,13 @@ static int gfar_probe(struct platform_device *ofdev)
 	dev->ethtool_ops = &gfar_ethtool_ops;
 
 	/* Register for napi ...We are registering NAPI for each grp */
-	if (priv->mode == SQ_SG_MODE) {
-		netif_napi_add(dev, &priv->gfargrp[0].napi_rx, gfar_poll_rx_sq,
-			       GFAR_DEV_WEIGHT);
-		netif_napi_add(dev, &priv->gfargrp[0].napi_tx, gfar_poll_tx_sq,
-			       2);
-	} else {
-		for (i = 0; i < priv->num_grps; i++) {
+	for (i = 0; i < priv->num_grps; i++) {
+		if (priv->poll_mode == GFAR_SQ_POLLING) {
+			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
+				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
+			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
+				       gfar_poll_tx_sq, 2);
+		} else {
 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
 				       gfar_poll_rx, GFAR_DEV_WEIGHT);
 			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
@@ -2819,7 +2845,7 @@ static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
 	struct gfar_priv_grp *gfargrp =
 		container_of(napi, struct gfar_priv_grp, napi_rx);
 	struct gfar __iomem *regs = gfargrp->regs;
-	struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
+	struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
 	int work_done = 0;
 
 	/* Clear IEVENT, so interrupts aren't called again
@@ -2850,7 +2876,7 @@ static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
 	struct gfar_priv_grp *gfargrp =
 		container_of(napi, struct gfar_priv_grp, napi_tx);
 	struct gfar __iomem *regs = gfargrp->regs;
-	struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
+	struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
 	u32 imask;
 
 	/* Clear IEVENT, so interrupts aren't called again
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 1aeb34e1efa5..84632c569f2c 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -412,7 +412,9 @@ extern const char gfar_driver_version[];
 /* This default RIR value directly corresponds
  * to the 3-bit hash value generated */
-#define DEFAULT_RIR0	0x05397700
+#define DEFAULT_8RXQ_RIR0	0x05397700
+/* Map even hash values to Q0, and odd ones to Q1 */
+#define DEFAULT_2RXQ_RIR0	0x04104100
 
 /* RQFCR register bits */
 #define RQFCR_GPI		0x80000000
@@ -907,6 +909,22 @@ enum {
 	MQ_MG_MODE
 };
 
+/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
+ *	The driver supports a single pair of RX/Tx queues
+ *	per interrupt group (Rx/Tx int line). MQ_MG mode
+ *	devices have 2 interrupt groups, so the device will
+ *	have a total of 2 Tx and 2 Rx queues in this case.
+ * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
+ *	The driver supports all the 8 Rx and Tx HW queues
+ *	each queue mapped by the Device Tree to one of
+ *	the 2 interrupt groups. This mode implies significant
+ *	processing overhead (CPU and controller level).
+ */
+enum gfar_poll_mode {
+	GFAR_SQ_POLLING = 0,
+	GFAR_MQ_POLLING
+};
+
 /*
  * Per TX queue stats
  */
@@ -1016,17 +1034,20 @@ struct gfar_irqinfo {
  */
 
 struct gfar_priv_grp {
-	spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
+	spinlock_t grplock __aligned(SMP_CACHE_BYTES);
 	struct	napi_struct napi_rx;
 	struct	napi_struct napi_tx;
-	struct gfar_private *priv;
 	struct gfar __iomem *regs;
-	unsigned int rstat;
-	unsigned long num_rx_queues;
-	unsigned long rx_bit_map;
+	struct gfar_priv_tx_q *tx_queue;
+	struct gfar_priv_rx_q *rx_queue;
 	unsigned int tstat;
+	unsigned int rstat;
+
+	struct gfar_private *priv;
 	unsigned long num_tx_queues;
 	unsigned long tx_bit_map;
+	unsigned long num_rx_queues;
+	unsigned long rx_bit_map;
 	struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
 };
 
@@ -1056,8 +1077,6 @@ enum gfar_dev_state {
  * the buffer descriptor determines the actual condition.
  */
 struct gfar_private {
-	unsigned int num_rx_queues;
-
 	struct device *dev;
 	struct net_device *ndev;
 	enum gfar_errata errata;
@@ -1065,6 +1084,7 @@ struct gfar_private {
 
 	u16 uses_rxfcb;
 	u16 padding;
+	u32 device_flags;
 
 	/* HW time stamping enabled flag */
 	int hwts_rx_en;
@@ -1075,10 +1095,11 @@ struct gfar_private {
 	struct gfar_priv_grp gfargrp[MAXGROUPS];
 
 	unsigned long state;
-	u32 device_flags;
 
-	unsigned int mode;
+	unsigned short mode;
+	unsigned short poll_mode;
 	unsigned int num_tx_queues;
+	unsigned int num_rx_queues;
 	unsigned int num_grps;
 
 	/* Network Statistics */
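A note on the two RIR0 values in the gianfar.h hunk above: RIR0
holds one Rx ring index per 3-bit Rx hash value. Both constants
are consistent with eight 3-bit ring fields packed MSB-first into
the top 24 bits of the register. The standalone sketch below
reproduces them under that assumption (the packing is inferred
from the constants themselves; the eTSEC reference manual is the
authoritative source for the field layout).

#include <stdio.h>
#include <stdint.h>

/* Pack eight 3-bit Rx ring indices (one per 3-bit hash value,
 * MSB first) into the top 24 bits of a 32-bit RIR0 value.
 * Assumed layout, inferred from the two constants above. */
static uint32_t pack_rir0(const uint8_t ring[8])
{
	uint32_t val = 0;
	int i;

	for (i = 0; i < 8; i++)
		val = (val << 3) | (ring[i] & 0x7);
	return val << 8;	/* low 8 bits left as zero */
}

int main(void)
{
	/* 8-queue spread: hash value i goes to ring i */
	const uint8_t mq[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	/* 2-queue spread: even hash values to Q0, odd ones to Q1 */
	const uint8_t sq[8] = { 0, 1, 0, 1, 0, 1, 0, 1 };

	printf("DEFAULT_8RXQ_RIR0 = 0x%08x\n", (unsigned)pack_rir0(mq));
	printf("DEFAULT_2RXQ_RIR0 = 0x%08x\n", (unsigned)pack_rir0(sq));
	return 0;	/* prints 0x05397700 and 0x04104100 */
}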