author:    Jisheng Zhang <Jisheng.Zhang@synaptics.com>  2018-04-02 06:23:43 +0300
committer: David S. Miller <davem@davemloft.net>        2018-04-02 18:14:02 +0300
commit:    4a188a63afdffb5f62cccc508589c19e5297ed05
tree:      3a371e8015fba24582b8b913c4e721afba3256d9
parent:    6e00f7dd5e4edc2443f030b226f66fe4f1267667
net: mvneta: split rxq/txq init and txq deinit into SW and HW parts
This is to prepare for the suspend/resume improvement in the next
patch. The SW parts can be optimized out during resume.

As for rxq handling during suspend, we'd like to drop packets by
calling mvneta_rxq_drop_pkts(), which is both a SW and a HW operation,
so we don't split rxq deinit.

Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
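For context, the point of the split is that a later resume path can
re-run only the HW half (pure register writes) while keeping the
descriptor rings and buffers that the SW half allocated. Below is a
minimal illustrative sketch of such a resume helper, not code from
this commit: mvneta_resume_queues() is a hypothetical name, and the
exact shape of the follow-up patch is an assumption. rxq_number,
txq_number, pp->rxqs and pp->txqs are existing mvneta driver symbols.

/* Hypothetical sketch, not part of this patch: a resume path that
 * re-programs the hardware without re-running the SW init, since the
 * coherent descriptor rings and tx_skb arrays allocated by the
 * *_sw_init() helpers survive a suspend/resume cycle.
 */
static void mvneta_resume_queues(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_hw_init(pp, &pp->rxqs[queue]);

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_hw_init(pp, &pp->txqs[queue]);
}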
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 85
1 file changed, 66 insertions(+), 19 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b26bcdf4cd03..d4fbad235a79 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2797,10 +2797,8 @@ static void mvneta_rx_reset(struct mvneta_port *pp)
/* Rx/Tx queue initialization/cleanup methods */
-/* Create a specified RX queue */
-static int mvneta_rxq_init(struct mvneta_port *pp,
- struct mvneta_rx_queue *rxq)
-
+static int mvneta_rxq_sw_init(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
{
rxq->size = pp->rx_ring_size;
@@ -2813,6 +2811,12 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
rxq->last_desc = rxq->size - 1;

+ return 0;
+}
+
+static void mvneta_rxq_hw_init(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+{
/* Set Rx descriptors queue starting address */
mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
@@ -2836,6 +2840,20 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
mvneta_rxq_short_pool_set(pp, rxq);
mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
}
+}
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+ struct mvneta_rx_queue *rxq)
+
+{
+ int ret;
+
+ ret = mvneta_rxq_sw_init(pp, rxq);
+ if (ret < 0)
+ return ret;
+
+ mvneta_rxq_hw_init(pp, rxq);

return 0;
}
@@ -2858,9 +2876,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
rxq->descs_phys = 0;
}

-/* Create and initialize a tx queue */
-static int mvneta_txq_init(struct mvneta_port *pp,
- struct mvneta_tx_queue *txq)
+static int mvneta_txq_sw_init(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
{
int cpu;
@@ -2873,7 +2890,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
-
/* Allocate memory for TX descriptors */
txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2883,14 +2899,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->last_desc = txq->size - 1;

- /* Set maximum bandwidth for enabled TXQs */
- mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
- mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
-
- /* Set Tx descriptors queue starting address */
- mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
- mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
-
txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
GFP_KERNEL);
if (!txq->tx_skb) {
@@ -2911,7 +2919,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->descs, txq->descs_phys);
return -ENOMEM;
}
- mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

/* Setup XPS mapping */
if (txq_number > 1)
@@ -2924,9 +2931,38 @@ static int mvneta_txq_init(struct mvneta_port *pp,
return 0;
}

+static void mvneta_txq_hw_init(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ /* Set maximum bandwidth for enabled TXQs */
+ mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
+ mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
+
+ /* Set Tx descriptors queue starting address */
+ mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+ mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+ mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ int ret;
+
+ ret = mvneta_txq_sw_init(pp, txq);
+ if (ret < 0)
+ return ret;
+
+ mvneta_txq_hw_init(pp, txq);
+
+ return 0;
+}
+
/* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
-static void mvneta_txq_deinit(struct mvneta_port *pp,
- struct mvneta_tx_queue *txq)
+static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
{
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
@@ -2947,7 +2983,11 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
txq->descs_phys = 0;
+}

+static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
/* Set minimum bandwidth for disabled TXQs */
mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
@@ -2957,6 +2997,13 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

+static void mvneta_txq_deinit(struct mvneta_port *pp,
+ struct mvneta_tx_queue *txq)
+{
+ mvneta_txq_sw_deinit(pp, txq);
+ mvneta_txq_hw_deinit(pp, txq);
+}
+
/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{