author | Lendacky, Thomas <Thomas.Lendacky@amd.com> | 2014-07-02 22:04:46 +0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-07-08 08:38:06 +0400 |
commit | 9867e8fb2c45888cc594457914dcbba599f086c8 (patch) | |
tree | 9a64c6124f280b9368f1f7789ebf1cca855b7886 /drivers/net/ethernet/amd/xgbe/xgbe-dev.c | |
parent | ff42606eed00bc065365f55269d558c06b968594 (diff) | |
download | linux-9867e8fb2c45888cc594457914dcbba599f086c8.tar.xz |
amd-xgbe: Performance enhancements
This patch provides some general performance enhancements for the
driver:
- Modify the default coalescing settings (reduce usec, increase frames)
- Change the AXI burst length to 256 bytes (default was 16 bytes which
was smaller than a cache line)
- Change the AXI cache settings to write-back/write-allocate which
allocate cache entries for received packets during the DMA since the
packet will be processed soon afterwards
- Combine ioread/iowrite when disabling both the Tx and Rx interrupts
- Change to processing the Tx/Rx channels in pairs
- Only recycle the Rx descriptors when a threshold of dirty descriptors
is reached
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
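Two of the items above show up directly in the xgbe-dev.c hunks below: the interrupt paths gain a combined XGMAC_INT_DMA_CH_SR_TI_RI case, and xgbe_enable_int()/xgbe_disable_int() move from one MMIO write per bit to a single read-modify-write of DMA_CH_IER. The following is a minimal, hypothetical userspace sketch of that read-modify-write pattern; the bit positions and helper functions are invented for illustration, while the driver itself uses its XGMAC_DMA_IOREAD/XGMAC_SET_BITS/XGMAC_DMA_IOWRITE macros.

```c
/*
 * Hypothetical sketch of the pattern adopted by the patch: read DMA_CH_IER
 * once, update both the Tx (TIE) and Rx (RIE) enable bits, write back once.
 * The register model and helpers are invented for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_CH_IER_TIE (1u << 0)  /* Tx interrupt enable (assumed bit position) */
#define DMA_CH_IER_RIE (1u << 6)  /* Rx interrupt enable (assumed bit position) */

/* Stand-in for one channel's memory-mapped DMA_CH_IER register. */
static uint32_t fake_dma_ch_ier = DMA_CH_IER_TIE | DMA_CH_IER_RIE;

static uint32_t ier_read(void)          { return fake_dma_ch_ier; }
static void     ier_write(uint32_t val) { fake_dma_ch_ier = val; }

/* Disable the Tx and Rx interrupts with one register read and one write. */
static void channel_disable_ti_ri(void)
{
	uint32_t ier = ier_read();                  /* single read  */

	ier &= ~(DMA_CH_IER_TIE | DMA_CH_IER_RIE);  /* clear both bits */
	ier_write(ier);                             /* single write */
}

int main(void)
{
	channel_disable_ti_ri();
	printf("DMA_CH_IER after disable: 0x%08x\n", fake_dma_ch_ier);
	return 0;
}
```

Since each per-bit XGMAC_DMA_IOWRITE_BITS call is itself a read-modify-write of the register, folding the TIE and RIE updates into one read and one write roughly halves the MMIO traffic on the interrupt enable/disable path.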
Diffstat (limited to 'drivers/net/ethernet/amd/xgbe/xgbe-dev.c')
-rw-r--r-- | drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 73 |
1 file changed, 34 insertions, 39 deletions
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index e9fed23b2c33..d6a45ced8adb 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1306,56 +1306,48 @@ static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
 	return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
 }
 
-static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
-				       enum xgbe_int_state int_state)
+static int xgbe_enable_int(struct xgbe_channel *channel,
+			   enum xgbe_int int_id)
 {
 	unsigned int dma_ch_ier;
 
-	if (int_state == XGMAC_INT_STATE_SAVE) {
-		channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-		channel->saved_ier &= XGBE_DMA_INTERRUPT_MASK;
-	} else {
-		dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-		dma_ch_ier |= channel->saved_ier;
-		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
-	}
-}
+	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
 
-static int xgbe_enable_int(struct xgbe_channel *channel,
-			   enum xgbe_int int_id)
-{
 	switch (int_id) {
-	case XGMAC_INT_DMA_ISR_DC0IS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
-		break;
 	case XGMAC_INT_DMA_CH_SR_TI:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TPS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TBU:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RI:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RBU:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RPS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
+		break;
+	case XGMAC_INT_DMA_CH_SR_TI_RI:
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
 		break;
 	case XGMAC_INT_DMA_CH_SR_FBE:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
 		break;
 	case XGMAC_INT_DMA_ALL:
-		xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
+		dma_ch_ier |= channel->saved_ier;
 		break;
 	default:
 		return -1;
 	}
 
+	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
 	return 0;
 }
 
@@ -1364,42 +1356,44 @@ static int xgbe_disable_int(struct xgbe_channel *channel,
 {
 	unsigned int dma_ch_ier;
 
+	dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+
 	switch (int_id) {
-	case XGMAC_INT_DMA_ISR_DC0IS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
-		break;
 	case XGMAC_INT_DMA_CH_SR_TI:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TPS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_TBU:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RI:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RBU:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_RPS:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
+		break;
+	case XGMAC_INT_DMA_CH_SR_TI_RI:
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
 		break;
 	case XGMAC_INT_DMA_CH_SR_FBE:
-		XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
+		XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
 		break;
 	case XGMAC_INT_DMA_ALL:
-		xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
-
-		dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+		channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
 		dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
-		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
 		break;
 	default:
 		return -1;
 	}
 
+	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
 	return 0;
 }
 
@@ -1453,6 +1447,7 @@ static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
 
 	/* Set the System Bus mode */
 	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+	XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
 }
 
 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
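The diffstat above is limited to xgbe-dev.c, so the last item of the commit message (recycling Rx descriptors only once a threshold of dirty descriptors has accumulated) does not appear in these hunks. Below is a rough userspace sketch of that idea; the names, ring size, and threshold are invented for illustration and are not the driver's actual values.

```c
/*
 * Hypothetical sketch of threshold-based Rx descriptor recycling: instead of
 * re-arming descriptors one at a time, batch the work once enough of them
 * have been consumed. Names and threshold are invented for illustration.
 */
#include <stdio.h>

#define RX_DESC_CNT        512
#define RX_DIRTY_THRESHOLD (RX_DESC_CNT / 8)  /* assumed batching threshold */

struct rx_ring {
	unsigned int dirty;  /* descriptors consumed but not yet recycled */
};

static void recycle_rx_descriptors(struct rx_ring *ring)
{
	/* Refill buffers and hand the dirty descriptors back to the hardware. */
	printf("recycling %u descriptors\n", ring->dirty);
	ring->dirty = 0;
}

/* Called from the Rx path for each completed descriptor. */
static void rx_descriptor_done(struct rx_ring *ring)
{
	if (++ring->dirty < RX_DIRTY_THRESHOLD)
		return;  /* below the threshold: defer the recycling work */

	recycle_rx_descriptors(ring);
}

int main(void)
{
	struct rx_ring ring = { .dirty = 0 };

	for (int i = 0; i < 200; i++)
		rx_descriptor_done(&ring);

	return 0;
}
```

Deferring the refill lets the descriptor re-arm work and the ring tail update be amortized over a batch of received packets rather than paid for every packet.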