author    Dave Airlie <airlied@redhat.com>    2015-04-20 04:32:26 +0300
committer Dave Airlie <airlied@redhat.com>    2015-04-20 06:05:20 +0300
commit    2c33ce009ca2389dbf0535d0672214d09738e35e (patch)
tree      6186a6458c3c160385d794a23eaf07c786a9e61b /drivers/net/ethernet/amd/xgbe/xgbe-dev.c
parent    cec32a47010647e8b0603726ebb75b990a4057a4 (diff)
parent    09d51602cf84a1264946711dd4ea0dddbac599a1 (diff)
Merge Linus master into drm-next
The merge is clean, but the ARM build fails afterwards due to API changes in the regulator tree. I've included the patch in the merge to fix the build.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/net/ethernet/amd/xgbe/xgbe-dev.c')
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 81
1 file changed, 52 insertions(+), 29 deletions(-)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 400757b49872..21d9497518fd 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -853,6 +853,22 @@ static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
return 0;
}
+static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ unsigned int pr_mode, am_mode;
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+ xgbe_set_promiscuous_mode(pdata, pr_mode);
+ xgbe_set_all_multicast_mode(pdata, am_mode);
+
+ xgbe_add_mac_addresses(pdata);
+
+ return 0;
+}
+
static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
int mmd_reg)
{
@@ -1068,7 +1084,7 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
rdesc->desc3 = 0;
/* Make sure ownership is written to the descriptor */
- wmb();
+ dma_wmb();
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
@@ -1101,9 +1117,24 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
DBGPR("<--tx_desc_init\n");
}
-static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
+static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata, unsigned int index)
{
struct xgbe_ring_desc *rdesc = rdata->rdesc;
+ unsigned int rx_usecs = pdata->rx_usecs;
+ unsigned int rx_frames = pdata->rx_frames;
+ unsigned int inte;
+
+ if (!rx_usecs && !rx_frames) {
+ /* No coalescing, interrupt for every descriptor */
+ inte = 1;
+ } else {
+ /* Set interrupt based on Rx frame coalescing setting */
+ if (rx_frames && !((index + 1) % rx_frames))
+ inte = 1;
+ else
+ inte = 0;
+ }
/* Reset the Rx descriptor
* Set buffer 1 (lo) address to header dma address (lo)
@@ -1117,19 +1148,18 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
- rdata->interrupt ? 1 : 0);
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
/* Since the Rx DMA engine is likely running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
* for the descriptor
*/
- wmb();
+ dma_wmb();
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
/* Make sure ownership is written to the descriptor */
- wmb();
+ dma_wmb();
}
static void xgbe_rx_desc_init(struct xgbe_channel *channel)
@@ -1138,26 +1168,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
unsigned int start_index = ring->cur;
- unsigned int rx_coalesce, rx_frames;
unsigned int i;
DBGPR("-->rx_desc_init\n");
- rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
- rx_frames = pdata->rx_frames;
-
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- /* Set interrupt on completion bit as appropriate */
- if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
- rdata->interrupt = 0;
- else
- rdata->interrupt = 1;
-
/* Initialize Rx descriptor */
- xgbe_rx_desc_reset(rdata);
+ xgbe_rx_desc_reset(pdata, rdata, i);
}
/* Update the total number of Rx descriptors */
@@ -1358,18 +1378,20 @@ static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring_data *rdata;
+ /* Make sure everything is written before the register write */
+ wmb();
+
/* Issue a poll command to Tx DMA by writing address
* of next immediate free descriptor */
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
lower_32_bits(rdata->rdesc_dma));
- /* Start the Tx coalescing timer */
+ /* Start the Tx timer */
if (pdata->tx_usecs && !channel->tx_timer_active) {
channel->tx_timer_active = 1;
- hrtimer_start(&channel->tx_timer,
- ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
- HRTIMER_MODE_REL);
+ mod_timer(&channel->tx_timer,
+ jiffies + usecs_to_jiffies(pdata->tx_usecs));
}
ring->tx.xmit_more = 0;
@@ -1565,7 +1587,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
* is written to the descriptor(s) before setting the OWN bit
* for the first descriptor
*/
- wmb();
+ dma_wmb();
/* Set OWN bit for the first descriptor */
rdata = XGBE_GET_DESC_DATA(ring, start_index);
@@ -1577,7 +1599,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
#endif
/* Make sure ownership is written to the descriptor */
- wmb();
+ dma_wmb();
ring->cur = cur_index + 1;
if (!packet->skb->xmit_more ||
@@ -1613,7 +1635,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
return 1;
/* Make sure descriptor fields are read after reading the OWN bit */
- rmb();
+ dma_rmb();
#ifdef XGMAC_ENABLE_RX_DESC_DUMP
xgbe_dump_rx_desc(ring, rdesc, ring->cur);
@@ -2004,7 +2026,8 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
- netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
+ netdev_notice(pdata->netdev,
+ "%d Tx hardware queues, %d byte fifo per queue\n",
pdata->tx_q_count, ((fifo_size + 1) * 256));
}
@@ -2019,7 +2042,8 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
- netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
+ netdev_notice(pdata->netdev,
+ "%d Rx hardware queues, %d byte fifo per queue\n",
pdata->rx_q_count, ((fifo_size + 1) * 256));
}
@@ -2800,6 +2824,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
* Initialize MAC related features
*/
xgbe_config_mac_address(pdata);
+ xgbe_config_rx_mode(pdata);
xgbe_config_jumbo_enable(pdata);
xgbe_config_flow_control(pdata);
xgbe_config_mac_speed(pdata);
@@ -2819,10 +2844,8 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->tx_complete = xgbe_tx_complete;
- hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
- hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
- hw_if->add_mac_addresses = xgbe_add_mac_addresses;
hw_if->set_mac_address = xgbe_set_mac_address;
+ hw_if->config_rx_mode = xgbe_config_rx_mode;
hw_if->enable_rx_csum = xgbe_enable_rx_csum;
hw_if->disable_rx_csum = xgbe_disable_rx_csum;
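
A note on the Rx descriptor change above: the per-descriptor INTE decision that this patch moves into xgbe_rx_desc_reset() can be illustrated with the standalone sketch below. It is not part of the patch; the helper name rx_desc_wants_interrupt() and the example values are hypothetical, but the decision rule mirrors the logic added to xgbe_rx_desc_reset() in the diff.

/*
 * Standalone sketch (not from the patch): the Rx interrupt coalescing
 * rule applied per descriptor.  With no coalescing configured
 * (rx_usecs == 0 and rx_frames == 0) every descriptor requests an
 * interrupt; with frame coalescing, only every rx_frames-th
 * descriptor does.
 */
#include <stdio.h>

static unsigned int rx_desc_wants_interrupt(unsigned int index,
					    unsigned int rx_usecs,
					    unsigned int rx_frames)
{
	if (!rx_usecs && !rx_frames)
		return 1;	/* no coalescing: interrupt on every descriptor */

	/* frame-based coalescing: interrupt once per rx_frames descriptors */
	return (rx_frames && !((index + 1) % rx_frames)) ? 1 : 0;
}

int main(void)
{
	unsigned int i;

	/* Example: rx_frames = 4, rx_usecs = 0 -> INTE set on descriptors 3, 7, ... */
	for (i = 0; i < 8; i++)
		printf("desc %u: INTE=%u\n", i,
		       rx_desc_wants_interrupt(i, 0, 4));

	return 0;
}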