Diffstat (limited to 'drivers/net/ixgbe/ixgbe_main.c')
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c  110
1 file changed, 64 insertions(+), 46 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 79aa811c403c..286ecc0e6ab7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -187,15 +187,14 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
struct ixgbe_tx_buffer
*tx_buffer_info)
{
- if (tx_buffer_info->dma) {
- pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
- tx_buffer_info->length, PCI_DMA_TODEVICE);
- tx_buffer_info->dma = 0;
- }
+ tx_buffer_info->dma = 0;
if (tx_buffer_info->skb) {
+ skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(tx_buffer_info->skb);
tx_buffer_info->skb = NULL;
}
+ tx_buffer_info->time_stamp = 0;
/* tx_buffer_info must be completely set up in the transmit path */
}
@@ -204,15 +203,11 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
unsigned int eop)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 head, tail;
/* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of eop */
- head = IXGBE_READ_REG(hw, tx_ring->head);
- tail = IXGBE_READ_REG(hw, tx_ring->tail);
adapter->detect_tx_hung = false;
- if ((head != tail) &&
- tx_ring->tx_buffer_info[eop].time_stamp &&
+ if (tx_ring->tx_buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
!(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
/* detected Tx unit hang */
@@ -227,7 +222,8 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
" time_stamp <%lx>\n"
" jiffies <%lx>\n",
tx_ring->queue_index,
- head, tail,
+ IXGBE_READ_REG(hw, tx_ring->head),
+ IXGBE_READ_REG(hw, tx_ring->tail),
tx_ring->next_to_use, eop,
tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
return true;
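
The simplified check leans entirely on the per-descriptor time_stamp and the kernel's time_after(), which stays correct when jiffies wraps because it compares through a signed difference of unsigned values. A small user-space model of that wrap-safe test; the tick rate and the sample values are illustrative, not taken from the driver:

#include <stdbool.h>
#include <stdio.h>

/*
 * Model of the kernel's time_after(a, b): true if a is after b.
 * The signed cast of the unsigned difference keeps the comparison
 * correct even after the tick counter wraps around.
 */
static bool time_after_model(unsigned long a, unsigned long b)
{
    return (long)(b - a) < 0;
}

int main(void)
{
    unsigned long hz = 1000;                   /* assumed ticks per second */
    unsigned long stamp = (unsigned long)-10;  /* stamped just before wrap */
    unsigned long now = 995;                   /* counter has wrapped past 0 */

    /* Numerically now < stamp, yet the stamp is ~1005 ticks old, so the
     * one-second deadline (stamp + hz) has expired. */
    printf("hang suspected: %s\n",
           time_after_model(now, stamp + hz) ? "yes" : "no");
    return 0;
}
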
@@ -2934,6 +2930,7 @@ err_tx_ring_allocation:
**/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
+ struct ixgbe_hw *hw = &adapter->hw;
int err = 0;
int vector, v_budget;
@@ -2948,12 +2945,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
/*
* At the same time, hardware can only support a maximum of
- * MAX_MSIX_COUNT vectors. With features such as RSS and VMDq,
- * we can easily reach upwards of 64 Rx descriptor queues and
- * 32 Tx queues. Thus, we cap it off in those rare cases where
- * the cpu count also exceeds our vector limit.
+ * hw.mac->max_msix_vectors vectors. With features
+ * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
+ * descriptor queues supported by our device. Thus, we cap it off in
+ * those rare cases where the cpu count also exceeds our vector limit.
*/
- v_budget = min(v_budget, MAX_MSIX_COUNT);
+ v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
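
The updated comment captures the reasoning: the vector budget is driven by queue counts, but it must never exceed what the MAC actually advertises, so it is clamped against hw->mac.max_msix_vectors rather than a compile-time constant. A user-space sketch of that budgeting; the queue counts, the extra non-queue vector, and the advertised limit are assumptions for illustration, not values from the driver:

#include <stdio.h>

static int min_int(int a, int b)
{
    return a < b ? a : b;
}

/*
 * Illustrative vector budgeting: one vector per Rx and per Tx queue plus
 * one for everything else, clamped to whatever the device advertises.
 * The "+ 1" and all the numbers below are assumptions for the example.
 */
static int msix_budget(int rx_queues, int tx_queues, int device_max_vectors)
{
    int budget = rx_queues + tx_queues + 1;

    return min_int(budget, device_max_vectors);
}

int main(void)
{
    /* 16 Rx + 16 Tx queues would like 33 vectors; a part that only
     * advertises 18 MSI-X entries caps the request at 18. */
    printf("v_budget = %d\n", msix_budget(16, 16, 18));
    return 0;
}
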
@@ -3169,11 +3166,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif
/* default flow control settings */
- hw->fc.requested_mode = ixgbe_fc_none;
+ hw->fc.requested_mode = ixgbe_fc_full;
+ hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
+ hw->fc.disable_fc_autoneg = false;
/* enable itr by default in dynamic mode */
adapter->itr_setting = 1;
@@ -3489,10 +3488,10 @@ err_up:
ixgbe_release_hw_control(adapter);
ixgbe_free_irq(adapter);
err_req_irq:
- ixgbe_free_all_rx_resources(adapter);
err_setup_rx:
- ixgbe_free_all_tx_resources(adapter);
+ ixgbe_free_all_rx_resources(adapter);
err_setup_tx:
+ ixgbe_free_all_tx_resources(adapter);
ixgbe_reset(adapter);
return err;
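
The relabelled error path restores the usual goto-unwind idiom: each err_* label now also releases whatever the failing step may have partially allocated and then falls through, so teardown runs in reverse order of setup. A user-space sketch of the pattern, with resource names invented for the example:

#include <stdio.h>
#include <stdlib.h>

/*
 * Two setup steps done in order: A, then B.  The cleanup helpers are
 * assumed to tolerate a partially completed step, which is what lets an
 * error label release the failing step's own leftovers before falling
 * through.  All names here are made up for illustration.
 */
static int setup_a(void)    { puts("setup A");   return 0; }
static int setup_b(void)    { puts("setup B");   return -1; /* simulated failure */ }
static void cleanup_b(void) { puts("cleanup B"); }
static void cleanup_a(void) { puts("cleanup A"); }

static int open_path(void)
{
    int err;

    err = setup_a();
    if (err)
        goto err_setup_a;

    err = setup_b();
    if (err)
        goto err_setup_b;

    return 0;

err_setup_b:
    cleanup_b();    /* release B's partial allocations, then fall through */
err_setup_a:
    cleanup_a();    /* release A, in reverse order of setup */
    return err;
}

int main(void)
{
    return open_path() ? EXIT_FAILURE : EXIT_SUCCESS;
}
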
@@ -4163,32 +4162,39 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct sk_buff *skb, unsigned int first)
{
struct ixgbe_tx_buffer *tx_buffer_info;
- unsigned int len = skb->len;
+ unsigned int len = skb_headlen(skb);
unsigned int offset = 0, size, count = 0, i;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
-
- len -= skb->data_len;
+ dma_addr_t *map;
i = tx_ring->next_to_use;
+ if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+ dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+ return 0;
+ }
+
+ map = skb_shinfo(skb)->dma_maps;
+
while (len) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_single(adapter->pdev,
- skb->data + offset,
- size, PCI_DMA_TODEVICE);
+ tx_buffer_info->dma = map[0] + offset;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
offset += size;
count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
+
+ if (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
}
for (f = 0; f < nr_frags; f++) {
@@ -4196,33 +4202,27 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
- offset = frag->page_offset;
+ offset = 0;
while (len) {
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
tx_buffer_info = &tx_ring->tx_buffer_info[i];
size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
tx_buffer_info->length = size;
- tx_buffer_info->dma = pci_map_page(adapter->pdev,
- frag->page,
- offset,
- size,
- PCI_DMA_TODEVICE);
+ tx_buffer_info->dma = map[f + 1] + offset;
tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
offset += size;
count++;
- i++;
- if (i == tx_ring->count)
- i = 0;
}
}
- if (i == 0)
- i = tx_ring->count - 1;
- else
- i = i - 1;
+
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[first].next_to_watch = i;
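
In the reworked ixgbe_tx_map(), skb_dma_map() performs all of the DMA mapping up front and publishes the handles through skb_shinfo(skb)->dma_maps (head segment at index 0, page fragments after it, as the hunk shows); the loops only slice those segments into descriptors of at most IXGBE_MAX_DATA_PER_TXD bytes and leave i pointing at the last slot used, so the skb and next_to_watch can be recorded there. A user-space model of that slicing and index bookkeeping; the per-descriptor limit, ring size, and segment lengths are illustrative assumptions:

#include <stdio.h>

#define MAX_PER_DESC 4096u  /* assumed per-descriptor byte limit */
#define RING_SIZE    8u     /* assumed ring size */

struct desc_model {
    unsigned int seg;       /* which pre-mapped segment the bytes come from */
    unsigned int offset;    /* offset of this slice within that segment */
    unsigned int len;       /* bytes carried by this descriptor */
};

/*
 * Slice a packet made of nsegs pre-mapped segments into ring slots,
 * mimicking the index bookkeeping of the reworked mapping loop: the
 * first descriptor reuses the current slot, every later one advances
 * (and wraps) the index first, so *last ends up at the final slot used.
 */
static unsigned int slice_segments(const unsigned int *seg_len,
                                   unsigned int nsegs, unsigned int first,
                                   struct desc_model *ring, unsigned int *last)
{
    unsigned int i = first, count = 0, s;

    for (s = 0; s < nsegs; s++) {
        unsigned int len = seg_len[s], offset = 0;

        while (len) {
            unsigned int size = len < MAX_PER_DESC ? len : MAX_PER_DESC;

            if (count)                  /* advance before all but the first */
                i = (i + 1) % RING_SIZE;
            ring[i].seg = s;
            ring[i].offset = offset;
            ring[i].len = size;
            len -= size;
            offset += size;
            count++;
        }
    }
    *last = i;
    return count;
}

int main(void)
{
    /* A 6000-byte head plus two page fragments, starting at slot 6:
     * the head needs two slots, so four descriptors land in 6, 7, 0, 1. */
    unsigned int segs[] = { 6000, 4096, 1500 };
    struct desc_model ring[RING_SIZE] = { { 0 } };
    unsigned int last;
    unsigned int count = slice_segments(segs, 3, 6, ring, &last);

    printf("descriptors used: %u, last slot: %u\n", count, last);
    return 0;
}
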
@@ -4388,13 +4388,19 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
(skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
- ixgbe_tx_queue(adapter, tx_ring, tx_flags,
- ixgbe_tx_map(adapter, tx_ring, skb, first),
- skb->len, hdr_len);
+ count = ixgbe_tx_map(adapter, tx_ring, skb, first);
- netdev->trans_start = jiffies;
+ if (count) {
+ ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
+ hdr_len);
+ netdev->trans_start = jiffies;
+ ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
- ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+ } else {
+ dev_kfree_skb_any(skb);
+ tx_ring->tx_buffer_info[first].time_stamp = 0;
+ tx_ring->next_to_use = first;
+ }
return NETDEV_TX_OK;
}
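
Queueing is now conditional on ixgbe_tx_map() reporting how many descriptors it filled; a zero return means skb_dma_map() failed, so the frame is dropped and next_to_use is wound back to first, discarding anything staged earlier in the path (such as an offload context descriptor). A user-space sketch of that commit-or-rollback shape; the ring model and helpers are invented for the example:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8u  /* assumed ring size */

/* Minimal ring model: only the producer index matters here. */
struct ring_model {
    unsigned int next_to_use;
};

/*
 * Stand-in for the mapping/fill step: returns how many slots it consumed,
 * or 0 when the (simulated) DMA mapping fails before touching the ring.
 */
static unsigned int stage_packet(struct ring_model *r, unsigned int slots,
                                 bool fail)
{
    if (fail)
        return 0;
    r->next_to_use = (r->next_to_use + slots) % RING_SIZE;
    return slots;
}

static int xmit_model(struct ring_model *r, bool fail)
{
    unsigned int first = r->next_to_use;  /* rollback point */
    unsigned int count;

    /* An offload/context slot may be staged before the data descriptors,
     * roughly like the TSO/checksum setup that runs before the mapping. */
    r->next_to_use = (r->next_to_use + 1) % RING_SIZE;

    count = stage_packet(r, 3, fail);
    if (count) {
        /* commit: hardware would now be told about the new descriptors */
        printf("queued, next_to_use=%u\n", r->next_to_use);
    } else {
        /* rollback: nothing reached hardware, so drop the frame and wind
         * the producer index back so the staged slots get reused */
        r->next_to_use = first;
        printf("map failed, rolled back to slot %u\n", first);
    }
    return 0;  /* the frame is consumed either way, as with NETDEV_TX_OK */
}

int main(void)
{
    struct ring_model r = { .next_to_use = 5 };

    xmit_model(&r, false);
    xmit_model(&r, true);
    return 0;
}
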
@@ -4987,8 +4993,20 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
+
#endif /* CONFIG_IXGBE_DCA */
+#ifdef DEBUG
+/**
+ * ixgbe_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ return adapter->netdev->name;
+}
+#endif
module_exit(ixgbe_exit_module);
/* ixgbe_main.c */