From 185095fb80ce57c0f3db8738e36ad7c02dc34d33 Mon Sep 17 00:00:00 2001 From: Bruce Allan Date: Thu, 7 Jun 2012 02:23:37 +0000 Subject: e1000e: use more informative logging macros when netdev not yet registered Based on a report from Ethan Zhao, before calling register_netdev() the driver should be using logging macros that do not display the potentially confusing "(unregistered net_device)" yet still display the useful driver name and PCI bus/device/function. Reported-by: Ethan Zhao Signed-off-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 11 +++++--- drivers/net/ethernet/intel/e1000e/param.c | 43 +++++++++++++++++++----------- 2 files changed, 34 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 31d37a2b5ba8..ba86b3f8a404 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6238,7 +6238,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, } if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) - e_info("PHY reset is blocked due to SOL/IDER session.\n"); + dev_info(&pdev->dev, + "PHY reset is blocked due to SOL/IDER session.\n"); /* Set initial default active device features */ netdev->features = (NETIF_F_SG | @@ -6288,7 +6289,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) break; if (i == 2) { - e_err("The NVM Checksum Is Not Valid\n"); + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; } @@ -6298,13 +6299,15 @@ static int __devinit e1000_probe(struct pci_dev *pdev, /* copy the MAC address */ if (e1000e_read_mac_addr(&adapter->hw)) - e_err("NVM Read Error while reading MAC address\n"); + dev_err(&pdev->dev, + "NVM Read Error while reading MAC address\n"); memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->perm_addr)) { - e_err("Invalid MAC Address: %pM\n", netdev->perm_addr); + dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", + netdev->perm_addr); err = -EIO; goto err_eeprom; } diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 55cc1565bc2f..dfbfa7fd98c3 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -199,16 +199,19 @@ static int __devinit e1000_validate_option(unsigned int *value, case enable_option: switch (*value) { case OPTION_ENABLED: - e_info("%s Enabled\n", opt->name); + dev_info(&adapter->pdev->dev, "%s Enabled\n", + opt->name); return 0; case OPTION_DISABLED: - e_info("%s Disabled\n", opt->name); + dev_info(&adapter->pdev->dev, "%s Disabled\n", + opt->name); return 0; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { - e_info("%s set to %i\n", opt->name, *value); + dev_info(&adapter->pdev->dev, "%s set to %i\n", + opt->name, *value); return 0; } break; @@ -220,7 +223,8 @@ static int __devinit e1000_validate_option(unsigned int *value, ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') - e_info("%s\n", ent->str); + dev_info(&adapter->pdev->dev, "%s\n", + ent->str); return 0; } } @@ -230,8 +234,8 @@ static int __devinit e1000_validate_option(unsigned int *value, BUG(); } - e_info("Invalid %s value specified (%i) %s\n", 
opt->name, *value, - opt->err); + dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); *value = opt->def; return -1; } @@ -251,8 +255,10 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) int bd = adapter->bd_number; if (bd >= E1000_MAX_NIC) { - e_notice("Warning: no configuration for board #%i\n", bd); - e_notice("Using defaults for all values\n"); + dev_notice(&adapter->pdev->dev, + "Warning: no configuration for board #%i\n", bd); + dev_notice(&adapter->pdev->dev, + "Using defaults for all values\n"); } { /* Transmit Interrupt Delay */ @@ -366,27 +372,32 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) * default values */ if (adapter->itr > 4) - e_info("%s set to default %d\n", opt.name, - adapter->itr); + dev_info(&adapter->pdev->dev, + "%s set to default %d\n", opt.name, + adapter->itr); } adapter->itr_setting = adapter->itr; switch (adapter->itr) { case 0: - e_info("%s turned off\n", opt.name); + dev_info(&adapter->pdev->dev, "%s turned off\n", + opt.name); break; case 1: - e_info("%s set to dynamic mode\n", opt.name); + dev_info(&adapter->pdev->dev, + "%s set to dynamic mode\n", opt.name); adapter->itr = 20000; break; case 3: - e_info("%s set to dynamic conservative mode\n", - opt.name); + dev_info(&adapter->pdev->dev, + "%s set to dynamic conservative mode\n", + opt.name); adapter->itr = 20000; break; case 4: - e_info("%s set to simplified (2000-8000 ints) mode\n", - opt.name); + dev_info(&adapter->pdev->dev, + "%s set to simplified (2000-8000 ints) mode\n", + opt.name); break; default: /* -- cgit v1.2.3 From 4f8a91ad9aafbc7c70097a16404fa89aa41c8deb Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 28 Mar 2012 11:42:45 +0000 Subject: ixgbe: align flow control DV macros with datasheet The flow control DV macros are used to calculate the flow control high and low thresholds. This patch annotates these macros slightly better and fixes the issues below. The macro variables are renamed LINK to _max_frame_link and TC to _max_frame_tc. This was to avoid confusion and make them more readable. It was found that people auditing the code read TC to be 'traffic class' in the 802.1Q definition instead of the max frame size of the tc. Hopefully it is clear now. This audit also found the following real deviations from the theoretical values. Fixed in this patch. * I multiplied the DV calculations by (36/25) which always evaluates to 1. This does not match the intended theoretical value of 1.44. * IXGBE_BT2KB added 1023 to account for rounding however this really should be 8 * 1023 - 1 to account for division by 8k. * x2 multiplication of max frame in DV calculations to account for updated hardware recommendations. With this patch the DV values are inline with the recommendations in the 82599 and 82598 data sheets. Its worth noting I did not see any dropped frames with flow control on in my experiments without this patch. However aligning with the hardware specs and recommendations seems like a good idea here to account for worst case scenarios. 
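A minimal standalone illustration (not part of the patch) of the two integer-arithmetic issues called out above: in C, 36 / 25 truncates to 1 rather than the intended 1.44 fill rate, and adding only 1023 before dividing by 8 * 1024 can round a bit-time count down instead of up.

/* Illustration only -- any C compiler shows the effect. */
#include <stdio.h>

#define OLD_FILL_RATE   (36 / 25)                              /* integer division: always 1 */
#define OLD_BT2KB(BT)   (((BT) + 1023) / (8 * 1024))           /* may round down */
#define NEW_BT2KB(BT)   (((BT) + (8 * 1024 - 1)) / (8 * 1024)) /* always rounds up */

int main(void)
{
	printf("36 / 25 in integer math = %d\n", OLD_FILL_RATE);   /* prints 1 */
	printf("2000 bit times: old = %d KB, new = %d KB\n",
	       OLD_BT2KB(2000), NEW_BT2KB(2000));                  /* 0 KB vs 1 KB */
	return 0;
}

This is also why the corrected macros below fold the fill rate into a single expression as "36 * (...) / 25 + 1" instead of keeping a pre-computed IXGBE_FILL_RATE.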
Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 37 ++++++++++++++++----------- 1 file changed, 22 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 204848d2448c..1085c0739a3c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2419,7 +2419,7 @@ typedef u32 ixgbe_physical_layer; */ /* BitTimes (BT) conversion */ -#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024)) +#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) #define IXGBE_B2BT(BT) (BT * 8) /* Calculate Delay to respond to PFC */ @@ -2450,24 +2450,31 @@ typedef u32 ixgbe_physical_layer; #define IXGBE_PCI_DELAY 10000 /* Calculate X540 delay value in bit times */ -#define IXGBE_FILL_RATE (36 / 25) - -#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \ - (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \ - (2 * IXGBE_CABLE_DC) + \ - (2 * IXGBE_ID_X540) + \ - IXGBE_HD + IXGBE_B2BT(TC))) +#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID_X540) + \ + IXGBE_HD) / 25 + 1) + \ + 2 * IXGBE_B2BT(_max_frame_tc)) /* Calculate 82599, 82598 delay value in bit times */ -#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \ - (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \ - (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \ - IXGBE_HD + IXGBE_B2BT(TC))) +#define IXGBE_DV(_max_frame_link, _max_frame_tc) \ + ((36 * \ + (IXGBE_B2BT(_max_frame_link) + \ + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID) + \ + IXGBE_HD) / 25 + 1) + \ + 2 * IXGBE_B2BT(_max_frame_tc)) /* Calculate low threshold delay values */ -#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \ - (IXGBE_FILL_RATE * IXGBE_PCI_DELAY)) -#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC)) +#define IXGBE_LOW_DV_X540(_max_frame_tc) \ + (2 * IXGBE_B2BT(_max_frame_tc) + \ + (36 * IXGBE_PCI_DELAY / 25) + 1) +#define IXGBE_LOW_DV(_max_frame_tc) \ + (2 * IXGBE_LOW_DV_X540(_max_frame_tc)) /* Software ATR hash keys */ #define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 -- cgit v1.2.3 From 6cbc52ef10ba3c7fbd8954270c025ffbf643ab05 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 16 May 2012 07:06:38 +0000 Subject: ixgbe: do not compile ixgbe_sysfs.c when CONFIG_IXGBE_HWMON is not set ixgbe_sysfs.c is only needed when CONFIG_IXGBE_HWMON is configured in the kernel. 
Signed-off-by: Emil Tantilov Acked-by: Don Skidmore Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/Makefile | 4 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 0bdf06bc5c49..5fd5d04c26c9 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -34,11 +34,11 @@ obj-$(CONFIG_IXGBE) += ixgbe.o ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o + ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o - +ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c index 1d80b1cefa6a..2334fce47018 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -37,7 +37,6 @@ #include #include -#ifdef CONFIG_IXGBE_HWMON /* hwmon callback functions */ static ssize_t ixgbe_hwmon_show_location(struct device *dev, struct device_attribute *attr, @@ -241,5 +240,4 @@ err: exit: return rc; } -#endif /* CONFIG_IXGBE_HWMON */ -- cgit v1.2.3 From 0ede4a606af1778b24b84b7feed3c0bed2751a34 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 22 May 2012 06:08:32 +0000 Subject: ixgbe: ptp code cleanup This patch fixes two minor nits from Richard Cochran. The first is a case of ambitious line wrapping that wasn't necessary. The second is to re-order the flag checks for PPS support. Previously, the hardware test was done first, and the interrupt flag test was done second. Now, test the interrupt flag and use the unlikely macro. 
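For context (the standard kernel definitions from include/linux/compiler.h, not part of this patch): unlikely() only hints the compiler's branch layout via __builtin_expect, so testing the rarely-true interrupt flag first and annotating it keeps the common no-event path short without changing behaviour.

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)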
Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8 +++----- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 13 +++++++------ 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 17ad6a3c1be1..1675b662da06 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -790,12 +790,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_packets += tx_buffer->gso_segs; #ifdef CONFIG_IXGBE_PTP - if (unlikely(tx_buffer->tx_flags & - IXGBE_TX_FLAGS_TSTAMP)) - ixgbe_ptp_tx_hwtstamp(q_vector, - tx_buffer->skb); - + if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP)) + ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb); #endif + /* free the skb */ dev_kfree_skb_any(tx_buffer->skb); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index ddc6a4d19302..174f41fd1d2f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -307,13 +307,14 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) !(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) return; - switch (hw->mac.type) { - case ixgbe_mac_X540: - if (eicr & IXGBE_EICR_TIMESYNC) + if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) { + switch (hw->mac.type) { + case ixgbe_mac_X540: ptp_clock_event(adapter->ptp_clock, &event); - break; - default: - break; + break; + default: + break; + } } } -- cgit v1.2.3 From c19197a7866fee6ff9985d9dc9962bc2ccbd3d7b Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 22 May 2012 06:08:37 +0000 Subject: ixgbe: PTP Fix hwtstamp mode settings When enabling the hwtstamp mode for Rx timestamping the V2 ptp event type specific modes (Delay Request and Sync) have been rolled into the V2 all event packet modes, in order to more accurately represent what hardware is doing. Hardware always timestamps the Path delay packets when a V2 mode is selected, regardless of what type was selected (in order to always support Path delay mode). However this means the user selected modes of timestamping only Sync or Delay Request is not truly supported. This patch correctly sets the mode for the hwtstamp config and returns to the user that all V2 event packets will be timestamped. Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 174f41fd1d2f..5ed8cffee178 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -540,6 +540,11 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, * type has to be specified. Matching the kind of event packet is * not supported, with the exception of "all V2 events regardless of * level 2 or 4". + * + * Since hardware always timestamps Path delay packets when timestamping V2 + * packets, regardless of the type specified in the register, only use V2 + * Event mode. This more accurately tells the user what the hardware is going + * to do anyways. 
*/ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr, int cmd) @@ -583,27 +588,15 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG; is_l4 = true; break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2; - tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG; - is_l2 = true; - is_l4 = true; - config.rx_filter = HWTSTAMP_FILTER_SOME; - break; case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2; - tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG; - is_l2 = true; - is_l4 = true; - config.rx_filter = HWTSTAMP_FILTER_SOME; - break; - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_EVENT: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; is_l2 = true; -- cgit v1.2.3 From 1d1a79b5b94b0aa84e1e78dd9acdcffb12274848 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 22 May 2012 06:18:08 +0000 Subject: ixgbe: Check PTP Rx timestamps via BPF filter This patch fixes a potential Rx timestamp deadlock that causes the Rx timestamping to stall indefinitely. The issue could occur when a PTP packet is timestamped by hardware but never reaches the Rx queue. In order to prevent a permanent loss of timestamping, the RXSTMP(L/H) registers have to be read to unlock them. (This used to only occur when a packet that was timestamped reached the software.) However the registers can't be read early otherwise there is no way to correlate them to the packet. This patch introduces a filter function which can be used to determine if a packet should have been timestamped. Supplied with the filter setup by the hwtstamp ioctl, check to make sure the PTP protocol and message type match the expected values. If so, then read the timestamp registers (to free them.) At this point check the descriptor bit, if the bit is set then we know this packet correlates to the timestamp stored in the RXTSTAMP registers. Otherwise, assume that packet was dropped by the hardware, and ignore this timestamp value. However, we have at least unlocked the rxtstamp registers for future timestamping. Due to the way the driver handles skb data, it cannot be directly accessed. In order to work around this, a copy of the skb data into a linear buffer is made. 
From this buffer it becomes possible to read the data correctly Signed-off-by: Jacob Keller Reviewed-by: Richard Cochran Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3 +- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 113 +++++++++++++++++++++++--- 3 files changed, 104 insertions(+), 14 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 3ef3c5284e52..41f9f6e2a4c1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -561,6 +561,7 @@ struct ixgbe_adapter { spinlock_t tmreg_lock; struct cyclecounter cc; struct timecounter tc; + int rx_hwtstamp_filter; u32 base_incval; u32 cycle_speed; #endif /* CONFIG_IXGBE_PTP */ @@ -718,6 +719,7 @@ extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector, struct sk_buff *skb); extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, + union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb); extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr, int cmd); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1675b662da06..b0ddfd47e473 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1397,8 +1397,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ixgbe_rx_checksum(rx_ring, rx_desc, skb); #ifdef CONFIG_IXGBE_PTP - if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)) - ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); + ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb); #endif if ((dev->features & NETIF_F_HW_VLAN_RX) && diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 5ed8cffee178..cb7d1b2982c5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -26,6 +26,7 @@ *******************************************************************************/ #include "ixgbe.h" #include +#include /* * The 82599 and the X540 do not have true 64bit nanosecond scale @@ -100,6 +101,10 @@ #define NSECS_PER_SEC 1000000000ULL #endif +static struct sock_filter ptp_filter[] = { + PTP_FILTER +}; + /** * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) * @cc - the cyclecounter structure @@ -425,6 +430,68 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) } } +/** + * ixgbe_ptp_match - determine if this skb matches a ptp packet + * @skb: pointer to the skb + * @hwtstamp: pointer to the hwtstamp_config to check + * + * Determine whether the skb should have been timestamped, assuming the + * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet + * should have a timestamp waiting in the registers, and 0 otherwise. + * + * V1 packets have to check the version type to determine whether they are + * correct. However, we can't directly access the data because it might be + * fragmented in the SKB, in paged memory. In order to work around this, we + * use skb_copy_bits which will properly copy the data whether it is in the + * paged memory fragments or not. We have to copy the IP header as well as the + * message type. 
+ */ +static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter) +{ + struct iphdr iph; + u8 msgtype; + unsigned int type, offset; + + if (rx_filter == HWTSTAMP_FILTER_NONE) + return 0; + + type = sk_run_filter(skb, ptp_filter); + + if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT)) + return type & PTP_CLASS_V2; + + /* For the remaining cases actually check message type */ + switch (type) { + case PTP_CLASS_V1_IPV4: + skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph)); + offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL; + break; + case PTP_CLASS_V1_IPV6: + offset = OFF_PTP6 + OFF_PTP_CONTROL; + break; + default: + /* other cases invalid or handled above */ + return 0; + } + + /* Make sure our buffer is long enough */ + if (skb->len < offset) + return 0; + + skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype)); + + switch (rx_filter) { + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG); + break; + default: + return 0; + } +} + /** * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp * @q_vector: structure containing interrupt and ring information @@ -474,6 +541,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector, /** * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp * @q_vector: structure containing interrupt and ring information + * @rx_desc: the rx descriptor * @skb: particular skb to send timestamp with * * if the timestamp is valid, we convert it into the timecounter ns @@ -481,6 +549,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector, * is passed up the network stack */ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, + union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { struct ixgbe_adapter *adapter; @@ -498,21 +567,33 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, hw = &adapter->hw; tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + + /* Check if we have a valid timestamp and make sure the skb should + * have been timestamped */ + if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) || + !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter))) + return; + + /* + * Always read the registers, in order to clear a possible fault + * because of stagnant RX timestamp values for a packet that never + * reached the queue. + */ regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; /* - * If this bit is set, then the RX registers contain the time stamp. No - * other packet will be time stamped until we read these registers, so - * read the registers to make them available again. Because only one - * packet can be time stamped at a time, we know that the register - * values must belong to this one here and therefore we don't need to - * compare any of the additional attributes stored for it. + * If the timestamp bit is set in the packet's descriptor, we know the + * timestamp belongs to this packet. No other packet can be + * timestamped until the registers for timestamping have been read. + * Therefor only one packet with this bit can be in the queue at a + * time, and the rx timestamp values that were in the registers belong + * to this packet. * * If nothing went wrong, then it should have a skb_shared_tx that we * can turn into a skb_shared_hwtstamps. 
*/ - if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) + if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) return; spin_lock_irqsave(&adapter->tmreg_lock, flags); @@ -598,19 +679,20 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; is_l2 = true; is_l4 = true; + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_ALL: default: /* - * register RXMTRL must be set, therefore it is not - * possible to time stamp both V1 Sync and Delay_Req messages - * and hardware does not support timestamping all packets - * => return error + * register RXMTRL must be set in order to do V1 packets, + * therefore it is not possible to time stamp both V1 Sync and + * Delay_Req messages and hardware does not support + * timestamping all packets => return error */ + config.rx_filter = HWTSTAMP_FILTER_NONE; return -ERANGE; } @@ -620,6 +702,9 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, return 0; } + /* Store filter value for later use */ + adapter->rx_hwtstamp_filter = config.rx_filter; + /* define ethertype filter for timestamped packets */ if (is_l2) IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), @@ -855,6 +940,10 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) return; } + /* initialize the ptp filter */ + if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) + e_dev_warn("ptp_filter_init failed\n"); + spin_lock_init(&adapter->tmreg_lock); ixgbe_ptp_start_cyclecounter(adapter); -- cgit v1.2.3 From 7500673be30f9467cd1a0e065c93e75c5213b44d Mon Sep 17 00:00:00 2001 From: Tushar Dave Date: Tue, 12 Jun 2012 13:03:29 +0000 Subject: e1000: Combining Bitwise OR in one expression. Signed-off-by: Tushar Dave Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000/e1000_main.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 7483ca0a6282..183a4a3224ba 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1078,18 +1078,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_SUPP_NOFCS; netdev->features |= netdev->hw_features; - netdev->hw_features |= NETIF_F_RXCSUM; - netdev->hw_features |= NETIF_F_RXALL; - netdev->hw_features |= NETIF_F_RXFCS; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); if (pci_using_dac) { netdev->features |= NETIF_F_HIGHDMA; netdev->vlan_features |= NETIF_F_HIGHDMA; } - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_HW_CSUM; - netdev->vlan_features |= NETIF_F_SG; + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); netdev->priv_flags |= IFF_UNICAST_FLT; -- cgit v1.2.3 From f00b0da776fda1abc481578e3932a668f603d72d Mon Sep 17 00:00:00 2001 From: Lior Levy Date: Sat, 4 Jun 2011 06:05:03 +0000 Subject: igb: A fix to VF TX rate limit There is a need to configure MMW_SIZE in register RTTBCNRM with a correct value. For 82576 device, the value should be 0x14. 
Signed-off-by: Lior Levy Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_regs.h | 1 + drivers/net/ethernet/intel/igb/igb_main.c | 5 +++++ 2 files changed, 6 insertions(+) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 35d1e4f2c92c..10efcd88dca0 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -117,6 +117,7 @@ /* TX Rate Limit Registers */ #define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ +#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ #define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ /* Split and Replication RX Control - RW */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index dd3bfe8cd36c..64090549722d 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6997,6 +6997,11 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, } wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ + /* + * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM + * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. + */ + wr32(E1000_RTTBCNRM, 0x14); wr32(E1000_RTTBCNRC, bcnrc_val); } -- cgit v1.2.3 From d3eef8c8a033a5ee56ab5d923068eb8cd5d53887 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Wed, 16 May 2012 01:46:00 +0000 Subject: igb: Add switch case for supported hardware to igb_ptp_remove. PTP initialization is only done on supported parts, so remove needs same checks or it will cause crashes on systems with igb devices that don't support PTP. This patch adds those checks to the exit function. Signed-off-by: Carolyn Wyborny Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_ptp.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index d5ee7fa50723..c846ea9131a3 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -330,7 +330,17 @@ void igb_ptp_init(struct igb_adapter *adapter) void igb_ptp_remove(struct igb_adapter *adapter) { - cancel_delayed_work_sync(&adapter->overflow_work); + switch (adapter->hw.mac.type) { + case e1000_i211: + case e1000_i210: + case e1000_i350: + case e1000_82580: + case e1000_82576: + cancel_delayed_work_sync(&adapter->overflow_work); + break; + default: + return; + } if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); -- cgit v1.2.3 From cb41145ee78585282af56a9203f391c0d84366b1 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Wed, 4 Apr 2012 17:43:59 +0000 Subject: igb: Support the get_ts_info ethtool method. Based on original patch from Richard Cochran Original patch caused build errors without CONFIG_IGB_1588_CLOCK and CONFIG_PPS enabled, since the added code was not properly wrapped. 
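A hedged userspace sketch (not part of the patch) of how the new hook is exercised: a tool such as "ethtool -T ethX" issues ETHTOOL_GET_TS_INFO through the SIOCETHTOOL ioctl, which ends up in the igb_ethtool_get_ts_info() routine added below. Only standard UAPI headers are assumed.

/* Query timestamping capabilities the way "ethtool -T" does.
 * fd is any socket, e.g. socket(AF_INET, SOCK_DGRAM, 0).
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int query_ts_info(int fd, const char *ifname,
			 struct ethtool_ts_info *info)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	memset(info, 0, sizeof(*info));
	info->cmd = ETHTOOL_GET_TS_INFO;
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)info;

	/* On success the driver fills so_timestamping, phc_index,
	 * tx_types and rx_filters as set up in the patch below. */
	return ioctl(fd, SIOCETHTOOL, &ifr);
}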
CC: Richard Cochran Signed-off-by: Carolyn Wyborny Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_ethtool.c | 35 ++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 812d4f963bd1..59d0f04d59af 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2271,6 +2271,38 @@ static void igb_ethtool_complete(struct net_device *netdev) pm_runtime_put(&adapter->pdev->dev); } +#ifdef CONFIG_IGB_PTP +static int igb_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = + (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL) | + (1 << HWTSTAMP_FILTER_SOME) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; +} + +#endif static const struct ethtool_ops igb_ethtool_ops = { .get_settings = igb_get_settings, .set_settings = igb_set_settings, @@ -2299,6 +2331,9 @@ static const struct ethtool_ops igb_ethtool_ops = { .set_coalesce = igb_set_coalesce, .begin = igb_ethtool_begin, .complete = igb_ethtool_complete, +#ifdef CONFIG_IGB_PTP + .get_ts_info = igb_ethtool_get_ts_info, +#endif }; void igb_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3 From 374a542dee0a10c5f81edc2af17a97b06805ecd9 Mon Sep 17 00:00:00 2001 From: Matthew Vick Date: Fri, 18 May 2012 04:54:58 +0000 Subject: igb: Streamline RSS queue and queue pairing assignment logic. Rather than spread out the complexity of the RSS queue and queue pairing assignment logic, place it all in one location for simplicity and readability. Signed-off-by: Matthew Vick Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb.h | 9 ++-- drivers/net/ethernet/intel/igb/igb_main.c | 79 ++++++++++++++++++++----------- 2 files changed, 55 insertions(+), 33 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index ae6d3f393a54..3ced7b546f7f 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -65,13 +65,10 @@ struct igb_adapter; #define MAX_Q_VECTORS 8 /* Transmit and receive queues */ -#define IGB_MAX_RX_QUEUES ((adapter->vfs_allocated_count ? 2 : \ - (hw->mac.type > e1000_82575 ? 
8 : 4))) -#define IGB_MAX_RX_QUEUES_I210 4 +#define IGB_MAX_RX_QUEUES 8 +#define IGB_MAX_RX_QUEUES_82575 4 #define IGB_MAX_RX_QUEUES_I211 2 -#define IGB_MAX_TX_QUEUES 16 -#define IGB_MAX_TX_QUEUES_I210 4 -#define IGB_MAX_TX_QUEUES_I211 2 +#define IGB_MAX_TX_QUEUES 8 #define IGB_MAX_VF_MC_ENTRIES 30 #define IGB_MAX_VF_FUNCTIONS 8 #define IGB_MAX_VFTA_ENTRIES 128 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 64090549722d..a7c9c5d77e0b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1048,11 +1048,6 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter) if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) numvecs += adapter->num_tx_queues; - /* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */ - if ((adapter->hw.mac.type == e1000_i210) - || (adapter->hw.mac.type == e1000_i211)) - numvecs = 4; - /* store the number of vectors reserved for queues */ adapter->num_q_vectors = numvecs; @@ -2338,6 +2333,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; + u32 max_rss_queues; pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); @@ -2370,40 +2366,69 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) } else adapter->vfs_allocated_count = max_vfs; break; - case e1000_i210: - case e1000_i211: - adapter->vfs_allocated_count = 0; - break; default: break; } #endif /* CONFIG_PCI_IOV */ + + /* Determine the maximum number of RSS queues supported. */ switch (hw->mac.type) { + case e1000_i211: + max_rss_queues = IGB_MAX_RX_QUEUES_I211; + break; + case e1000_82575: case e1000_i210: - adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210, - num_online_cpus()); + max_rss_queues = IGB_MAX_RX_QUEUES_82575; + break; + case e1000_i350: + /* I350 cannot do RSS and SR-IOV at the same time */ + if (!!adapter->vfs_allocated_count) { + max_rss_queues = 1; + break; + } + /* fall through */ + case e1000_82576: + if (!!adapter->vfs_allocated_count) { + max_rss_queues = 2; + break; + } + /* fall through */ + case e1000_82580: + default: + max_rss_queues = IGB_MAX_RX_QUEUES; break; + } + + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + + /* Determine if we need to pair queues. */ + switch (hw->mac.type) { + case e1000_82575: case e1000_i211: - adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211, - num_online_cpus()); + /* Device supports enough interrupts without queue pairing. */ break; + case e1000_82576: + /* + * If VFs are going to be allocated with RSS queues then we + * should pair the queues in order to conserve interrupts due + * to limited supply. + */ + if ((adapter->rss_queues > 1) && + (adapter->vfs_allocated_count > 6)) + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + /* fall through */ + case e1000_82580: + case e1000_i350: + case e1000_i210: default: - adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, - num_online_cpus()); + /* + * If rss_queues > half of max_rss_queues, pair the queues in + * order to conserve interrupts due to limited supply. 
+ */ + if (adapter->rss_queues > (max_rss_queues / 2)) + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; break; } - /* i350 cannot do RSS and SR-IOV at the same time */ - if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count) - adapter->rss_queues = 1; - - /* - * if rss_queues > 4 or vfs are going to be allocated with rss_queues - * then we should combine the queues into a queue pair in order to - * conserve interrupts due to limited supply - */ - if ((adapter->rss_queues > 4) || - ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6))) - adapter->flags |= IGB_FLAG_QUEUE_PAIRS; /* Setup and initialize a copy of the hw vlan table array */ adapter->shadow_vfta = kzalloc(sizeof(u32) * -- cgit v1.2.3 From d67974f0deb1f309fc13821f45e52c63402bfb24 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Thu, 14 Jun 2012 16:04:19 +0000 Subject: igb: Update firmware info output Our NVM image creation tools have evolved over the years and there are multiple versions contained in them, depending on the tool used to create them. This patch outputs the NVM versions available in ethtool -i output. rc2: (not sure why others show in log but not in the message) Added additional call to igb_set_fw_version per Community feedback. Signed-off-by: Carolyn Wyborny Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb.h | 16 +++++++ drivers/net/ethernet/intel/igb/igb_ethtool.c | 17 +++---- drivers/net/ethernet/intel/igb/igb_main.c | 66 ++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 3ced7b546f7f..9e572dd29ab2 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -75,6 +75,20 @@ struct igb_adapter; #define IGB_82576_VF_DEV_ID 0x10CA #define IGB_I350_VF_DEV_ID 0x1520 +/* NVM version defines */ +#define IGB_MAJOR_MASK 0xF000 +#define IGB_MINOR_MASK 0x0FF0 +#define IGB_BUILD_MASK 0x000F +#define IGB_COMB_VER_MASK 0x00FF +#define IGB_MAJOR_SHIFT 12 +#define IGB_MINOR_SHIFT 4 +#define IGB_COMB_VER_SHFT 8 +#define IGB_NVM_VER_INVALID 0xFFFF +#define IGB_ETRACK_SHIFT 16 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; @@ -368,6 +382,7 @@ struct igb_adapter { spinlock_t tmreg_lock; struct cyclecounter cc; struct timecounter tc; + char fw_version[32]; }; #define IGB_FLAG_HAS_MSI (1 << 0) @@ -417,6 +432,7 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); extern bool igb_has_link(struct igb_adapter *adapter); extern void igb_set_ethtool_ops(struct net_device *); extern void igb_power_up_link(struct igb_adapter *); +extern void igb_set_fw_version(struct igb_adapter *); #ifdef CONFIG_IGB_PTP extern void igb_ptp_init(struct igb_adapter *adapter); extern void igb_ptp_remove(struct igb_adapter *adapter); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 59d0f04d59af..a19c84cad0e9 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -710,6 +710,7 @@ static int igb_set_eeprom(struct net_device *netdev, if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) hw->nvm.ops.update(hw); + igb_set_fw_version(adapter); kfree(eeprom_buff); return ret_val; } @@ -718,20 +719,16 @@ 
static void igb_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct igb_adapter *adapter = netdev_priv(netdev); - u16 eeprom_data; strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); - /* EEPROM image version # is reported as firmware version # for - * 82575 controllers */ - adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data); - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d-%d", - (eeprom_data & 0xF000) >> 12, - (eeprom_data & 0x0FF0) >> 4, - eeprom_data & 0x000F); - + /* + * EEPROM image version # is reported as firmware version # for + * 82575 controllers + */ + strlcpy(drvinfo->fw_version, adapter->fw_version, + sizeof(drvinfo->fw_version)); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_stats = IGB_STATS_LEN; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a7c9c5d77e0b..1e0b94ba2ef1 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1815,6 +1815,69 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_set_features = igb_set_features, }; +/** + * igb_set_fw_version - Configure version string for ethtool + * @adapter: adapter struct + * + **/ +void igb_set_fw_version(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset; + u16 major, build, patch, fw_version; + u32 etrack_id; + + hw->nvm.ops.read(hw, 5, 1, &fw_version); + if (adapter->hw.mac.type != e1000_i211) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); + etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl; + + /* combo image version needs to be found */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != IGB_NVM_VER_INVALID)) { + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* Only display Option Rom if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != IGB_NVM_VER_INVALID) && + (comb_verl != IGB_NVM_VER_INVALID))) { + major = comb_verl >> IGB_COMB_VER_SHFT; + build = (comb_verl << IGB_COMB_VER_SHFT) | + (comb_verh >> IGB_COMB_VER_SHFT); + patch = comb_verh & IGB_COMB_VER_MASK; + snprintf(adapter->fw_version, + sizeof(adapter->fw_version), + "%d.%d%d, 0x%08x, %d.%d.%d", + (fw_version & IGB_MAJOR_MASK) >> + IGB_MAJOR_SHIFT, + (fw_version & IGB_MINOR_MASK) >> + IGB_MINOR_SHIFT, + (fw_version & IGB_BUILD_MASK), + etrack_id, major, build, patch); + goto out; + } + } + snprintf(adapter->fw_version, sizeof(adapter->fw_version), + "%d.%d%d, 0x%08x", + (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT, + (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT, + (fw_version & IGB_BUILD_MASK), etrack_id); + } else { + snprintf(adapter->fw_version, sizeof(adapter->fw_version), + "%d.%d%d", + (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT, + (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT, + (fw_version & IGB_BUILD_MASK)); + } +out: + return; +} + /** * igb_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -2025,6 +2088,9 @@ static int __devinit igb_probe(struct pci_dev *pdev, goto err_eeprom; } + /* get firmware version for ethtool -i */ + 
igb_set_fw_version(adapter); + setup_timer(&adapter->watchdog_timer, igb_watchdog, (unsigned long) adapter); setup_timer(&adapter->phy_info_timer, igb_update_phy_info, -- cgit v1.2.3 From 200e5fd50ee80ec6ab3156bdbc46a41da0a82d10 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Thu, 31 May 2012 23:39:30 +0000 Subject: igb: Version bump This patch updates the igb version to 4.0.1. Signed-off-by: Carolyn Wyborny Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1e0b94ba2ef1..01ced68d3aac 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -59,9 +59,9 @@ #endif #include "igb.h" -#define MAJ 3 -#define MIN 4 -#define BUILD 7 +#define MAJ 4 +#define MIN 0 +#define BUILD 1 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ __stringify(BUILD) "-k" char igb_driver_name[] = "igb"; -- cgit v1.2.3 From a49fda3eaa4fe70fdd14681060a7c6c6246dc927 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 8 Jun 2012 06:59:09 +0000 Subject: ixgbe: add support for 1G SX modules This patch adds support for 1G Fiber PHY modules (SFP+ modules). This support comes along side support for 1G Copper PHY modules, but uses a different PHY type (ixgbe_sfp_type_1g_sx_core). Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 4 +++- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 23 +++++++++++++++++++---- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 2 ++ 3 files changed, 24 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index dee64d2703f0..e7dddfd97cb9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -241,7 +241,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, /* Determine 1G link capabilities off of SFP+ type */ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; *negotiation = true; goto out; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 24117709d6a2..71659edf81aa 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -907,6 +907,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) * 8 SFP_act_lmt_DA_CORE1 - 82599-specific * 9 SFP_1g_cu_CORE0 - 82599-specific * 10 SFP_1g_cu_CORE1 - 82599-specific + * 11 SFP_1g_sx_CORE0 - 82599-specific + * 12 SFP_1g_sx_CORE1 - 82599-specific */ if (hw->mac.type == ixgbe_mac_82598EB) { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) @@ -957,6 +959,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) else hw->phy.sfp_type = ixgbe_sfp_type_1g_cu_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_sx_core1; } else { hw->phy.sfp_type = 
ixgbe_sfp_type_unknown; } @@ -1049,7 +1058,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) /* Verify supported 1G SFP modules */ if (comp_codes_10g == 0 && !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) { + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { hw->phy.type = ixgbe_phy_sfp_unsupported; status = IXGBE_ERR_SFP_NOT_SUPPORTED; goto out; @@ -1064,7 +1075,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) hw->mac.ops.get_device_caps(hw, &enforce_sfp); if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || - (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) { + (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) || + (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) || + (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) { /* Make sure we're a supported PHY type */ if (hw->phy.type == ixgbe_phy_sfp_intel) { status = 0; @@ -1128,10 +1141,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, * SR modules */ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || - sfp_type == ixgbe_sfp_type_1g_cu_core0) + sfp_type == ixgbe_sfp_type_1g_cu_core0 || + sfp_type == ixgbe_sfp_type_1g_sx_core0) sfp_type = ixgbe_sfp_type_srlr_core0; else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || - sfp_type == ixgbe_sfp_type_1g_cu_core1) + sfp_type == ixgbe_sfp_type_1g_cu_core1 || + sfp_type == ixgbe_sfp_type_1g_sx_core1) sfp_type = ixgbe_sfp_type_srlr_core1; /* Read offset to PHY init contents */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 1085c0739a3c..7416d22ec227 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2604,6 +2604,8 @@ enum ixgbe_sfp_type { ixgbe_sfp_type_da_act_lmt_core1 = 8, ixgbe_sfp_type_1g_cu_core0 = 9, ixgbe_sfp_type_1g_cu_core1 = 10, + ixgbe_sfp_type_1g_sx_core0 = 11, + ixgbe_sfp_type_1g_sx_core1 = 12, ixgbe_sfp_type_not_present = 0xFFFE, ixgbe_sfp_type_unknown = 0xFFFF }; -- cgit v1.2.3 From db01896398ae6beba41fc14f1a90d55fd21e6738 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 8 Jun 2012 06:59:17 +0000 Subject: ixgbe: clean up ixgbe_get_settings ethtool function This patch cleans up the method used for determining the link speed of devices. The old method re-wrote some logic already existing in a mac.ops function which should be used instead. The result is much simpler to understand and removes a strange double-check of logic, as well as reducing code redundancy. 
Signed-off-by: Jacob Keller Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 147 ++++++++++------------- 1 file changed, 62 insertions(+), 85 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 3178f1ec3711..bbc7da5cdb4d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -154,100 +154,60 @@ static int ixgbe_get_settings(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + ixgbe_link_speed supported_link; u32 link_speed = 0; + bool autoneg; bool link_up; - ecmd->supported = SUPPORTED_10000baseT_Full; - ecmd->autoneg = AUTONEG_ENABLE; - ecmd->transceiver = XCVR_EXTERNAL; - if ((hw->phy.media_type == ixgbe_media_type_copper) || - (hw->phy.multispeed_fiber)) { - ecmd->supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg); - - switch (hw->mac.type) { - case ixgbe_mac_X540: - ecmd->supported |= SUPPORTED_100baseT_Full; - break; - default: - break; - } - - ecmd->advertising = ADVERTISED_Autoneg; - if (hw->phy.autoneg_advertised) { - if (hw->phy.autoneg_advertised & - IXGBE_LINK_SPEED_100_FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; - if (hw->phy.autoneg_advertised & - IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw->phy.autoneg_advertised & - IXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - } else { - /* - * Default advertised modes in case - * phy.autoneg_advertised isn't set. - */ - ecmd->advertising |= (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full); - if (hw->mac.type == ixgbe_mac_X540) - ecmd->advertising |= ADVERTISED_100baseT_Full; - } - - if (hw->phy.media_type == ixgbe_media_type_copper) { - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; - ecmd->port = PORT_TP; - } else { - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; - } - } else if (hw->phy.media_type == ixgbe_media_type_backplane) { - /* Set as FIBRE until SERDES defined in kernel */ - if (hw->device_id == IXGBE_DEV_ID_82598_BX) { - ecmd->supported = (SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; - } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || - (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { - ecmd->supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_Autoneg | - ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - } else { - ecmd->supported |= (SUPPORTED_1000baseT_Full | - SUPPORTED_FIBRE); - ecmd->advertising = (ADVERTISED_10000baseT_Full | - ADVERTISED_1000baseT_Full | - ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - } + hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); + + /* set the supported link speeds */ + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->supported |= SUPPORTED_10000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) + ecmd->supported |= SUPPORTED_1000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_100_FULL) + ecmd->supported |= SUPPORTED_100baseT_Full; + + /* set the advertised speeds */ + if 
(hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + ecmd->advertising |= ADVERTISED_1000baseT_Full; } else { - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising = (ADVERTISED_10000baseT_Full | - ADVERTISED_FIBRE); - ecmd->port = PORT_FIBRE; - ecmd->autoneg = AUTONEG_DISABLE; + /* default modes in case phy.autoneg_advertised isn't set */ + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; } - /* Get PHY type */ + if (autoneg) { + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + } else + ecmd->autoneg = AUTONEG_DISABLE; + + ecmd->transceiver = XCVR_EXTERNAL; + + /* Determine the remaining settings based on the PHY type. */ switch (adapter->hw.phy.type) { case ixgbe_phy_tn: case ixgbe_phy_aq: case ixgbe_phy_cu_unknown: - /* Copper 10G-BASET */ + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; ecmd->port = PORT_TP; break; case ixgbe_phy_qt: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_FIBRE; break; case ixgbe_phy_nl: @@ -257,42 +217,59 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_phy_sfp_avago: case ixgbe_phy_sfp_intel: case ixgbe_phy_sfp_unknown: - switch (adapter->hw.phy.sfp_type) { /* SFP+ devices, further checking needed */ + switch (adapter->hw.phy.sfp_type) { case ixgbe_sfp_type_da_cu: case ixgbe_sfp_type_da_cu_core0: case ixgbe_sfp_type_da_cu_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_DA; break; case ixgbe_sfp_type_sr: case ixgbe_sfp_type_lr: case ixgbe_sfp_type_srlr_core0: case ixgbe_sfp_type_srlr_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_FIBRE; break; case ixgbe_sfp_type_not_present: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_NONE; break; case ixgbe_sfp_type_1g_cu_core0: case ixgbe_sfp_type_1g_cu_core1: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; ecmd->port = PORT_TP; - ecmd->supported = SUPPORTED_TP; - ecmd->advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_TP); + break; + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; break; case ixgbe_sfp_type_unknown: default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_OTHER; break; } break; case ixgbe_phy_xaui: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_NONE; break; case ixgbe_phy_unknown: case ixgbe_phy_generic: case ixgbe_phy_sfp_unsupported: default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_OTHER; break; } -- cgit v1.2.3 From f73332fc39e35a6ac14f892390adcd34a63b00d3 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 21 Jun 2012 02:15:10 +0000 Subject: ixgbe: 
simplify padding and length checks The check for length <= 0 is bogus because length is unsigned, and the network stack never sends zero-length packets (unless it is totally broken). The check for really small packets can be optimized by marking it unlikely and calling skb_pad() directly. Signed-off-by: Stephen Hemminger Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b0ddfd47e473..69a660b5621a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6380,17 +6380,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring; - if (skb->len <= 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - /* * The minimum packet size for olinfo paylen is 17 so pad the skb * in order to meet this minimum size requirement. */ - if (skb->len < 17) { - if (skb_padto(skb, 17)) + if (unlikely(skb->len < 17)) { + if (skb_pad(skb, 17 - skb->len)) return NETDEV_TX_OK; skb->len = 17; } -- cgit v1.2.3 From 49ce9c2cda18f62b13055dc715e7b514157c2da8 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 10 Jul 2012 10:56:00 +0000 Subject: drivers/net/ethernet: Fix (nearly-)kernel-doc comments for various functions Fix incorrect start markers, wrapped summary lines, missing section breaks, incorrect separators, and some name mismatches. Delete a few comments that are content-free. Signed-off-by: Ben Hutchings Acked-by: Jeff Kirsher Signed-off-by: David S.
Miller --- drivers/net/ethernet/3com/3c501.c | 2 +- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 68 ++++---- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 70 ++++---- drivers/net/ethernet/atheros/atl1e/atl1e_param.c | 2 +- drivers/net/ethernet/atheros/atlx/atl1.c | 41 ++--- drivers/net/ethernet/atheros/atlx/atl2.c | 56 +++---- drivers/net/ethernet/atheros/atlx/atlx.c | 10 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2 +- drivers/net/ethernet/brocade/bna/bfa_cee.c | 36 +---- drivers/net/ethernet/chelsio/cxgb3/sge.c | 2 +- drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2 +- drivers/net/ethernet/ethoc.c | 4 +- drivers/net/ethernet/freescale/fec.c | 4 +- drivers/net/ethernet/intel/e1000/e1000_hw.c | 8 +- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- drivers/net/ethernet/intel/igb/igb_main.c | 1 + drivers/net/ethernet/intel/igbvf/netdev.c | 1 + drivers/net/ethernet/intel/igbvf/vf.c | 5 +- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 6 +- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 3 +- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 10 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 41 ++--- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 38 ++--- drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 8 +- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 +- drivers/net/ethernet/micrel/ks8851_mll.c | 2 +- drivers/net/ethernet/micrel/ksz884x.c | 2 +- drivers/net/ethernet/neterion/s2io.c | 10 +- drivers/net/ethernet/nvidia/forcedeth.c | 2 +- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c | 12 +- .../ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 10 +- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 42 ++--- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c | 4 +- drivers/net/ethernet/sfc/net_driver.h | 2 +- drivers/net/ethernet/sfc/rx.c | 1 + drivers/net/ethernet/smsc/smc911x.c | 6 +- drivers/net/ethernet/smsc/smc91x.c | 6 +- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 2 +- drivers/net/ethernet/tehuti/tehuti.c | 177 ++++++++++++--------- drivers/net/ethernet/ti/davinci_emac.c | 70 ++++---- drivers/net/ethernet/toshiba/spider_net.c | 6 +- drivers/net/ethernet/xilinx/ll_temac_main.c | 2 +- 44 files changed, 378 insertions(+), 410 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c index bf73e1a02293..2038eaabaea4 100644 --- a/drivers/net/ethernet/3com/3c501.c +++ b/drivers/net/ethernet/3com/3c501.c @@ -143,7 +143,7 @@ static int irq = 5; static int mem_start; /** - * el1_probe: - probe for a 3c501 + * el1_probe - probe for a 3c501 * @dev: The device structure passed in to probe. * * This can be called from two places. 
The network layer will probe using diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 85717cb306d1..42c13d8280c6 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -166,7 +166,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) msleep(5); } -/* +/** * atl1c_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ @@ -179,7 +179,7 @@ static inline void atl1c_irq_enable(struct atl1c_adapter *adapter) } } -/* +/** * atl1c_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ @@ -192,7 +192,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter) synchronize_irq(adapter->pdev->irq); } -/* +/** * atl1c_irq_reset - reset interrupt confiure on the NIC * @adapter: board private structure */ @@ -220,7 +220,7 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl) return data; } -/* +/** * atl1c_phy_config - Timer Call-back * @data: pointer to netdev cast into an unsigned long */ @@ -361,7 +361,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter) } -/* +/** * atl1c_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure */ @@ -374,7 +374,7 @@ static void atl1c_tx_timeout(struct net_device *netdev) schedule_work(&adapter->common_task); } -/* +/** * atl1c_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * @@ -453,7 +453,7 @@ static void atl1c_restore_vlan(struct atl1c_adapter *adapter) atl1c_vlan_mode(adapter->netdev, adapter->netdev->features); } -/* +/** * atl1c_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure @@ -518,7 +518,7 @@ static int atl1c_set_features(struct net_device *netdev, return 0; } -/* +/** * atl1c_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size @@ -577,12 +577,6 @@ static void atl1c_mdio_write(struct net_device *netdev, int phy_id, atl1c_write_phy_reg(&adapter->hw, reg_num, val); } -/* - * atl1c_mii_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ static int atl1c_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { @@ -633,12 +627,6 @@ out: return retval; } -/* - * atl1c_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { @@ -651,7 +639,7 @@ static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) } } -/* +/** * atl1c_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * @@ -755,7 +743,7 @@ static void __devinit atl1c_patch_assign(struct atl1c_hw *hw) i++; } } -/* +/** * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter) * @adapter: board private structure to initialize * @@ -853,7 +841,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev, buffer_info->skb = NULL; ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); } -/* +/** * atl1c_clean_tx_ring - Free Tx-skb * @adapter: board private structure */ @@ -878,7 +866,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, tpd_ring->next_to_use = 0; } -/* +/** * atl1c_clean_rx_ring - Free rx-reservation skbs * @adapter: board private structure */ @@ -931,7 +919,7 @@ static void atl1c_init_ring_ptrs(struct 
atl1c_adapter *adapter) } } -/* +/** * atl1c_free_ring_resources - Free Tx / RX descriptor Resources * @adapter: board private structure * @@ -954,7 +942,7 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter) } } -/* +/** * atl1c_setup_mem_resources - allocate Tx / RX descriptor resources * @adapter: board private structure * @@ -1363,7 +1351,7 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed) return; } -/* +/** * atl1c_configure - Configure Transmit&Receive Unit after Reset * @adapter: board private structure * @@ -1477,7 +1465,7 @@ static void atl1c_update_hw_stats(struct atl1c_adapter *adapter) } } -/* +/** * atl1c_get_stats - Get System Network Statistics * @netdev: network interface device structure * @@ -1558,11 +1546,10 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, return true; } -/* +/** * atl1c_intr - Interrupt Handler * @irq: interrupt number * @data: pointer to a network interface device structure - * @pt_regs: CPU registers structure */ static irqreturn_t atl1c_intr(int irq, void *data) { @@ -1813,9 +1800,8 @@ rrs_checked: atl1c_alloc_rx_buffer(adapter); } -/* +/** * atl1c_clean - NAPI Rx polling callback - * @adapter: board private structure */ static int atl1c_clean(struct napi_struct *napi, int budget) { @@ -2270,7 +2256,7 @@ static void atl1c_down(struct atl1c_adapter *adapter) atl1c_reset_dma_ring(adapter); } -/* +/** * atl1c_open - Called when a network interface is made active * @netdev: network interface device structure * @@ -2309,7 +2295,7 @@ err_up: return err; } -/* +/** * atl1c_close - Disables a network interface * @netdev: network interface device structure * @@ -2432,7 +2418,7 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev) return 0; } -/* +/** * atl1c_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in atl1c_pci_tbl @@ -2579,7 +2565,7 @@ err_dma: return err; } -/* +/** * atl1c_remove - Device Removal Routine * @pdev: PCI device information struct * @@ -2605,7 +2591,7 @@ static void __devexit atl1c_remove(struct pci_dev *pdev) free_netdev(netdev); } -/* +/** * atl1c_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state @@ -2633,7 +2619,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_NEED_RESET; } -/* +/** * atl1c_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * @@ -2661,7 +2647,7 @@ static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } -/* +/** * atl1c_io_resume - called when traffic can start flowing again. 
* @pdev: Pointer to PCI device * @@ -2704,7 +2690,7 @@ static struct pci_driver atl1c_driver = { .driver.pm = &atl1c_pm_ops, }; -/* +/** * atl1c_init_module - Driver Registration Routine * * atl1c_init_module is the first routine called when the driver is @@ -2715,7 +2701,7 @@ static int __init atl1c_init_module(void) return pci_register_driver(&atl1c_driver); } -/* +/** * atl1c_exit_module - Driver Exit Cleanup Routine * * atl1c_exit_module is called just before the driver is removed diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 0aed82e1bb3e..a98acc8a956f 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -89,7 +89,7 @@ static const u16 atl1e_pay_load_size[] = { 128, 256, 512, 1024, 2048, 4096, }; -/* +/** * atl1e_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ @@ -102,7 +102,7 @@ static inline void atl1e_irq_enable(struct atl1e_adapter *adapter) } } -/* +/** * atl1e_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ @@ -114,7 +114,7 @@ static inline void atl1e_irq_disable(struct atl1e_adapter *adapter) synchronize_irq(adapter->pdev->irq); } -/* +/** * atl1e_irq_reset - reset interrupt confiure on the NIC * @adapter: board private structure */ @@ -126,7 +126,7 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter) AT_WRITE_FLUSH(&adapter->hw); } -/* +/** * atl1e_phy_config - Timer Call-back * @data: pointer to netdev cast into an unsigned long */ @@ -210,7 +210,7 @@ static int atl1e_check_link(struct atl1e_adapter *adapter) return 0; } -/* +/** * atl1e_link_chg_task - deal with link change event Out of interrupt context * @netdev: network interface device structure */ @@ -259,7 +259,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter) cancel_work_sync(&adapter->link_chg_task); } -/* +/** * atl1e_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure */ @@ -271,7 +271,7 @@ static void atl1e_tx_timeout(struct net_device *netdev) schedule_work(&adapter->reset_task); } -/* +/** * atl1e_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * @@ -345,7 +345,7 @@ static void atl1e_restore_vlan(struct atl1e_adapter *adapter) atl1e_vlan_mode(adapter->netdev, adapter->netdev->features); } -/* +/** * atl1e_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure @@ -397,7 +397,7 @@ static int atl1e_set_features(struct net_device *netdev, return 0; } -/* +/** * atl1e_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size @@ -449,12 +449,6 @@ static void atl1e_mdio_write(struct net_device *netdev, int phy_id, atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); } -/* - * atl1e_mii_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ static int atl1e_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { @@ -505,12 +499,6 @@ out: } -/* - * atl1e_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { @@ -541,7 +529,7 @@ static void atl1e_setup_pcicmd(struct pci_dev *pdev) msleep(1); } -/* +/** * atl1e_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize * @@ -551,7 +539,7 @@ static 
int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter) return 0; } -/* +/** * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter) * @adapter: board private structure to initialize * @@ -635,7 +623,7 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter) return 0; } -/* +/** * atl1e_clean_tx_ring - Free Tx-skb * @adapter: board private structure */ @@ -678,7 +666,7 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter) ring_count); } -/* +/** * atl1e_clean_rx_ring - Free rx-reservation skbs * @adapter: board private structure */ @@ -761,7 +749,7 @@ static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter) } } -/* +/** * atl1e_free_ring_resources - Free Tx / RX descriptor Resources * @adapter: board private structure * @@ -786,7 +774,7 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter) } } -/* +/** * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources * @adapter: board private structure * @@ -1075,7 +1063,7 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter) AT_WRITE_REG(hw, REG_MAC_CTRL, value); } -/* +/** * atl1e_configure - Configure Transmit&Receive Unit after Reset * @adapter: board private structure * @@ -1145,7 +1133,7 @@ static int atl1e_configure(struct atl1e_adapter *adapter) return 0; } -/* +/** * atl1e_get_stats - Get System Network Statistics * @netdev: network interface device structure * @@ -1257,11 +1245,10 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter) return true; } -/* +/** * atl1e_intr - Interrupt Handler * @irq: interrupt number * @data: pointer to a network interface device structure - * @pt_regs: CPU registers structure */ static irqreturn_t atl1e_intr(int irq, void *data) { @@ -1489,9 +1476,8 @@ fatal_err: schedule_work(&adapter->reset_task); } -/* +/** * atl1e_clean - NAPI Rx polling callback - * @adapter: board private structure */ static int atl1e_clean(struct napi_struct *napi, int budget) { @@ -1956,7 +1942,7 @@ void atl1e_down(struct atl1e_adapter *adapter) atl1e_clean_rx_ring(adapter); } -/* +/** * atl1e_open - Called when a network interface is made active * @netdev: network interface device structure * @@ -2002,7 +1988,7 @@ err_req_irq: return err; } -/* +/** * atl1e_close - Disables a network interface * @netdev: network interface device structure * @@ -2238,7 +2224,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) return 0; } -/* +/** * atl1e_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in atl1e_pci_tbl @@ -2392,7 +2378,7 @@ err_dma: return err; } -/* +/** * atl1e_remove - Device Removal Routine * @pdev: PCI device information struct * @@ -2424,7 +2410,7 @@ static void __devexit atl1e_remove(struct pci_dev *pdev) pci_disable_device(pdev); } -/* +/** * atl1e_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device * @state: The current pci connection state @@ -2452,7 +2438,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) return PCI_ERS_RESULT_NEED_RESET; } -/* +/** * atl1e_io_slot_reset - called after the pci bus has been reset. * @pdev: Pointer to PCI device * @@ -2479,7 +2465,7 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_RECOVERED; } -/* +/** * atl1e_io_resume - called when traffic can start flowing again. 
* @pdev: Pointer to PCI device * @@ -2523,7 +2509,7 @@ static struct pci_driver atl1e_driver = { .err_handler = &atl1e_err_handler }; -/* +/** * atl1e_init_module - Driver Registration Routine * * atl1e_init_module is the first routine called when the driver is @@ -2534,7 +2520,7 @@ static int __init atl1e_init_module(void) return pci_register_driver(&atl1e_driver); } -/* +/** * atl1e_exit_module - Driver Exit Cleanup Routine * * atl1e_exit_module is called just before the driver is removed diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c index 0ce60b6e7ef0..b5086f1e637f 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c @@ -168,7 +168,7 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, return -1; } -/* +/** * atl1e_check_options - Range Checking for Command Line Parameters * @adapter: board private structure * diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 149a294d54e9..f2402f355cec 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -195,7 +195,7 @@ static int __devinit atl1_validate_option(int *value, struct atl1_option *opt, return -1; } -/* +/** * atl1_check_options - Range Checking for Command Line Parameters * @adapter: board private structure * @@ -937,7 +937,7 @@ static void atl1_set_mac_addr(struct atl1_hw *hw) iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); } -/* +/** * atl1_sw_init - Initialize general software structures (struct atl1_adapter) * @adapter: board private structure to initialize * @@ -1014,12 +1014,6 @@ static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, atl1_write_phy_reg(&adapter->hw, reg_num, val); } -/* - * atl1_mii_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct atl1_adapter *adapter = netdev_priv(netdev); @@ -1036,7 +1030,7 @@ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return retval; } -/* +/** * atl1_setup_mem_resources - allocate Tx / RX descriptor resources * @adapter: board private structure * @@ -1147,7 +1141,7 @@ static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) atomic_set(&rrd_ring->next_to_clean, 0); } -/* +/** * atl1_clean_rx_ring - Free RFD Buffers * @adapter: board private structure */ @@ -1187,7 +1181,7 @@ static void atl1_clean_rx_ring(struct atl1_adapter *adapter) atomic_set(&rrd_ring->next_to_clean, 0); } -/* +/** * atl1_clean_tx_ring - Free Tx Buffers * @adapter: board private structure */ @@ -1227,7 +1221,7 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter) atomic_set(&tpd_ring->next_to_clean, 0); } -/* +/** * atl1_free_ring_resources - Free Tx / RX descriptor Resources * @adapter: board private structure * @@ -1470,7 +1464,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw) iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); } -/* +/** * atl1_configure - Configure Transmit&Receive Unit after Reset * @adapter: board private structure * @@ -1844,7 +1838,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter, } } -/* +/** * atl1_alloc_rx_buffers - Replace used receive buffers * @adapter: address of board private structure */ @@ -2489,11 +2483,10 @@ static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter) return 1; } -/* +/** * atl1_intr - Interrupt Handler * 
@irq: interrupt number * @data: pointer to a network interface device structure - * @pt_regs: CPU registers structure */ static irqreturn_t atl1_intr(int irq, void *data) { @@ -2574,7 +2567,7 @@ static irqreturn_t atl1_intr(int irq, void *data) } -/* +/** * atl1_phy_config - Timer Call-back * @data: pointer to netdev cast into an unsigned long */ @@ -2693,7 +2686,7 @@ static void atl1_reset_dev_task(struct work_struct *work) netif_device_attach(netdev); } -/* +/** * atl1_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size @@ -2727,7 +2720,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu) return 0; } -/* +/** * atl1_open - Called when a network interface is made active * @netdev: network interface device structure * @@ -2762,7 +2755,7 @@ err_up: return err; } -/* +/** * atl1_close - Disables a network interface * @netdev: network interface device structure * @@ -2930,7 +2923,7 @@ static const struct net_device_ops atl1_netdev_ops = { #endif }; -/* +/** * atl1_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in atl1_pci_tbl @@ -3111,7 +3104,7 @@ err_request_regions: return err; } -/* +/** * atl1_remove - Device Removal Routine * @pdev: PCI device information struct * @@ -3158,7 +3151,7 @@ static struct pci_driver atl1_driver = { .driver.pm = ATL1_PM_OPS, }; -/* +/** * atl1_exit_module - Driver Exit Cleanup Routine * * atl1_exit_module is called just before the driver is removed @@ -3169,7 +3162,7 @@ static void __exit atl1_exit_module(void) pci_unregister_driver(&atl1_driver); } -/* +/** * atl1_init_module - Driver Registration Routine * * atl1_init_module is the first routine called when the driver is diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 6762dc406b25..7c0b7e2bcb66 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -75,7 +75,7 @@ static void atl2_set_ethtool_ops(struct net_device *netdev); static void atl2_check_options(struct atl2_adapter *adapter); -/* +/** * atl2_sw_init - Initialize general software structures (struct atl2_adapter) * @adapter: board private structure to initialize * @@ -123,7 +123,7 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter) return 0; } -/* +/** * atl2_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * @@ -177,7 +177,7 @@ static void init_ring_ptrs(struct atl2_adapter *adapter) adapter->txs_next_clear = 0; } -/* +/** * atl2_configure - Configure Transmit&Receive Unit after Reset * @adapter: board private structure * @@ -283,7 +283,7 @@ static int atl2_configure(struct atl2_adapter *adapter) return value; } -/* +/** * atl2_setup_ring_resources - allocate Tx / RX descriptor resources * @adapter: board private structure * @@ -340,7 +340,7 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter) return 0; } -/* +/** * atl2_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ @@ -350,7 +350,7 @@ static inline void atl2_irq_enable(struct atl2_adapter *adapter) ATL2_WRITE_FLUSH(&adapter->hw); } -/* +/** * atl2_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ @@ -599,11 +599,10 @@ static inline void atl2_clear_phy_int(struct atl2_adapter *adapter) spin_unlock(&adapter->stats_lock); } -/* +/** * atl2_intr - Interrupt Handler * @irq: interrupt number 
* @data: pointer to a network interface device structure - * @pt_regs: CPU registers structure */ static irqreturn_t atl2_intr(int irq, void *data) { @@ -679,7 +678,7 @@ static int atl2_request_irq(struct atl2_adapter *adapter) netdev); } -/* +/** * atl2_free_ring_resources - Free Tx / RX descriptor Resources * @adapter: board private structure * @@ -692,7 +691,7 @@ static void atl2_free_ring_resources(struct atl2_adapter *adapter) adapter->ring_dma); } -/* +/** * atl2_open - Called when a network interface is made active * @netdev: network interface device structure * @@ -798,7 +797,7 @@ static void atl2_free_irq(struct atl2_adapter *adapter) #endif } -/* +/** * atl2_close - Disables a network interface * @netdev: network interface device structure * @@ -918,7 +917,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } -/* +/** * atl2_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size @@ -943,7 +942,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu) return 0; } -/* +/** * atl2_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure @@ -969,12 +968,6 @@ static int atl2_set_mac(struct net_device *netdev, void *p) return 0; } -/* - * atl2_mii_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct atl2_adapter *adapter = netdev_priv(netdev); @@ -1011,12 +1004,6 @@ static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return 0; } -/* - * atl2_ioctl - - * @netdev: - * @ifreq: - * @cmd: - */ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { @@ -1033,7 +1020,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) } } -/* +/** * atl2_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure */ @@ -1045,7 +1032,7 @@ static void atl2_tx_timeout(struct net_device *netdev) schedule_work(&adapter->reset_task); } -/* +/** * atl2_watchdog - Timer Call-back * @data: pointer to netdev cast into an unsigned long */ @@ -1070,7 +1057,7 @@ static void atl2_watchdog(unsigned long data) } } -/* +/** * atl2_phy_config - Timer Call-back * @data: pointer to netdev cast into an unsigned long */ @@ -1274,9 +1261,8 @@ static int atl2_check_link(struct atl2_adapter *adapter) return 0; } -/* +/** * atl2_link_chg_task - deal with link change event Out of interrupt context - * @netdev: network interface device structure */ static void atl2_link_chg_task(struct work_struct *work) { @@ -1341,7 +1327,7 @@ static const struct net_device_ops atl2_netdev_ops = { #endif }; -/* +/** * atl2_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in atl2_pci_tbl @@ -1501,7 +1487,7 @@ err_dma: return err; } -/* +/** * atl2_remove - Device Removal Routine * @pdev: PCI device information struct * @@ -1728,7 +1714,7 @@ static struct pci_driver atl2_driver = { .shutdown = atl2_shutdown, }; -/* +/** * atl2_init_module - Driver Registration Routine * * atl2_init_module is the first routine called when the driver is @@ -1743,7 +1729,7 @@ static int __init atl2_init_module(void) } module_init(atl2_init_module); -/* +/** * atl2_exit_module - Driver Exit Cleanup Routine * * atl2_exit_module is called just before the driver is removed @@ -2997,7 +2983,7 @@ static int __devinit atl2_validate_option(int 
*value, struct atl2_option *opt) return -1; } -/* +/** * atl2_check_options - Range Checking for Command Line Parameters * @adapter: board private structure * diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c index b4f3aa49a7fc..77ffbc4a5071 100644 --- a/drivers/net/ethernet/atheros/atlx/atlx.c +++ b/drivers/net/ethernet/atheros/atlx/atlx.c @@ -64,7 +64,7 @@ static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) } } -/* +/** * atlx_set_mac - Change the Ethernet Address of the NIC * @netdev: network interface device structure * @p: pointer to an address structure @@ -115,7 +115,7 @@ static void atlx_check_for_link(struct atlx_adapter *adapter) schedule_work(&adapter->link_chg_task); } -/* +/** * atlx_set_multi - Multicast and Promiscuous mode set * @netdev: network interface device structure * @@ -162,7 +162,7 @@ static inline void atlx_imr_set(struct atlx_adapter *adapter, ioread32(adapter->hw.hw_addr + REG_IMR); } -/* +/** * atlx_irq_enable - Enable default interrupt generation settings * @adapter: board private structure */ @@ -172,7 +172,7 @@ static void atlx_irq_enable(struct atlx_adapter *adapter) adapter->int_enabled = true; } -/* +/** * atlx_irq_disable - Mask off interrupt generation on the NIC * @adapter: board private structure */ @@ -193,7 +193,7 @@ static void atlx_clear_phy_int(struct atlx_adapter *adapter) spin_unlock_irqrestore(&adapter->lock, flags); } -/* +/** * atlx_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 53659f321d51..dfa757e74296 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -90,7 +90,7 @@ void bnx2x_send_unload_done(struct bnx2x *bp); * bnx2x_config_rss_pf - configure RSS parameters in a PF. 
* * @bp: driver handle - * @rss_obj RSS object to use + * @rss_obj: RSS object to use * @ind_table: indirection table to configure * @config_hash: re-configure RSS hash keys configuration */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c index 689e5e19cc0b..8532a8159025 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.c +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -52,13 +52,7 @@ bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg) } /** - * bfa_cee_attr_meminfo() - * - * @brief Returns the size of the DMA memory needed by CEE attributes - * - * @param[in] void - * - * @return Size of DMA region + * bfa_cee_attr_meminfo - Returns the size of the DMA memory needed by CEE attributes */ static u32 bfa_cee_attr_meminfo(void) @@ -66,13 +60,7 @@ bfa_cee_attr_meminfo(void) return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ); } /** - * bfa_cee_stats_meminfo() - * - * @brief Returns the size of the DMA memory needed by CEE stats - * - * @param[in] void - * - * @return Size of DMA region + * bfa_cee_stats_meminfo - Returns the size of the DMA memory needed by CEE stats */ static u32 bfa_cee_stats_meminfo(void) @@ -81,14 +69,10 @@ bfa_cee_stats_meminfo(void) } /** - * bfa_cee_get_attr_isr() + * bfa_cee_get_attr_isr - CEE ISR for get-attributes responses from f/w * - * @brief CEE ISR for get-attributes responses from f/w - * - * @param[in] cee - Pointer to the CEE module - * status - Return status from the f/w - * - * @return void + * @cee: Pointer to the CEE module + * @status: Return status from the f/w */ static void bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status) @@ -105,14 +89,10 @@ bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status) } /** - * bfa_cee_get_attr_isr() + * bfa_cee_get_attr_isr - CEE ISR for get-stats responses from f/w * - * @brief CEE ISR for get-stats responses from f/w - * - * @param[in] cee - Pointer to the CEE module - * status - Return status from the f/w - * - * @return void + * @cee: Pointer to the CEE module + * @status: Return status from the f/w */ static void bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status) diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index cfb60e1f51da..dd901c5061b9 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -2877,7 +2877,7 @@ static void sge_timer_tx(unsigned long data) mod_timer(&qs->tx_reclaim_timer, jiffies + next_period); } -/* +/** * sge_timer_rx - perform periodic maintenance of an SGE qset * @data: the SGE queue set to maintain * diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c index 44ac2f40b644..bff8a3cdd3df 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c @@ -1076,7 +1076,7 @@ static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end) return 0; } -/* +/** * t3_load_fw - download firmware * @adapter: the adapter * @fw_data: the firmware image to write diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 32e1dd566a14..fa947dfa4c30 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2010,7 +2010,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } -/* +/** * t4_mem_win_read_len - read memory through 
PCIE memory window * @adap: the adapter * @addr: address of first byte requested aligned on 32b. diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index a38167810546..20297881f8eb 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -902,7 +902,7 @@ static const struct net_device_ops ethoc_netdev_ops = { }; /** - * ethoc_probe() - initialize OpenCores ethernet MAC + * ethoc_probe - initialize OpenCores ethernet MAC * pdev: platform device */ static int __devinit ethoc_probe(struct platform_device *pdev) @@ -1140,7 +1140,7 @@ out: } /** - * ethoc_remove() - shutdown OpenCores ethernet MAC + * ethoc_remove - shutdown OpenCores ethernet MAC * @pdev: platform device */ static int __devexit ethoc_remove(struct platform_device *pdev) diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c index dafd797a6069..fffd20528b5d 100644 --- a/drivers/net/ethernet/freescale/fec.c +++ b/drivers/net/ethernet/freescale/fec.c @@ -1389,8 +1389,8 @@ fec_set_mac_address(struct net_device *ndev, void *p) } #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * fec_poll_controller: FEC Poll controller function +/** + * fec_poll_controller - FEC Poll controller function * @dev: The FEC network adapter * * Polled functionality used by netconsole and others in non interrupt mode diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c index c526279e4927..3d6839528761 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_hw.c +++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c @@ -399,7 +399,7 @@ void e1000_set_media_type(struct e1000_hw *hw) } /** - * e1000_reset_hw: reset the hardware completely + * e1000_reset_hw - reset the hardware completely * @hw: Struct containing variables accessed by shared code * * Reset the transmit and receive units; mask and clear all interrupts. @@ -546,7 +546,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw) } /** - * e1000_init_hw: Performs basic configuration of the adapter. + * e1000_init_hw - Performs basic configuration of the adapter. * @hw: Struct containing variables accessed by shared code * * Assumes that the controller has previously been reset and is in a @@ -2591,7 +2591,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw) * @hw: Struct containing variables accessed by shared code * @speed: Speed of the connection * @duplex: Duplex setting of the connection - + * * Detects the current speed and duplex settings of the hardware. 
*/ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) @@ -2959,7 +2959,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, * @hw: Struct containing variables accessed by shared code * @reg_addr: address of the PHY register to write * @data: data to write to the PHY - + * * Writes a value to a PHY register */ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a166efc2fead..ca477e87eb87 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2159,7 +2159,7 @@ void e1000e_release_hw_control(struct e1000_adapter *adapter) } /** - * @e1000_alloc_ring - allocate memory for a ring structure + * e1000_alloc_ring_dma - allocate memory for a ring structure **/ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, struct e1000_ring *ring) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 01ced68d3aac..60e307548f4e 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5777,6 +5777,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, /** * igb_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: pointer to q_vector containing needed info + * * returns true if ring is completely cleaned **/ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 8ec74b07f940..0696abfe9944 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -766,6 +766,7 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter) /** * igbvf_clean_tx_irq - Reclaim resources after transmit completes * @adapter: board private structure + * * returns true if ring is completely cleaned **/ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 30a6cc426037..eea0e10ce12f 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -283,7 +283,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) return err; } -/** e1000_rlpml_set_vf - Set the maximum receive packet length +/** + * e1000_rlpml_set_vf - Set the maximum receive packet length * @hw: pointer to the HW structure * @max_size: value to assign to max frame size **/ @@ -302,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) * e1000_rar_set_vf - set device MAC address * @hw: pointer to the HW structure * @addr: pointer to the receive address - * @index receive address array register + * @index: receive address array register **/ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) { diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 5fce363d810a..aab649f8c5f0 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -2276,9 +2276,9 @@ static void ixgb_netpoll(struct net_device *dev) #endif /** - * ixgb_io_error_detected() - called when PCI error is detected - * @pdev pointer to pci device with error - * @state pci channel state after error + * ixgb_io_error_detected - called when PCI error is detected + * @pdev: pointer to pci device with error + * @state: 
pci channel state after error * * This callback is called by the PCI subsystem whenever * a PCI bus error is detected. diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 77ac41feb0fe..bb7fde45c057 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3132,7 +3132,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, } /** - * ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from + * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from * the EEPROM * @hw: pointer to hardware structure * @wwnn_prefix: the alternative WWNN prefix @@ -3325,6 +3325,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, * ixgbe_calculate_checksum - Calculate checksum for buffer * @buffer: pointer to EEPROM * @length: size of EEPROM to calculate a checksum for + * * Calculates the checksum for some buffer on a specified length. The * checksum calculated is returned. **/ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index bc07933d67da..0ee4dbf4a752 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -38,7 +38,7 @@ /** * ixgbe_fcoe_clear_ddp - clear the given ddp context - * @ddp - ptr to the ixgbe_fcoe_ddp + * @ddp: ptr to the ixgbe_fcoe_ddp * * Returns : none * diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index c377706e81a8..f36c3c38dbcb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -252,7 +252,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) } /** - * ixgbe_set_sriov_queues: Allocate queues for IOV use + * ixgbe_set_sriov_queues - Allocate queues for IOV use * @adapter: board private structure to initialize * * IOV doesn't actually use anything, so just NAK the @@ -265,7 +265,7 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) } /** - * ixgbe_set_rss_queues: Allocate queues for RSS + * ixgbe_set_rss_queues - Allocate queues for RSS * @adapter: board private structure to initialize * * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try @@ -288,7 +288,7 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) } /** - * ixgbe_set_fdir_queues: Allocate queues for Flow Director + * ixgbe_set_fdir_queues - Allocate queues for Flow Director * @adapter: board private structure to initialize * * Flow Director is an advanced Rx filter, attempting to get Rx flows back @@ -323,7 +323,7 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) #ifdef IXGBE_FCOE /** - * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE) + * ixgbe_set_fcoe_queues - Allocate queues for Fiber Channel over Ethernet (FCoE) * @adapter: board private structure to initialize * * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. @@ -410,7 +410,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) #endif /** - * ixgbe_set_num_queues: Allocate queues for device, feature dependent + * ixgbe_set_num_queues - Allocate queues for device, feature dependent * @adapter: board private structure to initialize * * This is the top level queue allocation routine. 
The order here is very diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5afbb37a738c..9313f5c84fad 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -516,7 +516,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); } -/* +/** * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors * @adapter: pointer to adapter struct * @direction: 0 for Rx, 1 for Tx, -1 for other causes @@ -3591,7 +3591,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) } #ifdef CONFIG_IXGBE_DCB -/* +/** * ixgbe_configure_dcb - Configure DCB hardware * @adapter: ixgbe adapter struct * @@ -3658,11 +3658,11 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) /* Additional bittime to account for IXGBE framing */ #define IXGBE_ETH_FRAMING 20 -/* +/** * ixgbe_hpbthresh - calculate high water mark for flow control * * @adapter: board private structure to calculate for - * @pb - packet buffer to calculate + * @pb: packet buffer to calculate */ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) { @@ -3722,11 +3722,11 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) return marker; } -/* +/** * ixgbe_lpbthresh - calculate low water mark for for flow control * * @adapter: board private structure to calculate for - * @pb - packet buffer to calculate + * @pb: packet buffer to calculate */ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter) { @@ -5243,7 +5243,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) /** * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure **/ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) { @@ -5279,7 +5279,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) /** * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure * * This function serves two purposes. First it strobes the interrupt lines * in order to make certain interrupts are occurring. 
Secondly it sets the @@ -5327,8 +5327,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) /** * ixgbe_watchdog_update_link - update the link status - * @adapter - pointer to the device adapter structure - * @link_speed - pointer to a u32 to store the link_speed + * @adapter: pointer to the device adapter structure + * @link_speed: pointer to a u32 to store the link_speed **/ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) { @@ -5371,7 +5371,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) /** * ixgbe_watchdog_link_is_up - update netif_carrier status and * print link up message - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure **/ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) { @@ -5431,7 +5431,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) /** * ixgbe_watchdog_link_is_down - update netif_carrier status and * print link down message - * @adapter - pointer to the adapter structure + * @adapter: pointer to the adapter structure **/ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) { @@ -5459,7 +5459,7 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) /** * ixgbe_watchdog_flush_tx - flush queues on link down - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure **/ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) { @@ -5508,7 +5508,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) /** * ixgbe_watchdog_subtask - check and bring link up - * @adapter - pointer to the device adapter structure + * @adapter: pointer to the device adapter structure **/ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) { @@ -5532,7 +5532,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) /** * ixgbe_sfp_detection_subtask - poll for SFP+ cable - * @adapter - the ixgbe adapter structure + * @adapter: the ixgbe adapter structure **/ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) { @@ -5599,7 +5599,7 @@ sfp_out: /** * ixgbe_sfp_link_config_subtask - set up link SFP after module install - * @adapter - the ixgbe adapter structure + * @adapter: the ixgbe adapter structure **/ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) { @@ -6586,8 +6586,9 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, } #ifdef CONFIG_IXGBE_DCB -/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. - * #adapter: pointer to ixgbe_adapter +/** + * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. + * @adapter: pointer to ixgbe_adapter * @tc: number of traffic classes currently enabled * * Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm @@ -6622,8 +6623,8 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) return; } -/* ixgbe_setup_tc - routine to configure net_device for multiple traffic - * classes. 
+/** + * ixgbe_setup_tc - configure net_device for multiple traffic classes * * @netdev: net device to configure * @tc: number of traffic classes to enable diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index daddd844691f..3456d5617143 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -107,7 +107,7 @@ static struct sock_filter ptp_filter[] = { /** * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) - * @cc - the cyclecounter structure + * @cc: the cyclecounter structure * * this function reads the cyclecounter registers and is called by the * cyclecounter structure used to construct a ns counter from the @@ -128,8 +128,8 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc) /** * ixgbe_ptp_adjfreq - * @ptp - the ptp clock structure - * @ppb - parts per billion adjustment from base + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base * * adjust the frequency of the ptp cycle counter by the * indicated ppb from the base frequency. @@ -175,8 +175,8 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) /** * ixgbe_ptp_adjtime - * @ptp - the ptp clock structure - * @delta - offset to adjust the cycle counter by + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by * * adjust the timer by resetting the timecounter structure. */ @@ -203,8 +203,8 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) /** * ixgbe_ptp_gettime - * @ptp - the ptp clock structure - * @ts - timespec structure to hold the current time value + * @ptp: the ptp clock structure + * @ts: timespec structure to hold the current time value * * read the timecounter and return the correct value on ns, * after converting it into a struct timespec. @@ -229,8 +229,8 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) /** * ixgbe_ptp_settime - * @ptp - the ptp clock structure - * @ts - the timespec containing the new time for the cycle counter + * @ptp: the ptp clock structure + * @ts: the timespec containing the new time for the cycle counter * * reset the timecounter to use a new base value instead of the kernel * wall timer value. @@ -256,9 +256,9 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, /** * ixgbe_ptp_enable - * @ptp - the ptp clock structure - * @rq - the requested feature to change - * @on - whether to enable or disable the feature + * @ptp: the ptp clock structure + * @rq: the requested feature to change + * @on: whether to enable or disable the feature * * enable (or disable) ancillary features of the phc subsystem. * our driver only supports the PPS feature on the X540 @@ -294,8 +294,8 @@ static int ixgbe_ptp_enable(struct ptp_clock_info *ptp, /** * ixgbe_ptp_check_pps_event - * @adapter - the private adapter structure - * @eicr - the interrupt cause register value + * @adapter: the private adapter structure + * @eicr: the interrupt cause register value * * This function is called by the interrupt routine when checking for * interrupts. It will check and handle a pps event. 
@@ -325,8 +325,8 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) /** * ixgbe_ptp_enable_sdp - * @hw - the hardware private structure - * @shift - the clock shift for calculating nanoseconds + * @hw: the hardware private structure + * @shift: the clock shift for calculating nanoseconds * * this function enables the clock out feature on the sdp0 for the * X540 device. It will create a 1second periodic output that can be @@ -399,7 +399,7 @@ static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift) /** * ixgbe_ptp_disable_sdp - * @hw - the private hardware structure + * @hw: the private hardware structure * * this function disables the auxiliary SDP clock out feature */ @@ -769,7 +769,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, /** * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw - * @adapter - pointer to the adapter structure + * @adapter: pointer to the adapter structure * * this function initializes the timecounter and cyclecounter * structures for use in generated a ns counter from the arbitrary @@ -905,7 +905,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) /** * ixgbe_ptp_init - * @adapter - the ixgbe private adapter structure + * @adapter: the ixgbe private adapter structure * * This function performs the required steps for enabling ptp * support. If ptp support has already been loaded it simply calls the diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c index 2334fce47018..16ddf14e8ba4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c @@ -95,11 +95,11 @@ static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev, return sprintf(buf, "%u\n", value); } -/* +/** * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file. - * @ adapter: pointer to the adapter structure - * @ offset: offset in the eeprom sensor data table - * @ type: type of sensor data to display + * @adapter: pointer to the adapter structure + * @offset: offset in the eeprom sensor data table + * @type: type of sensor data to display * * For each file we want in hwmon's sysfs interface we need a device_attribute * This is included in our hwmon_attr struct that contains the references to diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index f69ec4288b10..0368160286f9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -115,7 +115,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); } -/* +/** * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors * @adapter: pointer to adapter struct * @direction: 0 for Rx, 1 for Tx, -1 for other causes @@ -1942,8 +1942,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, } } -/* - * ixgbevf_set_num_queues: Allocate queues for device, feature dependent +/** + * ixgbevf_set_num_queues - Allocate queues for device, feature dependent * @adapter: board private structure to initialize * * This is the top level queue allocation routine. 
The order here is very diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 875dd5e264eb..76bf1598c609 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1103,7 +1103,7 @@ static void ks_set_grpaddr(struct ks_net *ks) } } /* ks_set_grpaddr */ -/* +/** * ks_clear_mcast - clear multicast information * * @ks : The chip information diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index eaf9ff0262a9..24a4c5716cf5 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -3913,7 +3913,7 @@ static void hw_start_rx(struct ksz_hw *hw) hw->rx_stop = 2; } -/* +/** * hw_stop_rx - stop receiving * @hw: The hardware instance. * diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index e7cd587d8ae7..d958c2299372 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -3377,7 +3377,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit, } while (cnt < 20); return ret; } -/* +/** * check_pci_device_id - Checks if the device id is supported * @id : device id * Description: Function to check if the pci device id is supported by driver. @@ -5238,7 +5238,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset) } /** - * s2io_set_mac_addr driver entry point + * s2io_set_mac_addr - driver entry point */ static int s2io_set_mac_addr(struct net_device *dev, void *p) @@ -6088,7 +6088,7 @@ static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data) } /** - * s2io-link_test - verifies the link state of the nic + * s2io_link_test - verifies the link state of the nic * @sp ; private member of the device structure, which is a pointer to the * s2io_nic structure. * @data: variable that returns the result of each of the test conducted by @@ -6116,9 +6116,9 @@ static int s2io_link_test(struct s2io_nic *sp, uint64_t *data) /** * s2io_rldram_test - offline test for access to the RldRam chip on the NIC - * @sp - private member of the device structure, which is a pointer to the + * @sp: private member of the device structure, which is a pointer to the * s2io_nic structure. - * @data - variable that returns the result of each of the test + * @data: variable that returns the result of each of the test * conducted by the driver. * Description: * This is one of the offline test that tests the read and write diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 928913c4f3ff..c503fbebdf7e 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -3218,7 +3218,7 @@ static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex) } /** - * nv_update_linkspeed: Setup the MAC according to the link partner + * nv_update_linkspeed - Setup the MAC according to the link partner * @dev: Network device to be configured * * The function queries the PHY and checks if there is a link partner. 
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c index e48f084ad226..5ae03e815ee9 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c @@ -60,7 +60,7 @@ static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw) /** * pch_gbe_plat_init_hw - Initialize hardware * @hw: Pointer to the HW structure - * Returns + * Returns: * 0: Successfully * Negative value: Failed-EBUSY */ @@ -108,7 +108,7 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw) /** * pch_gbe_hal_setup_init_funcs - Initializes function pointers * @hw: Pointer to the HW structure - * Returns + * Returns: * 0: Successfully * ENOSYS: Function is not registered */ @@ -137,7 +137,7 @@ inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw) /** * pch_gbe_hal_init_hw - Initialize hardware * @hw: Pointer to the HW structure - * Returns + * Returns: * 0: Successfully * ENOSYS: Function is not registered */ @@ -155,7 +155,7 @@ inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw) * @hw: Pointer to the HW structure * @offset: The register to read * @data: The buffer to store the 16-bit read. - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -172,7 +172,7 @@ inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset, * @hw: Pointer to the HW structure * @offset: The register to read * @data: The value to write. - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -211,7 +211,7 @@ inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw) /** * pch_gbe_hal_read_mac_addr - Reads MAC address * @hw: Pointer to the HW structure - * Returns + * Returns: * 0: Successfully * ENOSYS: Function is not registered */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c index ac4e72d529e5..9dbf38c10a68 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c @@ -77,7 +77,7 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = { * pch_gbe_get_settings - Get device-specific settings * @netdev: Network interface device structure * @ecmd: Ethtool command - * Returns + * Returns: * 0: Successful. * Negative value: Failed. */ @@ -100,7 +100,7 @@ static int pch_gbe_get_settings(struct net_device *netdev, * pch_gbe_set_settings - Set device-specific settings * @netdev: Network interface device structure * @ecmd: Ethtool command - * Returns + * Returns: * 0: Successful. * Negative value: Failed. */ @@ -220,7 +220,7 @@ static void pch_gbe_get_wol(struct net_device *netdev, * pch_gbe_set_wol - Turn Wake-on-Lan on or off * @netdev: Network interface device structure * @wol: Pointer of wake-on-Lan information straucture - * Returns + * Returns: * 0: Successful. * Negative value: Failed. */ @@ -248,7 +248,7 @@ static int pch_gbe_set_wol(struct net_device *netdev, /** * pch_gbe_nway_reset - Restart autonegotiation * @netdev: Network interface device structure - * Returns + * Returns: * 0: Successful. * Negative value: Failed. */ @@ -398,7 +398,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev, * pch_gbe_set_pauseparam - Set pause paramters * @netdev: Network interface device structure * @pause: Pause parameters structure - * Returns + * Returns: * 0: Successful. * Negative value: Failed. 
*/ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 3787c64ee71c..b1006563f736 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -301,7 +301,7 @@ inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) /** * pch_gbe_mac_read_mac_addr - Read MAC address * @hw: Pointer to the HW structure - * Returns + * Returns: * 0: Successful. */ s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw) @@ -483,7 +483,7 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw, /** * pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings * @hw: Pointer to the HW structure - * Returns + * Returns: * 0: Successful. * Negative value: Failed. */ @@ -639,7 +639,7 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw) /** * pch_gbe_alloc_queues - Allocate memory for all rings * @adapter: Board private structure to initialize - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -670,7 +670,7 @@ static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter) /** * pch_gbe_init_phy - Initialize PHY * @adapter: Board private structure to initialize - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -720,7 +720,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter) * @netdev: Network interface device structure * @addr: Phy ID * @reg: Access location - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -1364,7 +1364,7 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw) * pch_gbe_intr - Interrupt Handler * @irq: Interrupt number * @data: Pointer to a network interface device structure - * Returns + * Returns: * - IRQ_HANDLED: Our interrupt * - IRQ_NONE: Not our interrupt */ @@ -1566,7 +1566,7 @@ static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter, * pch_gbe_clean_tx - Reclaim resources after transmit completes * @adapter: Board private structure * @tx_ring: Tx descriptor ring - * Returns + * Returns: * true: Cleaned the descriptor * false: Not cleaned the descriptor */ @@ -1660,7 +1660,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, * @rx_ring: Rx descriptor ring * @work_done: Completed count * @work_to_do: Request count - * Returns + * Returns: * true: Cleaned the descriptor * false: Not cleaned the descriptor */ @@ -1775,7 +1775,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, * pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors) * @adapter: Board private structure * @tx_ring: Tx descriptor ring (for a specific queue) to setup - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -1822,7 +1822,7 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter, * pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors) * @adapter: Board private structure * @rx_ring: Rx descriptor ring (for a specific queue) to setup - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -1899,7 +1899,7 @@ void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter, /** * pch_gbe_request_irq - Allocate an interrupt line * @adapter: Board private structure - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -1932,7 +1932,7 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter) /** * pch_gbe_up - Up GbE network device * @adapter: Board private structure - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -2018,7 +2018,7 
@@ void pch_gbe_down(struct pch_gbe_adapter *adapter) /** * pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter) * @adapter: Board private structure to initialize - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -2057,7 +2057,7 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter) /** * pch_gbe_open - Called when a network interface is made active * @netdev: Network interface device structure - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -2097,7 +2097,7 @@ err_setup_tx: /** * pch_gbe_stop - Disables a network interface * @netdev: Network interface device structure - * Returns + * Returns: * 0: Successfully */ static int pch_gbe_stop(struct net_device *netdev) @@ -2117,7 +2117,7 @@ static int pch_gbe_stop(struct net_device *netdev) * pch_gbe_xmit_frame - Packet transmitting start * @skb: Socket buffer structure * @netdev: Network interface device structure - * Returns + * Returns: * - NETDEV_TX_OK: Normal end * - NETDEV_TX_BUSY: Error end */ @@ -2225,7 +2225,7 @@ static void pch_gbe_set_multi(struct net_device *netdev) * pch_gbe_set_mac - Change the Ethernet Address of the NIC * @netdev: Network interface device structure * @addr: Pointer to an address structure - * Returns + * Returns: * 0: Successfully * -EADDRNOTAVAIL: Failed */ @@ -2256,7 +2256,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr) * pch_gbe_change_mtu - Change the Maximum Transfer Unit * @netdev: Network interface device structure * @new_mtu: New value for maximum frame size - * Returns + * Returns: * 0: Successfully * -EINVAL: Failed */ @@ -2309,7 +2309,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu) * pch_gbe_set_features - Reset device after features changed * @netdev: Network interface device structure * @features: New features - * Returns + * Returns: * 0: HW state updated successfully */ static int pch_gbe_set_features(struct net_device *netdev, @@ -2334,7 +2334,7 @@ static int pch_gbe_set_features(struct net_device *netdev, * @netdev: Network interface device structure * @ifr: Pointer to ifr structure * @cmd: Control command - * Returns + * Returns: * 0: Successfully * Negative value: Failed */ @@ -2369,7 +2369,7 @@ static void pch_gbe_tx_timeout(struct net_device *netdev) * pch_gbe_napi_poll - NAPI receive and transfer polling callback * @napi: Pointer of polling device struct * @budget: The maximum number of a packet - * Returns + * Returns: * false: Exit the polling mode * true: Continue the polling mode */ diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c index 29e23bec809c..8653c3b81f84 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c @@ -139,7 +139,7 @@ MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload"); /** * pch_gbe_option - Force the MAC's flow control settings * @hw: Pointer to the HW structure - * Returns + * Returns: * 0: Successful. * Negative value: Failed. */ @@ -220,7 +220,7 @@ static const struct pch_gbe_opt_list fc_list[] = { * @value: value * @opt: option * @adapter: Board private structure - * Returns + * Returns: * 0: Successful. * Negative value: Failed. 
*/ diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index 0e575359af17..a1965c07d1e3 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -527,7 +527,7 @@ struct efx_phy_operations { }; /** - * @enum efx_phy_mode - PHY operating mode flags + * enum efx_phy_mode - PHY operating mode flags * @PHY_MODE_NORMAL: on and should pass traffic * @PHY_MODE_TX_DISABLED: on with TX disabled * @PHY_MODE_LOW_POWER: set to low power through MDIO diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 243e91f3dff9..fca61fea38e0 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -336,6 +336,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel, /** * efx_fast_push_rx_descriptors - push new RX descriptors quickly * @rx_queue: RX descriptor queue + * * This will aim to fill the RX descriptor queue up to * @rx_queue->@max_fill. If there is insufficient atomic * memory to do so, a slow fill will be scheduled. diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index 8814b2f5d46f..8d15f7a74b45 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -773,7 +773,7 @@ static int smc911x_phy_fixed(struct net_device *dev) return 1; } -/* +/** * smc911x_phy_reset - reset the phy * @dev: net device * @phy: phy address @@ -819,7 +819,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy) return reg & PMT_CTRL_PHY_RST_; } -/* +/** * smc911x_phy_powerdown - powerdown phy * @dev: net device * @phy: phy address @@ -837,7 +837,7 @@ static void smc911x_phy_powerdown(struct net_device *dev, int phy) SMC_SET_PHY_BMCR(lp, phy, bmcr); } -/* +/** * smc911x_phy_check_media - check the media status and adjust BMCR * @dev: net device * @init: set true for initialisation diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index fee449355014..318adc935a53 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -942,7 +942,7 @@ static int smc_phy_fixed(struct net_device *dev) return 1; } -/* +/** * smc_phy_reset - reset the phy * @dev: net device * @phy: phy address @@ -976,7 +976,7 @@ static int smc_phy_reset(struct net_device *dev, int phy) return bmcr & BMCR_RESET; } -/* +/** * smc_phy_powerdown - powerdown phy * @dev: net device * @@ -1000,7 +1000,7 @@ static void smc_phy_powerdown(struct net_device *dev) smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN); } -/* +/** * smc_phy_check_media - check the media status and adjust TCR * @dev: net device * @init: set true for initialisation diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index cf826e6b6aa1..13afb8edfadc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -125,7 +125,7 @@ err_out_req_reg_failed: } /** - * stmmac_dvr_remove + * stmmac_pci_remove * * @pdev: platform device pointer * Description: this function calls the main to free the net resources diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index 447a6932cab3..6ce9edd95c04 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c @@ -137,14 +137,15 @@ static void print_eth_id(struct net_device *ndev) #define bdx_disable_interrupts(priv) \ do { WRITE_REG(priv, regIMR, 0); } while (0) -/* bdx_fifo_init - * 
create TX/RX descriptor fifo for host-NIC communication. +/** + * bdx_fifo_init - create TX/RX descriptor fifo for host-NIC communication. + * @priv: NIC private structure + * @f: fifo to initialize + * @fsz_type: fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB + * @reg_XXX: offsets of registers relative to base address + * * 1K extra space is allocated at the end of the fifo to simplify * processing of descriptors that wraps around fifo's end - * @priv - NIC private structure - * @f - fifo to initialize - * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB - * @reg_XXX - offsets of registers relative to base address * * Returns 0 on success, negative value on failure * @@ -177,9 +178,10 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type, RET(0); } -/* bdx_fifo_free - free all resources used by fifo - * @priv - NIC private structure - * @f - fifo to release +/** + * bdx_fifo_free - free all resources used by fifo + * @priv: NIC private structure + * @f: fifo to release */ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f) { @@ -192,9 +194,9 @@ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f) RET(); } -/* +/** * bdx_link_changed - notifies OS about hw link state. - * @bdx_priv - hw adapter structure + * @priv: hw adapter structure */ static void bdx_link_changed(struct bdx_priv *priv) { @@ -233,10 +235,10 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr) } -/* bdx_isr - Interrupt Service Routine for Bordeaux NIC - * @irq - interrupt number - * @ndev - network device - * @regs - CPU registers +/** + * bdx_isr_napi - Interrupt Service Routine for Bordeaux NIC + * @irq: interrupt number + * @dev: network device * * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise * @@ -307,8 +309,10 @@ static int bdx_poll(struct napi_struct *napi, int budget) return work_done; } -/* bdx_fw_load - loads firmware to NIC - * @priv - NIC private structure +/** + * bdx_fw_load - loads firmware to NIC + * @priv: NIC private structure + * * Firmware is loaded via TXD fifo, so it must be initialized first. * Firware must be loaded once per NIC not per PCI device provided by NIC (NIC * can have few of them). 
So all drivers use semaphore register to choose one @@ -380,8 +384,9 @@ static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv) RET(); } -/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines - * @priv - NIC private structure +/** + * bdx_hw_start - inits registers and starts HW's Rx and Tx engines + * @priv: NIC private structure */ static int bdx_hw_start(struct bdx_priv *priv) { @@ -691,12 +696,13 @@ static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) RET(-EOPNOTSUPP); } -/* +/** * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid - * by passing VLAN filter table to hardware - * @ndev network device - * @vid VLAN vid - * @op add or kill operation + * @ndev: network device + * @vid: VLAN vid + * @op: add or kill operation + * + * Passes VLAN filter table to hardware */ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable) { @@ -722,10 +728,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable) RET(); } -/* +/** * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table - * @ndev network device - * @vid VLAN vid to add + * @ndev: network device + * @vid: VLAN vid to add */ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) { @@ -733,10 +739,10 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid) return 0; } -/* +/** * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table - * @ndev network device - * @vid VLAN vid to kill + * @ndev: network device + * @vid: VLAN vid to kill */ static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid) { @@ -974,8 +980,9 @@ static inline void bdx_rxdb_free_elem(struct rxdb *db, int n) * Rx Init * *************************************************************************/ -/* bdx_rx_init - initialize RX all related HW and SW resources - * @priv - NIC private structure +/** + * bdx_rx_init - initialize RX all related HW and SW resources + * @priv: NIC private structure * * Returns 0 on success, negative value on failure * @@ -1016,9 +1023,10 @@ err_mem: return -ENOMEM; } -/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo - * @priv - NIC private structure - * @f - RXF fifo +/** + * bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo + * @priv: NIC private structure + * @f: RXF fifo */ static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f) { @@ -1045,8 +1053,10 @@ static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f) } } -/* bdx_rx_free - release all Rx resources - * @priv - NIC private structure +/** + * bdx_rx_free - release all Rx resources + * @priv: NIC private structure + * * It assumes that Rx is desabled in HW */ static void bdx_rx_free(struct bdx_priv *priv) @@ -1067,9 +1077,11 @@ static void bdx_rx_free(struct bdx_priv *priv) * Rx Engine * *************************************************************************/ -/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs - * @priv - nic's private structure - * @f - RXF fifo that needs skbs +/** + * bdx_rx_alloc_skbs - fill rxf fifo with new skbs + * @priv: nic's private structure + * @f: RXF fifo that needs skbs + * * It allocates skbs, build rxf descs and push it (rxf descr) into rxf fifo. * skb's virtual and physical addresses are stored in skb db. 
* To calculate free space, func uses cached values of RPTR and WPTR @@ -1179,13 +1191,15 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd) RET(); } -/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS +/** + * bdx_rx_receive - receives full packets from RXD fifo and pass them to OS * NOTE: a special treatment is given to non-continuous descriptors * that start near the end, wraps around and continue at the beginning. a second * part is copied right after the first, and then descriptor is interpreted as * normal. fifo has an extra space to allow such operations - * @priv - nic's private structure - * @f - RXF fifo that needs skbs + * @priv: nic's private structure + * @f: RXF fifo that needs skbs + * @budget: maximum number of packets to receive */ /* TBD: replace memcpy func call by explicite inline asm */ @@ -1375,9 +1389,10 @@ static inline int bdx_tx_db_size(struct txdb *db) return db->size - taken; } -/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap - * @d - tx data base - * @ptr - read or write pointer +/** + * __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap + * @db: tx data base + * @pptr: read or write pointer */ static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr) { @@ -1394,8 +1409,9 @@ static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr) *pptr = db->start; } -/* bdx_tx_db_inc_rptr - increment read pointer - * @d - tx data base +/** + * bdx_tx_db_inc_rptr - increment read pointer + * @db: tx data base */ static inline void bdx_tx_db_inc_rptr(struct txdb *db) { @@ -1403,8 +1419,9 @@ static inline void bdx_tx_db_inc_rptr(struct txdb *db) __bdx_tx_db_ptr_next(db, &db->rptr); } -/* bdx_tx_db_inc_rptr - increment write pointer - * @d - tx data base +/** + * bdx_tx_db_inc_wptr - increment write pointer + * @db: tx data base */ static inline void bdx_tx_db_inc_wptr(struct txdb *db) { @@ -1413,9 +1430,11 @@ static inline void bdx_tx_db_inc_wptr(struct txdb *db) a result of write */ } -/* bdx_tx_db_init - creates and initializes tx db - * @d - tx data base - * @sz_type - size of tx fifo +/** + * bdx_tx_db_init - creates and initializes tx db + * @d: tx data base + * @sz_type: size of tx fifo + * * Returns 0 on success, error code otherwise */ static int bdx_tx_db_init(struct txdb *d, int sz_type) @@ -1441,8 +1460,9 @@ static int bdx_tx_db_init(struct txdb *d, int sz_type) return 0; } -/* bdx_tx_db_close - closes tx db and frees all memory - * @d - tx data base +/** + * bdx_tx_db_close - closes tx db and frees all memory + * @d: tx data base */ static void bdx_tx_db_close(struct txdb *d) { @@ -1463,9 +1483,11 @@ static struct { u16 qwords; /* qword = 64 bit */ } txd_sizes[MAX_SKB_FRAGS + 1]; -/* txdb_map_skb - creates and stores dma mappings for skb's data blocks - * @priv - NIC private structure - * @skb - socket buffer to map +/** + * bdx_tx_map_skb - creates and stores dma mappings for skb's data blocks + * @priv: NIC private structure + * @skb: socket buffer to map + * @txdd: TX descriptor to use * * It makes dma mappings for skb's data blocks and writes them to PBL of * new tx descriptor. 
It also stores them in the tx db, so they could be @@ -1562,9 +1584,10 @@ err_mem: return -ENOMEM; } -/* +/** * bdx_tx_space - calculates available space in TX fifo - * @priv - NIC private structure + * @priv: NIC private structure + * * Returns available space in TX fifo in bytes */ static inline int bdx_tx_space(struct bdx_priv *priv) @@ -1579,9 +1602,10 @@ static inline int bdx_tx_space(struct bdx_priv *priv) return fsize; } -/* bdx_tx_transmit - send packet to NIC - * @skb - packet to send - * ndev - network device assigned to NIC +/** + * bdx_tx_transmit - send packet to NIC + * @skb: packet to send + * @ndev: network device assigned to NIC * Return codes: * o NETDEV_TX_OK everything ok. * o NETDEV_TX_BUSY Cannot transmit packet, try later @@ -1699,8 +1723,10 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, return NETDEV_TX_OK; } -/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ. - * @priv - bdx adapter +/** + * bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ. + * @priv: bdx adapter + * * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS * that those packets were sent */ @@ -1761,7 +1787,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv) spin_unlock(&priv->tx_lock); } -/* bdx_tx_free_skbs - frees all skbs from TXD fifo. +/** + * bdx_tx_free_skbs - frees all skbs from TXD fifo. * It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod */ static void bdx_tx_free_skbs(struct bdx_priv *priv) @@ -1790,10 +1817,11 @@ static void bdx_tx_free(struct bdx_priv *priv) bdx_tx_db_close(&priv->txdb); } -/* bdx_tx_push_desc - push descriptor to TxD fifo - * @priv - NIC private structure - * @data - desc's data - * @size - desc's size +/** + * bdx_tx_push_desc - push descriptor to TxD fifo + * @priv: NIC private structure + * @data: desc's data + * @size: desc's size * * Pushes desc to TxD fifo and overlaps it if needed. * NOTE: this func does not check for available space. this is responsibility @@ -1819,10 +1847,11 @@ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size) WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR); } -/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way - * @priv - NIC private structure - * @data - desc's data - * @size - desc's size +/** + * bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way + * @priv: NIC private structure + * @data: desc's data + * @size: desc's size * * NOTE: this func does check for available space and, if necessary, waits for * NIC to read existing data before writing new one. 
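Beyond the punctuation fixes, the tehuti.c hunks above also bring the names inside the comments back in sync with the code they document (bdx_isr vs. bdx_isr_napi, txdb_map_skb vs. bdx_tx_map_skb, __bdx_tx_ptr_next vs. __bdx_tx_db_ptr_next). The sketch below condenses that pattern into one hypothetical helper; only the comment conventions are taken from the patches, the helper and its data structure are invented.

struct foo_db {
	int rptr;		/* read pointer into the hypothetical descriptor db */
};

/* Style being removed throughout tehuti.c, shown here only for contrast:
 *
 *   foo_db_inc_ptr - increment read pointer
 *   @d - data base
 *
 * i.e. no kernel-doc "**" opener, a name that has drifted away from the
 * code, and "@arg - description" punctuation.
 */

/**
 * foo_db_inc_rptr - increment the read pointer of a hypothetical descriptor db
 * @db: descriptor data base whose read pointer is advanced
 */
static inline void foo_db_inc_rptr(struct foo_db *db)
{
	db->rptr++;		/* a real driver would also handle fifo wrap-around */
}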
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 4da93a5d7ec6..9cdd6197a176 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -375,7 +375,7 @@ static char *emac_rxhost_errcodes[16] = { #define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg))) /** - * emac_dump_regs: Dump important EMAC registers to debug terminal + * emac_dump_regs - Dump important EMAC registers to debug terminal * @priv: The DaVinci EMAC private adapter structure * * Executes ethtool set cmd & sets phy mode @@ -466,7 +466,7 @@ static void emac_dump_regs(struct emac_priv *priv) } /** - * emac_get_drvinfo: Get EMAC driver information + * emac_get_drvinfo - Get EMAC driver information * @ndev: The DaVinci EMAC network adapter * @info: ethtool info structure containing name and version * @@ -481,7 +481,7 @@ static void emac_get_drvinfo(struct net_device *ndev, } /** - * emac_get_settings: Get EMAC settings + * emac_get_settings - Get EMAC settings * @ndev: The DaVinci EMAC network adapter * @ecmd: ethtool command * @@ -500,7 +500,7 @@ static int emac_get_settings(struct net_device *ndev, } /** - * emac_set_settings: Set EMAC settings + * emac_set_settings - Set EMAC settings * @ndev: The DaVinci EMAC network adapter * @ecmd: ethtool command * @@ -518,7 +518,7 @@ static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) } /** - * emac_get_coalesce : Get interrupt coalesce settings for this device + * emac_get_coalesce - Get interrupt coalesce settings for this device * @ndev : The DaVinci EMAC network adapter * @coal : ethtool coalesce settings structure * @@ -536,7 +536,7 @@ static int emac_get_coalesce(struct net_device *ndev, } /** - * emac_set_coalesce : Set interrupt coalesce settings for this device + * emac_set_coalesce - Set interrupt coalesce settings for this device * @ndev : The DaVinci EMAC network adapter * @coal : ethtool coalesce settings structure * @@ -631,7 +631,7 @@ static const struct ethtool_ops ethtool_ops = { }; /** - * emac_update_phystatus: Update Phy status + * emac_update_phystatus - Update Phy status * @priv: The DaVinci EMAC private adapter structure * * Updates phy status and takes action for network queue if required @@ -697,7 +697,7 @@ static void emac_update_phystatus(struct emac_priv *priv) } /** - * hash_get: Calculate hash value from mac address + * hash_get - Calculate hash value from mac address * @addr: mac address to delete from hash table * * Calculates hash value from mac address @@ -723,9 +723,9 @@ static u32 hash_get(u8 *addr) } /** - * hash_add: Hash function to add mac addr from hash table + * hash_add - Hash function to add mac addr from hash table * @priv: The DaVinci EMAC private adapter structure - * mac_addr: mac address to delete from hash table + * @mac_addr: mac address to delete from hash table * * Adds mac address to the internal hash table * @@ -765,9 +765,9 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr) } /** - * hash_del: Hash function to delete mac addr from hash table + * hash_del - Hash function to delete mac addr from hash table * @priv: The DaVinci EMAC private adapter structure - * mac_addr: mac address to delete from hash table + * @mac_addr: mac address to delete from hash table * * Removes mac address from the internal hash table * @@ -807,7 +807,7 @@ static int hash_del(struct emac_priv *priv, u8 *mac_addr) #define EMAC_ALL_MULTI_CLR 3 /** - * emac_add_mcast: Set multicast address in the EMAC adapter 
(Internal) + * emac_add_mcast - Set multicast address in the EMAC adapter (Internal) * @priv: The DaVinci EMAC private adapter structure * @action: multicast operation to perform * mac_addr: mac address to set @@ -855,7 +855,7 @@ static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr) } /** - * emac_dev_mcast_set: Set multicast address in the EMAC adapter + * emac_dev_mcast_set - Set multicast address in the EMAC adapter * @ndev: The DaVinci EMAC network adapter * * Set multicast addresses in EMAC adapter @@ -901,7 +901,7 @@ static void emac_dev_mcast_set(struct net_device *ndev) *************************************************************************/ /** - * emac_int_disable: Disable EMAC module interrupt (from adapter) + * emac_int_disable - Disable EMAC module interrupt (from adapter) * @priv: The DaVinci EMAC private adapter structure * * Disable EMAC interrupt on the adapter @@ -931,7 +931,7 @@ static void emac_int_disable(struct emac_priv *priv) } /** - * emac_int_enable: Enable EMAC module interrupt (from adapter) + * emac_int_enable - Enable EMAC module interrupt (from adapter) * @priv: The DaVinci EMAC private adapter structure * * Enable EMAC interrupt on the adapter @@ -967,7 +967,7 @@ static void emac_int_enable(struct emac_priv *priv) } /** - * emac_irq: EMAC interrupt handler + * emac_irq - EMAC interrupt handler * @irq: interrupt number * @dev_id: EMAC network adapter data structure ptr * @@ -1060,7 +1060,7 @@ static void emac_tx_handler(void *token, int len, int status) } /** - * emac_dev_xmit: EMAC Transmit function + * emac_dev_xmit - EMAC Transmit function * @skb: SKB pointer * @ndev: The DaVinci EMAC network adapter * @@ -1111,7 +1111,7 @@ fail_tx: } /** - * emac_dev_tx_timeout: EMAC Transmit timeout function + * emac_dev_tx_timeout - EMAC Transmit timeout function * @ndev: The DaVinci EMAC network adapter * * Called when system detects that a skb timeout period has expired @@ -1138,7 +1138,7 @@ static void emac_dev_tx_timeout(struct net_device *ndev) } /** - * emac_set_type0addr: Set EMAC Type0 mac address + * emac_set_type0addr - Set EMAC Type0 mac address * @priv: The DaVinci EMAC private adapter structure * @ch: RX channel number * @mac_addr: MAC address to set in device @@ -1165,7 +1165,7 @@ static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr) } /** - * emac_set_type1addr: Set EMAC Type1 mac address + * emac_set_type1addr - Set EMAC Type1 mac address * @priv: The DaVinci EMAC private adapter structure * @ch: RX channel number * @mac_addr: MAC address to set in device @@ -1187,7 +1187,7 @@ static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr) } /** - * emac_set_type2addr: Set EMAC Type2 mac address + * emac_set_type2addr - Set EMAC Type2 mac address * @priv: The DaVinci EMAC private adapter structure * @ch: RX channel number * @mac_addr: MAC address to set in device @@ -1213,7 +1213,7 @@ static void emac_set_type2addr(struct emac_priv *priv, u32 ch, } /** - * emac_setmac: Set mac address in the adapter (internal function) + * emac_setmac - Set mac address in the adapter (internal function) * @priv: The DaVinci EMAC private adapter structure * @ch: RX channel number * @mac_addr: MAC address to set in device @@ -1242,7 +1242,7 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr) } /** - * emac_dev_setmac_addr: Set mac address in the adapter + * emac_dev_setmac_addr - Set mac address in the adapter * @ndev: The DaVinci EMAC network adapter * @addr: MAC address to set in 
device * @@ -1277,7 +1277,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) } /** - * emac_hw_enable: Enable EMAC hardware for packet transmission/reception + * emac_hw_enable - Enable EMAC hardware for packet transmission/reception * @priv: The DaVinci EMAC private adapter structure * * Enables EMAC hardware for packet processing - enables PHY, enables RX @@ -1347,7 +1347,7 @@ static int emac_hw_enable(struct emac_priv *priv) } /** - * emac_poll: EMAC NAPI Poll function + * emac_poll - EMAC NAPI Poll function * @ndev: The DaVinci EMAC network adapter * @budget: Number of receive packets to process (as told by NAPI layer) * @@ -1430,7 +1430,7 @@ static int emac_poll(struct napi_struct *napi, int budget) #ifdef CONFIG_NET_POLL_CONTROLLER /** - * emac_poll_controller: EMAC Poll controller function + * emac_poll_controller - EMAC Poll controller function * @ndev: The DaVinci EMAC network adapter * * Polled functionality used by netconsole and others in non interrupt mode @@ -1489,7 +1489,7 @@ static void emac_adjust_link(struct net_device *ndev) *************************************************************************/ /** - * emac_devioctl: EMAC adapter ioctl + * emac_devioctl - EMAC adapter ioctl * @ndev: The DaVinci EMAC network adapter * @ifrq: request parameter * @cmd: command parameter @@ -1516,7 +1516,7 @@ static int match_first_device(struct device *dev, void *data) } /** - * emac_dev_open: EMAC device open + * emac_dev_open - EMAC device open * @ndev: The DaVinci EMAC network adapter * * Called when system wants to start the interface. We init TX/RX channels @@ -1649,7 +1649,7 @@ rollback: } /** - * emac_dev_stop: EMAC device stop + * emac_dev_stop - EMAC device stop * @ndev: The DaVinci EMAC network adapter * * Called when system wants to stop or down the interface. We stop the network @@ -1691,7 +1691,7 @@ static int emac_dev_stop(struct net_device *ndev) } /** - * emac_dev_getnetstats: EMAC get statistics function + * emac_dev_getnetstats - EMAC get statistics function * @ndev: The DaVinci EMAC network adapter * * Called when system wants to get statistics from the device. @@ -1763,7 +1763,7 @@ static const struct net_device_ops emac_netdev_ops = { }; /** - * davinci_emac_probe: EMAC device probe + * davinci_emac_probe - EMAC device probe * @pdev: The DaVinci EMAC device that we are removing * * Called when probing for emac devicesr. We get details of instances and @@ -1949,7 +1949,7 @@ free_clk: } /** - * davinci_emac_remove: EMAC device remove + * davinci_emac_remove - EMAC device remove * @pdev: The DaVinci EMAC device that we are removing * * Called when removing the device driver. We disable clock usage and release @@ -2029,7 +2029,7 @@ static struct platform_driver davinci_emac_driver = { }; /** - * davinci_emac_init: EMAC driver module init + * davinci_emac_init - EMAC driver module init * * Called when initializing the driver. We register the driver with * the platform. @@ -2041,7 +2041,7 @@ static int __init davinci_emac_init(void) late_initcall(davinci_emac_init); /** - * davinci_emac_exit: EMAC driver module exit + * davinci_emac_exit - EMAC driver module exit * * Called when exiting the driver completely. 
We unregister the driver with * the platform and exit diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 6199f6b387b6..c1ebfe9efcb3 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -114,7 +114,8 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) out_be32(card->regs + reg, value); } -/** spider_net_write_phy - write to phy register +/** + * spider_net_write_phy - write to phy register * @netdev: adapter to be written to * @mii_id: id of MII * @reg: PHY register @@ -137,7 +138,8 @@ spider_net_write_phy(struct net_device *netdev, int mii_id, spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue); } -/** spider_net_read_phy - read from phy register +/** + * spider_net_read_phy - read from phy register * @netdev: network device to be read from * @mii_id: id of MII * @reg: PHY register diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 1eaf7128afee..1817d0e18fb2 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -197,7 +197,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op, #endif /** - * * temac_dma_bd_release - Release buffer descriptor rings + * temac_dma_bd_release - Release buffer descriptor rings */ static void temac_dma_bd_release(struct net_device *ndev) { -- cgit v1.2.3 From 1aa8b471e09f227455c11d55c4bc94a655ee8497 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 10 Jul 2012 10:56:59 +0000 Subject: drivers/net/ethernet: Fix non-kernel-doc comments with kernel-doc start markers Convert doxygen (or similar) formatted comments to kernel-doc or unformatted comment. Delete a few that are content-free. Signed-off-by: Ben Hutchings Acked-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 9 - .../net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 4 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 3 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h | 3 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 36 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 5 +- drivers/net/ethernet/broadcom/cnic.c | 4 +- drivers/net/ethernet/brocade/bna/bfa_cee.c | 61 +--- drivers/net/ethernet/brocade/bna/bfa_cs.h | 34 +- drivers/net/ethernet/brocade/bna/bfa_defs.h | 63 +--- drivers/net/ethernet/brocade/bna/bfa_defs_cna.h | 15 +- .../net/ethernet/brocade/bna/bfa_defs_mfg_comm.h | 35 +- drivers/net/ethernet/brocade/bna/bfa_defs_status.h | 3 +- drivers/net/ethernet/brocade/bna/bfa_ioc.c | 393 +++++++-------------- drivers/net/ethernet/brocade/bna/bfa_ioc.h | 43 +-- drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c | 48 +-- drivers/net/ethernet/brocade/bna/bfa_msgq.c | 4 +- drivers/net/ethernet/brocade/bna/bfi.h | 81 ++--- drivers/net/ethernet/brocade/bna/bfi_cna.h | 42 +-- drivers/net/ethernet/brocade/bna/bfi_enet.h | 107 ++---- drivers/net/ethernet/brocade/bna/bfi_reg.h | 4 +- drivers/net/ethernet/brocade/bna/bna.h | 51 +-- drivers/net/ethernet/brocade/bna/bna_enet.c | 15 +- drivers/net/ethernet/brocade/bna/bna_hw_defs.h | 33 +- drivers/net/ethernet/brocade/bna/bna_tx_rx.c | 17 +- drivers/net/ethernet/brocade/bna/bna_types.h | 66 +--- drivers/net/ethernet/brocade/bna/bnad.c | 12 +- drivers/net/ethernet/brocade/bna/bnad.h | 4 +- drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 4 +- drivers/net/ethernet/intel/e1000/e1000_main.c | 9 +- drivers/net/ethernet/micrel/ks8851_mll.c | 6 +- drivers/net/ethernet/neterion/vxge/vxge-main.h | 3 +- drivers/net/ethernet/nvidia/forcedeth.c | 3 +- drivers/net/ethernet/sfc/mcdi_pcol.h | 3 +- drivers/net/ethernet/ti/davinci_emac.c | 8 +- 36 files changed, 342 insertions(+), 892 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d2dc420df5bd..52f33b8c41e5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1740,15 +1740,6 @@ struct bnx2x_func_init_params { int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, struct bnx2x_vlan_mac_obj *obj, bool set, int mac_type, unsigned long *ramrod_flags); -/** - * Deletes all MACs configured for the specific MAC object. 
- * - * @param bp Function driver instance - * @param mac_obj MAC object to cleanup - * - * @return zero if all MACs were cleaned - */ - /** * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object * diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h index 426f77aa721a..bbc66ced9c25 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h @@ -321,9 +321,7 @@ #define DISABLE_STATISTIC_COUNTER_ID_VALUE 0 -/** - * This file defines HSI constants common to all microcode flows - */ +/* This file defines HSI constants common to all microcode flows */ #define PROTOCOL_STATE_BIT_OFFSET 6 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index c05f9d94938f..51cac8130051 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -493,8 +493,7 @@ struct bnx2x_ets_params { struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS]; }; -/** - * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB +/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB * when link is already up */ int bnx2x_update_pfc(struct link_params *params, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5b8b521bdbc3..08eca3ff7db6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -11560,8 +11560,7 @@ static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) } } -/** - * IRO array is stored in the following format: +/* IRO array is stored in the following format: * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } */ static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index f371e3c06094..ec62a5c8bd37 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -7275,8 +7275,7 @@ Theotherbitsarereservedandshouldbezero*/ #define CDU_REGION_NUMBER_UCM_AG 4 -/** - * String-to-compress [31:8] = CID (all 24 bits) +/* String-to-compress [31:8] = CID (all 24 bits) * String-to-compress [7:4] = Region * String-to-compress [3:0] = Type */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 76818ef08f9b..f83e033da6da 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -167,9 +167,8 @@ typedef int (*exe_q_remove)(struct bnx2x *bp, union bnx2x_qable_obj *o, struct bnx2x_exeq_elem *elem); -/** - * @return positive is entry was optimized, 0 - if not, negative - * in case of an error. +/* Return positive if entry was optimized, 0 - if not, negative + * in case of an error. */ typedef int (*exe_q_optimize)(struct bnx2x *bp, union bnx2x_qable_obj *o, @@ -1286,12 +1285,11 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp, struct bnx2x_rx_mode_obj *o); /** - * Send and RX_MODE ramrod according to the provided parameters. + * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters. 
* - * @param bp - * @param p Command parameters + * @p: Command parameters * - * @return 0 - if operation was successfull and there is no pending completions, + * Return: 0 - if operation was successfull and there is no pending completions, * positive number - if there are pending completions, * negative - if there were errors */ @@ -1308,7 +1306,11 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp, bnx2x_obj_type type); /** - * Configure multicast MACs list. May configure a new list + * bnx2x_config_mcast - Configure multicast MACs list. + * + * @cmd: command to execute: BNX2X_MCAST_CMD_X + * + * May configure a new list * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current * configuration, continue to execute the pending commands @@ -1319,11 +1321,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp, * the current command will be enqueued to the tail of the * pending commands list. * - * @param bp - * @param p - * @param command to execute: BNX2X_MCAST_CMD_X - * - * @return 0 is operation was sucessfull and there are no pending completions, + * Return: 0 is operation was sucessfull and there are no pending completions, * negative if there were errors, positive if there are pending * completions. */ @@ -1348,21 +1346,17 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp, bnx2x_obj_type type); /** - * Updates RSS configuration according to provided parameters. - * - * @param bp - * @param p + * bnx2x_config_rss - Updates RSS configuration according to provided parameters * - * @return 0 in case of success + * Return: 0 in case of success */ int bnx2x_config_rss(struct bnx2x *bp, struct bnx2x_config_rss_params *p); /** - * Return the current ind_table configuration. + * bnx2x_get_rss_ind_table - Return the current ind_table configuration. * - * @param bp - * @param ind_table buffer to fill with the current indirection + * @ind_table: buffer to fill with the current indirection * table content. Should be at least * T_ETH_INDIRECTION_TABLE_SIZE bytes long. */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 514a528f6ddf..667d89042d35 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -1321,12 +1321,9 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp) bnx2x_stats_comp(bp); } -/** - * This function will prepare the statistics ramrod data the way +/* This function will prepare the statistics ramrod data the way * we will only have to increment the statistics counter and * send the ramrod each time we have to. 
- * - * @param bp */ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp) { diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 22ad7b6d9048..650c545705f3 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5542,9 +5542,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, rcu_read_unlock(); } -/** - * netdev event handler - */ +/* netdev event handler */ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c index 8532a8159025..550d2521ba76 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cee.c +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -127,13 +127,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status) cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); } /** - * bfa_nw_cee_meminfo() - * - * @brief Returns the size of the DMA memory needed by CEE module - * - * @param[in] void - * - * @return Size of DMA region + * bfa_nw_cee_meminfo - Returns the size of the DMA memory needed by CEE module */ u32 bfa_nw_cee_meminfo(void) @@ -142,15 +136,11 @@ bfa_nw_cee_meminfo(void) } /** - * bfa_nw_cee_mem_claim() - * - * @brief Initialized CEE DMA Memory - * - * @param[in] cee CEE module pointer - * dma_kva Kernel Virtual Address of CEE DMA Memory - * dma_pa Physical Address of CEE DMA Memory + * bfa_nw_cee_mem_claim - Initialized CEE DMA Memory * - * @return void + * @cee: CEE module pointer + * @dma_kva: Kernel Virtual Address of CEE DMA Memory + * @dma_pa: Physical Address of CEE DMA Memory */ void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) @@ -165,13 +155,11 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) } /** - * bfa_cee_get_attr() - * - * @brief Send the request to the f/w to fetch CEE attributes. + * bfa_cee_get_attr - Send the request to the f/w to fetch CEE attributes. * - * @param[in] Pointer to the CEE module data structure. + * @cee: Pointer to the CEE module data structure. * - * @return Status + * Return: status */ enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr, @@ -200,13 +188,7 @@ bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr, } /** - * bfa_cee_isrs() - * - * @brief Handles Mail-box interrupts for CEE module. - * - * @param[in] Pointer to the CEE module data structure. - * - * @return void + * bfa_cee_isrs - Handles Mail-box interrupts for CEE module. */ static void @@ -233,14 +215,9 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m) } /** - * bfa_cee_notify() - * - * @brief CEE module heart-beat failure handler. - * @brief CEE module IOC event handler. - * - * @param[in] IOC event type + * bfa_cee_notify - CEE module heart-beat failure handler. * - * @return void + * @event: IOC event type */ static void @@ -287,17 +264,13 @@ bfa_cee_notify(void *arg, enum bfa_ioc_event event) } /** - * bfa_nw_cee_attach() - * - * @brief CEE module-attach API + * bfa_nw_cee_attach - CEE module-attach API * - * @param[in] cee - Pointer to the CEE module data structure - * ioc - Pointer to the ioc module data structure - * dev - Pointer to the device driver module data structure - * The device driver specific mbox ISR functions have - * this pointer as one of the parameters. 
- * - * @return void + * @cee: Pointer to the CEE module data structure + * @ioc: Pointer to the ioc module data structure + * @dev: Pointer to the device driver module data structure. + * The device driver specific mbox ISR functions have + * this pointer as one of the parameters. */ void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h index 3da1a946ccdd..ad004a4c3897 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_cs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h @@ -16,23 +16,18 @@ * www.brocade.com */ -/** - * @file bfa_cs.h BFA common services - */ +/* BFA common services */ #ifndef __BFA_CS_H__ #define __BFA_CS_H__ #include "cna.h" -/** - * @ BFA state machine interfaces - */ +/* BFA state machine interfaces */ typedef void (*bfa_sm_t)(void *sm, int event); -/** - * oc - object class eg. bfa_ioc +/* oc - object class eg. bfa_ioc * st - state, eg. reset * otype - object type, eg. struct bfa_ioc * etype - object type, eg. enum ioc_event @@ -45,9 +40,7 @@ typedef void (*bfa_sm_t)(void *sm, int event); #define bfa_sm_get_state(_sm) ((_sm)->sm) #define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) -/** - * For converting from state machine function to state encoding. - */ +/* For converting from state machine function to state encoding. */ struct bfa_sm_table { bfa_sm_t sm; /*!< state machine function */ int state; /*!< state machine encoding */ @@ -55,13 +48,10 @@ struct bfa_sm_table { }; #define BFA_SM(_sm) ((bfa_sm_t)(_sm)) -/** - * State machine with entry actions. - */ +/* State machine with entry actions. */ typedef void (*bfa_fsm_t)(void *fsm, int event); -/** - * oc - object class eg. bfa_ioc +/* oc - object class eg. bfa_ioc * st - state, eg. reset * otype - object type, eg. struct bfa_ioc * etype - object type, eg. enum ioc_event @@ -90,9 +80,7 @@ bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm) return smt[i].state; } -/** - * @ Generic wait counter. - */ +/* Generic wait counter. */ typedef void (*bfa_wc_resume_t) (void *cbarg); @@ -116,9 +104,7 @@ bfa_wc_down(struct bfa_wc *wc) wc->wc_resume(wc->wc_cbarg); } -/** - * Initialize a waiting counter. - */ +/* Initialize a waiting counter. */ static inline void bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) { @@ -128,9 +114,7 @@ bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) bfa_wc_up(wc); } -/** - * Wait for counter to reach zero - */ +/* Wait for counter to reach zero */ static inline void bfa_wc_wait(struct bfa_wc *wc) { diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h index 48f877337390..e423f82da490 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -26,13 +26,9 @@ #define BFA_STRING_32 32 #define BFA_VERSION_LEN 64 -/** - * ---------------------- adapter definitions ------------ - */ +/* ---------------------- adapter definitions ------------ */ -/** - * BFA adapter level attributes. - */ +/* BFA adapter level attributes. */ enum { BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE), /* @@ -74,18 +70,14 @@ struct bfa_adapter_attr { u8 trunk_capable; }; -/** - * ---------------------- IOC definitions ------------ - */ +/* ---------------------- IOC definitions ------------ */ enum { BFA_IOC_DRIVER_LEN = 16, BFA_IOC_CHIP_REV_LEN = 8, }; -/** - * Driver and firmware versions. - */ +/* Driver and firmware versions. 
*/ struct bfa_ioc_driver_attr { char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */ char driver_ver[BFA_VERSION_LEN]; /*!< driver version */ @@ -95,9 +87,7 @@ struct bfa_ioc_driver_attr { char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */ }; -/** - * IOC PCI device attributes - */ +/* IOC PCI device attributes */ struct bfa_ioc_pci_attr { u16 vendor_id; /*!< PCI vendor ID */ u16 device_id; /*!< PCI device ID */ @@ -108,9 +98,7 @@ struct bfa_ioc_pci_attr { char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */ }; -/** - * IOC states - */ +/* IOC states */ enum bfa_ioc_state { BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */ BFA_IOC_RESET = 2, /*!< IOC is in reset state */ @@ -127,9 +115,7 @@ enum bfa_ioc_state { BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */ }; -/** - * IOC firmware stats - */ +/* IOC firmware stats */ struct bfa_fw_ioc_stats { u32 enable_reqs; u32 disable_reqs; @@ -139,9 +125,7 @@ struct bfa_fw_ioc_stats { u32 unknown_reqs; }; -/** - * IOC driver stats - */ +/* IOC driver stats */ struct bfa_ioc_drv_stats { u32 ioc_isrs; u32 ioc_enables; @@ -157,9 +141,7 @@ struct bfa_ioc_drv_stats { u32 rsvd; }; -/** - * IOC statistics - */ +/* IOC statistics */ struct bfa_ioc_stats { struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */ struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */ @@ -171,9 +153,7 @@ enum bfa_ioc_type { BFA_IOC_TYPE_LL = 3, }; -/** - * IOC attributes returned in queries - */ +/* IOC attributes returned in queries */ struct bfa_ioc_attr { enum bfa_ioc_type ioc_type; enum bfa_ioc_state state; /*!< IOC state */ @@ -187,22 +167,16 @@ struct bfa_ioc_attr { u8 rsvd[4]; /*!< 64bit align */ }; -/** - * Adapter capability mask definition - */ +/* Adapter capability mask definition */ enum { BFA_CM_HBA = 0x01, BFA_CM_CNA = 0x02, BFA_CM_NIC = 0x04, }; -/** - * ---------------------- mfg definitions ------------ - */ +/* ---------------------- mfg definitions ------------ */ -/** - * Checksum size - */ +/* Checksum size */ #define BFA_MFG_CHKSUM_SIZE 16 #define BFA_MFG_PARTNUM_SIZE 14 @@ -213,8 +187,7 @@ enum { #pragma pack(1) -/** - * @brief BFA adapter manufacturing block definition. +/* BFA adapter manufacturing block definition. * * All numerical fields are in big-endian format. */ @@ -256,9 +229,7 @@ struct bfa_mfg_block { #pragma pack() -/** - * ---------------------- pci definitions ------------ - */ +/* ---------------------- pci definitions ------------ */ /* * PCI device ID information @@ -275,9 +246,7 @@ enum { #define bfa_asic_id_ctc(device) \ (bfa_asic_id_ct(device) || bfa_asic_id_ct2(device)) -/** - * PCI sub-system device and vendor ID information - */ +/* PCI sub-system device and vendor ID information */ enum { BFA_PCI_FCOE_SSDEVICE_ID = 0x14, BFA_PCI_CT2_SSID_FCoE = 0x22, diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h index 8ab33ee2c2bc..b39c5f23974b 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h @@ -20,10 +20,7 @@ #include "bfa_defs.h" -/** - * @brief - * FC physical port statistics. - */ +/* FC physical port statistics. */ struct bfa_port_fc_stats { u64 secs_reset; /*!< Seconds since stats is reset */ u64 tx_frames; /*!< Tx frames */ @@ -59,10 +56,7 @@ struct bfa_port_fc_stats { u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */ }; -/** - * @brief - * Eth Physical Port statistics. - */ +/* Eth Physical Port statistics. 
*/ struct bfa_port_eth_stats { u64 secs_reset; /*!< Seconds since stats is reset */ u64 frame_64; /*!< Frames 64 bytes */ @@ -108,10 +102,7 @@ struct bfa_port_eth_stats { u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */ }; -/** - * @brief - * Port statistics. - */ +/* Port statistics. */ union bfa_port_stats_u { struct bfa_port_fc_stats fc; struct bfa_port_eth_stats eth; diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h index 6681fe87c1e1..7fb396fe679d 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h @@ -20,33 +20,23 @@ #include "bfa_defs.h" -/** - * Manufacturing block version - */ +/* Manufacturing block version */ #define BFA_MFG_VERSION 3 #define BFA_MFG_VERSION_UNINIT 0xFF -/** - * Manufacturing block encrypted version - */ +/* Manufacturing block encrypted version */ #define BFA_MFG_ENC_VER 2 -/** - * Manufacturing block version 1 length - */ +/* Manufacturing block version 1 length */ #define BFA_MFG_VER1_LEN 128 -/** - * Manufacturing block header length - */ +/* Manufacturing block header length */ #define BFA_MFG_HDR_LEN 4 #define BFA_MFG_SERIALNUM_SIZE 11 #define STRSZ(_n) (((_n) + 4) & ~3) -/** - * Manufacturing card type - */ +/* Manufacturing card type */ enum { BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */ BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */ @@ -70,9 +60,7 @@ enum { #pragma pack(1) -/** - * Check if Mezz card - */ +/* Check if Mezz card */ #define bfa_mfg_is_mezz(type) (( \ (type) == BFA_MFG_TYPE_JAYHAWK || \ (type) == BFA_MFG_TYPE_WANCHESE || \ @@ -127,9 +115,7 @@ do { \ } \ } while (0) -/** - * VPD data length - */ +/* VPD data length */ #define BFA_MFG_VPD_LEN 512 #define BFA_MFG_VPD_LEN_INVALID 0 @@ -137,9 +123,7 @@ do { \ #define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */ #define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */ -/** - * VPD vendor tag - */ +/* VPD vendor tag */ enum { BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */ BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */ @@ -151,8 +135,7 @@ enum { BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */ }; -/** - * @brief BFA adapter flash vpd data definition. +/* BFA adapter flash vpd data definition. * * All numerical fields are in big-endian format. */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h index 7c5fe6c2e80e..ea9af9ae754d 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h @@ -18,8 +18,7 @@ #ifndef __BFA_DEFS_STATUS_H__ #define __BFA_DEFS_STATUS_H__ -/** - * API status return values +/* API status return values * * NOTE: The error msgs are auto generated from the comments. Only singe line * comments are supported diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 0b640fafbda3..959c58ef972a 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -20,13 +20,9 @@ #include "bfi_reg.h" #include "bfa_defs.h" -/** - * IOC local definitions - */ +/* IOC local definitions */ -/** - * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. - */ +/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 
*/ #define bfa_ioc_firmware_lock(__ioc) \ ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) @@ -96,9 +92,7 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); -/** - * IOC state machine definitions/declarations - */ +/* IOC state machine definitions/declarations */ enum ioc_event { IOC_E_RESET = 1, /*!< IOC reset request */ IOC_E_ENABLE = 2, /*!< IOC enable request */ @@ -148,9 +142,7 @@ static void bfa_iocpf_initfail(struct bfa_ioc *ioc); static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc); static void bfa_iocpf_stop(struct bfa_ioc *ioc); -/** - * IOCPF state machine events - */ +/* IOCPF state machine events */ enum iocpf_event { IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */ IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */ @@ -166,9 +158,7 @@ enum iocpf_event { IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */ }; -/** - * IOCPF states - */ +/* IOCPF states */ enum bfa_iocpf_state { BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */ BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ @@ -215,21 +205,15 @@ static struct bfa_sm_table iocpf_sm_table[] = { {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, }; -/** - * IOC State Machine - */ +/* IOC State Machine */ -/** - * Beginning state. IOC uninit state. - */ +/* Beginning state. IOC uninit state. */ static void bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc) { } -/** - * IOC is in uninit state. - */ +/* IOC is in uninit state. */ static void bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) { @@ -243,18 +227,14 @@ bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) } } -/** - * Reset entry actions -- initialize state machine - */ +/* Reset entry actions -- initialize state machine */ static void bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) { bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); } -/** - * IOC is in reset state. - */ +/* IOC is in reset state. */ static void bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) { @@ -282,8 +262,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) bfa_iocpf_enable(ioc); } -/** - * Host IOC function is being enabled, awaiting response from firmware. +/* Host IOC function is being enabled, awaiting response from firmware. * Semaphore is acquired. */ static void @@ -325,9 +304,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) } } -/** - * Semaphore should be acquired for version check. - */ +/* Semaphore should be acquired for version check. */ static void bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) { @@ -336,9 +313,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) bfa_ioc_send_getattr(ioc); } -/** - * IOC configuration in progress. Timer is active. - */ +/* IOC configuration in progress. Timer is active. */ static void bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) { @@ -419,9 +394,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) bfa_iocpf_disable(ioc); } -/** - * IOC is being disabled - */ +/* IOC is being disabled */ static void bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) { @@ -449,9 +422,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) } } -/** - * IOC disable completion entry. - */ +/* IOC disable completion entry. */ static void bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) { @@ -485,9 +456,7 @@ bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc) { } -/** - * Hardware initialization retry. - */ +/* Hardware initialization retry. 
*/ static void bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) { @@ -534,9 +503,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc) { } -/** - * IOC failure. - */ +/* IOC failure. */ static void bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) { @@ -568,9 +535,7 @@ bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc) { } -/** - * IOC failure. - */ +/* IOC failure. */ static void bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event) { @@ -593,13 +558,9 @@ bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event) } } -/** - * IOCPF State Machine - */ +/* IOCPF State Machine */ -/** - * Reset entry actions -- initialize state machine - */ +/* Reset entry actions -- initialize state machine */ static void bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) { @@ -607,9 +568,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) iocpf->auto_recover = bfa_nw_auto_recover; } -/** - * Beginning state. IOC is in reset state. - */ +/* Beginning state. IOC is in reset state. */ static void bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -626,9 +585,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -/** - * Semaphore should be acquired for version check. - */ +/* Semaphore should be acquired for version check. */ static void bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) { @@ -636,9 +593,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) bfa_ioc_hw_sem_get(iocpf->ioc); } -/** - * Awaiting h/w semaphore to continue with version check. - */ +/* Awaiting h/w semaphore to continue with version check. */ static void bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -683,9 +638,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -/** - * Notify enable completion callback - */ +/* Notify enable completion callback */ static void bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) { @@ -698,9 +651,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) msecs_to_jiffies(BFA_IOC_TOV)); } -/** - * Awaiting firmware version match. - */ +/* Awaiting firmware version match. */ static void bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -727,18 +678,14 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -/** - * Request for semaphore. - */ +/* Request for semaphore. */ static void bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf) { bfa_ioc_hw_sem_get(iocpf->ioc); } -/** - * Awaiting semaphore for h/w initialzation. - */ +/* Awaiting semaphore for h/w initialzation. */ static void bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -778,8 +725,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf) bfa_ioc_reset(iocpf->ioc, false); } -/** - * Hardware is being initialized. Interrupts are enabled. +/* Hardware is being initialized. Interrupts are enabled. * Holding hardware semaphore lock. */ static void @@ -822,8 +768,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf) bfa_ioc_send_enable(iocpf->ioc); } -/** - * Host IOC function is being enabled, awaiting response from firmware. +/* Host IOC function is being enabled, awaiting response from firmware. * Semaphore is acquired. 
*/ static void @@ -896,9 +841,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf) bfa_ioc_send_disable(iocpf->ioc); } -/** - * IOC is being disabled - */ +/* IOC is being disabled */ static void bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -935,9 +878,7 @@ bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf) bfa_ioc_hw_sem_get(iocpf->ioc); } -/** - * IOC hb ack request is being removed. - */ +/* IOC hb ack request is being removed. */ static void bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -963,9 +904,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -/** - * IOC disable completion entry. - */ +/* IOC disable completion entry. */ static void bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf) { @@ -1000,9 +939,7 @@ bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf) bfa_ioc_hw_sem_get(iocpf->ioc); } -/** - * Hardware initialization failed. - */ +/* Hardware initialization failed. */ static void bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -1046,9 +983,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf) { } -/** - * Hardware initialization failed. - */ +/* Hardware initialization failed. */ static void bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -1084,9 +1019,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf) bfa_ioc_hw_sem_get(iocpf->ioc); } -/** - * IOC is in failed state. - */ +/* IOC is in failed state. */ static void bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -1134,10 +1067,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf) { } -/** - * @brief - * IOC is in failed state. - */ +/* IOC is in failed state. */ static void bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) { @@ -1151,13 +1081,9 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) } } -/** - * BFA IOC private functions - */ +/* BFA IOC private functions */ -/** - * Notify common modules registered for notification. - */ +/* Notify common modules registered for notification. */ static void bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event) { @@ -1298,10 +1224,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc) del_timer(&ioc->sem_timer); } -/** - * @brief - * Initialize LPU local memory (aka secondary memory / SRAM) - */ +/* Initialize LPU local memory (aka secondary memory / SRAM) */ static void bfa_ioc_lmem_init(struct bfa_ioc *ioc) { @@ -1366,9 +1289,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc) writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); } -/** - * Get driver and firmware versions. - */ +/* Get driver and firmware versions. */ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) { @@ -1388,9 +1309,7 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) } } -/** - * Returns TRUE if same. - */ +/* Returns TRUE if same. */ bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) { @@ -1408,8 +1327,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) return true; } -/** - * Return true if current running version is valid. Firmware signature and +/* Return true if current running version is valid. Firmware signature and * execution context (driver/bios) must match. 
*/ static bool @@ -1430,9 +1348,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env) return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); } -/** - * Conditionally flush any pending message from firmware at start. - */ +/* Conditionally flush any pending message from firmware at start. */ static void bfa_ioc_msgflush(struct bfa_ioc *ioc) { @@ -1443,9 +1359,6 @@ bfa_ioc_msgflush(struct bfa_ioc *ioc) writel(1, ioc->ioc_regs.lpu_mbox_cmd); } -/** - * @img ioc_init_logic.jpg - */ static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) { @@ -1603,10 +1516,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc) del_timer(&ioc->hb_timer); } -/** - * @brief - * Initiate a full firmware download. - */ +/* Initiate a full firmware download. */ static void bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env) @@ -1672,9 +1582,7 @@ bfa_ioc_reset(struct bfa_ioc *ioc, bool force) bfa_ioc_hwinit(ioc, force); } -/** - * BFA ioc enable reply by firmware - */ +/* BFA ioc enable reply by firmware */ static void bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode, u8 cap_bm) @@ -1686,10 +1594,7 @@ bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode, bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); } -/** - * @brief - * Update BFA configuration from firmware configuration. - */ +/* Update BFA configuration from firmware configuration. */ static void bfa_ioc_getattr_reply(struct bfa_ioc *ioc) { @@ -1702,9 +1607,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc *ioc) bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); } -/** - * Attach time initialization of mbox logic. - */ +/* Attach time initialization of mbox logic. */ static void bfa_ioc_mbox_attach(struct bfa_ioc *ioc) { @@ -1718,9 +1621,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc *ioc) } } -/** - * Mbox poll timer -- restarts any pending mailbox requests. - */ +/* Mbox poll timer -- restarts any pending mailbox requests. */ static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc) { @@ -1760,9 +1661,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc) } } -/** - * Cleanup any pending requests. - */ +/* Cleanup any pending requests. */ static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc) { @@ -1774,12 +1673,12 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc) } /** - * Read data from SMEM to host through PCI memmap + * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap * - * @param[in] ioc memory for IOC - * @param[in] tbuf app memory to store data from smem - * @param[in] soff smem offset - * @param[in] sz size of smem in bytes + * @ioc: memory for IOC + * @tbuf: app memory to store data from smem + * @soff: smem offset + * @sz: size of smem in bytes */ static int bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) @@ -1826,9 +1725,7 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) return 0; } -/** - * Retrieve saved firmware trace from a prior IOC failure. - */ +/* Retrieve saved firmware trace from a prior IOC failure. */ int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) { @@ -1844,9 +1741,7 @@ bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) return status; } -/** - * Save firmware trace if configured. - */ +/* Save firmware trace if configured. */ static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc) { @@ -1861,9 +1756,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc) } } -/** - * Retrieve saved firmware trace from a prior IOC failure. - */ +/* Retrieve saved firmware trace from a prior IOC failure. 
*/ int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen) { @@ -1892,9 +1785,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc) bfa_nw_ioc_debug_save_ftrc(ioc); } -/** - * IOCPF to IOC interface - */ +/* IOCPF to IOC interface */ static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc) { @@ -1928,9 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc) ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); } -/** - * IOC public - */ +/* IOC public */ static enum bfa_status bfa_ioc_pll_init(struct bfa_ioc *ioc) { @@ -1954,8 +1843,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc) return BFA_STATUS_OK; } -/** - * Interface used by diag module to do firmware boot with memory test +/* Interface used by diag module to do firmware boot with memory test * as the entry vector. */ static void @@ -1983,9 +1871,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, bfa_ioc_lpu_start(ioc); } -/** - * Enable/disable IOC failure auto recovery. - */ +/* Enable/disable IOC failure auto recovery. */ void bfa_nw_ioc_auto_recover(bool auto_recover) { @@ -2056,10 +1942,10 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) } /** - * IOC attach time initialization and setup. + * bfa_nw_ioc_attach - IOC attach time initialization and setup. * - * @param[in] ioc memory for IOC - * @param[in] bfa driver instance structure + * @ioc: memory for IOC + * @bfa: driver instance structure */ void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) @@ -2078,9 +1964,7 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) bfa_fsm_send_event(ioc, IOC_E_RESET); } -/** - * Driver detach time IOC cleanup. - */ +/* Driver detach time IOC cleanup. */ void bfa_nw_ioc_detach(struct bfa_ioc *ioc) { @@ -2091,9 +1975,9 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc) } /** - * Setup IOC PCI properties. + * bfa_nw_ioc_pci_init - Setup IOC PCI properties. * - * @param[in] pcidev PCI device information for this IOC + * @pcidev: PCI device information for this IOC */ void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, @@ -2160,10 +2044,10 @@ bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, } /** - * Initialize IOC dma memory + * bfa_nw_ioc_mem_claim - Initialize IOC dma memory * - * @param[in] dm_kva kernel virtual address of IOC dma memory - * @param[in] dm_pa physical address of IOC dma memory + * @dm_kva: kernel virtual address of IOC dma memory + * @dm_pa: physical address of IOC dma memory */ void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) @@ -2176,9 +2060,7 @@ bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) ioc->attr = (struct bfi_ioc_attr *) dm_kva; } -/** - * Return size of dma memory required. - */ +/* Return size of dma memory required. */ u32 bfa_nw_ioc_meminfo(void) { @@ -2201,9 +2083,7 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc) bfa_fsm_send_event(ioc, IOC_E_DISABLE); } -/** - * Initialize memory for saving firmware trace. - */ +/* Initialize memory for saving firmware trace. 
*/ void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) { @@ -2217,9 +2097,7 @@ bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr) return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); } -/** - * Register mailbox message handler function, to be called by common modules - */ +/* Register mailbox message handler function, to be called by common modules */ void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) @@ -2231,11 +2109,12 @@ bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, } /** - * Queue a mailbox command request to firmware. Waits if mailbox is busy. - * Responsibility of caller to serialize + * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware. * - * @param[in] ioc IOC instance - * @param[i] cmd Mailbox command + * @ioc: IOC instance + * @cmd: Mailbox command + * + * Waits if mailbox is busy. Responsibility of caller to serialize */ bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, @@ -2272,9 +2151,7 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, return false; } -/** - * Handle mailbox interrupts - */ +/* Handle mailbox interrupts */ void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc) { @@ -2314,9 +2191,7 @@ bfa_nw_ioc_error_isr(struct bfa_ioc *ioc) bfa_fsm_send_event(ioc, IOC_E_HWERROR); } -/** - * return true if IOC is disabled - */ +/* return true if IOC is disabled */ bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) { @@ -2324,17 +2199,14 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); } -/** - * return true if IOC is operational - */ +/* return true if IOC is operational */ bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) { return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); } -/** - * Add to IOC heartbeat failure notification queue. To be used by common +/* Add to IOC heartbeat failure notification queue. To be used by common * modules such as cee, port, diag. */ void @@ -2518,9 +2390,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); } -/** - * WWN public - */ +/* WWN public */ static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc) { @@ -2533,9 +2403,7 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc) return ioc->attr->mac; } -/** - * Firmware failure detected. Start recovery actions. - */ +/* Firmware failure detected. Start recovery actions. */ static void bfa_ioc_recover(struct bfa_ioc *ioc) { @@ -2545,10 +2413,7 @@ bfa_ioc_recover(struct bfa_ioc *ioc) bfa_fsm_send_event(ioc, IOC_E_HBFAIL); } -/** - * @dg hal_iocpf_pvt BFA IOC PF private functions - * @{ - */ +/* BFA IOC PF private functions */ static void bfa_iocpf_enable(struct bfa_ioc *ioc) @@ -2669,8 +2534,6 @@ bfa_flash_notify(void *cbarg, enum bfa_ioc_event event) /* * Send flash write request. - * - * @param[in] cbarg - callback argument */ static void bfa_flash_write_send(struct bfa_flash *flash) @@ -2699,10 +2562,10 @@ bfa_flash_write_send(struct bfa_flash *flash) flash->offset += len; } -/* - * Send flash read request. +/** + * bfa_flash_read_send - Send flash read request. * - * @param[in] cbarg - callback argument + * @cbarg: callback argument */ static void bfa_flash_read_send(void *cbarg) @@ -2724,11 +2587,11 @@ bfa_flash_read_send(void *cbarg) bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); } -/* - * Process flash response messages upon receiving interrupts. 
+/** + * bfa_flash_intr - Process flash response messages upon receiving interrupts. * - * @param[in] flasharg - flash structure - * @param[in] msg - message structure + * @flasharg: flash structure + * @msg: message structure */ static void bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) @@ -2821,12 +2684,12 @@ bfa_nw_flash_meminfo(void) return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); } -/* - * Flash attach API. +/** + * bfa_nw_flash_attach - Flash attach API. * - * @param[in] flash - flash structure - * @param[in] ioc - ioc structure - * @param[in] dev - device structure + * @flash: flash structure + * @ioc: ioc structure + * @dev: device structure */ void bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) @@ -2842,12 +2705,12 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); } -/* - * Claim memory for flash +/** + * bfa_nw_flash_memclaim - Claim memory for flash * - * @param[in] flash - flash structure - * @param[in] dm_kva - pointer to virtual memory address - * @param[in] dm_pa - physical memory address + * @flash: flash structure + * @dm_kva: pointer to virtual memory address + * @dm_pa: physical memory address */ void bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) @@ -2859,13 +2722,13 @@ bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); } -/* - * Get flash attribute. +/** + * bfa_nw_flash_get_attr - Get flash attribute. * - * @param[in] flash - flash structure - * @param[in] attr - flash attribute structure - * @param[in] cbfn - callback function - * @param[in] cbarg - callback argument + * @flash: flash structure + * @attr: flash attribute structure + * @cbfn: callback function + * @cbarg: callback argument * * Return status. */ @@ -2895,17 +2758,17 @@ bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr, return BFA_STATUS_OK; } -/* - * Update flash partition. +/** + * bfa_nw_flash_update_part - Update flash partition. * - * @param[in] flash - flash structure - * @param[in] type - flash partition type - * @param[in] instance - flash partition instance - * @param[in] buf - update data buffer - * @param[in] len - data buffer length - * @param[in] offset - offset relative to the partition starting address - * @param[in] cbfn - callback function - * @param[in] cbarg - callback argument + * @flash: flash structure + * @type: flash partition type + * @instance: flash partition instance + * @buf: update data buffer + * @len: data buffer length + * @offset: offset relative to the partition starting address + * @cbfn: callback function + * @cbarg: callback argument * * Return status. */ @@ -2944,17 +2807,17 @@ bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance, return BFA_STATUS_OK; } -/* - * Read flash partition. +/** + * bfa_nw_flash_read_part - Read flash partition. 
* - * @param[in] flash - flash structure - * @param[in] type - flash partition type - * @param[in] instance - flash partition instance - * @param[in] buf - read data buffer - * @param[in] len - data buffer length - * @param[in] offset - offset relative to the partition starting address - * @param[in] cbfn - callback function - * @param[in] cbarg - callback argument + * @flash: flash structure + * @type: flash partition type + * @instance: flash partition instance + * @buf: read data buffer + * @len: data buffer length + * @offset: offset relative to the partition starting address + * @cbfn: callback function + * @cbarg: callback argument * * Return status. */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h index 3b4460fdc148..63a85e555df8 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -30,9 +30,7 @@ #define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \ BFI_IOC_TRC_HDR_SZ) -/** - * PCI device information required by IOC - */ +/* PCI device information required by IOC */ struct bfa_pcidev { int pci_slot; u8 pci_func; @@ -41,8 +39,7 @@ struct bfa_pcidev { void __iomem *pci_bar_kva; }; -/** - * Structure used to remember the DMA-able memory block's KVA and Physical +/* Structure used to remember the DMA-able memory block's KVA and Physical * Address */ struct bfa_dma { @@ -52,15 +49,11 @@ struct bfa_dma { #define BFA_DMA_ALIGN_SZ 256 -/** - * smem size for Crossbow and Catapult - */ +/* smem size for Crossbow and Catapult */ #define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ #define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ -/** - * @brief BFA dma address assignment macro. (big endian format) - */ +/* BFA dma address assignment macro. (big endian format) */ #define bfa_dma_be_addr_set(dma_addr, pa) \ __bfa_dma_be_addr_set(&dma_addr, (u64)pa) static inline void @@ -108,9 +101,7 @@ struct bfa_ioc_regs { u32 smem_pg0; }; -/** - * IOC Mailbox structures - */ +/* IOC Mailbox structures */ typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg); struct bfa_mbox_cmd { struct list_head qe; @@ -119,9 +110,7 @@ struct bfa_mbox_cmd { u32 msg[BFI_IOC_MSGSZ]; }; -/** - * IOC mailbox module - */ +/* IOC mailbox module */ typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m); struct bfa_ioc_mbox_mod { struct list_head cmd_q; /*!< pending mbox queue */ @@ -132,9 +121,7 @@ struct bfa_ioc_mbox_mod { } mbhdlr[BFI_MC_MAX]; }; -/** - * IOC callback function interfaces - */ +/* IOC callback function interfaces */ typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status); typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa); typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa); @@ -146,9 +133,7 @@ struct bfa_ioc_cbfn { bfa_ioc_reset_cbfn_t reset_cbfn; }; -/** - * IOC event notification mechanism. - */ +/* IOC event notification mechanism. 
*/ enum bfa_ioc_event { BFA_IOC_E_ENABLED = 1, BFA_IOC_E_DISABLED = 2, @@ -163,9 +148,7 @@ struct bfa_ioc_notify { void *cbarg; }; -/** - * Initialize a IOC event notification structure - */ +/* Initialize a IOC event notification structure */ #define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \ (__notify)->cbfn = (__cbfn); \ (__notify)->cbarg = (__cbarg); \ @@ -261,9 +244,7 @@ struct bfa_ioc_hwif { #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) -/** - * IOC mailbox interface - */ +/* IOC mailbox interface */ bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, bfa_mbox_cmd_cbfn_t cbfn, void *cbarg); @@ -271,9 +252,7 @@ void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc); void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); -/** - * IOC interfaces - */ +/* IOC interfaces */ #define bfa_ioc_pll_init_asic(__ioc) \ ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c index b6b036a143ae..5df0b0c68c5a 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c @@ -87,9 +87,7 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = { .ioc_sync_complete = bfa_ioc_ct_sync_complete, }; -/** - * Called from bfa_ioc_attach() to map asic specific calls. - */ +/* Called from bfa_ioc_attach() to map asic specific calls. */ void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) { @@ -102,9 +100,7 @@ bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc) ioc->ioc_hwif = &nw_hwif_ct2; } -/** - * Return true if firmware of current driver matches the running firmware. - */ +/* Return true if firmware of current driver matches the running firmware. */ static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) { @@ -182,9 +178,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc) bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); } -/** - * Notify other functions on HB failure. - */ +/* Notify other functions on HB failure. */ static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) { @@ -195,9 +189,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) readl(ioc->ioc_regs.alt_ll_halt); } -/** - * Host to LPU mailbox message addresses - */ +/* Host to LPU mailbox message addresses */ static const struct { u32 hfn_mbox; u32 lpu_mbox; @@ -209,9 +201,7 @@ static const struct { { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } }; -/** - * Host <-> LPU mailbox command/status registers - port 0 - */ +/* Host <-> LPU mailbox command/status registers - port 0 */ static const struct { u32 hfn; u32 lpu; @@ -222,9 +212,7 @@ static const struct { { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } }; -/** - * Host <-> LPU mailbox command/status registers - port 1 - */ +/* Host <-> LPU mailbox command/status registers - port 1 */ static const struct { u32 hfn; u32 lpu; @@ -368,9 +356,7 @@ bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc) ioc->ioc_regs.err_set = rb + ERR_SET_REG; } -/** - * Initialize IOC to port mapping. - */ +/* Initialize IOC to port mapping. 
*/ #define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) static void @@ -398,9 +384,7 @@ bfa_ioc_ct2_map_port(struct bfa_ioc *ioc) ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); } -/** - * Set interrupt mode for a function: INTX or MSIX - */ +/* Set interrupt mode for a function: INTX or MSIX */ static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) { @@ -443,9 +427,7 @@ bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc) return false; } -/** - * MSI-X resource allocation for 1860 with no asic block - */ +/* MSI-X resource allocation for 1860 with no asic block */ #define HOSTFN_MSIX_DEFAULT 64 #define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 #define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c @@ -473,9 +455,7 @@ bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc) rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); } -/** - * Cleanup hw semaphore and usecnt registers - */ +/* Cleanup hw semaphore and usecnt registers */ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) { @@ -492,9 +472,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) bfa_nw_ioc_hw_sem_release(ioc); } -/** - * Synchronized IOC failure processing routines - */ +/* Synchronized IOC failure processing routines */ static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) { @@ -518,9 +496,7 @@ bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) return bfa_ioc_ct_sync_complete(ioc); } -/** - * Synchronized IOC failure processing routines - */ +/* Synchronized IOC failure processing routines */ static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) { diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c index dd36427f4752..55067d0d25cf 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c @@ -16,9 +16,7 @@ * www.brocade.com */ -/** - * @file bfa_msgq.c MSGQ module source file. - */ +/* MSGQ module source file. */ #include "bfi.h" #include "bfa_msgq.h" diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h index 0d9df695397a..1f24c23dc786 100644 --- a/drivers/net/ethernet/brocade/bna/bfi.h +++ b/drivers/net/ethernet/brocade/bna/bfi.h @@ -22,15 +22,11 @@ #pragma pack(1) -/** - * BFI FW image type - */ +/* BFI FW image type */ #define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */ #define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) -/** - * Msg header common to all msgs - */ +/* Msg header common to all msgs */ struct bfi_mhdr { u8 msg_class; /*!< @ref enum bfi_mclass */ u8 msg_id; /*!< msg opcode with in the class */ @@ -65,17 +61,14 @@ struct bfi_mhdr { #define BFI_I2H_OPCODE_BASE 128 #define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) -/** - **************************************************************************** +/**************************************************************************** * * Scatter Gather Element and Page definition * **************************************************************************** */ -/** - * DMA addresses - */ +/* DMA addresses */ union bfi_addr_u { struct { u32 addr_lo; @@ -83,9 +76,7 @@ union bfi_addr_u { } a32; }; -/** - * Generic DMA addr-len pair. - */ +/* Generic DMA addr-len pair. 
*/ struct bfi_alen { union bfi_addr_u al_addr; /* DMA addr of buffer */ u32 al_len; /* length of buffer */ @@ -98,26 +89,20 @@ struct bfi_alen { #define BFI_LMSG_PL_WSZ \ ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4) -/** - * Mailbox message structure - */ +/* Mailbox message structure */ #define BFI_MBMSG_SZ 7 struct bfi_mbmsg { struct bfi_mhdr mh; u32 pl[BFI_MBMSG_SZ]; }; -/** - * Supported PCI function class codes (personality) - */ +/* Supported PCI function class codes (personality) */ enum bfi_pcifn_class { BFI_PCIFN_CLASS_FC = 0x0c04, BFI_PCIFN_CLASS_ETH = 0x0200, }; -/** - * Message Classes - */ +/* Message Classes */ enum bfi_mclass { BFI_MC_IOC = 1, /*!< IO Controller (IOC) */ BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */ @@ -159,15 +144,12 @@ enum bfi_mclass { #define BFI_FWBOOT_ENV_OS 0 -/** - *---------------------------------------------------------------------- +/*---------------------------------------------------------------------- * IOC *---------------------------------------------------------------------- */ -/** - * Different asic generations - */ +/* Different asic generations */ enum bfi_asic_gen { BFI_ASIC_GEN_CB = 1, BFI_ASIC_GEN_CT = 2, @@ -196,9 +178,7 @@ enum bfi_ioc_i2h_msgs { BFI_IOC_I2H_HBEAT = BFA_I2HM(4), }; -/** - * BFI_IOC_H2I_GETATTR_REQ message - */ +/* BFI_IOC_H2I_GETATTR_REQ message */ struct bfi_ioc_getattr_req { struct bfi_mhdr mh; union bfi_addr_u attr_addr; @@ -231,30 +211,22 @@ struct bfi_ioc_attr { u32 card_type; /*!< card type */ }; -/** - * BFI_IOC_I2H_GETATTR_REPLY message - */ +/* BFI_IOC_I2H_GETATTR_REPLY message */ struct bfi_ioc_getattr_reply { struct bfi_mhdr mh; /*!< Common msg header */ u8 status; /*!< cfg reply status */ u8 rsvd[3]; }; -/** - * Firmware memory page offsets - */ +/* Firmware memory page offsets */ #define BFI_IOC_SMEM_PG0_CB (0x40) #define BFI_IOC_SMEM_PG0_CT (0x180) -/** - * Firmware statistic offset - */ +/* Firmware statistic offset */ #define BFI_IOC_FWSTATS_OFF (0x6B40) #define BFI_IOC_FWSTATS_SZ (4096) -/** - * Firmware trace offset - */ +/* Firmware trace offset */ #define BFI_IOC_TRC_OFF (0x4b00) #define BFI_IOC_TRC_ENTS 256 #define BFI_IOC_TRC_ENT_SZ 16 @@ -299,9 +271,7 @@ struct bfi_ioc_hbeat { u32 hb_count; /*!< current heart beat count */ }; -/** - * IOC hardware/firmware state - */ +/* IOC hardware/firmware state */ enum bfi_ioc_state { BFI_IOC_UNINIT = 0, /*!< not initialized */ BFI_IOC_INITING = 1, /*!< h/w is being initialized */ @@ -345,9 +315,7 @@ enum { ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ BFI_ADAPTER_UNSUPP)) -/** - * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages - */ +/* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */ struct bfi_ioc_ctrl_req { struct bfi_mhdr mh; u16 clscode; @@ -355,9 +323,7 @@ struct bfi_ioc_ctrl_req { u32 tv_sec; }; -/** - * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages - */ +/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */ struct bfi_ioc_ctrl_reply { struct bfi_mhdr mh; /*!< Common msg header */ u8 status; /*!< enable/disable status */ @@ -367,9 +333,7 @@ struct bfi_ioc_ctrl_reply { }; #define BFI_IOC_MSGSZ 8 -/** - * H2I Messages - */ +/* H2I Messages */ union bfi_ioc_h2i_msg_u { struct bfi_mhdr mh; struct bfi_ioc_ctrl_req enable_req; @@ -378,17 +342,14 @@ union bfi_ioc_h2i_msg_u { u32 mboxmsg[BFI_IOC_MSGSZ]; }; -/** - * I2H Messages - */ +/* I2H Messages */ union bfi_ioc_i2h_msg_u { struct bfi_mhdr mh; struct bfi_ioc_ctrl_reply fw_event; u32 mboxmsg[BFI_IOC_MSGSZ]; }; -/** - 
*---------------------------------------------------------------------- +/*---------------------------------------------------------------------- * MSGQ *---------------------------------------------------------------------- */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h index 4eecabea397b..6704a4392973 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_cna.h +++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h @@ -37,18 +37,14 @@ enum bfi_port_i2h { BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), }; -/** - * Generic REQ type - */ +/* Generic REQ type */ struct bfi_port_generic_req { struct bfi_mhdr mh; /*!< msg header */ u32 msgtag; /*!< msgtag for reply */ u32 rsvd; }; -/** - * Generic RSP type - */ +/* Generic RSP type */ struct bfi_port_generic_rsp { struct bfi_mhdr mh; /*!< common msg header */ u8 status; /*!< port enable status */ @@ -56,44 +52,12 @@ struct bfi_port_generic_rsp { u32 msgtag; /*!< msgtag for reply */ }; -/** - * @todo - * BFI_PORT_H2I_ENABLE_REQ - */ - -/** - * @todo - * BFI_PORT_I2H_ENABLE_RSP - */ - -/** - * BFI_PORT_H2I_DISABLE_REQ - */ - -/** - * BFI_PORT_I2H_DISABLE_RSP - */ - -/** - * BFI_PORT_H2I_GET_STATS_REQ - */ +/* BFI_PORT_H2I_GET_STATS_REQ */ struct bfi_port_get_stats_req { struct bfi_mhdr mh; /*!< common msg header */ union bfi_addr_u dma_addr; }; -/** - * BFI_PORT_I2H_GET_STATS_RSP - */ - -/** - * BFI_PORT_H2I_CLEAR_STATS_REQ - */ - -/** - * BFI_PORT_I2H_CLEAR_STATS_RSP - */ - union bfi_port_h2i_msg_u { struct bfi_mhdr mh; struct bfi_port_generic_req enable_req; diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h index a90f1cf46b41..eef6e1f8aecc 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_enet.h +++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h @@ -16,12 +16,9 @@ * www.brocade.com */ -/** - * @file bfi_enet.h BNA Hardware and Firmware Interface - */ +/* BNA Hardware and Firmware Interface */ -/** - * Skipping statistics collection to avoid clutter. +/* Skipping statistics collection to avoid clutter. * Command is no longer needed: * MTU * TxQ Stop @@ -64,9 +61,7 @@ union bfi_addr_be_u { } a32; }; -/** - * T X Q U E U E D E F I N E S - */ +/* T X Q U E U E D E F I N E S */ /* TxQ Vector (a.k.a. 
Tx-Buffer Descriptor) */ /* TxQ Entry Opcodes */ #define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ @@ -106,10 +101,7 @@ struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */ union bfi_addr_be_u addr; }; -/** - * TxQ Entry Structure - * - */ +/* TxQ Entry Structure */ struct bfi_enet_txq_entry { union { struct bfi_enet_txq_wi_base base; @@ -124,16 +116,12 @@ struct bfi_enet_txq_entry { #define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \ (((_hdr_size) << 10) | ((_offset) & 0x3FF)) -/** - * R X Q U E U E D E F I N E S - */ +/* R X Q U E U E D E F I N E S */ struct bfi_enet_rxq_entry { union bfi_addr_be_u rx_buffer; }; -/** - * R X C O M P L E T I O N Q U E U E D E F I N E S - */ +/* R X C O M P L E T I O N Q U E U E D E F I N E S */ /* CQ Entry Flags */ #define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0) #define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1) @@ -174,9 +162,7 @@ struct bfi_enet_cq_entry { u8 rxq_id; }; -/** - * E N E T C O N T R O L P A T H C O M M A N D S - */ +/* E N E T C O N T R O L P A T H C O M M A N D S */ struct bfi_enet_q { union bfi_addr_u pg_tbl; union bfi_addr_u first_entry; @@ -222,9 +208,7 @@ struct bfi_enet_ib { u16 rsvd; }; -/** - * ENET command messages - */ +/* ENET command messages */ enum bfi_enet_h2i_msgs { /* Rx Commands */ BFI_ENET_H2I_RX_CFG_SET_REQ = 1, @@ -350,9 +334,7 @@ enum bfi_enet_i2h_msgs { BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4), }; -/** - * The following error codes can be returned by the enet commands - */ +/* The following error codes can be returned by the enet commands */ enum bfi_enet_err { BFI_ENET_CMD_OK = 0, BFI_ENET_CMD_FAIL = 1, @@ -364,8 +346,7 @@ enum bfi_enet_err { BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */ }; -/** - * Generic Request +/* Generic Request * * bfi_enet_req is used by: * BFI_ENET_H2I_RX_CFG_CLR_REQ @@ -375,8 +356,7 @@ struct bfi_enet_req { struct bfi_msgq_mhdr mh; }; -/** - * Enable/Disable Request +/* Enable/Disable Request * * bfi_enet_enable_req is used by: * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero) @@ -391,9 +371,7 @@ struct bfi_enet_enable_req { u8 rsvd[3]; }; -/** - * Generic Response - */ +/* Generic Response */ struct bfi_enet_rsp { struct bfi_msgq_mhdr mh; u8 error; /*!< if error see cmd_offset */ @@ -401,20 +379,16 @@ struct bfi_enet_rsp { u16 cmd_offset; /*!< offset to invalid parameter */ }; -/** - * GLOBAL CONFIGURATION - */ +/* GLOBAL CONFIGURATION */ -/** - * bfi_enet_attr_req is used by: +/* bfi_enet_attr_req is used by: * BFI_ENET_H2I_GET_ATTR_REQ */ struct bfi_enet_attr_req { struct bfi_msgq_mhdr mh; }; -/** - * bfi_enet_attr_rsp is used by: +/* bfi_enet_attr_rsp is used by: * BFI_ENET_I2H_GET_ATTR_RSP */ struct bfi_enet_attr_rsp { @@ -427,8 +401,7 @@ struct bfi_enet_attr_rsp { u32 rit_size; }; -/** - * Tx Configuration +/* Tx Configuration * * bfi_enet_tx_cfg is used by: * BFI_ENET_H2I_TX_CFG_SET_REQ @@ -477,8 +450,7 @@ struct bfi_enet_tx_cfg_rsp { } q_handles[BFI_ENET_TXQ_PRIO_MAX]; }; -/** - * Rx Configuration +/* Rx Configuration * * bfi_enet_rx_cfg is used by: * BFI_ENET_H2I_RX_CFG_SET_REQ @@ -553,8 +525,7 @@ struct bfi_enet_rx_cfg_rsp { } q_handles[BFI_ENET_RX_QSET_MAX]; }; -/** - * RIT +/* RIT * * bfi_enet_rit_req is used by: * BFI_ENET_H2I_RIT_CFG_REQ @@ -566,8 +537,7 @@ struct bfi_enet_rit_req { u8 table[BFI_ENET_RSS_RIT_MAX]; }; -/** - * RSS +/* RSS * * bfi_enet_rss_cfg_req is used by: * BFI_ENET_H2I_RSS_CFG_REQ @@ -591,8 +561,7 @@ struct bfi_enet_rss_cfg_req { struct bfi_enet_rss_cfg cfg; }; -/** - * MAC Unicast +/* MAC Unicast 
* * bfi_enet_rx_vlan_req is used by: * BFI_ENET_H2I_MAC_UCAST_SET_REQ @@ -606,17 +575,14 @@ struct bfi_enet_ucast_req { u8 rsvd[2]; }; -/** - * MAC Unicast + VLAN - */ +/* MAC Unicast + VLAN */ struct bfi_enet_mac_n_vlan_req { struct bfi_msgq_mhdr mh; u16 vlan_id; mac_t mac_addr; }; -/** - * MAC Multicast +/* MAC Multicast * * bfi_enet_mac_mfilter_add_req is used by: * BFI_ENET_H2I_MAC_MCAST_ADD_REQ @@ -627,8 +593,7 @@ struct bfi_enet_mcast_add_req { u8 rsvd[2]; }; -/** - * bfi_enet_mac_mfilter_add_rsp is used by: +/* bfi_enet_mac_mfilter_add_rsp is used by: * BFI_ENET_I2H_MAC_MCAST_ADD_RSP */ struct bfi_enet_mcast_add_rsp { @@ -640,8 +605,7 @@ struct bfi_enet_mcast_add_rsp { u8 rsvd1[2]; }; -/** - * bfi_enet_mac_mfilter_del_req is used by: +/* bfi_enet_mac_mfilter_del_req is used by: * BFI_ENET_H2I_MAC_MCAST_DEL_REQ */ struct bfi_enet_mcast_del_req { @@ -650,8 +614,7 @@ struct bfi_enet_mcast_del_req { u8 rsvd[2]; }; -/** - * VLAN +/* VLAN * * bfi_enet_rx_vlan_req is used by: * BFI_ENET_H2I_RX_VLAN_SET_REQ @@ -663,8 +626,7 @@ struct bfi_enet_rx_vlan_req { u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX]; }; -/** - * PAUSE +/* PAUSE * * bfi_enet_set_pause_req is used by: * BFI_ENET_H2I_SET_PAUSE_REQ @@ -676,8 +638,7 @@ struct bfi_enet_set_pause_req { u8 rx_pause; /* 1 = enable; 0 = disable */ }; -/** - * DIAGNOSTICS +/* DIAGNOSTICS * * bfi_enet_diag_lb_req is used by: * BFI_ENET_H2I_DIAG_LOOPBACK @@ -689,16 +650,13 @@ struct bfi_enet_diag_lb_req { u8 enable; /* 1 = enable; 0 = disable */ }; -/** - * enum for Loopback opmodes - */ +/* enum for Loopback opmodes */ enum { BFI_ENET_DIAG_LB_OPMODE_EXT = 0, BFI_ENET_DIAG_LB_OPMODE_CBL = 1, }; -/** - * STATISTICS +/* STATISTICS * * bfi_enet_stats_req is used by: * BFI_ENET_H2I_STATS_GET_REQ @@ -713,9 +671,7 @@ struct bfi_enet_stats_req { union bfi_addr_u host_buffer; }; -/** - * defines for "stats_mask" above. - */ +/* defines for "stats_mask" above. 
*/ #define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */ #define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */ #define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */ @@ -881,8 +837,7 @@ struct bfi_enet_stats_mac { u64 tx_fragments; }; -/** - * Complete statistics, DMAed from fw to host followed by +/* Complete statistics, DMAed from fw to host followed by * BFI_ENET_I2H_STATS_GET_RSP */ struct bfi_enet_stats { diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h index 0e094fe46dfd..c49fa312ddbd 100644 --- a/drivers/net/ethernet/brocade/bna/bfi_reg.h +++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h @@ -221,9 +221,7 @@ enum { #define __PMM_1T_RESET_P 0x00000001 #define PMM_1T_RESET_REG_P1 0x00023c1c -/** - * Brocade 1860 Adapter specific defines - */ +/* Brocade 1860 Adapter specific defines */ #define CT2_PCI_CPQ_BASE 0x00030000 #define CT2_PCI_APP_BASE 0x00030100 #define CT2_PCI_ETH_BASE 0x00030400 diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h index 4d7a5de08e12..ede532b4e9db 100644 --- a/drivers/net/ethernet/brocade/bna/bna.h +++ b/drivers/net/ethernet/brocade/bna/bna.h @@ -25,11 +25,7 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; -/** - * - * Macros and constants - * - */ +/* Macros and constants */ #define BNA_IOC_TIMER_FREQ 200 @@ -356,11 +352,7 @@ do { \ } \ } while (0) -/** - * - * Inline functions - * - */ +/* Inline functions */ static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr) { @@ -377,15 +369,9 @@ static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr) #define bna_attr(_bna) (&(_bna)->ioceth.attr) -/** - * - * Function prototypes - * - */ +/* Function prototypes */ -/** - * BNA - */ +/* BNA */ /* FW response handlers */ void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr); @@ -413,24 +399,19 @@ struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod); void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, struct bna_mcam_handle *handle); -/** - * MBOX - */ +/* MBOX */ /* API for BNAD */ void bna_mbox_handler(struct bna *bna, u32 intr_status); -/** - * ETHPORT - */ +/* ETHPORT */ /* Callbacks for RX */ void bna_ethport_cb_rx_started(struct bna_ethport *ethport); void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport); -/** - * TX MODULE AND TX - */ +/* TX MODULE AND TX */ + /* FW response handelrs */ void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr); @@ -462,9 +443,7 @@ void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, void bna_tx_cleanup_complete(struct bna_tx *tx); void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo); -/** - * RX MODULE, RX, RXF - */ +/* RX MODULE, RX, RXF */ /* FW response handlers */ void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, @@ -522,9 +501,7 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode, void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); void bna_rx_vlanfilter_enable(struct bna_rx *rx); -/** - * ENET - */ +/* ENET */ /* API for RX */ int bna_enet_mtu_get(struct bna_enet *enet); @@ -544,18 +521,14 @@ void bna_enet_mtu_set(struct bna_enet *enet, int mtu, void (*cbfn)(struct bnad *)); void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac); -/** - * IOCETH - */ +/* IOCETH */ /* APIs for BNAD */ void bna_ioceth_enable(struct bna_ioceth *ioceth); void bna_ioceth_disable(struct bna_ioceth 
*ioceth, enum bna_cleanup_type type); -/** - * BNAD - */ +/* BNAD */ /* Callbacks for ENET */ void bnad_cb_ethport_link_status(struct bnad *bnad, diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c index 9ccc586e3767..db14f69d63bc 100644 --- a/drivers/net/ethernet/brocade/bna/bna_enet.c +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -378,9 +378,8 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr) } } -/** - * ETHPORT - */ +/* ETHPORT */ + #define call_ethport_stop_cbfn(_ethport) \ do { \ if ((_ethport)->stop_cbfn) { \ @@ -804,9 +803,8 @@ bna_ethport_cb_rx_stopped(struct bna_ethport *ethport) } } -/** - * ENET - */ +/* ENET */ + #define bna_enet_chld_start(enet) \ do { \ enum bna_tx_type tx_type = \ @@ -1328,9 +1326,8 @@ bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac) *mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc); } -/** - * IOCETH - */ +/* IOCETH */ + #define enable_mbox_intr(_ioceth) \ do { \ u32 intr_status; \ diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h index 4c6aab2a9534..b8c4e21fbf4c 100644 --- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h +++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h @@ -16,20 +16,15 @@ * www.brocade.com */ -/** - * File for interrupt macros and functions - */ +/* File for interrupt macros and functions */ #ifndef __BNA_HW_DEFS_H__ #define __BNA_HW_DEFS_H__ #include "bfi_reg.h" -/** - * - * SW imposed limits - * - */ +/* SW imposed limits */ + #define BFI_ENET_DEF_TXQ 1 #define BFI_ENET_DEF_RXP 1 #define BFI_ENET_DEF_UCAM 1 @@ -141,11 +136,8 @@ } #define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id) -/** - * - * Interrupt related bits, flags and macros - * - */ + +/* Interrupt related bits, flags and macros */ #define IB_STATUS_BITS 0x0000ffff @@ -280,11 +272,7 @@ do { \ (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \ (_rcb)->q_dbell)); -/** - * - * TxQ, RxQ, CQ related bits, offsets, macros - * - */ +/* TxQ, RxQ, CQ related bits, offsets, macros */ /* TxQ Entry Opcodes */ #define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ @@ -334,11 +322,7 @@ do { \ #define BNA_CQ_EF_LOCAL (1 << 20) -/** - * - * Data structures - * - */ +/* Data structures */ struct bna_reg_offset { u32 fn_int_status; @@ -371,8 +355,7 @@ struct bna_txq_wi_vector { struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */ }; -/** - * TxQ Entry Structure +/* TxQ Entry Structure * * BEWARE: Load values into this structure with correct endianess. */ diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c index 276fcb589f4b..71144b396e02 100644 --- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -18,9 +18,7 @@ #include "bna.h" #include "bfi.h" -/** - * IB - */ +/* IB */ static void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) { @@ -29,9 +27,7 @@ bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) (u32)ib->coalescing_timeo, 0); } -/** - * RXF - */ +/* RXF */ #define bna_rxf_vlan_cfg_soft_reset(rxf) \ do { \ @@ -1312,9 +1308,7 @@ bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf) return 0; } -/** - * RX - */ +/* RX */ #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? 
\ (qcfg)->num_paths : ((qcfg)->num_paths * 2)) @@ -2791,9 +2785,8 @@ const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { {1, 2}, }; -/** - * TX - */ +/* TX */ + #define call_tx_stop_cbfn(tx) \ do { \ if ((tx)->stop_cbfn) { \ diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index e8d3ab7ea6cb..d3eb8bddfb2a 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -23,11 +23,7 @@ #include "bfa_cee.h" #include "bfa_msgq.h" -/** - * - * Forward declarations - * - */ +/* Forward declarations */ struct bna_mcam_handle; struct bna_txq; @@ -40,11 +36,7 @@ struct bna_enet; struct bna; struct bnad; -/** - * - * Enums, primitive data types - * - */ +/* Enums, primitive data types */ enum bna_status { BNA_STATUS_T_DISABLED = 0, @@ -331,11 +323,7 @@ struct bna_attr { int max_rit_size; }; -/** - * - * IOCEth - * - */ +/* IOCEth */ struct bna_ioceth { bfa_fsm_t fsm; @@ -351,11 +339,7 @@ struct bna_ioceth { struct bna *bna; }; -/** - * - * Enet - * - */ +/* Enet */ /* Pause configuration */ struct bna_pause_config { @@ -390,11 +374,7 @@ struct bna_enet { struct bna *bna; }; -/** - * - * Ethport - * - */ +/* Ethport */ struct bna_ethport { bfa_fsm_t fsm; @@ -419,11 +399,7 @@ struct bna_ethport { struct bna *bna; }; -/** - * - * Interrupt Block - * - */ +/* Interrupt Block */ /* Doorbell structure */ struct bna_ib_dbell { @@ -447,11 +423,7 @@ struct bna_ib { int interpkt_timeo; }; -/** - * - * Tx object - * - */ +/* Tx object */ /* Tx datapath control structure */ #define BNA_Q_NAME_SIZE 16 @@ -585,11 +557,7 @@ struct bna_tx_mod { struct bna *bna; }; -/** - * - * Rx object - * - */ +/* Rx object */ /* Rx datapath control structure */ struct bna_rcb { @@ -898,11 +866,7 @@ struct bna_rx_mod { u32 rid_mask; }; -/** - * - * CAM - * - */ +/* CAM */ struct bna_ucam_mod { struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */ @@ -927,11 +891,7 @@ struct bna_mcam_mod { struct bna *bna; }; -/** - * - * Statistics - * - */ +/* Statistics */ struct bna_stats { struct bna_dma_addr hw_stats_dma; @@ -949,11 +909,7 @@ struct bna_stats_mod { struct bfi_enet_stats_req stats_clr; }; -/** - * - * BNA - * - */ +/* BNA */ struct bna { struct bna_ident ident; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 67cd2ed0306a..b441f33258e7 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1302,8 +1302,7 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src, return 0; } -/** - * NOTE: Should be called for MSIX only +/* NOTE: Should be called for MSIX only * Unregisters Tx MSIX vector(s) from the kernel */ static void @@ -1322,8 +1321,7 @@ bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info, } } -/** - * NOTE: Should be called for MSIX only +/* NOTE: Should be called for MSIX only * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel */ static int @@ -1354,8 +1352,7 @@ err_return: return -1; } -/** - * NOTE: Should be called for MSIX only +/* NOTE: Should be called for MSIX only * Unregisters Rx MSIX vector(s) from the kernel */ static void @@ -1375,8 +1372,7 @@ bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info, } } -/** - * NOTE: Should be called for MSIX only +/* NOTE: Should be called for MSIX only * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel */ static int diff --git a/drivers/net/ethernet/brocade/bna/bnad.h 
b/drivers/net/ethernet/brocade/bna/bnad.h index 72742be11277..d78339224751 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.h +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -389,9 +389,7 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad, void bnad_debugfs_init(struct bnad *bnad); void bnad_debugfs_uninit(struct bnad *bnad); -/** - * MACROS - */ +/* MACROS */ /* To set & get the stats counters */ #define BNAD_UPDATE_CTR(_bnad, _ctr) \ (((_bnad)->stats.drv_stats._ctr)++) diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c index 4fb47f14dbfe..cb66f574dc97 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c @@ -376,9 +376,7 @@ int ehea_destroy_eq(struct ehea_eq *eq) return 0; } -/** - * allocates memory for a queue and registers pages in phyp - */ +/* allocates memory for a queue and registers pages in phyp */ static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue, int nr_pages, int wqe_size, int act_nr_sges, struct ehea_adapter *adapter, int h_call_q_selector) diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 183a4a3224ba..3bfbb8df8989 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -721,9 +721,7 @@ void e1000_reset(struct e1000_adapter *adapter) e1000_release_manageability(adapter); } -/** - * Dump the eeprom for users having checksum issues - **/ +/* Dump the eeprom for users having checksum issues */ static void e1000_dump_eeprom(struct e1000_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -3056,14 +3054,13 @@ static void e1000_tx_queue(struct e1000_adapter *adapter, mmiowb(); } -/** - * 82547 workaround to avoid controller hang in half-duplex environment. +/* 82547 workaround to avoid controller hang in half-duplex environment. * The workaround is to avoid queuing a large packet that would span * the internal Tx FIFO ring boundary by notifying the stack to resend * the packet at a later time. This gives the Tx FIFO an opportunity to * flush all packets. When that occurs, we reset the Tx FIFO pointers * to the beginning of the Tx FIFO. - **/ + */ #define E1000_FIFO_HDR 0x10 #define E1000_82547_PAD_LEN 0x3E0 diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 76bf1598c609..59ef568d5dd5 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -16,8 +16,7 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -/** - * Supports: +/* Supports: * KS8851 16bit MLL chip from Micrel Inc. */ @@ -465,8 +464,7 @@ static int msg_enable; #define BE1 0x2000 /* Byte Enable 1 */ #define BE0 0x1000 /* Byte Enable 0 */ -/** - * register read/write calls. +/* register read/write calls. * * All these calls issue transactions to access the chip's registers. 
They * all require that the necessary lock is held to prevent accesses when the diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h index 35f3e7552ec2..36ca40f8f249 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h @@ -430,8 +430,7 @@ void vxge_initialize_ethtool_ops(struct net_device *ndev); enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override); -/** - * #define VXGE_DEBUG_INIT: debug for initialization functions +/* #define VXGE_DEBUG_INIT: debug for initialization functions * #define VXGE_DEBUG_TX : debug transmit related functions * #define VXGE_DEBUG_RX : debug recevice related functions * #define VXGE_DEBUG_MEM : debug memory module diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index c503fbebdf7e..8b7c5129c7e1 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -3552,8 +3552,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data) return IRQ_HANDLED; } -/** - * All _optimized functions are used to help increase performance +/* All _optimized functions are used to help increase performance * (reduce CPU and increase throughput). They use descripter version 3, * compiler directives, and reduce memory accesses. */ diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index 0310b9f08c9b..db4beed97669 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -48,8 +48,7 @@ /* Unused commands: 0x23, 0x27, 0x30, 0x31 */ -/** - * MCDI version 1 +/* MCDI version 1 * * Each MCDI request starts with an MCDI_HEADER, which is a 32byte * structure, filled in by the client. diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 9cdd6197a176..ab0bbb78699a 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -614,11 +614,9 @@ static int emac_set_coalesce(struct net_device *ndev, } -/** - * ethtool_ops: DaVinci EMAC Ethtool structure +/* ethtool_ops: DaVinci EMAC Ethtool structure * * Ethtool support for EMAC adapter - * */ static const struct ethtool_ops ethtool_ops = { .get_drvinfo = emac_get_drvinfo, @@ -2015,9 +2013,7 @@ static const struct dev_pm_ops davinci_emac_pm_ops = { .resume = davinci_emac_resume, }; -/** - * davinci_emac_driver: EMAC platform driver structure - */ +/* davinci_emac_driver: EMAC platform driver structure */ static struct platform_driver davinci_emac_driver = { .driver = { .name = "davinci_emac", -- cgit v1.2.3 From 49c7ffbe7b9eee0dbbce09d9afbfdbec98324438 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:30:43 +0000 Subject: ixgbe: count q_vectors instead of MSI-X vectors It makes much more sense for us to count q_vectors instead of MSI-X vectors. We were using num_msix_vectors to find the number of q_vectors in multiple places. This was wasteful since we only had one place that actually needs the number of MSI-X vectors and that is in slow path. 
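As a rough, standalone illustration of the accounting (hypothetical structure and function names, not the driver code; only the subtraction of the single non-queue slow-path vector and the clamp against the per-MAC maximum come from the patch):

#include <stdio.h>

/* One non-queue (slow-path) vector, as implied by the patch. */
#define NON_Q_VECTORS 1

struct adapter {
	int num_msix_vectors;   /* what the PCI layer actually granted */
	int num_q_vectors;      /* cached count used by all per-queue loops */
};

static void acquire_vectors(struct adapter *a, int granted, int max_q_vectors)
{
	a->num_msix_vectors = granted;
	granted -= NON_Q_VECTORS;
	a->num_q_vectors = granted < max_q_vectors ? granted : max_q_vectors;
}

int main(void)
{
	struct adapter a;
	int i;

	acquire_vectors(&a, 10, 64);
	for (i = 0; i < a.num_q_vectors; i++)
		printf("per-queue work for q_vector %d\n", i);
	printf("slow-path vector index: %d\n", a.num_q_vectors);
	return 0;
}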
Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 14 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 8 +- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 27 ++++--- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 99 +++++++++--------------- 4 files changed, 59 insertions(+), 89 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index d1acf2451d52..24cd510e8e86 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -315,7 +315,7 @@ struct ixgbe_ring_container { ? 8 : 1) #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS -/* MAX_MSIX_Q_VECTORS of these are allocated, +/* MAX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. */ struct ixgbe_q_vector { @@ -401,11 +401,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) #define NON_Q_VECTORS (OTHER_VECTOR) #define MAX_MSIX_VECTORS_82599 64 -#define MAX_MSIX_Q_VECTORS_82599 64 +#define MAX_Q_VECTORS_82599 64 #define MAX_MSIX_VECTORS_82598 18 -#define MAX_MSIX_Q_VECTORS_82598 16 +#define MAX_Q_VECTORS_82598 16 -#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599 +#define MAX_Q_VECTORS MAX_Q_VECTORS_82599 #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 #define MIN_MSIX_Q_VECTORS 1 @@ -496,7 +496,7 @@ struct ixgbe_adapter { u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; - struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS]; /* DCB parameters */ struct ieee_pfc *ixgbe_ieee_pfc; @@ -507,8 +507,8 @@ struct ixgbe_adapter { u8 dcbx_cap; enum ixgbe_fc_mode last_lfc_mode; - int num_msix_vectors; - int max_msix_q_vectors; /* true count of q_vectors for device */ + int num_q_vectors; /* current number of q_vectors for device */ + int max_q_vectors; /* true count of q_vectors for device */ struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; struct msix_entry *msix_entries; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index bbc7da5cdb4d..8e1be50af70a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2090,7 +2090,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_q_vector *q_vector; int i; - int num_vectors; u16 tx_itr_param, rx_itr_param; bool need_reset = false; @@ -2126,12 +2125,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, /* check the old value and enable RSC if necessary */ need_reset = ixgbe_update_rsc(adapter); - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - else - num_vectors = 1; - - for (i = 0; i < num_vectors; i++) { + for (i = 0; i < adapter->num_q_vectors; i++) { q_vector = adapter->q_vector[i]; if (q_vector->tx.count && !q_vector->rx.count) /* tx only */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index f36c3c38dbcb..39a80d2bec9c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -507,8 +507,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, * of max_msix_q_vectors + NON_Q_VECTORS, or the number of * vectors we were allocated. 
*/ - adapter->num_msix_vectors = min(vectors, - adapter->max_msix_q_vectors + NON_Q_VECTORS); + vectors -= NON_Q_VECTORS; + adapter->num_q_vectors = min(vectors, adapter->max_q_vectors); } } @@ -695,7 +695,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) **/ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) { - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + int q_vectors = adapter->num_q_vectors; int rxr_remaining = adapter->num_rx_queues; int txr_remaining = adapter->num_tx_queues; int rxr_idx = 0, txr_idx = 0, v_idx = 0; @@ -739,10 +739,12 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) return 0; err_out: - while (v_idx) { - v_idx--; + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; + + while (v_idx--) ixgbe_free_q_vector(adapter, v_idx); - } return -ENOMEM; } @@ -757,14 +759,13 @@ err_out: **/ static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) { - int v_idx, q_vectors; + int v_idx = adapter->num_q_vectors; - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - else - q_vectors = 1; + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + adapter->num_q_vectors = 0; - for (v_idx = 0; v_idx < q_vectors; v_idx++) + while (v_idx--) ixgbe_free_q_vector(adapter, v_idx); } @@ -844,6 +845,8 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) if (err) return err; + adapter->num_q_vectors = 1; + err = pci_enable_msi(adapter->pdev); if (!err) { adapter->flags |= IXGBE_FLAG_MSI_ENABLED; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 59a3f141feb1..903d1653516e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -993,7 +993,6 @@ out_no_update: static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) { - int num_q_vectors; int i; if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) @@ -1002,12 +1001,7 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) /* always use CB2 mode, difference is masked in the CB driver */ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - else - num_q_vectors = 1; - - for (i = 0; i < num_q_vectors; i++) { + for (i = 0; i < adapter->num_q_vectors; i++) { adapter->q_vector[i]->cpu = -1; ixgbe_update_dca(adapter->q_vector[i]); } @@ -1831,11 +1825,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) { struct ixgbe_q_vector *q_vector; - int q_vectors, v_idx; + int v_idx; u32 mask; - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - /* Populate MSIX to EITR Select */ if (adapter->num_vfs > 32) { u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; @@ -1846,7 +1838,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) * Populate the IVAR table and set the ITR values to the * corresponding register. 
*/ - for (v_idx = 0; v_idx < q_vectors; v_idx++) { + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { struct ixgbe_ring *ring; q_vector = adapter->q_vector[v_idx]; @@ -2410,11 +2402,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget) static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; int vector, err; int ri = 0, ti = 0; - for (vector = 0; vector < q_vectors; vector++) { + for (vector = 0; vector < adapter->num_q_vectors; vector++) { struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; struct msix_entry *entry = &adapter->msix_entries[vector]; @@ -2569,30 +2560,28 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) static void ixgbe_free_irq(struct ixgbe_adapter *adapter) { - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - int i, q_vectors; + int vector; - q_vectors = adapter->num_msix_vectors; - i = q_vectors - 1; - free_irq(adapter->msix_entries[i].vector, adapter); - i--; + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { + free_irq(adapter->pdev->irq, adapter); + return; + } - for (; i >= 0; i--) { - /* free only the irqs that were actually requested */ - if (!adapter->q_vector[i]->rx.ring && - !adapter->q_vector[i]->tx.ring) - continue; + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(adapter->msix_entries[i].vector, - NULL); + /* free only the irqs that were actually requested */ + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; - free_irq(adapter->msix_entries[i].vector, - adapter->q_vector[i]); - } - } else { - free_irq(adapter->pdev->irq, adapter); + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + + free_irq(entry->vector, q_vector); } + + free_irq(adapter->msix_entries[vector++].vector, adapter); } /** @@ -2616,9 +2605,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) } IXGBE_WRITE_FLUSH(&adapter->hw); if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - int i; - for (i = 0; i < adapter->num_msix_vectors; i++) - synchronize_irq(adapter->msix_entries[i].vector); + int vector; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) + synchronize_irq(adapter->msix_entries[vector].vector); + + synchronize_irq(adapter->msix_entries[vector++].vector); } else { synchronize_irq(adapter->pdev->irq); } @@ -3561,33 +3553,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev) static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) { int q_idx; - struct ixgbe_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - - /* legacy and MSI only use one vector */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - q_vector = adapter->q_vector[q_idx]; - napi_enable(&q_vector->napi); - } + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + napi_enable(&adapter->q_vector[q_idx]->napi); } static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) { int q_idx; - struct ixgbe_q_vector *q_vector; - int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - /* legacy and MSI only use one vector */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) - q_vectors = 1; - - for (q_idx = 0; q_idx < q_vectors; q_idx++) { - q_vector = 
adapter->q_vector[q_idx]; - napi_disable(&q_vector->napi); - } + for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) + napi_disable(&adapter->q_vector[q_idx]->napi); } #ifdef CONFIG_IXGBE_DCB @@ -4416,12 +4392,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) case ixgbe_mac_82598EB: if (hw->device_id == IXGBE_DEV_ID_82598AT) adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; - adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; + adapter->max_q_vectors = MAX_Q_VECTORS_82598; break; case ixgbe_mac_X540: adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; case ixgbe_mac_82599EB: - adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; + adapter->max_q_vectors = MAX_Q_VECTORS_82599; adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) @@ -5313,7 +5289,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); } else { /* get one bit for every active tx/rx interrupt vector */ - for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { + for (i = 0; i < adapter->num_q_vectors; i++) { struct ixgbe_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) eics |= ((u64)1 << i); @@ -6525,11 +6501,8 @@ static void ixgbe_netpoll(struct net_device *netdev) adapter->flags |= IXGBE_FLAG_IN_NETPOLL; if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { - int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - for (i = 0; i < num_q_vectors; i++) { - struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; - ixgbe_msix_clean_rings(0, q_vector); - } + for (i = 0; i < adapter->num_q_vectors; i++) + ixgbe_msix_clean_rings(0, adapter->q_vector[i]); } else { ixgbe_intr(adapter->pdev->irq, netdev); } -- cgit v1.2.3 From c087663ec870c71b01d8e4ebbd68e481e0e253e3 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 10 May 2012 00:01:46 +0000 Subject: ixgbe: Add upper limit to ring features We are currently using indices to indicate the upper limit on a ring feature. However since we can switch back and forth on features such as DCB and that has effects on other features such as RSS it is preferable to instead store the upper limit separate from the current value for the number of rings related to the feature. 
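A standalone sketch of the limit/indices split (the two fields and their comments come from the patch; everything else here is hypothetical): the bound chosen at probe time stays in limit, while indices is recomputed whenever features are reconfigured.

#include <stdio.h>

struct ring_feature {
	unsigned short limit;   /* upper limit on feature indices */
	unsigned short indices; /* current value of indices */
};

static void reconfigure(struct ring_feature *f, unsigned short available)
{
	f->indices = available < f->limit ? available : f->limit;
}

int main(void)
{
	struct ring_feature rss = { 16, 0 };   /* limit fixed at probe time */

	reconfigure(&rss, 8);    /* e.g. 8 online CPUs -> 8 active queues */
	printf("rss: %hu of %hu\n", rss.indices, rss.limit);

	reconfigure(&rss, 32);   /* more CPUs than the limit -> clamp to 16 */
	printf("rss: %hu of %hu\n", rss.indices, rss.limit);
	return 0;
}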
Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 4 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 13 +++++++++---- 4 files changed, 14 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 24cd510e8e86..ae3da83560ac 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -278,7 +278,8 @@ enum ixgbe_ring_f_enum { #define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES #endif /* IXGBE_FCOE */ struct ixgbe_ring_feature { - int indices; + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ int mask; } ____cacheline_internodealigned_in_smp; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 0ee4dbf4a752..b4da760bd5f0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -770,7 +770,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev) ixgbe_clear_interrupt_scheme(adapter); adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE; + adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE; netdev->features |= NETIF_F_FCOE_CRC; netdev->features |= NETIF_F_FSO; netdev->features |= NETIF_F_FCOE_MTU; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 39a80d2bec9c..b64588a81b8b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -302,7 +302,7 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) bool ret = false; struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; - f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices); + f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->limit); f_fdir->mask = 0; /* @@ -339,7 +339,7 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return false; - f->indices = min_t(int, num_online_cpus(), f->indices); + f->indices = min_t(int, num_online_cpus(), f->limit); adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 903d1653516e..8e83f15d2550 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4386,7 +4386,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) /* Set capability flags */ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); - adapter->ring_feature[RING_F_RSS].indices = rss; + adapter->ring_feature[RING_F_RSS].limit = rss; adapter->flags |= IXGBE_FLAG_RSS_ENABLED; switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -4405,13 +4405,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) /* Flow Director hash filters enabled */ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->atr_sample_rate = 20; - adapter->ring_feature[RING_F_FDIR].indices = + adapter->ring_feature[RING_F_FDIR].limit = IXGBE_MAX_FDIR_INDICES; adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; #ifdef IXGBE_FCOE adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; adapter->flags &= 
~IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = 0; #ifdef CONFIG_IXGBE_DCB /* Default traffic class to use for FCoE */ adapter->fcoe.up = IXGBE_FCOE_DEFTC; @@ -6206,8 +6205,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) if (((protocol == htons(ETH_P_FCOE)) || (protocol == htons(ETH_P_FIP))) && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { - txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); + struct ixgbe_ring_feature *f; + + f = &adapter->ring_feature[RING_F_FCOE]; + + while (txq >= f->indices) + txq -= f->indices; txq += adapter->ring_feature[RING_F_FCOE].mask; + return txq; } #endif -- cgit v1.2.3 From e4b317e90964d471b4f259400f9c80321028f779 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:30:53 +0000 Subject: ixgbe: Add feature offset value to ring features The mask value for ring features was overloaded for FCoE which can lead to some confusion. In order to avoid any confusion I am splitting the mask value and adding an offset value. This can be used for the start of the FCoE rings, and in the future I hope to use it to store the start of the registers for SR-IOV. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 6 +++--- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 23 ++++++++++------------- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- 4 files changed, 16 insertions(+), 18 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index ae3da83560ac..2ffdc8f4c276 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -280,7 +280,8 @@ enum ixgbe_ring_f_enum { struct ixgbe_ring_feature { u16 limit; /* upper limit on feature indices */ u16 indices; /* current value of indices */ - int mask; + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ } ____cacheline_internodealigned_in_smp; /* diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index b4da760bd5f0..0922ece4d853 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -674,7 +674,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) if (adapter->ring_feature[RING_F_FCOE].indices) { /* Use multiple rx queues for FCoE by redirection table */ for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { - fcoe_i = f->mask + i % f->indices; + fcoe_i = f->offset + i % f->indices; fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); @@ -683,7 +683,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); } else { /* Use single rx queue for FCoE */ - fcoe_i = f->mask; + fcoe_i = f->offset; fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0); IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), @@ -691,7 +691,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); } /* send FIP frames to the first FCoE queue */ - fcoe_i = f->mask; + fcoe_i = f->offset; fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), IXGBE_ETQS_QUEUE_EN | diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index b64588a81b8b..47e54e8d2e25 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -185,12 +185,12 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) else ixgbe_cache_ring_rss(adapter); - fcoe_rx_i = f->mask; - fcoe_tx_i = f->mask; + fcoe_rx_i = f->offset; + fcoe_tx_i = f->offset; } for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { - adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i; - adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i; + adapter->rx_ring[f->offset + i]->reg_idx = fcoe_rx_i; + adapter->tx_ring[f->offset + i]->reg_idx = fcoe_tx_i; } return true; } @@ -327,10 +327,7 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) * @adapter: board private structure to initialize * * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. - * The ring feature mask is not used as a mask for FCoE, as it can take any 8 - * rx queues out of the max number of rx queues, instead, it is used as the - * index of the first rx queue used by FCoE. - * + * Offset is used as the index of the first rx queue used by FCoE. **/ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) { @@ -353,7 +350,7 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) } /* adding FCoE rx rings to the end */ - f->mask = adapter->num_rx_queues; + f->offset = adapter->num_rx_queues; adapter->num_rx_queues += f->indices; adapter->num_tx_queues += f->indices; @@ -388,7 +385,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) #ifdef IXGBE_FCOE /* FCoE enabled queues require special configuration indexed - * by feature specific indices and mask. Here we map FCoE + * by feature specific indices and offset. Here we map FCoE * indices onto the DCB queue pairs allowing FCoE to own * configuration later. */ @@ -401,7 +398,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); tc = prio_tc[adapter->fcoe.up]; f->indices = dev->tc_to_txq[tc].count; - f->mask = dev->tc_to_txq[tc].offset; + f->offset = dev->tc_to_txq[tc].offset; } #endif @@ -632,8 +629,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, if (adapter->netdev->features & NETIF_F_FCOE_MTU) { struct ixgbe_ring_feature *f; f = &adapter->ring_feature[RING_F_FCOE]; - if ((rxr_idx >= f->mask) && - (rxr_idx < f->mask + f->indices)) + if ((rxr_idx >= f->offset) && + (rxr_idx < f->offset + f->indices)) set_bit(__IXGBE_RX_FCOE, &ring->state); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 8e83f15d2550..3a807ffd5ce2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6211,7 +6211,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) while (txq >= f->indices) txq -= f->indices; - txq += adapter->ring_feature[RING_F_FCOE].mask; + txq += adapter->ring_feature[RING_F_FCOE].offset; return txq; } -- cgit v1.2.3 From 45e9baa515df201455658e609c19e5ef7c628756 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:30:59 +0000 Subject: ixgbe: Clean up a useless switch statement and dead code in configure_srrctl This patch replaces a switch statement for an 82598 workaround with an if statement that only applies to 82598. 
In addition I am pulling out several dead pieces of code and instead of reading the SRRCTL register and then modifying it we are just writing a value which we generate from scratch. Finally I am also removing any drop enable related code since that was moved to a function of its own. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 36 +++++++++++---------------- 1 file changed, 15 insertions(+), 21 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3a807ffd5ce2..d3cf8873d483 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2847,40 +2847,34 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring) { + struct ixgbe_hw *hw = &adapter->hw; u32 srrctl; u8 reg_idx = rx_ring->reg_idx; - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: { - struct ixgbe_ring_feature *feature = adapter->ring_feature; - const int mask = feature[RING_F_RSS].mask; - reg_idx = reg_idx & mask; - } - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - default: - break; - } - - srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx)); + if (hw->mac.type == ixgbe_mac_82598EB) { + u16 mask = adapter->ring_feature[RING_F_RSS].mask; - srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; - srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; - if (adapter->num_vfs) - srrctl |= IXGBE_SRRCTL_DROP_EN; + /* + * if VMDq is not active we must program one srrctl register + * per RSS queue since we have enabled RDRXCTL.MVMEN + */ + reg_idx &= mask; + } - srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK; + /* configure header buffer length, needed for RSC */ + srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + /* configure the packet buffer length */ #if PAGE_SIZE > IXGBE_MAX_RXBUFFER srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; #else srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; #endif + + /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl); + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) -- cgit v1.2.3 From 0b7f5d0b658a2596e582f1660670dcd1c1fc468e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:31:04 +0000 Subject: ixgbe: Merge RSS and flow director ring register caching and configuration There are really only 3 modes that can control the number of queues. Those are RSS, DCB, and VMDq/SR-IOV. Currently we have things much more broken up than they need to be for how we are configuring the rings. In order to try and straiten some of this out I am going to start merging similar functionality into single functions. To start with I am merging the Flow Director ring configuration into the RSS ring configuration since Flow Director cannot function with DCB or SR-IOV. 
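A standalone sketch of the merged queue-count logic (hypothetical function and parameter names; the start-from-the-RSS-limit step, the clamp of FDIR against online CPUs, and taking the maximum of the two are from the patch):

#include <stdio.h>

static unsigned int set_rss_queues(unsigned int rss_limit,
				   unsigned int fdir_limit,
				   unsigned int online_cpus,
				   int fdir_hash_enabled)
{
	unsigned int rss_i = rss_limit;

	if (fdir_hash_enabled) {
		unsigned int fdir_i =
			online_cpus < fdir_limit ? online_cpus : fdir_limit;

		if (fdir_i > rss_i)
			rss_i = fdir_i;
	}
	return rss_i;   /* used for both the Rx and Tx queue counts */
}

int main(void)
{
	printf("queues: %u\n", set_rss_queues(16, 64, 24, 1));  /* 24 */
	printf("queues: %u\n", set_rss_queues(16, 64, 8, 0));   /* 16 */
	return 0;
}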
Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 98 +++++++--------------------- 1 file changed, 24 insertions(+), 74 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 47e54e8d2e25..83eadd019e6b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -138,30 +138,6 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) } #endif -/** - * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for Flow Director to the assigned rings. - * - **/ -static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter) -{ - int i; - bool ret = false; - - if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && - (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; - ret = true; - } - - return ret; -} - #ifdef IXGBE_FCOE /** * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE @@ -180,10 +156,7 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) return false; if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) - ixgbe_cache_ring_fdir(adapter); - else - ixgbe_cache_ring_rss(adapter); + ixgbe_cache_ring_rss(adapter); fcoe_rx_i = f->offset; fcoe_tx_i = f->offset; @@ -244,9 +217,6 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) return; #endif /* IXGBE_FCOE */ - if (ixgbe_cache_ring_fdir(adapter)) - return; - if (ixgbe_cache_ring_rss(adapter)) return; } @@ -272,53 +242,39 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU. * **/ -static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) +static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) { - bool ret = false; - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS]; + struct ixgbe_ring_feature *f; + u16 rss_i; - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - f->mask = 0xF; - adapter->num_rx_queues = f->indices; - adapter->num_tx_queues = f->indices; - ret = true; + if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + return false; } - return ret; -} + /* set mask for 16 queue limit of RSS */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = f->limit; -/** - * ixgbe_set_fdir_queues - Allocate queues for Flow Director - * @adapter: board private structure to initialize - * - * Flow Director is an advanced Rx filter, attempting to get Rx flows back - * to the original CPU that initiated the Tx session. This runs in addition - * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the - * Rx load across CPUs using RSS. 
- * - **/ -static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) -{ - bool ret = false; - struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR]; - - f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->limit); - f_fdir->mask = 0; + f->indices = rss_i; + f->mask = 0xF; /* - * Use RSS in addition to Flow Director to ensure the best + * Use Flow Director in addition to RSS to ensure the best * distribution of flows across cores, even when an FDIR flow * isn't matched. */ - if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && - (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) { - adapter->num_tx_queues = f_fdir->indices; - adapter->num_rx_queues = f_fdir->indices; - ret = true; - } else { - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + f = &adapter->ring_feature[RING_F_FDIR]; + + f->indices = min_t(u16, num_online_cpus(), f->limit); + rss_i = max_t(u16, rss_i, f->indices); } - return ret; + + adapter->num_rx_queues = rss_i; + adapter->num_tx_queues = rss_i; + + return true; } #ifdef IXGBE_FCOE @@ -343,10 +299,7 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { e_info(probe, "FCoE enabled with RSS\n"); - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) - ixgbe_set_fdir_queues(adapter); - else - ixgbe_set_rss_queues(adapter); + ixgbe_set_rss_queues(adapter); } /* adding FCoE rx rings to the end */ @@ -438,9 +391,6 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) goto done; #endif /* IXGBE_FCOE */ - if (ixgbe_set_fdir_queues(adapter)) - goto done; - if (ixgbe_set_rss_queues(adapter)) goto done; -- cgit v1.2.3 From 18115f82bc93094a3554f3013cc314ee366a6e7a Mon Sep 17 00:00:00 2001 From: Tushar Dave Date: Thu, 12 Jul 2012 08:00:15 +0000 Subject: e1000e: Cleanup code logic in e1000_check_for_serdes_link_82571() Cleanup code to make it more clean and readable. Signed-off-by: Tushar Dave Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/82571.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 36db4df09aed..19f4cb9582b5 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -1677,16 +1677,18 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) e_dbg("ANYSTATE -> DOWN\n"); } else { /* - * Check several times, if Sync and Config - * both are consistently 1 then simply ignore - * the Invalid bit and restart Autoneg + * Check several times, if SYNCH bit and CONFIG + * bit both are consistently 1 then simply ignore + * the IV bit and restart Autoneg */ for (i = 0; i < AN_RETRY_COUNT; i++) { udelay(10); rxcw = er32(RXCW); - if ((rxcw & E1000_RXCW_IV) && - !((rxcw & E1000_RXCW_SYNCH) && - (rxcw & E1000_RXCW_C))) { + if ((rxcw & E1000_RXCW_SYNCH) && + (rxcw & E1000_RXCW_C)) + continue; + + if (rxcw & E1000_RXCW_IV) { mac->serdes_has_link = false; mac->serdes_link_state = e1000_serdes_link_down; -- cgit v1.2.3 From 22a4cca2f4c2d60c703cdc42158c907570f508e6 Mon Sep 17 00:00:00 2001 From: Matthew Vick Date: Thu, 12 Jul 2012 00:02:42 +0000 Subject: e1000e: Program the correct register for ITR when using MSI-X. When configuring interrupt throttling on 82574 in MSI-X mode, we need to be programming the EITR registers instead of the ITR register. 
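The register encoding and the MSI-X fan-out reduce to the following standalone sketch (hypothetical names; the 1000000000 / (itr * 256) conversion, the zero-means-off case, and the per-vector writes in MSI-X mode are from the patch):

#include <stdio.h>

/* Conversion used by the patch: interrupts/sec -> register encoding. */
static unsigned int itr_to_reg(unsigned int itr)
{
	return itr ? 1000000000u / (itr * 256u) : 0;
}

int main(void)
{
	unsigned int reg = itr_to_reg(20000);
	unsigned int vectors = 3;   /* e.g. rx, tx and "other" vectors */
	unsigned int v;

	for (v = 0; v < vectors; v++)
		printf("EITR[%u] <- %u\n", v, reg);   /* MSI-X mode */
	printf("ITR <- %u\n", itr_to_reg(0));         /* throttling off */
	return 0;
}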
-rc2: Renamed e1000_write_itr() to e1000e_write_itr(), fixed whitespace issues, and removed unnecessary !! operation. -rc3: Reduced the scope of the loop variable in e1000e_write_itr(). Signed-off-by: Matthew Vick Acked-by: Bruce Allan Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/e1000.h | 1 + drivers/net/ethernet/intel/e1000e/ethtool.c | 5 ++--- drivers/net/ethernet/intel/e1000e/netdev.c | 32 +++++++++++++++++++++++++---- 3 files changed, 31 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 6e6fffb34581..cd153326c3cf 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -514,6 +514,7 @@ extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); extern void e1000e_get_hw_control(struct e1000_adapter *adapter); extern void e1000e_release_hw_control(struct e1000_adapter *adapter); +extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr); extern unsigned int copybreak; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 905e2147d918..105d554ea9db 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -1897,7 +1897,6 @@ static int e1000_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { struct e1000_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || ((ec->rx_coalesce_usecs > 4) && @@ -1916,9 +1915,9 @@ static int e1000_set_coalesce(struct net_device *netdev, } if (adapter->itr_setting != 0) - ew32(ITR, 1000000000 / (adapter->itr * 256)); + e1000e_write_itr(adapter, adapter->itr); else - ew32(ITR, 0); + e1000e_write_itr(adapter, 0); return 0; } diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index ca477e87eb87..95b245310f17 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2473,6 +2473,30 @@ set_itr_now: } } +/** + * e1000e_write_itr - write the ITR value to the appropriate registers + * @adapter: address of board private structure + * @itr: new ITR value to program + * + * e1000e_write_itr determines if the adapter is in MSI-X mode + * and, if so, writes the EITR registers with the ITR value. + * Otherwise, it writes the ITR value into the ITR register. + **/ +void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr) +{ + struct e1000_hw *hw = &adapter->hw; + u32 new_itr = itr ? 
1000000000 / (itr * 256) : 0; + + if (adapter->msix_entries) { + int vector; + + for (vector = 0; vector < adapter->num_vectors; vector++) + writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); + } else { + ew32(ITR, new_itr); + } +} + /** * e1000_alloc_queues - Allocate memory for all rings * @adapter: board private structure to initialize @@ -3059,7 +3083,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) /* irq moderation */ ew32(RADV, adapter->rx_abs_int_delay); if ((adapter->itr_setting != 0) && (adapter->itr != 0)) - ew32(ITR, 1000000000 / (adapter->itr * 256)); + e1000e_write_itr(adapter, adapter->itr); ctrl_ext = er32(CTRL_EXT); /* Auto-Mask interrupts upon ICR access */ @@ -3486,14 +3510,14 @@ void e1000e_reset(struct e1000_adapter *adapter) dev_info(&adapter->pdev->dev, "Interrupt Throttle Rate turned off\n"); adapter->flags2 |= FLAG2_DISABLE_AIM; - ew32(ITR, 0); + e1000e_write_itr(adapter, 0); } } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { dev_info(&adapter->pdev->dev, "Interrupt Throttle Rate turned on\n"); adapter->flags2 &= ~FLAG2_DISABLE_AIM; adapter->itr = 20000; - ew32(ITR, 1000000000 / (adapter->itr * 256)); + e1000e_write_itr(adapter, adapter->itr); } } @@ -4576,7 +4600,7 @@ link_up: adapter->gorc - adapter->gotc) / 10000; u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; - ew32(ITR, 1000000000 / (itr * 256)); + e1000e_write_itr(adapter, itr); } /* Cause software interrupt to ensure Rx ring is cleaned */ -- cgit v1.2.3 From 15cbc70ea2b8a43ba3f0dc858299ed1c5b295b71 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 17 May 2012 05:14:34 +0000 Subject: ixgbe: Simplify logic for getting traffic class from user priority This patch is meant to help simplify the logic for getting traffic classes from user priorities. To do this I am adding a function named ixgbe_dcb_get_tc_from_up that will go through the traffic classes in reverse order in order to determine which traffic class contains a bit for a given user priority. Adding a declaration for this new function to the header so that we have a centralized means for sorting out traffic classes belonging to features such as FCoE. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 30 +++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 8bfaaee5ac5b..4fd5a0d1e129 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -231,16 +231,32 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, } } -void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) +static u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, + int direction, u8 up) { - int i, up; - unsigned long bitmap; + struct tc_configuration *tc_config = &cfg->tc_config[0]; + u8 prio_mask = 1 << up; + u8 tc; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap; - for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY) - map[up] = i; + /* + * Test for TCs 7 through 1 and report the first match we find. 
If + * we find no match we can assume that the TC is 0 since the TC must + * be set for all user priorities + */ + for (tc = MAX_TRAFFIC_CLASS - 1; tc; tc--) { + if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) + break; } + + return tc; +} + +void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) +{ + u8 up; + + for (up = 0; up < MAX_USER_PRIORITY; up++) + map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); } /** -- cgit v1.2.3 From df0676d1bddf085c4cc4aca39b18f9fcbd08d83d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 17 May 2012 05:14:39 +0000 Subject: ixgbe: Cleanup unpacking code for DCB This is meant to be a generic clean-up of the remaining functions for unpacking data from the DCB structures. The only real changes are: replaced the variable i with tc for functions that were looping through the traffic classes, and added a pointer for tc_class instead of path since that way we only need to pull the pointer once instead of once per loop. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 47 +++++++++++++--------------- 1 file changed, 22 insertions(+), 25 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 4fd5a0d1e129..39ac2feca1f1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -180,55 +180,52 @@ out: void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) { - int i; + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; - *pfc_en = 0; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - *pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i; + for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { + if (tc_config[tc].dcb_pfc != pfc_disabled) + *pfc_en |= 1 << tc; + } } void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, u16 *refill) { - struct tc_bw_alloc *p; - int i; + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &cfg->tc_config[i].path[direction]; - refill[i] = p->data_credits_refill; - } + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + refill[tc] = tc_config[tc].path[direction].data_credits_refill; } void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) { - int i; + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - max[i] = cfg->tc_config[i].desc_credits_max; + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + max[tc] = tc_config[tc].desc_credits_max; } void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, u8 *bwgid) { - struct tc_bw_alloc *p; - int i; + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &cfg->tc_config[i].path[direction]; - bwgid[i] = p->bwg_id; - } + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + bwgid[tc] = tc_config[tc].path[direction].bwg_id; } void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, u8 *ptype) { - struct tc_bw_alloc *p; - int i; + struct tc_configuration *tc_config = &cfg->tc_config[0]; + int tc; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - p = &cfg->tc_config[i].path[direction]; - ptype[i] = p->prio_type; - } + for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) + ptype[tc] = tc_config[tc].path[direction].prio_type; } static u8 
ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, -- cgit v1.2.3 From 02debdc9b9adf88a4e2a42b96d1544e63b82e69a Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 18 May 2012 06:33:31 +0000 Subject: ixgbe: Populate the prio_tc_map in ixgbe_setup_tc There were cases where the prio_tc_map was not populated when we were calling open. This will result in us incorrectly configuring the traffic classes when DCB is enabled. In order to correct this I have updated the code so that we now populate the values prior to allocating the q_vectors and calling ixgbe_open. Cc: John Fastabend Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 3 +-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 26 +++++------------------ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 28 +++++++++++++++++++++++++ 4 files changed, 35 insertions(+), 23 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 39ac2feca1f1..5442b359141e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -228,8 +228,7 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, ptype[tc] = tc_config[tc].path[direction].prio_type; } -static u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, - int direction, u8 up) +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) { struct tc_configuration *tc_config = &cfg->tc_config[0]; u8 prio_mask = 1 << up; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h index 24333b718166..1f4108ee154b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h @@ -146,6 +146,7 @@ void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *); void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *); +u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); /* DCB credits calculation */ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 5164a21b13ca..f1e002d5fa8f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -151,34 +151,21 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) { - int err = 0; - u8 prio_tc[MAX_USER_PRIORITY] = {0}; - int i; struct ixgbe_adapter *adapter = netdev_priv(netdev); + int err = 0; /* Fail command if not in CEE mode */ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return 1; /* verify there is something to do, if not then exit */ - if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - goto out; - - if (state > 0) { - err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs); - ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); - } else { - err = ixgbe_setup_tc(netdev, 0); - } - - if (err) + if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) goto out; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) - netdev_set_prio_tc_map(netdev, i, prio_tc[i]); - + err = 
ixgbe_setup_tc(netdev, + state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0); out: - return err ? 1 : 0; + return !!err; } static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, @@ -584,9 +571,6 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, if (err) goto err_out; - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) - netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]); - err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame); err_out: return err; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d3cf8873d483..91bc60fc58ec 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6595,6 +6595,31 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) return; } +/** + * ixgbe_set_prio_tc_map - Configure netdev prio tc map + * @adapter: Pointer to adapter struct + * + * Populate the netdev user priority to tc map + */ +static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct ieee_ets *ets = adapter->ixgbe_ieee_ets; + u8 prio; + + for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { + u8 tc = 0; + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) + tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio); + else if (ets) + tc = ets->prio_tc[prio]; + + netdev_set_prio_tc_map(dev, prio, tc); + } +} + /** * ixgbe_setup_tc - configure net_device for multiple traffic classes * @@ -6633,6 +6658,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) if (tc) { netdev_set_num_tc(dev, tc); + ixgbe_set_prio_tc_map(adapter); + adapter->flags |= IXGBE_FLAG_DCB_ENABLED; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; @@ -6642,6 +6669,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) } } else { netdev_reset_tc(dev); + if (adapter->hw.mac.type == ixgbe_mac_82598EB) adapter->hw.fc.requested_mode = adapter->last_lfc_mode; -- cgit v1.2.3 From 800bd607c31e648267e8a1055b14ad27bde943f5 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 2 Jun 2012 00:11:02 +0000 Subject: ixgbe: Add function for obtaining FCoE TC based on FCoE user priority In upcoming patches it will become increasingly common to need to determine the FCoE traffic class in order to determine the correct queues for FCoE. In order to make this easier I am adding a function for obtaining the FCoE traffic class based on the user priority. 
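The user-priority-to-TC resolution this builds on can be pictured with a standalone sketch (hypothetical names; the scan from TC 7 down to TC 1 with a fall-back to TC 0 mirrors ixgbe_dcb_get_tc_from_up, added earlier in this series):

#include <stdio.h>

#define MAX_TRAFFIC_CLASS 8

static unsigned int tc_from_up(const unsigned char bitmap[MAX_TRAFFIC_CLASS],
			       unsigned int up)
{
	unsigned char prio_mask = 1u << up;
	unsigned int tc;

	/* Scan TC 7 down to TC 1; fall back to TC 0 when nothing matches. */
	for (tc = MAX_TRAFFIC_CLASS - 1; tc; tc--)
		if (bitmap[tc] & prio_mask)
			break;
	return tc;
}

int main(void)
{
	/* priorities 0-2 left in TC 0, priority 3 (e.g. FCoE) in TC 1 */
	unsigned char up_to_tc_bitmap[MAX_TRAFFIC_CLASS] = { 0x07, 0x08 };

	printf("up 3 -> tc %u\n", tc_from_up(up_to_tc_bitmap, 3));  /* 1 */
	printf("up 0 -> tc %u\n", tc_from_up(up_to_tc_bitmap, 0));  /* 0 */
	return 0;
}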
Cc: John Fastabend Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 15 +++++++++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 7 ++----- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 14 ++++---------- 4 files changed, 22 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 2ffdc8f4c276..5a75a9c8b813 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -707,6 +707,7 @@ extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, struct netdev_fcoe_hbainfo *info); +extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); #endif /* IXGBE_FCOE */ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 0922ece4d853..cc28c44a048c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -960,3 +960,18 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, return 0; } + +/** + * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to + * @adapter - pointer to the device adapter structure + * + * Return : TC that FCoE is mapped to + */ +u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) +{ +#ifdef CONFIG_IXGBE_DCB + return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); +#else + return 0; +#endif +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 83eadd019e6b..c0ff31edb0a9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -49,8 +49,8 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) return true; } -#ifdef CONFIG_IXGBE_DCB +#ifdef CONFIG_IXGBE_DCB /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, unsigned int *tx, unsigned int *rx) @@ -343,13 +343,10 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) * configuration later. 
*/ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - u8 prio_tc[MAX_USER_PRIORITY] = {0}; - int tc; + u8 tc = ixgbe_fcoe_get_tc(adapter); struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc); - tc = prio_tc[adapter->fcoe.up]; f->indices = dev->tc_to_txq[tc].count; f->offset = dev->tc_to_txq[tc].offset; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 91bc60fc58ec..3d7ce7e236e1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3646,18 +3646,12 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) #ifdef IXGBE_FCOE /* FCoE traffic class uses FCOE jumbo frames */ - if (dev->features & NETIF_F_FCOE_MTU) { - int fcoe_pb = 0; - -#ifdef CONFIG_IXGBE_DCB - fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up); + if ((dev->features & NETIF_F_FCOE_MTU) && + (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && + (pb == ixgbe_fcoe_get_tc(adapter))) + tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; #endif - if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) - tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; - } -#endif - /* Calculate delay value for device */ switch (hw->mac.type) { case ixgbe_mac_X540: -- cgit v1.2.3 From d411a9368bca616b4593e4f3d820dceccdd9027e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 30 Jun 2012 00:14:01 +0000 Subject: ixgbe: Merge FCoE set_num and cache_ring calls into RSS/DCB config This change merges the ixgbe_cache_ring_fcoe and ixgbe_set_fcoe_queues logic into the DCB and RSS initialization calls. Cc: John Fastabend Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 257 ++++++++++++-------------- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 19 +- 2 files changed, 129 insertions(+), 147 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index c0ff31edb0a9..d308e7140171 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -28,28 +28,6 @@ #include "ixgbe.h" #include "ixgbe_sriov.h" -/** - * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for RSS to the assigned rings. - * - **/ -static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) -{ - int i; - - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - return false; - - for (i = 0; i < adapter->num_rx_queues; i++) - adapter->rx_ring[i]->reg_idx = i; - for (i = 0; i < adapter->num_tx_queues; i++) - adapter->tx_ring[i]->reg_idx = i; - - return true; -} - #ifdef CONFIG_IXGBE_DCB /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, @@ -136,39 +114,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) return true; } -#endif - -#ifdef IXGBE_FCOE -/** - * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE - * @adapter: board private structure to initialize - * - * Cache the descriptor ring offsets for FCoE mode to the assigned rings. 
- * - */ -static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - int i; - u8 fcoe_rx_i = 0, fcoe_tx_i = 0; - - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return false; - - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - ixgbe_cache_ring_rss(adapter); - - fcoe_rx_i = f->offset; - fcoe_tx_i = f->offset; - } - for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) { - adapter->rx_ring[f->offset + i]->reg_idx = fcoe_rx_i; - adapter->tx_ring[f->offset + i]->reg_idx = fcoe_tx_i; - } - return true; -} -#endif /* IXGBE_FCOE */ +#endif /** * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov * @adapter: board private structure to initialize @@ -187,6 +134,28 @@ static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) return false; } +/** + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for RSS to the assigned rings. + * + **/ +static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) +{ + int i; + + if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) + return false; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + + return true; +} + /** * ixgbe_cache_ring_register - Descriptor ring to register mapping * @adapter: board private structure to initialize @@ -212,13 +181,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) return; #endif -#ifdef IXGBE_FCOE - if (ixgbe_cache_ring_fcoe(adapter)) - return; -#endif /* IXGBE_FCOE */ - - if (ixgbe_cache_ring_rss(adapter)) - return; + ixgbe_cache_ring_rss(adapter); } /** @@ -234,6 +197,74 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) return false; } +#define IXGBE_RSS_16Q_MASK 0xF +#define IXGBE_RSS_8Q_MASK 0x7 +#define IXGBE_RSS_4Q_MASK 0x3 +#define IXGBE_RSS_2Q_MASK 0x1 +#define IXGBE_RSS_DISABLED_MASK 0x0 + +#ifdef CONFIG_IXGBE_DCB +static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_ring_feature *f; + int rss_i, rss_m, i; + int tcs; + + /* Map queue offset and counts onto allocated tx queues */ + tcs = netdev_get_num_tc(dev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* determine the upper limit for our current DCB mode */ + rss_i = dev->num_tx_queues / tcs; + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + /* 8 TC w/ 4 queues per TC */ + rss_i = min_t(u16, rss_i, 4); + rss_m = IXGBE_RSS_4Q_MASK; + } else if (tcs > 4) { + /* 8 TC w/ 8 queues per TC */ + rss_i = min_t(u16, rss_i, 8); + rss_m = IXGBE_RSS_8Q_MASK; + } else { + /* 4 TC w/ 16 queues per TC */ + rss_i = min_t(u16, rss_i, 16); + rss_m = IXGBE_RSS_16Q_MASK; + } + + /* set RSS mask and indices */ + f = &adapter->ring_feature[RING_F_RSS]; + rss_i = min_t(int, rss_i, f->limit); + f->indices = rss_i; + f->mask = rss_m; + +#ifdef IXGBE_FCOE + /* FCoE enabled queues require special configuration indexed + * by feature specific indices and offset. Here we map FCoE + * indices onto the DCB queue pairs allowing FCoE to own + * configuration later. 
+ */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + u8 tc = ixgbe_fcoe_get_tc(adapter); + + f = &adapter->ring_feature[RING_F_FCOE]; + f->indices = min_t(u16, rss_i, f->limit); + f->offset = rss_i * tc; + } + +#endif /* IXGBE_FCOE */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(dev, i, rss_i, rss_i * i); + + adapter->num_tx_queues = rss_i * tcs; + adapter->num_rx_queues = rss_i * tcs; + + return true; +} + +#endif /** * ixgbe_set_rss_queues - Allocate queues for RSS * @adapter: board private structure to initialize @@ -257,7 +288,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) rss_i = f->limit; f->indices = rss_i; - f->mask = 0xF; + f->mask = IXGBE_RSS_16Q_MASK; /* * Use Flow Director in addition to RSS to ensure the best @@ -271,90 +302,41 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) rss_i = max_t(u16, rss_i, f->indices); } - adapter->num_rx_queues = rss_i; - adapter->num_tx_queues = rss_i; - - return true; -} - #ifdef IXGBE_FCOE -/** - * ixgbe_set_fcoe_queues - Allocate queues for Fiber Channel over Ethernet (FCoE) - * @adapter: board private structure to initialize - * - * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges. - * Offset is used as the index of the first rx queue used by FCoE. - **/ -static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) -{ - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + /* + * FCoE can exist on the same rings as standard network traffic + * however it is preferred to avoid that if possible. In order + * to get the best performance we allocate as many FCoE queues + * as we can and we place them at the end of the ring array to + * avoid sharing queues with standard RSS on systems with 24 or + * more CPUs. 
+ */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct net_device *dev = adapter->netdev; + u16 fcoe_i; - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - return false; + f = &adapter->ring_feature[RING_F_FCOE]; - f->indices = min_t(int, num_online_cpus(), f->limit); + /* merge FCoE queues with RSS queues */ + fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus()); + fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues); - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; + /* limit indices to rss_i if MSI-X is disabled */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + fcoe_i = rss_i; - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - e_info(probe, "FCoE enabled with RSS\n"); - ixgbe_set_rss_queues(adapter); + /* attempt to reserve some queues for just FCoE */ + f->indices = min_t(u16, fcoe_i, f->limit); + f->offset = fcoe_i - f->indices; + rss_i = max_t(u16, fcoe_i, rss_i); } - /* adding FCoE rx rings to the end */ - f->offset = adapter->num_rx_queues; - adapter->num_rx_queues += f->indices; - adapter->num_tx_queues += f->indices; - - return true; -} #endif /* IXGBE_FCOE */ - -/* Artificial max queue cap per traffic class in DCB mode */ -#define DCB_QUEUE_CAP 8 - -#ifdef CONFIG_IXGBE_DCB -static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) -{ - int per_tc_q, q, i, offset = 0; - struct net_device *dev = adapter->netdev; - int tcs = netdev_get_num_tc(dev); - - if (!tcs) - return false; - - /* Map queue offset and counts onto allocated tx queues */ - per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP); - q = min_t(int, num_online_cpus(), per_tc_q); - - for (i = 0; i < tcs; i++) { - netdev_set_tc_queue(dev, i, q, offset); - offset += q; - } - - adapter->num_tx_queues = q * tcs; - adapter->num_rx_queues = q * tcs; - -#ifdef IXGBE_FCOE - /* FCoE enabled queues require special configuration indexed - * by feature specific indices and offset. Here we map FCoE - * indices onto the DCB queue pairs allowing FCoE to own - * configuration later. 
- */ - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - u8 tc = ixgbe_fcoe_get_tc(adapter); - struct ixgbe_ring_feature *f = - &adapter->ring_feature[RING_F_FCOE]; - - f->indices = dev->tc_to_txq[tc].count; - f->offset = dev->tc_to_txq[tc].offset; - } -#endif + adapter->num_rx_queues = rss_i; + adapter->num_tx_queues = rss_i; return true; } -#endif /** * ixgbe_set_num_queues - Allocate queues for device, feature dependent @@ -383,11 +365,6 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) goto done; #endif -#ifdef IXGBE_FCOE - if (ixgbe_set_fcoe_queues(adapter)) - goto done; - -#endif /* IXGBE_FCOE */ if (ixgbe_set_rss_queues(adapter)) goto done; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3d7ce7e236e1..ee230f533ee3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3610,16 +3610,17 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) if (hw->mac.type != ixgbe_mac_82598EB) { int i; u32 reg = 0; + u8 msb = 0; + u8 rss_i = adapter->netdev->tc_to_txq[0].count - 1; - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { - u8 msb = 0; - u8 cnt = adapter->netdev->tc_to_txq[i].count; - - while (cnt >>= 1) - msb++; + while (rss_i) { + msb++; + rss_i >>= 1; + } + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) reg |= msb << IXGBE_RQTC_SHIFT_TC(i); - } + IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); } } @@ -7027,7 +7028,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, #endif if (ii->mac == ixgbe_mac_82598EB) +#ifdef CONFIG_IXGBE_DCB + indices = min_t(unsigned int, indices, MAX_TRAFFIC_CLASS * 4); +#else indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); +#endif else indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); -- cgit v1.2.3 From 7efd26d0db5917b9e53d72e76e52338b2600ae20 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 12 Jul 2012 19:33:06 +0000 Subject: ethernet: Use eth_random_addr Convert the existing uses of random_ether_addr to the new eth_random_addr. Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- drivers/net/ethernet/atheros/atl1c/atl1c_hw.c | 2 +- drivers/net/ethernet/atheros/atlx/atl1.c | 2 +- drivers/net/ethernet/atheros/atlx/atl2.c | 2 +- drivers/net/ethernet/ethoc.c | 2 +- drivers/net/ethernet/intel/igb/igb_main.c | 4 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 2 +- drivers/net/ethernet/lantiq_etop.c | 2 +- drivers/net/ethernet/micrel/ks8851.c | 2 +- drivers/net/ethernet/micrel/ks8851_mll.c | 2 +- drivers/net/ethernet/smsc/smsc911x.c | 2 +- drivers/net/ethernet/ti/cpsw.c | 2 +- drivers/net/ethernet/tile/tilegx.c | 2 +- drivers/net/ethernet/wiznet/w5100.c | 2 +- drivers/net/ethernet/wiznet/w5300.c | 2 +- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2 +- 15 files changed, 16 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c index 801f0126512d..21e261ffbe10 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c @@ -199,7 +199,7 @@ int atl1c_read_mac_addr(struct atl1c_hw *hw) err = atl1c_get_permanent_address(hw); if (err) - random_ether_addr(hw->perm_mac_addr); + eth_random_addr(hw->perm_mac_addr); memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); return err; diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index f2402f355cec..7bae2ad7a7c0 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -538,7 +538,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw) u16 i; if (atl1_get_permanent_address(hw)) { - random_ether_addr(hw->perm_mac_addr); + eth_random_addr(hw->perm_mac_addr); ret = 1; } diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 7c0b7e2bcb66..57d64b80fd72 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ -2346,7 +2346,7 @@ static s32 atl2_read_mac_addr(struct atl2_hw *hw) { if (get_permanent_address(hw)) { /* for test */ - /* FIXME: shouldn't we use random_ether_addr() here? */ + /* FIXME: shouldn't we use eth_random_addr() here? */ hw->perm_mac_addr[0] = 0x00; hw->perm_mac_addr[1] = 0x13; hw->perm_mac_addr[2] = 0x74; diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 20297881f8eb..94b7bfcdb24e 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1057,7 +1057,7 @@ static int __devinit ethoc_probe(struct platform_device *pdev) /* Check the MAC again for validity, if it still isn't choose and * program a random one. 
*/ if (!is_valid_ether_addr(netdev->dev_addr)) { - random_ether_addr(netdev->dev_addr); + eth_random_addr(netdev->dev_addr); random_mac = true; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 60e307548f4e..8adeca9787ca 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5008,7 +5008,7 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf) unsigned int device_id; u16 thisvf_devfn; - random_ether_addr(mac_addr); + eth_random_addr(mac_addr); igb_set_vf_mac(adapter, vf, mac_addr); switch (adapter->hw.mac.type) { @@ -5417,7 +5417,7 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) /* generate a new mac address as we were hotplug removed/added */ if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC)) - random_ether_addr(vf_mac); + eth_random_addr(vf_mac); /* process remaining reset events */ igb_vf_reset(adapter, vf); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 2d971d18696e..eb3f67c854a7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -467,7 +467,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) bool enable = ((event_mask & 0x10000000U) != 0); if (enable) { - random_ether_addr(vf_mac_addr); + eth_random_addr(vf_mac_addr); e_info(probe, "IOV: VF %d is enabled MAC %pM\n", vfn, vf_mac_addr); /* diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 9fa39ebf545d..003c5bc7189f 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -645,7 +645,7 @@ ltq_etop_init(struct net_device *dev) memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr)); if (!is_valid_ether_addr(mac.sa_data)) { pr_warn("etop: invalid MAC, using random\n"); - random_ether_addr(mac.sa_data); + eth_random_addr(mac.sa_data); random_mac = true; } diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index 5e313e9a252f..1540ebeb8669 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -422,7 +422,7 @@ static void ks8851_read_mac_addr(struct net_device *dev) * * Get or create the initial mac address for the device and then set that * into the station address register. If there is an EEPROM present, then - * we try that. If no valid mac address is found we use random_ether_addr() + * we try that. If no valid mac address is found we use eth_random_addr() * to create a new one. 
*/ static void ks8851_init_mac(struct ks8851_net *ks) diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index 59ef568d5dd5..38529edfe350 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -1609,7 +1609,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev) memcpy(ks->mac_addr, pdata->mac_addr, 6); if (!is_valid_ether_addr(ks->mac_addr)) { /* Use random MAC address if none passed */ - random_ether_addr(ks->mac_addr); + eth_random_addr(ks->mac_addr); netdev_info(netdev, "Using random mac address\n"); } netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr); diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 54ca99dbb406..62d1baf111ea 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2488,7 +2488,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) eth_hw_addr_random(dev); smsc911x_set_hw_mac_address(pdata, dev->dev_addr); SMSC_TRACE(pdata, probe, - "MAC Address is set to random_ether_addr"); + "MAC Address is set to eth_random_addr"); } } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 6685bbb5705a..ca381d3f9fa8 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -748,7 +748,7 @@ static int __devinit cpsw_probe(struct platform_device *pdev) memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); pr_info("Detected MACID = %pM", priv->mac_addr); } else { - random_ether_addr(priv->mac_addr); + eth_random_addr(priv->mac_addr); pr_info("Random MACID = %pM", priv->mac_addr); } diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 83b4b388ad49..7f500288f6b3 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -1844,7 +1844,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac) memcpy(dev->dev_addr, mac, 6); dev->addr_len = 6; } else { - random_ether_addr(dev->dev_addr); + eth_random_addr(dev->dev_addr); } /* Register the network device. 
*/ diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index a75e9ef5a4ce..a5826a3111a6 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -637,7 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev) if (data && is_valid_ether_addr(data->mac_addr)) { memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); } else { - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); ndev->addr_assign_type |= NET_ADDR_RANDOM; } diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index 3306a20ec211..bdd8891c215a 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -557,7 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev) if (data && is_valid_ether_addr(data->mac_addr)) { memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); } else { - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); ndev->addr_assign_type |= NET_ADDR_RANDOM; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 9c365e192a31..0793299bd39e 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -312,7 +312,7 @@ static void axienet_set_mac_address(struct net_device *ndev, void *address) if (address) memcpy(ndev->dev_addr, address, ETH_ALEN); if (!is_valid_ether_addr(ndev->dev_addr)) - random_ether_addr(ndev->dev_addr); + eth_random_addr(ndev->dev_addr); /* Set up unicast MAC address filter set its mac address */ axienet_iow(lp, XAE_UAW0_OFFSET, -- cgit v1.2.3 From 77d5dfca41ef00dbe4368ba05297adc32d5e5741 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:32:19 +0000 Subject: ixgbevf: Drop all dead or unnecessary code There is a large amount of code present in this driver to support features that either do not exist or are not supported, such as packet split, DCA, or RSC. This patch strips out almost all of that code and, in the case of conditionals based on unused flags, I am flattening the code out to just the path that would have been selected.
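To make the flattening concrete, here is a simplified before/after sketch condensed from the ixgbevf_configure_srrctl() hunk in the diff below; it is illustrative only, not a verbatim excerpt from the patch. Since the VF driver never sets the packet-split flag, only one branch is reachable and the conditional collapses to it:

        /* before: branch keyed on a flag the VF driver never sets */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
                srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
        else
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

        /* after: only the reachable single-buffer path remains */
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;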
Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 11 +- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 40 +---- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 199 ++-------------------- 3 files changed, 22 insertions(+), 228 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index e8dddf572d38..af9c6570f4b5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -43,7 +43,6 @@ #define IXGBE_ALL_RAR_ENTRIES 16 -#ifdef ETHTOOL_GSTATS struct ixgbe_stats { char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; @@ -75,21 +74,17 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = { zero_base)}, {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base, zero_base)}, - {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)}, }; #define IXGBE_QUEUE_STATS_LEN 0 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) #define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) -#endif /* ETHTOOL_GSTATS */ -#ifdef ETHTOOL_TEST static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { "Register test (offline)", "Link test (on/offline)" }; #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) -#endif /* ETHTOOL_TEST */ static int ixgbevf_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) @@ -674,10 +669,8 @@ static int ixgbevf_nway_reset(struct net_device *netdev) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - if (netif_running(netdev)) { - if (!adapter->dev_closed) - ixgbevf_reinit_locked(adapter); - } + if (netif_running(netdev)) + ixgbevf_reinit_locked(adapter); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 0a1b99240d43..0cbce49672d0 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -52,9 +52,6 @@ struct ixgbevf_tx_buffer { struct ixgbevf_rx_buffer { struct sk_buff *skb; dma_addr_t dma; - struct page *page; - dma_addr_t page_dma; - unsigned int page_offset; }; struct ixgbevf_ring { @@ -83,11 +80,6 @@ struct ixgbevf_ring { * offset associated with this ring, which is different * for DCB and RSS modes */ -#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) - /* cpu for tx queue */ - int cpu; -#endif - u64 v_idx; /* maps directly to the index for this ring in the hardware * vector array, can also be used for finding the bit in EICR * and friends that represents the vector for this ring */ @@ -96,16 +88,6 @@ struct ixgbevf_ring { u16 rx_buf_len; }; -enum ixgbevf_ring_f_enum { - RING_F_NONE = 0, - RING_F_ARRAY_SIZE /* must be last in enum set */ -}; - -struct ixgbevf_ring_feature { - int indices; - int mask; -}; - /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ #define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */ @@ -120,8 +102,6 @@ struct ixgbevf_ring_feature { #define IXGBEVF_MIN_RXD 64 /* Supported Rx Buffer Sizes */ -#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */ #define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ #define IXGBEVF_RXBUFFER_2048 2048 #define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ @@ -213,18 +193,13 @@ struct ixgbevf_adapter { /* RX */ struct ixgbevf_ring *rx_ring; /* One per active queue */ int num_rx_queues; - int num_rx_pools; /* == num_rx_queues in 82598 */ - int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ u64 hw_csum_rx_error; u64 hw_rx_no_dma_resources; u64 hw_csum_rx_good; u64 non_eop_descs; int num_msix_vectors; - int max_msix_q_vectors; /* true count of q_vectors for device */ - struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE]; struct msix_entry *msix_entries; - u64 rx_hdr_split; u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; @@ -233,14 +208,8 @@ struct ixgbevf_adapter { */ u32 flags; #define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) -#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) -#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2) -#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3) -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4) -#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5) -#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6) -#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) -#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8) +#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 1) + /* OS defined structs */ struct net_device *netdev; struct pci_dev *pdev; @@ -254,18 +223,15 @@ struct ixgbevf_adapter { u32 eitr_param; unsigned long state; - u32 *config_space; u64 tx_busy; unsigned int tx_ring_count; unsigned int rx_ring_count; u32 link_speed; bool link_up; - unsigned long link_check_timeout; struct work_struct watchdog_task; bool netdev_registered; - bool dev_closed; }; enum ixbgevf_state_t { @@ -302,10 +268,8 @@ extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); -#ifdef ETHTOOL_OPS_COMPAT extern int ethtool_ioctl(struct ifreq *ifr); -#endif extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 0368160286f9..31327711c89e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -177,12 +177,8 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter, /* Tx Descriptors needed, worst case */ #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) -#ifdef MAX_SKB_FRAGS #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ -#else -#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) -#endif static void ixgbevf_tx_timeout(struct net_device *netdev); @@ -255,19 +251,11 @@ cont_loop: * sees the new next_to_clean. 
*/ smp_mb(); -#ifdef HAVE_TX_MQ if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && !test_bit(__IXGBEVF_DOWN, &adapter->state)) { netif_wake_subqueue(netdev, tx_ring->queue_index); ++adapter->restart_queue; } -#else - if (netif_queue_stopped(netdev) && - !test_bit(__IXGBEVF_DOWN, &adapter->state)) { - netif_wake_queue(netdev); - ++adapter->restart_queue; - } -#endif } /* re-arm the interrupt */ @@ -304,10 +292,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, if (is_vlan && test_bit(tag, adapter->active_vlans)) __vlan_hwaccel_put_tag(skb, tag); - if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) - napi_gro_receive(&q_vector->napi, skb); - else - netif_rx(skb); + napi_gro_receive(&q_vector->napi, skb); } /** @@ -365,27 +350,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, while (cleaned_count--) { rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); - - if (!bi->page_dma && - (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { - if (!bi->page) { - bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD); - if (!bi->page) { - adapter->alloc_rx_page_failed++; - goto no_buffers; - } - bi->page_offset = 0; - } else { - /* use a half page if we're re-using */ - bi->page_offset ^= (PAGE_SIZE / 2); - } - - bi->page_dma = dma_map_page(&pdev->dev, bi->page, - bi->page_offset, - (PAGE_SIZE / 2), - DMA_FROM_DEVICE); - } - skb = bi->skb; if (!skb) { skb = netdev_alloc_skb(adapter->netdev, @@ -410,14 +374,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, rx_ring->rx_buf_len, DMA_FROM_DEVICE); } - /* Refresh the desc even if buffer_addrs didn't change because - * each write-back erases this info. */ - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); - rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); - } else { - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); - } + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); i++; if (i == rx_ring->count) @@ -445,16 +402,6 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); } -static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) -{ - return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; -} - -static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) -{ - return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; -} - static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *rx_ring, int *work_done, int work_to_do) @@ -466,7 +413,6 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb; unsigned int i; u32 len, staterr; - u16 hdr_info; bool cleaned = false; int cleaned_count = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; @@ -477,24 +423,12 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, rx_buffer_info = &rx_ring->rx_buffer_info[i]; while (staterr & IXGBE_RXD_STAT_DD) { - u32 upper_len = 0; if (*work_done >= work_to_do) break; (*work_done)++; rmb(); /* read descriptor and rx_buffer_info after status DD */ - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc)); - len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> - IXGBE_RXDADV_HDRBUFLEN_SHIFT; - if (hdr_info & IXGBE_RXDADV_SPH) - adapter->rx_hdr_split++; - if (len > IXGBEVF_RX_HDR_SIZE) - len = IXGBEVF_RX_HDR_SIZE; - upper_len = le16_to_cpu(rx_desc->wb.upper.length); - } else { - len = le16_to_cpu(rx_desc->wb.upper.length); - } + len = le16_to_cpu(rx_desc->wb.upper.length); 
cleaned = true; skb = rx_buffer_info->skb; prefetch(skb->data - NET_IP_ALIGN); @@ -508,26 +442,6 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, skb_put(skb, len); } - if (upper_len) { - dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, - PAGE_SIZE / 2, DMA_FROM_DEVICE); - rx_buffer_info->page_dma = 0; - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_buffer_info->page, - rx_buffer_info->page_offset, - upper_len); - - if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || - (page_count(rx_buffer_info->page) != 1)) - rx_buffer_info->page = NULL; - else - get_page(rx_buffer_info->page); - - skb->len += upper_len; - skb->data_len += upper_len; - skb->truesize += upper_len; - } - i++; if (i == rx_ring->count) i = 0; @@ -539,15 +453,8 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, next_buffer = &rx_ring->rx_buffer_info[i]; if (!(staterr & IXGBE_RXD_STAT_EOP)) { - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - rx_buffer_info->skb = next_buffer->skb; - rx_buffer_info->dma = next_buffer->dma; - next_buffer->skb = skb; - next_buffer->dma = 0; - } else { - skb->next = next_buffer->skb; - skb->next->prev = skb; - } + skb->next = next_buffer->skb; + skb->next->prev = skb; adapter->non_eop_descs++; goto next_desc; } @@ -673,11 +580,6 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget) r_idx + 1); } -#ifndef HAVE_NETDEV_NAPI_LIST - if (!netif_running(adapter->netdev)) - work_done = 0; - -#endif r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); rx_ring = &(adapter->rx_ring[r_idx]); @@ -1320,29 +1222,14 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) srrctl = IXGBE_SRRCTL_DROP_EN; - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - u16 bufsz = IXGBEVF_RXBUFFER_2048; - /* grow the amount we can receive on large page machines */ - if (bufsz < (PAGE_SIZE / 2)) - bufsz = (PAGE_SIZE / 2); - /* cap the bufsz at our largest descriptor size */ - bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz); - - srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; - srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; - srrctl |= ((IXGBEVF_RX_HDR_SIZE << - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & - IXGBE_SRRCTL_BSIZEHDR_MASK); - } else { - srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) - srrctl |= IXGBEVF_RXBUFFER_2048 >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - else - srrctl |= rx_ring->rx_buf_len >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - } + if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) + srrctl |= IXGBEVF_RXBUFFER_2048 >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; + else + srrctl |= rx_ring->rx_buf_len >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); } @@ -1362,36 +1249,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) u32 rdlen; int rx_buf_len; - /* Decide whether to use packet split mode or not */ - if (netdev->mtu > ETH_DATA_LEN) { - if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE) - adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; - else - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; - } else { - if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE) - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; - else - adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; - } - - /* Set the RX buffer length according to the mode */ - if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { - /* PSRTYPE must be initialized in 82599 */ - u32 psrtype = IXGBE_PSRTYPE_TCPHDR | - IXGBE_PSRTYPE_UDPHDR | - 
IXGBE_PSRTYPE_IPV4HDR | - IXGBE_PSRTYPE_IPV6HDR | - IXGBE_PSRTYPE_L2HDR; - IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); - rx_buf_len = IXGBEVF_RX_HDR_SIZE; - } else { - IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); - if (netdev->mtu <= ETH_DATA_LEN) - rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; - else - rx_buf_len = ALIGN(max_frame, 1024); - } + /* PSRTYPE must be initialized in 82599 */ + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); + if (netdev->mtu <= ETH_DATA_LEN) + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + else + rx_buf_len = ALIGN(max_frame, 1024); rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); /* Setup the HW Rx Head and Tail Descriptor Pointers and @@ -1667,10 +1530,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) ixgbevf_save_reset_stats(adapter); ixgbevf_init_last_counter_stats(adapter); - /* bring the link up in the watchdog, this could race with our first - * link up interrupt but shouldn't be a problem */ - adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; - adapter->link_check_timeout = jiffies; mod_timer(&adapter->watchdog_timer, jiffies); } @@ -1723,14 +1582,6 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, dev_kfree_skb(this); } while (skb); } - if (!rx_buffer_info->page) - continue; - dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, - PAGE_SIZE / 2, DMA_FROM_DEVICE); - rx_buffer_info->page_dma = 0; - put_page(rx_buffer_info->page); - rx_buffer_info->page = NULL; - rx_buffer_info->page_offset = 0; } size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; @@ -1958,8 +1809,6 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) /* Start with base case */ adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; - adapter->num_rx_pools = adapter->num_rx_queues; - adapter->num_rx_queues_per_pool = 1; } /** @@ -3220,9 +3069,7 @@ static void ixgbevf_shutdown(struct pci_dev *pdev) ixgbevf_free_all_rx_resources(adapter); } -#ifdef CONFIG_PM pci_save_state(pdev); -#endif pci_disable_device(pdev); } @@ -3350,12 +3197,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, pci_set_master(pdev); -#ifdef HAVE_TX_MQ netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), MAX_TX_QUEUES); -#else - netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter)); -#endif if (!netdev) { err = -ENOMEM; goto err_alloc_etherdev; @@ -3396,10 +3239,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, sizeof(struct ixgbe_mbx_operations)); - adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE; - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; - adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE; - /* setup the private structure */ err = ixgbevf_sw_init(adapter); if (err) @@ -3469,8 +3308,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, hw_dbg(hw, "MAC: %d\n", hw->mac.type); - hw_dbg(hw, "LRO is disabled\n"); - hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); cards_found++; return 0; -- cgit v1.2.3 From fd13a9abeb8d487e2a3cfd2166542c180d33acc9 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:32:24 +0000 Subject: ixgbevf: Drop netdev_registered value since that is already stored in netdev There is no need to keep a separate netdev_registered value since that is already stored in the netdev itself. 
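A minimal sketch of the resulting pattern (the function name here is illustrative, not taken from the patch): the remove path consults the registration state the netdev core already tracks, rather than mirroring it in a driver-private bool.

        /* sketch: use the core's reg_state instead of a private "registered" flag */
        static void ixgbevf_remove_sketch(struct net_device *netdev)
        {
                if (netdev->reg_state == NETREG_REGISTERED)
                        unregister_netdev(netdev);
        }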
Signed-off-by: Alexander Duyck Acked-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 1 - drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 6 +----- 2 files changed, 1 insertion(+), 6 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 0cbce49672d0..ed19f7e378a2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -231,7 +231,6 @@ struct ixgbevf_adapter { bool link_up; struct work_struct watchdog_task; - bool netdev_registered; }; enum ixbgevf_state_t { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 31327711c89e..3cd865110a1a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3297,8 +3297,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, if (err) goto err_register; - adapter->netdev_registered = true; - netif_carrier_off(netdev); ixgbevf_init_last_counter_stats(adapter); @@ -3347,10 +3345,8 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->reset_task); cancel_work_sync(&adapter->watchdog_task); - if (adapter->netdev_registered) { + if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); - adapter->netdev_registered = false; - } ixgbevf_reset_interrupt_capability(adapter); -- cgit v1.2.3 From 525a940c37c706155dc6726582d6430c148d9f8b Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:32:29 +0000 Subject: ixgbevf: Make use of NETIF_F_RXCSUM instead of keeping our own flag The IXGBE_FLAG_RX_CSUM_ENABLED flag is redundant since NETIF_F_RXCSUM is keeping the value we want to already have. As such we can drop the redundant flag and just make use of NETIF_F_RXCSUM. Signed-off-by: Alexander Duyck Acked-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 3 +-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 19 +------------------ 2 files changed, 2 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index ed19f7e378a2..773dafa13ca5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -207,8 +207,7 @@ struct ixgbevf_adapter { * thus the additional *_CAPABLE flags. 
*/ u32 flags; -#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) -#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 1) +#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1) /* OS defined structs */ struct net_device *netdev; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 3cd865110a1a..2e85d7966564 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -307,7 +307,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, skb_checksum_none_assert(skb); /* Rx csum disabled */ - if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) + if (!(adapter->netdev->features & NETIF_F_RXCSUM)) return; /* if IP and error */ @@ -2077,9 +2077,6 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; - /* enable rx csum by default */ - adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; - set_bit(__IXGBEVF_DOWN, &adapter->state); return 0; @@ -3112,19 +3109,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, return stats; } -static int ixgbevf_set_features(struct net_device *netdev, - netdev_features_t features) -{ - struct ixgbevf_adapter *adapter = netdev_priv(netdev); - - if (features & NETIF_F_RXCSUM) - adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; - else - adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; - - return 0; -} - static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbevf_open, .ndo_stop = ixgbevf_close, @@ -3137,7 +3121,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_tx_timeout = ixgbevf_tx_timeout, .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, - .ndo_set_features = ixgbevf_set_features, }; static void ixgbevf_assign_netdev_ops(struct net_device *dev) -- cgit v1.2.3 From e2c28ce76001f01fefb255e0ce1fd6819a2ad1ea Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:32:34 +0000 Subject: ixgbevf: Drop use of eitr_low and eitr_high for hard coded values This patch drops the use of eitr_low and eitr_high as values being stored in the adapter structure. Since the values have no external way to be changed they might as well just be hard coded values and save us the space on the adapter structure. 
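For context, a condensed sketch of the pattern being removed (assembled from the hunks below, not a verbatim excerpt): the two fields were written once with fixed defaults and only ever read back in ixgbevf_update_itr(), so the constants can simply live at the point of use.

        /* before: thresholds stored in the adapter at init time ... */
        adapter->eitr_low = 10;
        adapter->eitr_high = 20;
        /* ... and compared later when updating the ITR */
        if (bytes_perint > adapter->eitr_low)
                retval = low_latency;

        /* after: the same thresholds hard coded where they are used */
        if (bytes_perint > 10)
                retval = low_latency;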
Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2 -- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 12 ++++-------- 2 files changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 773dafa13ca5..c62caba7ec56 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -177,8 +177,6 @@ struct ixgbevf_adapter { /* Interrupt Throttle Rate */ u32 itr_setting; - u16 eitr_low; - u16 eitr_high; /* TX */ struct ixgbevf_ring *tx_ring; /* One per active queue */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 2e85d7966564..75af1920b0f7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -704,17 +704,17 @@ static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter, switch (itr_setting) { case lowest_latency: - if (bytes_perint > adapter->eitr_low) + if (bytes_perint > 10) retval = low_latency; break; case low_latency: - if (bytes_perint > adapter->eitr_high) + if (bytes_perint > 20) retval = bulk_latency; - else if (bytes_perint <= adapter->eitr_low) + else if (bytes_perint <= 10) retval = lowest_latency; break; case bulk_latency: - if (bytes_perint <= adapter->eitr_high) + if (bytes_perint <= 20) retval = low_latency; break; } @@ -2069,10 +2069,6 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) adapter->eitr_param = 20000; adapter->itr_setting = 1; - /* set defaults for eitr in MegaBytes */ - adapter->eitr_low = 10; - adapter->eitr_high = 20; - /* set default ring sizes */ adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; -- cgit v1.2.3 From 3595990a9ccc1b819bfe801940eb05f8a84f253e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:32:40 +0000 Subject: ixgbevf: Cleanup accounting for space needed at start of xmit_frame This change cleans up the accounting needed at the start of xmit_frame so that we can avoid doing too much work to determine how many descriptors we will need. Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 46 ++++++++++++----------- 1 file changed, 24 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 75af1920b0f7..855bb21824fe 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -175,10 +175,8 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter, #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) /* Tx Descriptors needed, worst case */ -#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ - (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 
1 : 0)) -#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ - MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) static void ixgbevf_tx_timeout(struct net_device *netdev); @@ -2932,33 +2930,37 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) unsigned int tx_flags = 0; u8 hdr_len = 0; int r_idx = 0, tso; - int count = 0; - - unsigned int f; + u16 count = TXD_USE_COUNT(skb_headlen(skb)); +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD + unsigned short f; +#endif tx_ring = &adapter->tx_ring[r_idx]; + /* + * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD, + * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, + * + 2 desc gap to keep tail from touching head, + * + 1 desc for context descriptor, + * otherwise try next time + */ +#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); +#else + count += skb_shinfo(skb)->nr_frags; +#endif + if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count + 3)) { + adapter->tx_busy++; + return NETDEV_TX_BUSY; + } + if (vlan_tx_tag_present(skb)) { tx_flags |= vlan_tx_tag_get(skb); tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; } - /* four things can cause us to need a context descriptor */ - if (skb_is_gso(skb) || - (skb->ip_summed == CHECKSUM_PARTIAL) || - (tx_flags & IXGBE_TX_FLAGS_VLAN)) - count++; - - count += TXD_USE_COUNT(skb_headlen(skb)); - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f])); - - if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) { - adapter->tx_busy++; - return NETDEV_TX_BUSY; - } - first = tx_ring->next_to_use; if (skb->protocol == htons(ETH_P_IP)) -- cgit v1.2.3 From 6b43c44654f686c68f742baebb85ee9185d48687 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:32:45 +0000 Subject: ixgbevf: Update q_vector to contain ring pointers instead of bitmaps For most cases the ixgbevf driver will only ever contain a single Tx and single Rx queue. In order to track that it makes more sense to use a pointer instead of using a bitmap which must be searched in order to locate the ring for a given adapter index. As such I am changing the code to use pointers and an iterator to access all rings on a given q_vector. Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 18 ++- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 152 ++++++++-------------- 2 files changed, 65 insertions(+), 105 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index c62caba7ec56..8cae4ff95315 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -55,6 +55,7 @@ struct ixgbevf_ring { + struct ixgbevf_ring *next; struct ixgbevf_adapter *adapter; /* backlink */ void *desc; /* descriptor ring memory */ dma_addr_t dma; /* phys.
address of descriptor ring */ @@ -120,18 +121,23 @@ struct ixgbevf_ring { #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 +struct ixgbevf_ring_container { + struct ixgbevf_ring *ring; /* pointer to linked list of rings */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +/* iterator for handling rings in ring container */ +#define ixgbevf_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + /* MAX_MSIX_Q_VECTORS of these are allocated, * but we only use one per queue-specific vector. */ struct ixgbevf_q_vector { struct ixgbevf_adapter *adapter; struct napi_struct napi; - DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ - DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ - u8 rxr_count; /* Rx ring count assigned to this vector */ - u8 txr_count; /* Tx ring count assigned to this vector */ - u8 tx_itr; - u8 rx_itr; + struct ixgbevf_ring_container rx, tx; u32 eitr; int v_idx; /* vector index in list */ }; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 855bb21824fe..744a02697ef6 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -526,14 +526,9 @@ static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget) struct ixgbevf_q_vector *q_vector = container_of(napi, struct ixgbevf_q_vector, napi); struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbevf_ring *rx_ring = NULL; int work_done = 0; - long r_idx; - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - rx_ring = &(adapter->rx_ring[r_idx]); - - ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget); + ixgbevf_clean_rx_irq(q_vector, q_vector->rx.ring, &work_done, budget); /* If all Rx work done, exit the polling mode */ if (work_done < budget) { @@ -541,7 +536,8 @@ static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget) if (adapter->itr_setting & 1) ixgbevf_set_itr_msix(q_vector); if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) - ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx); + ixgbevf_irq_enable_queues(adapter, + 1 << q_vector->v_idx); } return work_done; @@ -560,26 +556,16 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget) struct ixgbevf_q_vector *q_vector = container_of(napi, struct ixgbevf_q_vector, napi); struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbevf_ring *rx_ring = NULL; - int work_done = 0, i; - long r_idx; - u64 enable_mask = 0; + struct ixgbevf_ring *rx_ring; + int work_done = 0; /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ - budget /= (q_vector->rxr_count ?: 1); + budget /= (q_vector->rx.count ?: 1); budget = max(budget, 1); - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - for (i = 0; i < q_vector->rxr_count; i++) { - rx_ring = &(adapter->rx_ring[r_idx]); - ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget); - enable_mask |= rx_ring->v_idx; - r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, - r_idx + 1); - } - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - rx_ring = &(adapter->rx_ring[r_idx]); + ixgbevf_for_each_ring(rx_ring, q_vector->rx) + ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget); /* If all Rx work done, exit the polling mode */ if (work_done < budget) { @@ -587,7 
+573,8 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget) if (adapter->itr_setting & 1) ixgbevf_set_itr_msix(q_vector); if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) - ixgbevf_irq_enable_queues(adapter, enable_mask); + ixgbevf_irq_enable_queues(adapter, + 1 << q_vector->v_idx); } return work_done; @@ -605,7 +592,7 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) { struct ixgbevf_q_vector *q_vector; struct ixgbe_hw *hw = &adapter->hw; - int i, j, q_vectors, v_idx, r_idx; + int q_vectors, v_idx; u32 mask; q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; @@ -615,33 +602,19 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) * corresponding register. */ for (v_idx = 0; v_idx < q_vectors; v_idx++) { + struct ixgbevf_ring *ring; q_vector = adapter->q_vector[v_idx]; - /* XXX for_each_set_bit(...) */ - r_idx = find_first_bit(q_vector->rxr_idx, - adapter->num_rx_queues); - - for (i = 0; i < q_vector->rxr_count; i++) { - j = adapter->rx_ring[r_idx].reg_idx; - ixgbevf_set_ivar(adapter, 0, j, v_idx); - r_idx = find_next_bit(q_vector->rxr_idx, - adapter->num_rx_queues, - r_idx + 1); - } - r_idx = find_first_bit(q_vector->txr_idx, - adapter->num_tx_queues); - - for (i = 0; i < q_vector->txr_count; i++) { - j = adapter->tx_ring[r_idx].reg_idx; - ixgbevf_set_ivar(adapter, 1, j, v_idx); - r_idx = find_next_bit(q_vector->txr_idx, - adapter->num_tx_queues, - r_idx + 1); - } + + ixgbevf_for_each_ring(ring, q_vector->rx) + ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); + + ixgbevf_for_each_ring(ring, q_vector->tx) + ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); /* if this is a tx only vector halve the interrupt rate */ - if (q_vector->txr_count && !q_vector->rxr_count) + if (q_vector->tx.ring && !q_vector->rx.ring) q_vector->eitr = (adapter->eitr_param >> 1); - else if (q_vector->rxr_count) + else if (q_vector->rx.ring) /* rx only */ q_vector->eitr = adapter->eitr_param; @@ -752,40 +725,32 @@ static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector) struct ixgbevf_adapter *adapter = q_vector->adapter; u32 new_itr; u8 current_itr, ret_itr; - int i, r_idx, v_idx = q_vector->v_idx; + int v_idx = q_vector->v_idx; struct ixgbevf_ring *rx_ring, *tx_ring; - r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); - for (i = 0; i < q_vector->txr_count; i++) { - tx_ring = &(adapter->tx_ring[r_idx]); + ixgbevf_for_each_ring(tx_ring, q_vector->tx) { ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr, - q_vector->tx_itr, + q_vector->tx.itr, tx_ring->total_packets, tx_ring->total_bytes); /* if the result for this queue would decrease interrupt * rate for this vector then use that result */ - q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? - q_vector->tx_itr - 1 : ret_itr); - r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, - r_idx + 1); + q_vector->tx.itr = ((q_vector->tx.itr > ret_itr) ? + q_vector->tx.itr - 1 : ret_itr); } - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - for (i = 0; i < q_vector->rxr_count; i++) { - rx_ring = &(adapter->rx_ring[r_idx]); + ixgbevf_for_each_ring(rx_ring, q_vector->rx) { ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr, - q_vector->rx_itr, + q_vector->rx.itr, rx_ring->total_packets, rx_ring->total_bytes); /* if the result for this queue would decrease interrupt * rate for this vector then use that result */ - q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? 
- q_vector->rx_itr - 1 : ret_itr); - r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, - r_idx + 1); + q_vector->rx.itr = ((q_vector->rx.itr > ret_itr) ? + q_vector->rx.itr - 1 : ret_itr); } - current_itr = max(q_vector->rx_itr, q_vector->tx_itr); + current_itr = max(q_vector->rx.itr, q_vector->tx.itr); switch (current_itr) { /* counts and packets in update_itr are dependent on these numbers */ @@ -861,19 +826,14 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data) struct ixgbevf_q_vector *q_vector = data; struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_ring *tx_ring; - int i, r_idx; - if (!q_vector->txr_count) + if (!q_vector->tx.ring) return IRQ_HANDLED; - r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); - for (i = 0; i < q_vector->txr_count; i++) { - tx_ring = &(adapter->tx_ring[r_idx]); + ixgbevf_for_each_ring(tx_ring, q_vector->tx) { tx_ring->total_bytes = 0; tx_ring->total_packets = 0; ixgbevf_clean_tx_irq(adapter, tx_ring); - r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, - r_idx + 1); } if (adapter->itr_setting & 1) @@ -893,25 +853,17 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data) struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbe_hw *hw = &adapter->hw; struct ixgbevf_ring *rx_ring; - int r_idx; - int i; - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - for (i = 0; i < q_vector->rxr_count; i++) { - rx_ring = &(adapter->rx_ring[r_idx]); + ixgbevf_for_each_ring(rx_ring, q_vector->rx) { rx_ring->total_bytes = 0; rx_ring->total_packets = 0; - r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, - r_idx + 1); } - if (!q_vector->rxr_count) + if (!q_vector->rx.ring) return IRQ_HANDLED; - r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); - rx_ring = &(adapter->rx_ring[r_idx]); /* disable interrupts on this vector only */ - IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, 1 << q_vector->v_idx); napi_schedule(&q_vector->napi); @@ -931,8 +883,9 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, { struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; - set_bit(r_idx, q_vector->rxr_idx); - q_vector->rxr_count++; + a->rx_ring[r_idx].next = q_vector->rx.ring; + q_vector->rx.ring = &a->rx_ring[r_idx]; + q_vector->rx.count++; a->rx_ring[r_idx].v_idx = 1 << v_idx; } @@ -941,8 +894,9 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, { struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; - set_bit(t_idx, q_vector->txr_idx); - q_vector->txr_count++; + a->tx_ring[t_idx].next = q_vector->tx.ring; + q_vector->tx.ring = &a->tx_ring[t_idx]; + q_vector->tx.count++; a->tx_ring[t_idx].v_idx = 1 << v_idx; } @@ -1026,10 +980,10 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) /* Decrement for Other and TCP Timer vectors */ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; -#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \ - ? &ixgbevf_msix_clean_many : \ - (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \ - (_v)->txr_count ? &ixgbevf_msix_clean_tx : \ +#define SET_HANDLER(_v) (((_v)->rx.ring && (_v)->tx.ring) \ + ? &ixgbevf_msix_clean_many : \ + (_v)->rx.ring ? &ixgbevf_msix_clean_rx : \ + (_v)->tx.ring ? 
&ixgbevf_msix_clean_tx : \ NULL) for (vector = 0; vector < q_vectors; vector++) { handler = SET_HANDLER(adapter->q_vector[vector]); @@ -1085,10 +1039,10 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) for (i = 0; i < q_vectors; i++) { struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; - bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); - bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); - q_vector->rxr_count = 0; - q_vector->txr_count = 0; + q_vector->rx.ring = NULL; + q_vector->tx.ring = NULL; + q_vector->rx.count = 0; + q_vector->tx.count = 0; q_vector->eitr = adapter->eitr_param; } } @@ -1365,10 +1319,10 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { struct napi_struct *napi; q_vector = adapter->q_vector[q_idx]; - if (!q_vector->rxr_count) + if (!q_vector->rx.ring) continue; napi = &q_vector->napi; - if (q_vector->rxr_count > 1) + if (q_vector->rx.count > 1) napi->poll = &ixgbevf_clean_rxonly_many; napi_enable(napi); @@ -1383,7 +1337,7 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; - if (!q_vector->rxr_count) + if (!q_vector->rx.ring) continue; napi_disable(&q_vector->napi); } @@ -2144,7 +2098,7 @@ static void ixgbevf_watchdog(unsigned long data) /* get one bit for every active tx/rx interrupt vector */ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { struct ixgbevf_q_vector *qv = adapter->q_vector[i]; - if (qv->rxr_count || qv->txr_count) + if (qv->rx.ring || qv->tx.ring) eics |= (1 << i); } -- cgit v1.2.3 From fa71ae270a9af0ee3a1bd605d008f750371cfc1f Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:32:50 +0000 Subject: ixgbevf: Move Tx clean-up into NAPI context Currently the VF driver is processing all of the transmits in interrupt context. This can be messy since the Rx is all handled in NAPI and this may result in interrupts being disabled. In order to resolve this move all of the Tx packet processing into NAPI and combine all of the interrupt and polling routines into just a pair of functions. 
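The shape of that change is easier to see in isolation. The following is a minimal, illustrative-only sketch of a combined poll routine in the style this commit introduces; the example_* struct, helpers, and stub bodies are placeholders invented for the example and are not the driver's actual code.

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Placeholder vector type; the real driver hangs ring lists off its
 * q_vector rather than keeping a simple ring count. */
struct example_q_vector {
	struct napi_struct napi;
	int num_rx_rings;
};

/* Stubs standing in for the real Tx/Rx clean-up and IRQ re-enable. */
static bool example_clean_tx(struct example_q_vector *qv) { return true; }
static bool example_clean_rx(struct example_q_vector *qv, int budget) { return true; }
static void example_irq_enable_vector(struct example_q_vector *qv) { }

/* One poll routine handles both directions, so Tx completion work runs
 * in NAPI context instead of in the hard interrupt handler. */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_q_vector *qv =
		container_of(napi, struct example_q_vector, napi);
	bool clean_complete;
	int per_ring_budget;

	clean_complete = example_clean_tx(qv);

	/* share the budget across Rx rings, never dropping below 1 */
	per_ring_budget = max(budget / max(qv->num_rx_rings, 1), 1);
	clean_complete &= example_clean_rx(qv, per_ring_budget);

	if (!clean_complete)
		return budget;		/* more work pending: keep polling */

	napi_complete(napi);		/* all done: leave polling mode */
	example_irq_enable_vector(qv);	/* re-arm just this vector */
	return 0;
}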
Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 2 - drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 12 +- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 257 +++++++--------------- 3 files changed, 89 insertions(+), 182 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index af9c6570f4b5..15947c9122e1 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -364,7 +364,6 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, } goto err_tx_ring_setup; } - tx_ring[i].v_idx = adapter->tx_ring[i].v_idx; } memcpy(rx_ring, adapter->rx_ring, @@ -380,7 +379,6 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, } goto err_rx_ring_setup; } - rx_ring[i].v_idx = adapter->rx_ring[i].v_idx; } /* diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 8cae4ff95315..8bedd0fef0b7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -81,11 +81,6 @@ struct ixgbevf_ring { * offset associated with this ring, which is different * for DCB and RSS modes */ - u64 v_idx; /* maps directly to the index for this ring in the hardware - * vector array, can also be used for finding the bit in EICR - * and friends that represents the vector for this ring */ - - u16 work_limit; /* max work per interrupt */ u16 rx_buf_len; }; @@ -140,6 +135,7 @@ struct ixgbevf_q_vector { struct ixgbevf_ring_container rx, tx; u32 eitr; int v_idx; /* vector index in list */ + char name[IFNAMSIZ + 9]; }; /* Helper macros to switch between ints/sec and what the register uses. 
@@ -167,9 +163,8 @@ struct ixgbevf_q_vector { #define NON_Q_VECTORS (OTHER_VECTOR) #define MAX_MSIX_Q_VECTORS 2 -#define MAX_MSIX_COUNT 2 -#define MIN_MSIX_Q_VECTORS 2 +#define MIN_MSIX_Q_VECTORS 1 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) /* board specific private data structure */ @@ -179,7 +174,6 @@ struct ixgbevf_adapter { u16 bd_number; struct work_struct reset_task; struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; - char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; /* Interrupt Throttle Rate */ u32 itr_setting; @@ -187,6 +181,7 @@ struct ixgbevf_adapter { /* TX */ struct ixgbevf_ring *tx_ring; /* One per active queue */ int num_tx_queues; + u16 tx_itr_setting; u64 restart_queue; u64 hw_csum_tx_good; u64 lsc_int; @@ -197,6 +192,7 @@ struct ixgbevf_adapter { /* RX */ struct ixgbevf_ring *rx_ring; /* One per active queue */ int num_rx_queues; + u16 rx_itr_setting; u64 hw_csum_rx_error; u64 hw_rx_no_dma_resources; u64 hw_csum_rx_good; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 744a02697ef6..628643b7f286 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -97,7 +97,7 @@ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); /* forward decls */ -static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector); +static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, u32 itr_reg); @@ -182,14 +182,14 @@ static void ixgbevf_tx_timeout(struct net_device *netdev); /** * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes - * @adapter: board private structure + * @q_vector: board private structure * @tx_ring: tx ring to clean **/ -static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter, +static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *tx_ring) { + struct ixgbevf_adapter *adapter = q_vector->adapter; struct net_device *netdev = adapter->netdev; - struct ixgbe_hw *hw = &adapter->hw; union ixgbe_adv_tx_desc *tx_desc, *eop_desc; struct ixgbevf_tx_buffer *tx_buffer_info; unsigned int i, eop, count = 0; @@ -200,7 +200,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter, eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && - (count < tx_ring->work_limit)) { + (count < tx_ring->count)) { bool cleaned = false; rmb(); /* read buffer_info after eop_desc */ /* eop could change between read and DD-check */ @@ -256,18 +256,12 @@ cont_loop: } } - /* re-arm the interrupt */ - if ((count >= tx_ring->work_limit) && - (!test_bit(__IXGBEVF_DOWN, &adapter->state))) { - IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx); - } - u64_stats_update_begin(&tx_ring->syncp); tx_ring->total_bytes += total_bytes; tx_ring->total_packets += total_packets; u64_stats_update_end(&tx_ring->syncp); - return count < tx_ring->work_limit; + return count < tx_ring->count; } /** @@ -402,7 +396,7 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *rx_ring, - int *work_done, int work_to_do) + int budget) { struct ixgbevf_adapter *adapter = q_vector->adapter; struct pci_dev *pdev = adapter->pdev; @@ -411,7 +405,6 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb; unsigned 
int i; u32 len, staterr; - bool cleaned = false; int cleaned_count = 0; unsigned int total_rx_bytes = 0, total_rx_packets = 0; @@ -421,13 +414,12 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, rx_buffer_info = &rx_ring->rx_buffer_info[i]; while (staterr & IXGBE_RXD_STAT_DD) { - if (*work_done >= work_to_do) + if (!budget) break; - (*work_done)++; + budget--; rmb(); /* read descriptor and rx_buffer_info after status DD */ len = le16_to_cpu(rx_desc->wb.upper.length); - cleaned = true; skb = rx_buffer_info->skb; prefetch(skb->data - NET_IP_ALIGN); rx_buffer_info->skb = NULL; @@ -510,74 +502,52 @@ next_desc: rx_ring->total_bytes += total_rx_bytes; u64_stats_update_end(&rx_ring->syncp); - return cleaned; -} - -/** - * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine - * @napi: napi struct with our devices info in it - * @budget: amount of work driver is allowed to do this pass, in packets - * - * This function is optimized for cleaning one queue only on a single - * q_vector!!! - **/ -static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget) -{ - struct ixgbevf_q_vector *q_vector = - container_of(napi, struct ixgbevf_q_vector, napi); - struct ixgbevf_adapter *adapter = q_vector->adapter; - int work_done = 0; - - ixgbevf_clean_rx_irq(q_vector, q_vector->rx.ring, &work_done, budget); - - /* If all Rx work done, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - if (adapter->itr_setting & 1) - ixgbevf_set_itr_msix(q_vector); - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) - ixgbevf_irq_enable_queues(adapter, - 1 << q_vector->v_idx); - } - - return work_done; + return !!budget; } /** - * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine + * ixgbevf_poll - NAPI polling calback * @napi: napi struct with our devices info in it * @budget: amount of work driver is allowed to do this pass, in packets * - * This function will clean more than one rx queue associated with a + * This function will clean more than one or more rings associated with a * q_vector. 
**/ -static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget) +static int ixgbevf_poll(struct napi_struct *napi, int budget) { struct ixgbevf_q_vector *q_vector = container_of(napi, struct ixgbevf_q_vector, napi); struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbevf_ring *rx_ring; - int work_done = 0; + struct ixgbevf_ring *ring; + int per_ring_budget; + bool clean_complete = true; + + ixgbevf_for_each_ring(ring, q_vector->tx) + clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring); /* attempt to distribute budget to each queue fairly, but don't allow * the budget to go below 1 because we'll exit polling */ - budget /= (q_vector->rx.count ?: 1); - budget = max(budget, 1); - - ixgbevf_for_each_ring(rx_ring, q_vector->rx) - ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget); - - /* If all Rx work done, exit the polling mode */ - if (work_done < budget) { - napi_complete(napi); - if (adapter->itr_setting & 1) - ixgbevf_set_itr_msix(q_vector); - if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) - ixgbevf_irq_enable_queues(adapter, - 1 << q_vector->v_idx); - } + if (q_vector->rx.count > 1) + per_ring_budget = max(budget/q_vector->rx.count, 1); + else + per_ring_budget = budget; + + ixgbevf_for_each_ring(ring, q_vector->rx) + clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring, + per_ring_budget); + + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + /* all work done, exit the polling mode */ + napi_complete(napi); + if (adapter->rx_itr_setting & 1) + ixgbevf_set_itr(q_vector); + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + ixgbevf_irq_enable_queues(adapter, + 1 << q_vector->v_idx); - return work_done; + return 0; } @@ -720,7 +690,7 @@ static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); } -static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector) +static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) { struct ixgbevf_adapter *adapter = q_vector->adapter; u32 new_itr; @@ -780,8 +750,7 @@ static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector) static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) { - struct net_device *netdev = data; - struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; u32 eicr; u32 msg; @@ -821,59 +790,22 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) return IRQ_HANDLED; } -static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data) -{ - struct ixgbevf_q_vector *q_vector = data; - struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbevf_ring *tx_ring; - - if (!q_vector->tx.ring) - return IRQ_HANDLED; - - ixgbevf_for_each_ring(tx_ring, q_vector->tx) { - tx_ring->total_bytes = 0; - tx_ring->total_packets = 0; - ixgbevf_clean_tx_irq(adapter, tx_ring); - } - - if (adapter->itr_setting & 1) - ixgbevf_set_itr_msix(q_vector); - - return IRQ_HANDLED; -} /** - * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues) + * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues) * @irq: unused * @data: pointer to our q_vector struct for this interrupt vector **/ -static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data) +static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) { struct ixgbevf_q_vector *q_vector = data; struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbe_hw *hw = &adapter->hw; - struct ixgbevf_ring 
*rx_ring; - - ixgbevf_for_each_ring(rx_ring, q_vector->rx) { - rx_ring->total_bytes = 0; - rx_ring->total_packets = 0; - } - - if (!q_vector->rx.ring) - return IRQ_HANDLED; /* disable interrupts on this vector only */ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, 1 << q_vector->v_idx); - napi_schedule(&q_vector->napi); - - - return IRQ_HANDLED; -} - -static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data) -{ - ixgbevf_msix_clean_rx(irq, data); - ixgbevf_msix_clean_tx(irq, data); + if (q_vector->rx.ring || q_vector->tx.ring) + napi_schedule(&q_vector->napi); return IRQ_HANDLED; } @@ -886,7 +818,6 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, a->rx_ring[r_idx].next = q_vector->rx.ring; q_vector->rx.ring = &a->rx_ring[r_idx]; q_vector->rx.count++; - a->rx_ring[r_idx].v_idx = 1 << v_idx; } static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, @@ -897,7 +828,6 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, a->tx_ring[t_idx].next = q_vector->tx.ring; q_vector->tx.ring = &a->tx_ring[t_idx]; q_vector->tx.count++; - a->tx_ring[t_idx].v_idx = 1 << v_idx; } /** @@ -973,37 +903,30 @@ out: static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; - irqreturn_t (*handler)(int, void *); - int i, vector, q_vectors, err; + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + int vector, err; int ri = 0, ti = 0; - /* Decrement for Other and TCP Timer vectors */ - q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - -#define SET_HANDLER(_v) (((_v)->rx.ring && (_v)->tx.ring) \ - ? &ixgbevf_msix_clean_many : \ - (_v)->rx.ring ? &ixgbevf_msix_clean_rx : \ - (_v)->tx.ring ? &ixgbevf_msix_clean_tx : \ - NULL) for (vector = 0; vector < q_vectors; vector++) { - handler = SET_HANDLER(adapter->q_vector[vector]); - - if (handler == &ixgbevf_msix_clean_rx) { - sprintf(adapter->name[vector], "%s-%s-%d", - netdev->name, "rx", ri++); - } else if (handler == &ixgbevf_msix_clean_tx) { - sprintf(adapter->name[vector], "%s-%s-%d", - netdev->name, "tx", ti++); - } else if (handler == &ixgbevf_msix_clean_many) { - sprintf(adapter->name[vector], "%s-%s-%d", - netdev->name, "TxRx", vector); + struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "TxRx", ri++); + ti++; + } else if (q_vector->rx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "rx", ri++); + } else if (q_vector->tx.ring) { + snprintf(q_vector->name, sizeof(q_vector->name) - 1, + "%s-%s-%d", netdev->name, "tx", ti++); } else { /* skip this unused q_vector */ continue; } - err = request_irq(adapter->msix_entries[vector].vector, - handler, 0, adapter->name[vector], - adapter->q_vector[vector]); + err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, + q_vector->name, q_vector); if (err) { hw_dbg(&adapter->hw, "request_irq failed for MSIX interrupt " @@ -1012,9 +935,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) } } - sprintf(adapter->name[vector], "%s:mbx", netdev->name); err = request_irq(adapter->msix_entries[vector].vector, - &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev); + &ixgbevf_msix_mbx, 0, netdev->name, adapter); if (err) { hw_dbg(&adapter->hw, "request_irq for msix_mbx failed: %d\n", err); @@ -1024,9 +946,11 @@ static int 
ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) return 0; free_queue_irqs: - for (i = vector - 1; i >= 0; i--) - free_irq(adapter->msix_entries[--vector].vector, - &(adapter->q_vector[i])); + while (vector) { + vector--; + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } pci_disable_msix(adapter->pdev); kfree(adapter->msix_entries); adapter->msix_entries = NULL; @@ -1069,17 +993,20 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) { - struct net_device *netdev = adapter->netdev; int i, q_vectors; q_vectors = adapter->num_msix_vectors; - i = q_vectors - 1; - free_irq(adapter->msix_entries[i].vector, netdev); + free_irq(adapter->msix_entries[i].vector, adapter); i--; for (; i >= 0; i--) { + /* free only the irqs that were actually requested */ + if (!adapter->q_vector[i]->rx.ring && + !adapter->q_vector[i]->tx.ring) + continue; + free_irq(adapter->msix_entries[i].vector, adapter->q_vector[i]); } @@ -1317,15 +1244,8 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; for (q_idx = 0; q_idx < q_vectors; q_idx++) { - struct napi_struct *napi; q_vector = adapter->q_vector[q_idx]; - if (!q_vector->rx.ring) - continue; - napi = &q_vector->napi; - if (q_vector->rx.count > 1) - napi->poll = &ixgbevf_clean_rxonly_many; - - napi_enable(napi); + napi_enable(&q_vector->napi); } } @@ -1337,8 +1257,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) for (q_idx = 0; q_idx < q_vectors; q_idx++) { q_vector = adapter->q_vector[q_idx]; - if (!q_vector->rx.ring) - continue; napi_disable(&q_vector->napi); } } @@ -1703,10 +1621,9 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, { int err, vector_threshold; - /* We'll want at least 3 (vector_threshold): - * 1) TxQ[0] Cleanup - * 2) RxQ[0] Cleanup - * 3) Other (Link Status Change, etc.) + /* We'll want at least 2 (vector_threshold): + * 1) TxQ[0] + RxQ[0] handler + * 2) Other (Link Status Change, etc.) */ vector_threshold = MIN_MSIX_COUNT; @@ -1821,10 +1738,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) * It's easy to be greedy for MSI-X vectors, but it really * doesn't do us much good if we have a lot more vectors * than CPU's. So let's be conservative and only ask for - * (roughly) twice the number of vectors as there are CPU's. + * (roughly) the same number of vectors as there are CPU's. + * The default is to use pairs of vectors. */ - v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, - (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; + v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); + v_budget = min_t(int, v_budget, num_online_cpus()); + v_budget += NON_Q_VECTORS; /* A failure in MSI-X entry allocation isn't fatal, but it does * mean we disable MSI-X capabilities of the adapter. 
*/ @@ -1855,12 +1774,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) { int q_idx, num_q_vectors; struct ixgbevf_q_vector *q_vector; - int napi_vectors; - int (*poll)(struct napi_struct *, int); num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; - napi_vectors = adapter->num_rx_queues; - poll = &ixgbevf_clean_rxonly; for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); @@ -1869,9 +1784,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) q_vector->adapter = adapter; q_vector->v_idx = q_idx; q_vector->eitr = adapter->eitr_param; - if (q_idx < napi_vectors) - netif_napi_add(adapter->netdev, &q_vector->napi, - (*poll), 64); + netif_napi_add(adapter->netdev, &q_vector->napi, + ixgbevf_poll, 64); adapter->q_vector[q_idx] = q_vector; } @@ -2272,7 +2186,6 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; - tx_ring->work_limit = tx_ring->count; return 0; err: -- cgit v1.2.3 From 5f3600ebe252aa5fe782e9f9115c66c639f62ac0 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:32:55 +0000 Subject: ixgbevf: Use igb style interrupt masks instead of ixgbe style The interrupt registers accessed in ixgbevf are more similar to the igb style registers than they are to the ixgbe style registers. As such we would be better off setting up the code for the EICS, EIMS, EICS, EIAM, and EIAC like we do in igb instead of ixgbe. Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/defines.h | 27 +-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 29 +++- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 195 +++++++++------------- 3 files changed, 105 insertions(+), 146 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index e09a6cc633bb..10cede503fe8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -264,32 +264,9 @@ struct ixgbe_adv_tx_context_desc { /* Interrupt register bitmasks */ -/* Extended Interrupt Cause Read */ -#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ -#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ -#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ - -/* Extended Interrupt Cause Set */ -#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -/* Extended Interrupt Mask Set */ -#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -/* Extended Interrupt Mask Clear */ -#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ -#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ -#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ - -#define IXGBE_EIMS_ENABLE_MASK ( \ - IXGBE_EIMS_RTX_QUEUE | \ - IXGBE_EIMS_MAILBOX | \ - IXGBE_EIMS_OTHER) - #define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_MAX_EITR 0x00000FF8 +#define IXGBE_MIN_EITR 8 /* Error Codes */ #define IXGBE_ERR_INVALID_MAC_ADDR -1 diff --git 
a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 8bedd0fef0b7..f92daca249f8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -118,6 +118,8 @@ struct ixgbevf_ring { struct ixgbevf_ring_container { struct ixgbevf_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ u8 count; /* total number of rings in vector */ u8 itr; /* current ITR setting for ring */ }; @@ -131,13 +133,25 @@ struct ixgbevf_ring_container { */ struct ixgbevf_q_vector { struct ixgbevf_adapter *adapter; + u16 v_idx; /* index of q_vector within array, also used for + * finding the bit in EICR and friends that + * represents the vector for this ring */ + u16 itr; /* Interrupt throttle rate written to EITR */ struct napi_struct napi; struct ixgbevf_ring_container rx, tx; - u32 eitr; - int v_idx; /* vector index in list */ char name[IFNAMSIZ + 9]; }; +/* + * microsecond values for various ITR rates shifted by 2 to fit itr register + * with the first 3 bits reserved 0 + */ +#define IXGBE_MIN_RSC_ITR 24 +#define IXGBE_100K_ITR 40 +#define IXGBE_20K_ITR 200 +#define IXGBE_10K_ITR 400 +#define IXGBE_8K_ITR 500 + /* Helper macros to switch between ints/sec and what the register uses. * And yes, it's the same math going both ways. The lowest value * supported by all of the ixgbe hardware is 8. @@ -176,12 +190,16 @@ struct ixgbevf_adapter { struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; /* Interrupt Throttle Rate */ - u32 itr_setting; + u16 rx_itr_setting; + u16 tx_itr_setting; + + /* interrupt masks */ + u32 eims_enable_mask; + u32 eims_other; /* TX */ struct ixgbevf_ring *tx_ring; /* One per active queue */ int num_tx_queues; - u16 tx_itr_setting; u64 restart_queue; u64 hw_csum_tx_good; u64 lsc_int; @@ -192,7 +210,6 @@ struct ixgbevf_adapter { /* RX */ struct ixgbevf_ring *rx_ring; /* One per active queue */ int num_rx_queues; - u16 rx_itr_setting; u64 hw_csum_rx_error; u64 hw_rx_no_dma_resources; u64 hw_csum_rx_good; @@ -265,7 +282,7 @@ extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); - +void ixgbevf_write_eitr(struct ixgbevf_q_vector *); extern int ethtool_ioctl(struct ifreq *ifr); extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 628643b7f286..8e022c6f4b90 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -98,8 +98,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); /* forward decls */ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); -static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, - u32 itr_reg); static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, struct ixgbevf_ring *rx_ring, @@ -385,13 +383,11 @@ no_buffers: } static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, - u64 qmask) + u32 qmask) { - u32 mask; struct ixgbe_hw *hw = &adapter->hw; - mask = (qmask & 0xFFFFFFFF); - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask); } static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector 
*q_vector, @@ -561,11 +557,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) { struct ixgbevf_q_vector *q_vector; - struct ixgbe_hw *hw = &adapter->hw; int q_vectors, v_idx; - u32 mask; q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; + adapter->eims_enable_mask = 0; /* * Populate the IVAR table and set the ITR values to the @@ -581,22 +576,30 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) ixgbevf_for_each_ring(ring, q_vector->tx) ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); - /* if this is a tx only vector halve the interrupt rate */ - if (q_vector->tx.ring && !q_vector->rx.ring) - q_vector->eitr = (adapter->eitr_param >> 1); - else if (q_vector->rx.ring) - /* rx only */ - q_vector->eitr = adapter->eitr_param; + if (q_vector->tx.ring && !q_vector->rx.ring) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) + q_vector->itr = IXGBE_10K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { + /* rx or rx/tx vector */ + if (adapter->rx_itr_setting == 1) + q_vector->itr = IXGBE_20K_ITR; + else + q_vector->itr = adapter->rx_itr_setting; + } + + /* add q_vector eims value to global eims_enable_mask */ + adapter->eims_enable_mask |= 1 << v_idx; - ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr); + ixgbevf_write_eitr(q_vector); } ixgbevf_set_ivar(adapter, -1, 1, v_idx); - - /* set up to autoclear timer, and the vectors */ - mask = IXGBE_EIMS_ENABLE_MASK; - mask &= ~IXGBE_EIMS_OTHER; - IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); + /* setup eims_other and add value to global eims_enable_mask */ + adapter->eims_other = 1 << v_idx; + adapter->eims_enable_mask |= adapter->eims_other; } enum latency_range { @@ -608,11 +611,8 @@ enum latency_range { /** * ixgbevf_update_itr - update the dynamic ITR value based on statistics - * @adapter: pointer to adapter - * @eitr: eitr setting (ints per sec) to give last timeslice - * @itr_setting: current throttle rate in ints/second - * @packets: the number of packets during this measurement interval - * @bytes: the number of bytes during this measurement interval + * @q_vector: structure containing interrupt and ring information + * @ring_container: structure containing ring performance data * * Stores a new ITR value based on packets and byte * counts during the last interrupt. The advantage of per interrupt @@ -622,17 +622,17 @@ enum latency_range { * on testing data as well as attempting to minimize response time * while increasing bulk throughput. **/ -static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter, - u32 eitr, u8 itr_setting, - int packets, int bytes) +static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, + struct ixgbevf_ring_container *ring_container) { - unsigned int retval = itr_setting; + int bytes = ring_container->total_bytes; + int packets = ring_container->total_packets; u32 timepassed_us; u64 bytes_perint; + u8 itr_setting = ring_container->itr; if (packets == 0) - goto update_itr_done; - + return; /* simple throttlerate management * 0-20MB/s lowest (100000 ints/s) @@ -640,46 +640,48 @@ static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter, * 100-1249MB/s bulk (8000 ints/s) */ /* what was last interrupt timeslice? 
*/ - timepassed_us = 1000000/eitr; + timepassed_us = q_vector->itr >> 2; bytes_perint = bytes / timepassed_us; /* bytes/usec */ switch (itr_setting) { case lowest_latency: if (bytes_perint > 10) - retval = low_latency; + itr_setting = low_latency; break; case low_latency: if (bytes_perint > 20) - retval = bulk_latency; + itr_setting = bulk_latency; else if (bytes_perint <= 10) - retval = lowest_latency; + itr_setting = lowest_latency; break; case bulk_latency: if (bytes_perint <= 20) - retval = low_latency; + itr_setting = low_latency; break; } -update_itr_done: - return retval; + /* clear work counters since we have the values we need */ + ring_container->total_bytes = 0; + ring_container->total_packets = 0; + + /* write updated itr to ring container */ + ring_container->itr = itr_setting; } /** * ixgbevf_write_eitr - write VTEITR register in hardware specific way - * @adapter: pointer to adapter struct - * @v_idx: vector index into q_vector array - * @itr_reg: new value to be written in *register* format, not ints/s + * @q_vector: structure containing interrupt and ring information * * This function is made to be called by ethtool and by the driver * when it needs to update VTEITR registers at runtime. Hardware * specific quirks/differences are taken care of here. */ -static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, - u32 itr_reg) +void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) { + struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbe_hw *hw = &adapter->hw; - - itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg); + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; /* * set the WDIS bit to not clear the timer bits and cause an @@ -692,59 +694,37 @@ static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) { - struct ixgbevf_adapter *adapter = q_vector->adapter; - u32 new_itr; - u8 current_itr, ret_itr; - int v_idx = q_vector->v_idx; - struct ixgbevf_ring *rx_ring, *tx_ring; - - ixgbevf_for_each_ring(tx_ring, q_vector->tx) { - ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr, - q_vector->tx.itr, - tx_ring->total_packets, - tx_ring->total_bytes); - /* if the result for this queue would decrease interrupt - * rate for this vector then use that result */ - q_vector->tx.itr = ((q_vector->tx.itr > ret_itr) ? - q_vector->tx.itr - 1 : ret_itr); - } - - ixgbevf_for_each_ring(rx_ring, q_vector->rx) { - ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr, - q_vector->rx.itr, - rx_ring->total_packets, - rx_ring->total_bytes); - /* if the result for this queue would decrease interrupt - * rate for this vector then use that result */ - q_vector->rx.itr = ((q_vector->rx.itr > ret_itr) ? 
- q_vector->rx.itr - 1 : ret_itr); - } + u32 new_itr = q_vector->itr; + u8 current_itr; + + ixgbevf_update_itr(q_vector, &q_vector->tx); + ixgbevf_update_itr(q_vector, &q_vector->rx); current_itr = max(q_vector->rx.itr, q_vector->tx.itr); switch (current_itr) { /* counts and packets in update_itr are dependent on these numbers */ case lowest_latency: - new_itr = 100000; + new_itr = IXGBE_100K_ITR; break; case low_latency: - new_itr = 20000; /* aka hwitr = ~200 */ + new_itr = IXGBE_20K_ITR; break; case bulk_latency: default: - new_itr = 8000; + new_itr = IXGBE_8K_ITR; break; } - if (new_itr != q_vector->eitr) { - u32 itr_reg; - - /* save the algorithm value here, not the smoothed one */ - q_vector->eitr = new_itr; + if (new_itr != q_vector->itr) { /* do an exponential smoothing */ - new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); - itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); - ixgbevf_write_eitr(adapter, v_idx, itr_reg); + new_itr = (10 * new_itr * q_vector->itr) / + ((9 * new_itr) + q_vector->itr); + + /* save the algorithm value here */ + q_vector->itr = new_itr; + + ixgbevf_write_eitr(q_vector); } } @@ -752,13 +732,9 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) { struct ixgbevf_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; - u32 eicr; u32 msg; bool got_ack = false; - eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS); - IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr); - if (!hw->mbx.ops.check_for_ack(hw)) got_ack = true; @@ -787,6 +763,8 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) if (got_ack) hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); + return IRQ_HANDLED; } @@ -799,11 +777,8 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data) { struct ixgbevf_q_vector *q_vector = data; - struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbe_hw *hw = &adapter->hw; - /* disable interrupts on this vector only */ - IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, 1 << q_vector->v_idx); + /* EIAM disabled interrupts (on this vector) for us */ if (q_vector->rx.ring || q_vector->tx.ring) napi_schedule(&q_vector->napi); @@ -967,7 +942,6 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) q_vector->tx.ring = NULL; q_vector->rx.count = 0; q_vector->tx.count = 0; - q_vector->eitr = adapter->eitr_param; } } @@ -1020,10 +994,12 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) **/ static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) { - int i; struct ixgbe_hw *hw = &adapter->hw; + int i; + IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0); IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); + IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0); IXGBE_WRITE_FLUSH(hw); @@ -1035,23 +1011,13 @@ static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) * ixgbevf_irq_enable - Enable default interrupt generation settings * @adapter: board private structure **/ -static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter, - bool queues, bool flush) +static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 mask; - u64 qmask; - - mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); - qmask = ~0; - - IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); - - if (queues) - ixgbevf_irq_enable_queues(adapter, qmask); - if (flush) - IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 
adapter->eims_enable_mask); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); } /** @@ -1414,7 +1380,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter) /* clear any pending interrupts, may auto mask */ IXGBE_READ_REG(hw, IXGBE_VTEICR); - ixgbevf_irq_enable(adapter, true, true); + ixgbevf_irq_enable(adapter); } /** @@ -1783,7 +1749,6 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) goto err_out; q_vector->adapter = adapter; q_vector->v_idx = q_idx; - q_vector->eitr = adapter->eitr_param; netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64); adapter->q_vector[q_idx] = q_vector; @@ -1932,8 +1897,8 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) } /* Enable dynamic interrupt throttling rates */ - adapter->eitr_param = 20000; - adapter->itr_setting = 1; + adapter->rx_itr_setting = 1; + adapter->tx_itr_setting = 1; /* set default ring sizes */ adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; @@ -1998,7 +1963,7 @@ static void ixgbevf_watchdog(unsigned long data) { struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; struct ixgbe_hw *hw = &adapter->hw; - u64 eics = 0; + u32 eics = 0; int i; /* @@ -2013,10 +1978,10 @@ static void ixgbevf_watchdog(unsigned long data) for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { struct ixgbevf_q_vector *qv = adapter->q_vector[i]; if (qv->rx.ring || qv->tx.ring) - eics |= (1 << i); + eics |= 1 << i; } - IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics); + IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics); watchdog_short_circuit: schedule_work(&adapter->watchdog_task); @@ -2389,7 +2354,7 @@ static int ixgbevf_open(struct net_device *netdev) if (err) goto err_req_irq; - ixgbevf_irq_enable(adapter, true, true); + ixgbevf_irq_enable(adapter); return 0; -- cgit v1.2.3 From befa2af778b5cf5737eea3e3ff370c4f46d3f131 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:30:38 +0000 Subject: ixgbe: Ping the VFs on link status change to trigger link change When the link status changes on the PF we need to notify the VFs. In order to do this we should ping all of the VFs in order to trigger a link status change on them as well. This fixes issues in which the PF would reset, but the VF didn't because the NAK flag was not set in the VF mailbox. 
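The "ping" referred to here is simply a control message posted to every active VF's mailbox, so each VF takes its mailbox interrupt and re-reads link state. A rough sketch of that idea follows; the example_* names, types, and message value are invented for illustration, and the real helper is ixgbe's own SR-IOV code rather than this sketch.

#include <linux/types.h>

#define EXAMPLE_PF_CONTROL_MSG	0x0100	/* illustrative "PF has news" marker */

/* Placeholder PF context for illustration only. */
struct example_pf {
	unsigned int num_vfs;
};

/* Hypothetical per-VF mailbox write. */
static void example_write_mbx(struct example_pf *pf, unsigned int vf,
			      u32 *msg, u16 len)
{
}

static void example_ping_all_vfs(struct example_pf *pf)
{
	unsigned int vf;

	for (vf = 0; vf < pf->num_vfs; vf++) {
		u32 msg = EXAMPLE_PF_CONTROL_MSG;

		/* posting to the VF mailbox raises its mailbox interrupt,
		 * prompting the VF driver to re-check link status */
		example_write_mbx(pf, vf, &msg, 1);
	}
}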
Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ee230f533ee3..17f46f0079a5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5390,6 +5390,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) netif_carrier_on(netdev); ixgbe_check_vf_rate_limit(adapter); + + /* ping all the active vfs to let them know link has changed */ + ixgbe_ping_all_vfs(adapter); } /** @@ -5419,6 +5422,9 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) e_info(drv, "NIC Link is Down\n"); netif_carrier_off(netdev); + + /* ping all the active vfs to let them know link has changed */ + ixgbe_ping_all_vfs(adapter); } /** -- cgit v1.2.3 From de3d5b94bc891c405b8d91d3c112681a0654613f Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 18 May 2012 06:33:47 +0000 Subject: ixgbe: Handle failures in the ixgbe_setup_rx/tx_resources calls Previously we were exiting without cleaning up the memory internally on the ixgbe_setup_rx_resources and ixgbe_setup_tx_resources calls. Instead of forcing the caller to clean things up for us we should instead just unwind the rings and free the memory as we go. This way we can more gracefully clean up the rings in the event of an allocation failure. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 17f46f0079a5..373342f0b807 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4549,10 +4549,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) err = ixgbe_setup_tx_resources(adapter->tx_ring[i]); if (!err) continue; + e_err(probe, "Allocation for Tx Queue %u failed\n", i); - break; + goto err_setup_tx; } + return 0; +err_setup_tx: + /* rewind the index freeing the rings as we go */ + while (i--) + ixgbe_free_tx_resources(adapter->tx_ring[i]); return err; } @@ -4627,10 +4633,16 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); if (!err) continue; + e_err(probe, "Allocation for Rx Queue %u failed\n", i); - break; + goto err_setup_rx; } + return 0; +err_setup_rx: + /* rewind the index freeing the rings as we go */ + while (i--) + ixgbe_free_rx_resources(adapter->rx_ring[i]); return err; } @@ -4791,10 +4803,10 @@ static int ixgbe_open(struct net_device *netdev) return 0; err_req_irq: -err_setup_rx: ixgbe_free_all_rx_resources(adapter); -err_setup_tx: +err_setup_rx: ixgbe_free_all_tx_resources(adapter); +err_setup_tx: ixgbe_reset(adapter); return err; -- cgit v1.2.3 From ac802f5dfe56139a288df50c89c820412863cd8a Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 12 Jul 2012 05:52:53 +0000 Subject: ixgbe: Move configuration of set_real_num_rx/tx_queues into open It makes much more sense for us to configure the real number of Tx and Rx queues in the ixgbe_open call than it does in ixgbe_set_num_queues. 
By setting the number in ixgbe_open we can avoid a number of unecessary updates and only have to make the calls once. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 58 +++++++-------------------- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 36 +++++++++++------ 2 files changed, 38 insertions(+), 56 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index d308e7140171..c03d771c5eb9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -349,7 +349,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) * fallthrough conditions. * **/ -static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) +static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) { /* Start with base case */ adapter->num_rx_queues = 1; @@ -358,29 +358,14 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) adapter->num_rx_queues_per_pool = 1; if (ixgbe_set_sriov_queues(adapter)) - goto done; + return; #ifdef CONFIG_IXGBE_DCB if (ixgbe_set_dcb_queues(adapter)) - goto done; + return; #endif - if (ixgbe_set_rss_queues(adapter)) - goto done; - - /* fallback to base case */ - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - -done: - if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) || - (adapter->netdev->reg_state == NETREG_UNREGISTERING)) - return 0; - - /* Notify the stack of the (possibly) reduced queue counts. */ - netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); - return netif_set_real_num_rx_queues(adapter->netdev, - adapter->num_rx_queues); + ixgbe_set_rss_queues(adapter); } static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, @@ -710,11 +695,10 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) * Attempt to configure the interrupts using the best available * capabilities of the hardware and the kernel. **/ -static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) +static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int err = 0; - int vector, v_budget; + int vector, v_budget, err; /* * It's easy to be greedy for MSI-X vectors, but it really @@ -747,7 +731,7 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) ixgbe_acquire_msix_vectors(adapter, v_budget); if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) - goto out; + return; } adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; @@ -762,25 +746,17 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ixgbe_disable_sriov(adapter); - err = ixgbe_set_num_queues(adapter); - if (err) - return err; - + ixgbe_set_num_queues(adapter); adapter->num_q_vectors = 1; err = pci_enable_msi(adapter->pdev); - if (!err) { - adapter->flags |= IXGBE_FLAG_MSI_ENABLED; - } else { + if (err) { netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, "Unable to allocate MSI interrupt, " "falling back to legacy. 
Error: %d\n", err); - /* reset err */ - err = 0; + return; } - -out: - return err; + adapter->flags |= IXGBE_FLAG_MSI_ENABLED; } /** @@ -798,15 +774,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) int err; /* Number of supported queues */ - err = ixgbe_set_num_queues(adapter); - if (err) - return err; + ixgbe_set_num_queues(adapter); - err = ixgbe_set_interrupt_capability(adapter); - if (err) { - e_dev_err("Unable to setup interrupt capabilities\n"); - goto err_set_interrupt; - } + /* Set interrupt mode */ + ixgbe_set_interrupt_capability(adapter); err = ixgbe_alloc_q_vectors(adapter); if (err) { @@ -826,7 +797,6 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) err_alloc_q_vectors: ixgbe_reset_interrupt_capability(adapter); -err_set_interrupt: return err; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 373342f0b807..7f2aa220501e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4798,10 +4798,26 @@ static int ixgbe_open(struct net_device *netdev) if (err) goto err_req_irq; + /* Notify the stack of the actual queue counts. */ + err = netif_set_real_num_tx_queues(netdev, + adapter->num_rx_pools > 1 ? 1 : + adapter->num_tx_queues); + if (err) + goto err_set_queues; + + + err = netif_set_real_num_rx_queues(netdev, + adapter->num_rx_pools > 1 ? 1 : + adapter->num_rx_queues); + if (err) + goto err_set_queues; + ixgbe_up_complete(adapter); return 0; +err_set_queues: + ixgbe_free_irq(adapter); err_req_irq: ixgbe_free_all_rx_resources(adapter); err_setup_rx: @@ -4864,23 +4880,19 @@ static int ixgbe_resume(struct pci_dev *pdev) pci_wake_from_d3(pdev, false); - rtnl_lock(); - err = ixgbe_init_interrupt_scheme(adapter); - rtnl_unlock(); - if (err) { - e_dev_err("Cannot initialize interrupts for device\n"); - return err; - } - ixgbe_reset(adapter); IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); - if (netif_running(netdev)) { + rtnl_lock(); + err = ixgbe_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) err = ixgbe_open(netdev); - if (err) - return err; - } + + rtnl_unlock(); + + if (err) + return err; netif_device_attach(netdev); -- cgit v1.2.3 From 4ae63730bb420610cb99ed152d6daa35236cc9e9 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 22 Jun 2012 06:46:33 +0000 Subject: ixgbe: Update the logic for ixgbe_cache_ring_dcb and DCB RSS configuration This change cleans up some of the logic in an attempt to try and simplify things for how we are configuring DCB w/ RSS. In this patch I basically did 3 things. I updated the logic for getting the first register index. I applied the fact that all TCs get the same number of queues to simplify the looping logic in caching the DCB ring register. Finally I updated how we configure the RQTC register to match the fact that all TCs are assigned the same number of queues. 
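One detail worth calling out in the hunks below is how the RQTC value is built: because every TC now gets the same number of RSS queues, a single 4-bit "msb" value can be replicated into all eight TC fields by multiplying by 0x11111111. A small standalone C check of that arithmetic, with an example queue count chosen only for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t rss_i = 16 - 1;	/* example: 16 RSS queues per TC */
	uint32_t msb = 0;

	/* number of bits needed to index rss_i queues (log2, rounded up) */
	while (rss_i) {
		msb++;
		rss_i >>= 1;
	}

	/* multiplying by 0x11111111 copies the 4-bit msb into all eight
	 * 4-bit TC fields of the register image */
	printf("msb = %u, RQTC = 0x%08x\n",
	       (unsigned)msb, (unsigned)(msb * 0x11111111u));
	/* prints: msb = 4, RQTC = 0x44444444 */
	return 0;
}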
Cc: John Fastabend Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 80 +++++++++++++-------------- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 12 ++-- 2 files changed, 42 insertions(+), 50 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index c03d771c5eb9..4c3822f04bb9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -42,42 +42,37 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, switch (hw->mac.type) { case ixgbe_mac_82598EB: - *tx = tc << 2; - *rx = tc << 3; + /* TxQs/TC: 4 RxQs/TC: 8 */ + *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */ + *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */ break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: if (num_tcs > 4) { - if (tc < 3) { - *tx = tc << 5; - *rx = tc << 4; - } else if (tc < 5) { - *tx = ((tc + 2) << 4); - *rx = tc << 4; - } else if (tc < num_tcs) { - *tx = ((tc + 8) << 3); - *rx = tc << 4; - } + /* + * TCs : TC0/1 TC2/3 TC4-7 + * TxQs/TC: 32 16 8 + * RxQs/TC: 16 16 16 + */ + *rx = tc << 4; + if (tc < 3) + *tx = tc << 5; /* 0, 32, 64 */ + else if (tc < 5) + *tx = (tc + 2) << 4; /* 80, 96 */ + else + *tx = (tc + 8) << 3; /* 104, 112, 120 */ } else { - *rx = tc << 5; - switch (tc) { - case 0: - *tx = 0; - break; - case 1: - *tx = 64; - break; - case 2: - *tx = 96; - break; - case 3: - *tx = 112; - break; - default: - break; - } + /* + * TCs : TC0 TC1 TC2/3 + * TxQs/TC: 64 32 16 + * RxQs/TC: 32 32 32 + */ + *rx = tc << 5; + if (tc < 2) + *tx = tc << 6; /* 0, 64 */ + else + *tx = (tc + 4) << 4; /* 96, 112 */ } - break; default: break; } @@ -90,25 +85,26 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, * Cache the descriptor ring offsets for DCB to the assigned rings. 
* **/ -static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) +static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) { struct net_device *dev = adapter->netdev; - int i, j, k; + unsigned int tx_idx, rx_idx; + int tc, offset, rss_i, i; u8 num_tcs = netdev_get_num_tc(dev); - if (!num_tcs) + /* verify we have DCB queueing enabled before proceeding */ + if (num_tcs <= 1) return false; - for (i = 0, k = 0; i < num_tcs; i++) { - unsigned int tx_s, rx_s; - u16 count = dev->tc_to_txq[i].count; + rss_i = adapter->ring_feature[RING_F_RSS].indices; - ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); - for (j = 0; j < count; j++, k++) { - adapter->tx_ring[k]->reg_idx = tx_s + j; - adapter->rx_ring[k]->reg_idx = rx_s + j; - adapter->tx_ring[k]->dcb_tc = i; - adapter->rx_ring[k]->dcb_tc = i; + for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) { + ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx); + for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) { + adapter->tx_ring[offset + i]->reg_idx = tx_idx; + adapter->rx_ring[offset + i]->reg_idx = rx_idx; + adapter->tx_ring[offset + i]->dcb_tc = tc; + adapter->rx_ring[offset + i]->dcb_tc = tc; } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7f2aa220501e..32c8cd649cb0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3608,20 +3608,16 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) /* Enable RSS Hash per TC */ if (hw->mac.type != ixgbe_mac_82598EB) { - int i; - u32 reg = 0; - u8 msb = 0; - u8 rss_i = adapter->netdev->tc_to_txq[0].count - 1; + u32 msb = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1; while (rss_i) { msb++; rss_i >>= 1; } - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - reg |= msb << IXGBE_RQTC_SHIFT_TC(i); - - IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); + /* write msb to all 8 TCs in one write */ + IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111); } } #endif -- cgit v1.2.3 From 671c0adb5cb5f3acdc93527e54cf1e379fc980b1 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 18 May 2012 06:34:02 +0000 Subject: ixgbe: Cleanup logic for MRQC and MTQC configuration This change is meant to make the code much more readable for MTQC and MRQC configuration. The big change is that I simplified much of the logic so that we are essentially handling just 4 cases and their variants. In the cases where RSS is disabled we are actually just programming the RETA table with all 1s resulting in a single queue RSS. In the case of SR-IOV I am treating that as a subset of VMDq. 
This all results int he following configuration for the hardware: DCB En Dis VMDq En VMDQ/DCB VMDq/RSS Dis DCB/RSS RSS Cc: John Fastabend Signed-off-by: Alexander Duyck Tested-by: Stephen Ko Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 116 +++++++++++++++----------- 1 file changed, 66 insertions(+), 50 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 32c8cd649cb0..2b4b79178858 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2719,8 +2719,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 rttdcs; - u32 reg; + u32 rttdcs, mtqc; u8 tcs = netdev_get_num_tc(adapter->netdev); if (hw->mac.type == ixgbe_mac_82598EB) @@ -2732,28 +2731,32 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); /* set transmit pool layout */ - switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - case (IXGBE_FLAG_SRIOV_ENABLED): - IXGBE_WRITE_REG(hw, IXGBE_MTQC, - (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF)); - break; - default: - if (!tcs) - reg = IXGBE_MTQC_64Q_1PB; - else if (tcs <= 4) - reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + mtqc = IXGBE_MTQC_VT_ENA; + if (tcs > 4) + mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else if (tcs > 1) + mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else if (adapter->ring_feature[RING_F_RSS].indices == 4) + mtqc |= IXGBE_MTQC_32VF; else - reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + mtqc |= IXGBE_MTQC_64VF; + } else { + if (tcs > 4) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else if (tcs > 1) + mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; + else + mtqc = IXGBE_MTQC_64Q_1PB; + } - IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc); - /* Enable Security TX Buffer IFG for multiple pb */ - if (tcs) { - reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); - reg |= IXGBE_SECTX_DCB; - IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); - } - break; + /* Enable Security TX Buffer IFG for multiple pb */ + if (tcs) { + u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); + sectx |= IXGBE_SECTX_DCB; + IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx); } /* re-enable the arbiter */ @@ -2886,11 +2889,18 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) u32 mrqc = 0, reta = 0; u32 rxcsum; int i, j; - u8 tcs = netdev_get_num_tc(adapter->netdev); - int maxq = adapter->ring_feature[RING_F_RSS].indices; + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) + rss_i = 1; - if (tcs) - maxq = min(maxq, adapter->num_tx_queues / tcs); + /* + * Program table for at least 2 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. + */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) + rss_i = 2; /* Fill out hash function seeds */ for (i = 0; i < 10; i++) @@ -2898,7 +2908,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) /* Fill out redirection table */ for (i = 0, j = 0; i < 128; i++, j++) { - if (j == maxq) + if (j == rss_i) j = 0; /* reta = 4-byte sliding window of * 0x00..(indices-1)(indices-1)00..etc. 
*/ @@ -2912,35 +2922,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) rxcsum |= IXGBE_RXCSUM_PCSD; IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); - if (adapter->hw.mac.type == ixgbe_mac_82598EB && - (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { - mrqc = IXGBE_MRQC_RSSEN; + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) + mrqc = IXGBE_MRQC_RSSEN; } else { - int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED - | IXGBE_FLAG_SRIOV_ENABLED); - - switch (mask) { - case (IXGBE_FLAG_RSS_ENABLED): - if (!tcs) - mrqc = IXGBE_MRQC_RSSEN; - else if (tcs <= 4) - mrqc = IXGBE_MRQC_RTRSS4TCEN; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + if (tcs > 4) + mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ + else if (tcs > 1) + mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ + else if (adapter->ring_feature[RING_F_RSS].indices == 4) + mrqc = IXGBE_MRQC_VMDQRSS32EN; else + mrqc = IXGBE_MRQC_VMDQRSS64EN; + } else { + if (tcs > 4) mrqc = IXGBE_MRQC_RTRSS8TCEN; - break; - case (IXGBE_FLAG_SRIOV_ENABLED): - mrqc = IXGBE_MRQC_VMDQEN; - break; - default: - break; + else if (tcs > 1) + mrqc = IXGBE_MRQC_RTRSS4TCEN; + else + mrqc = IXGBE_MRQC_RSSEN; } } /* Perform hash on these packet types */ - mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 - | IXGBE_MRQC_RSS_FIELD_IPV4_TCP - | IXGBE_MRQC_RSS_FIELD_IPV6 - | IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | + IXGBE_MRQC_RSS_FIELD_IPV4_TCP | + IXGBE_MRQC_RSS_FIELD_IPV6 | + IXGBE_MRQC_RSS_FIELD_IPV6_TCP; if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; @@ -3103,8 +3114,13 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) if (hw->mac.type == ixgbe_mac_82598EB) return; - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) - psrtype |= (adapter->num_rx_queues_per_pool << 29); + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { + int rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (rss_i > 3) + psrtype |= 2 << 29; + else if (rss_i > 1) + psrtype |= 1 << 29; + } for (p = 0; p < adapter->num_rx_pools; p++) IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), -- cgit v1.2.3 From 908421f6cc6b6e5db6e8e8c35ab8fc0fb64f25c2 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:33:00 +0000 Subject: ixgbevf: Update descriptor macros to accept pointers and drop _ADV suffix This change updates the descriptor macros to accept pointers, updates the name to drop the _ADV suffix, and include the IXGBEVF name in the macro. Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 12 ++++++------ drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 18 +++++++++--------- 2 files changed, 15 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index f92daca249f8..1f1376515f70 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -164,12 +164,12 @@ struct ixgbevf_q_vector { ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ (R)->next_to_clean - (R)->next_to_use - 1) -#define IXGBE_RX_DESC_ADV(R, i) \ - (&(((union ixgbe_adv_rx_desc *)((R).desc))[i])) -#define IXGBE_TX_DESC_ADV(R, i) \ - (&(((union ixgbe_adv_tx_desc *)((R).desc))[i])) -#define IXGBE_TX_CTXTDESC_ADV(R, i) \ - (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i])) +#define IXGBEVF_RX_DESC(R, i) \ + (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) +#define IXGBEVF_TX_DESC(R, i) \ + (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) +#define IXGBEVF_TX_CTXTDESC(R, i) \ + (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) #define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 8e022c6f4b90..c98cdf7de49d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -195,7 +195,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, i = tx_ring->next_to_clean; eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); + eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && (count < tx_ring->count)) { @@ -206,7 +206,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, goto cont_loop; for ( ; !cleaned; count++) { struct sk_buff *skb; - tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_desc = IXGBEVF_TX_DESC(tx_ring, i); tx_buffer_info = &tx_ring->tx_buffer_info[i]; cleaned = (i == eop); skb = tx_buffer_info->skb; @@ -235,7 +235,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, cont_loop: eop = tx_ring->tx_buffer_info[i].next_to_watch; - eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); + eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); } tx_ring->next_to_clean = i; @@ -339,7 +339,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, bi = &rx_ring->rx_buffer_info[i]; while (cleaned_count--) { - rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + rx_desc = IXGBEVF_RX_DESC(rx_ring, i); skb = bi->skb; if (!skb) { skb = netdev_alloc_skb(adapter->netdev, @@ -405,7 +405,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, unsigned int total_rx_bytes = 0, total_rx_packets = 0; i = rx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); + rx_desc = IXGBEVF_RX_DESC(rx_ring, i); staterr = le32_to_cpu(rx_desc->wb.upper.status_error); rx_buffer_info = &rx_ring->rx_buffer_info[i]; @@ -432,7 +432,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, if (i == rx_ring->count) i = 0; - next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); + next_rxd = IXGBEVF_RX_DESC(rx_ring, i); prefetch(next_rxd); cleaned_count++; @@ -2437,7 +2437,7 @@ static int ixgbevf_tso(struct ixgbevf_adapter *adapter, i = tx_ring->next_to_use; tx_buffer_info = &tx_ring->tx_buffer_info[i]; - context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); + context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); /* VLAN MACLEN IPLEN */ if (tx_flags & IXGBE_TX_FLAGS_VLAN) @@ -2497,7 +2497,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter, (tx_flags & IXGBE_TX_FLAGS_VLAN)) { i = tx_ring->next_to_use; tx_buffer_info = &tx_ring->tx_buffer_info[i]; - context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); + context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); if (tx_flags & IXGBE_TX_FLAGS_VLAN) vlan_macip_lens |= (tx_flags & @@ -2700,7 +2700,7 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter, i = tx_ring->next_to_use; while 
(count--) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; - tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); + tx_desc = IXGBEVF_TX_DESC(tx_ring, i); tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len | tx_buffer_info->length); -- cgit v1.2.3 From 5d9a533bacff6fbaf711c92575f5f646c2c772f1 Mon Sep 17 00:00:00 2001 From: Pascal Bouchareine Date: Thu, 14 Jun 2012 02:18:18 +0000 Subject: ixgbevf: fix VF untagging when 802.1 prio is set We have had an issue when using ixgbe+ixgbevf and 802.1 VLAN tagging. When attaching a VLAN to a VF, frames with an 802.1q priority appeared untagged on the VF and hence did not reach the VLAN, whereas frames with priority 0 were tagged as expected and seen by the VLAN device. This seems to be due to the way ixgbevf is looking up the full tag (prio+cfi+vlan) against the adapter active_vlans, as a condition to mark the skb tagged. Signed-off-by: Pascal Bouchareine Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index c98cdf7de49d..b88218c7f60f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -279,7 +279,7 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, bool is_vlan = (status & IXGBE_RXD_STAT_VP); u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); - if (is_vlan && test_bit(tag, adapter->active_vlans)) + if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans)) __vlan_hwaccel_put_tag(skb, tag); napi_gro_receive(&q_vector->napi, skb); -- cgit v1.2.3 From 18c6308971028cc02838adc711c556d992ad8bdf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:33:11 +0000 Subject: ixgbevf: Do not rewind the Rx ring before bumping tail The driver is going back one step from its previous location before bumping tail. This is incorrect. We should just be writing the value of next_to_use into the tail register.
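For readers skimming the patch below, the rule is simply that the hardware tail register should receive next_to_use itself, with no step backwards. A minimal user-space sketch of that rule follows; the rx_ring struct and release_rx_desc() here are hypothetical stand-ins, not the driver's types.

    #include <stdio.h>

    struct rx_ring {
        unsigned int count;        /* descriptors in the ring */
        unsigned int next_to_use;  /* first descriptor not yet handed to hardware */
        unsigned int tail;         /* stands in for the hardware tail register */
    };

    static void release_rx_desc(struct rx_ring *ring)
    {
        /* write next_to_use directly; the old code stepped back one slot first */
        ring->tail = ring->next_to_use;
    }

    int main(void)
    {
        struct rx_ring ring = { .count = 512, .next_to_use = 16, .tail = 0 };
        release_rx_desc(&ring);
        printf("tail=%u\n", ring.tail);   /* prints 16, not 15 */
        return 0;
    }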
Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index b88218c7f60f..c27ce447e04e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -375,8 +375,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, no_buffers: if (rx_ring->next_to_use != i) { rx_ring->next_to_use = i; - if (i-- == 0) - i = (rx_ring->count - 1); ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); } @@ -1240,9 +1238,8 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter) ixgbevf_configure_rx(adapter); for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbevf_ring *ring = &adapter->rx_ring[i]; - ixgbevf_alloc_rx_buffers(adapter, ring, ring->count); - ring->next_to_use = ring->count - 1; - writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail); + ixgbevf_alloc_rx_buffers(adapter, ring, + IXGBE_DESC_UNUSED(ring)); } } -- cgit v1.2.3 From fb40195cc975b14c5d4e44863ea996f999ba5aee Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:33:16 +0000 Subject: ixgbevf: Add netdev to ring structure This change adds the netdev to the ring structure. This allows for a quicker transition from ring to netdev without having to go from ring to adapter to netdev. Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 6 +-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2 + drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 54 ++++++++++------------- 3 files changed, 28 insertions(+), 34 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 15947c9122e1..2c3b20edd84d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -359,8 +359,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, if (err) { while (i) { i--; - ixgbevf_free_tx_resources(adapter, - &tx_ring[i]); + ixgbevf_free_tx_resources(adapter, &tx_ring[i]); } goto err_tx_ring_setup; } @@ -374,8 +373,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, if (err) { while (i) { i--; - ixgbevf_free_rx_resources(adapter, - &rx_ring[i]); + ixgbevf_free_rx_resources(adapter, &rx_ring[i]); } goto err_rx_ring_setup; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 1f1376515f70..e167d1bb6dea 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -56,6 +56,8 @@ struct ixgbevf_rx_buffer { struct ixgbevf_ring { struct ixgbevf_ring *next; + struct net_device *netdev; + struct device *dev; struct ixgbevf_adapter *adapter; /* backlink */ void *desc; /* descriptor ring memory */ dma_addr_t dma; /* phys. 
address of descriptor ring */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index c27ce447e04e..1c53e13b466d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -187,7 +187,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_ring *tx_ring) { struct ixgbevf_adapter *adapter = q_vector->adapter; - struct net_device *netdev = adapter->netdev; union ixgbe_adv_tx_desc *tx_desc, *eop_desc; struct ixgbevf_tx_buffer *tx_buffer_info; unsigned int i, eop, count = 0; @@ -241,15 +240,17 @@ cont_loop: tx_ring->next_to_clean = i; #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) - if (unlikely(count && netif_carrier_ok(netdev) && + if (unlikely(count && netif_carrier_ok(tx_ring->netdev) && (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this * sees the new next_to_clean. */ smp_mb(); - if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && + if (__netif_subqueue_stopped(tx_ring->netdev, + tx_ring->queue_index) && !test_bit(__IXGBEVF_DOWN, &adapter->state)) { - netif_wake_subqueue(netdev, tx_ring->queue_index); + netif_wake_subqueue(tx_ring->netdev, + tx_ring->queue_index); ++adapter->restart_queue; } } @@ -292,12 +293,13 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, * @skb: skb currently being received and modified **/ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, + struct ixgbevf_ring *ring, u32 status_err, struct sk_buff *skb) { skb_checksum_none_assert(skb); /* Rx csum disabled */ - if (!(adapter->netdev->features & NETIF_F_RXCSUM)) + if (!(ring->netdev->features & NETIF_F_RXCSUM)) return; /* if IP and error */ @@ -332,31 +334,21 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, union ixgbe_adv_rx_desc *rx_desc; struct ixgbevf_rx_buffer *bi; struct sk_buff *skb; - unsigned int i; - unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN; + unsigned int i = rx_ring->next_to_use; - i = rx_ring->next_to_use; bi = &rx_ring->rx_buffer_info[i]; while (cleaned_count--) { rx_desc = IXGBEVF_RX_DESC(rx_ring, i); skb = bi->skb; if (!skb) { - skb = netdev_alloc_skb(adapter->netdev, - bufsz); - + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len); if (!skb) { adapter->alloc_rx_buff_failed++; goto no_buffers; } - /* - * Make buffer alignment 2 beyond a 16 byte boundary - * this will result in a 16 byte aligned IP header after - * the 14 byte MAC header is removed - */ - skb_reserve(skb, NET_IP_ALIGN); - bi->skb = skb; } if (!bi->dma) { @@ -449,7 +441,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, goto next_desc; } - ixgbevf_rx_checksum(adapter, staterr, skb); + ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb); /* probably a little skewed due to removing CRC */ total_rx_bytes += skb->len; @@ -464,7 +456,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, if (header_fixup_len < 14) skb_push(skb, header_fixup_len); } - skb->protocol = eth_type_trans(skb, adapter->netdev); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); @@ -1669,12 +1661,16 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) adapter->tx_ring[i].count = adapter->tx_ring_count; adapter->tx_ring[i].queue_index = i; adapter->tx_ring[i].reg_idx = i; + adapter->tx_ring[i].dev = &adapter->pdev->dev; + 
adapter->tx_ring[i].netdev = adapter->netdev; } for (i = 0; i < adapter->num_rx_queues; i++) { adapter->rx_ring[i].count = adapter->rx_ring_count; adapter->rx_ring[i].queue_index = i; adapter->rx_ring[i].reg_idx = i; + adapter->rx_ring[i].dev = &adapter->pdev->dev; + adapter->rx_ring[i].netdev = adapter->netdev; } return 0; @@ -2721,12 +2717,11 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter, writel(i, adapter->hw.hw_addr + tx_ring->tail); } -static int __ixgbevf_maybe_stop_tx(struct net_device *netdev, - struct ixgbevf_ring *tx_ring, int size) +static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) { - struct ixgbevf_adapter *adapter = netdev_priv(netdev); + struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); - netif_stop_subqueue(netdev, tx_ring->queue_index); + netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Herbert's original patch had: * smp_mb__after_netif_stop_queue(); * but since that doesn't exist yet, just open code it. */ @@ -2738,17 +2733,16 @@ static int __ixgbevf_maybe_stop_tx(struct net_device *netdev, return -EBUSY; /* A reprieve! - use start_queue because it doesn't call schedule */ - netif_start_subqueue(netdev, tx_ring->queue_index); + netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); ++adapter->restart_queue; return 0; } -static int ixgbevf_maybe_stop_tx(struct net_device *netdev, - struct ixgbevf_ring *tx_ring, int size) +static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) { if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) return 0; - return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size); + return __ixgbevf_maybe_stop_tx(tx_ring, size); } static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) @@ -2779,7 +2773,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) #else count += skb_shinfo(skb)->nr_frags; #endif - if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count + 3)) { + if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) { adapter->tx_busy++; return NETDEV_TX_BUSY; } @@ -2810,7 +2804,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first), skb->len, hdr_len); - ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); + ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); return NETDEV_TX_OK; } -- cgit v1.2.3 From 70a10e258ce3d45b294de9190dee9dcc73a495cb Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:33:21 +0000 Subject: ixgbevf: Consolidate Tx context descriptor creation code There is a good bit of redundancy between the Tx checksum and segmentation offloads. In order to reduce some of this I am moving the code for creating a context descriptor into a separate function. 
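As a rough illustration of the consolidation (a sketch with stand-in types, not the driver code), both the TSO and the checksum paths end up computing their inputs and handing them to a single helper that fills the four context-descriptor fields; ctx_desc and write_ctxtdesc() below are hypothetical names.

    #include <stdint.h>

    /* Stand-in for the advanced Tx context descriptor layout. */
    struct ctx_desc {
        uint32_t vlan_macip_lens;
        uint32_t seqnum_seed;
        uint32_t type_tucmd_mlhl;
        uint32_t mss_l4len_idx;
    };

    /* Shared helper: the only place that writes the descriptor fields. */
    static void write_ctxtdesc(struct ctx_desc *desc, uint32_t vlan_macip_lens,
                               uint32_t type_tucmd, uint32_t mss_l4len_idx)
    {
        desc->vlan_macip_lens = vlan_macip_lens;
        desc->seqnum_seed     = 0;
        desc->type_tucmd_mlhl = type_tucmd;    /* caller-specific command bits */
        desc->mss_l4len_idx   = mss_l4len_idx; /* 0 for plain checksum offload */
    }

    int main(void)
    {
        struct ctx_desc desc;
        write_ctxtdesc(&desc, 0, 0, 0); /* a TSO or csum caller passes real values */
        return 0;
    }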
Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/defines.h | 1 + drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 342 ++++++++++------------ 2 files changed, 163 insertions(+), 180 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 10cede503fe8..418af827b230 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -251,6 +251,7 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ IXGBE_ADVTXD_POPTS_SHIFT) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 1c53e13b466d..ce81ce0698b3 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include @@ -144,18 +145,18 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, } } -static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter, +static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *tx_buffer_info) { if (tx_buffer_info->dma) { if (tx_buffer_info->mapped_as_page) - dma_unmap_page(&adapter->pdev->dev, + dma_unmap_page(tx_ring->dev, tx_buffer_info->dma, tx_buffer_info->length, DMA_TO_DEVICE); else - dma_unmap_single(&adapter->pdev->dev, + dma_unmap_single(tx_ring->dev, tx_buffer_info->dma, tx_buffer_info->length, DMA_TO_DEVICE); @@ -222,7 +223,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, total_bytes += bytecount; } - ixgbevf_unmap_and_free_tx_resource(adapter, + ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); tx_desc->wb.status = 0; @@ -1443,7 +1444,7 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter, for (i = 0; i < tx_ring->count; i++) { tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); + ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); } size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; @@ -2389,172 +2390,153 @@ static int ixgbevf_close(struct net_device *netdev) return 0; } -static int ixgbevf_tso(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) +static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, + u32 vlan_macip_lens, u32 type_tucmd, + u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; - unsigned int i; - int err; - struct ixgbevf_tx_buffer *tx_buffer_info; - u32 vlan_macip_lens = 0, type_tucmd_mlhl; - u32 mss_l4len_idx, l4len; + u16 i = tx_ring->next_to_use; - if (skb_is_gso(skb)) { - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) - return err; - } - l4len = tcp_hdrlen(skb); - *hdr_len += l4len; - - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - iph->tot_len = 0; - iph->check = 0; - tcp_hdr(skb)->check = 
~csum_tcpudp_magic(iph->saddr, - iph->daddr, 0, - IPPROTO_TCP, - 0); - adapter->hw_tso_ctxt++; - } else if (skb_is_gso_v6(skb)) { - ipv6_hdr(skb)->payload_len = 0; - tcp_hdr(skb)->check = - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, - &ipv6_hdr(skb)->daddr, - 0, IPPROTO_TCP, 0); - adapter->hw_tso6_ctxt++; - } + context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); - i = tx_ring->next_to_use; + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); - - /* VLAN MACLEN IPLEN */ - if (tx_flags & IXGBE_TX_FLAGS_VLAN) - vlan_macip_lens |= - (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); - vlan_macip_lens |= ((skb_network_offset(skb)) << - IXGBE_ADVTXD_MACLEN_SHIFT); - *hdr_len += skb_network_offset(skb); - vlan_macip_lens |= - (skb_transport_header(skb) - skb_network_header(skb)); - *hdr_len += - (skb_transport_header(skb) - skb_network_header(skb)); - context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; - - /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ - type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | - IXGBE_ADVTXD_DTYP_CTXT); - - if (skb->protocol == htons(ETH_P_IP)) - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; - context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); - - /* MSS L4LEN IDX */ - mss_l4len_idx = - (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); - mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); - /* use index 1 for TSO */ - mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT); - context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); - - tx_buffer_info->time_stamp = jiffies; - tx_buffer_info->next_to_watch = i; + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; - i++; - if (i == tx_ring->count) - i = 0; - tx_ring->next_to_use = i; + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); +} + +static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) +{ + u32 vlan_macip_lens, type_tucmd; + u32 mss_l4len_idx, l4len; + + if (!skb_is_gso(skb)) + return 0; - return true; + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; } - return false; + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + /* compute header lengths */ + l4len = tcp_hdrlen(skb); + *hdr_len += l4len; + *hdr_len = skb_transport_offset(skb) + l4len; + + /* mss_l4len_id: use 1 as index for TSO */ + mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT; + mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; + mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT; + + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ + vlan_macip_lens = skb_network_header_len(skb); + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + 
vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return 1; } -static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring, +static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags) { - struct ixgbe_adv_tx_context_desc *context_desc; - unsigned int i; - struct ixgbevf_tx_buffer *tx_buffer_info; - u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; - if (skb->ip_summed == CHECKSUM_PARTIAL || - (tx_flags & IXGBE_TX_FLAGS_VLAN)) { - i = tx_ring->next_to_use; - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i); - - if (tx_flags & IXGBE_TX_FLAGS_VLAN) - vlan_macip_lens |= (tx_flags & - IXGBE_TX_FLAGS_VLAN_MASK); - vlan_macip_lens |= (skb_network_offset(skb) << - IXGBE_ADVTXD_MACLEN_SHIFT); - if (skb->ip_summed == CHECKSUM_PARTIAL) - vlan_macip_lens |= (skb_transport_header(skb) - - skb_network_header(skb)); - - context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; - - type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | - IXGBE_ADVTXD_DTYP_CTXT); - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - switch (skb->protocol) { - case __constant_htons(ETH_P_IP): - type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - type_tucmd_mlhl |= - IXGBE_ADVTXD_TUCMD_L4T_TCP; - break; - case __constant_htons(ETH_P_IPV6): - /* XXX what about other V6 headers?? */ - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - type_tucmd_mlhl |= - IXGBE_ADVTXD_TUCMD_L4T_TCP; - break; - default: - if (unlikely(net_ratelimit())) { - pr_warn("partial checksum but " - "proto=%x!\n", skb->protocol); - } - break; - } - } - context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); - /* use index zero for tx checksum offload */ - context_desc->mss_l4len_idx = 0; - tx_buffer_info->time_stamp = jiffies; - tx_buffer_info->next_to_watch = i; + u32 vlan_macip_lens = 0; + u32 mss_l4len_idx = 0; + u32 type_tucmd = 0; - adapter->hw_csum_tx_good++; - i++; - if (i == tx_ring->count) - i = 0; - tx_ring->next_to_use = i; + if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 l4_hdr = 0; + switch (skb->protocol) { + case __constant_htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + l4_hdr = ip_hdr(skb)->protocol; + break; + case __constant_htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but proto=%x!\n", + skb->protocol); + } + break; + } - return true; + switch (l4_hdr) { + case IPPROTO_TCP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + mss_l4len_idx = tcp_hdrlen(skb) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_SCTP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + mss_l4len_idx = sizeof(struct sctphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but l4 proto=%x!\n", + l4_hdr); + } + break; + } } - return false; + /* vlan_macip_lens: MACLEN, VLAN tag */ + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, + type_tucmd, mss_l4len_idx); + + return (skb->ip_summed == 
CHECKSUM_PARTIAL); } -static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring, +static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, unsigned int first) { - struct pci_dev *pdev = adapter->pdev; struct ixgbevf_tx_buffer *tx_buffer_info; unsigned int len; unsigned int total = skb->len; @@ -2573,12 +2555,11 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, tx_buffer_info->length = size; tx_buffer_info->mapped_as_page = false; - tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev, + tx_buffer_info->dma = dma_map_single(tx_ring->dev, skb->data + offset, size, DMA_TO_DEVICE); - if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) + if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) goto dma_error; - tx_buffer_info->time_stamp = jiffies; tx_buffer_info->next_to_watch = i; len -= size; @@ -2603,12 +2584,12 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, tx_buffer_info->length = size; tx_buffer_info->dma = - skb_frag_dma_map(&adapter->pdev->dev, frag, + skb_frag_dma_map(tx_ring->dev, frag, offset, size, DMA_TO_DEVICE); tx_buffer_info->mapped_as_page = true; - if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) + if (dma_mapping_error(tx_ring->dev, + tx_buffer_info->dma)) goto dma_error; - tx_buffer_info->time_stamp = jiffies; tx_buffer_info->next_to_watch = i; len -= size; @@ -2629,15 +2610,15 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, i = i - 1; tx_ring->tx_buffer_info[i].skb = skb; tx_ring->tx_buffer_info[first].next_to_watch = i; + tx_ring->tx_buffer_info[first].time_stamp = jiffies; return count; dma_error: - dev_err(&pdev->dev, "TX DMA map failed\n"); + dev_err(tx_ring->dev, "TX DMA map failed\n"); /* clear timestamp and dma mappings for failed tx_buffer_info map */ tx_buffer_info->dma = 0; - tx_buffer_info->time_stamp = 0; tx_buffer_info->next_to_watch = 0; count--; @@ -2648,14 +2629,13 @@ dma_error: if (i < 0) i += tx_ring->count; tx_buffer_info = &tx_ring->tx_buffer_info[i]; - ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); + ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); } return count; } -static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter, - struct ixgbevf_ring *tx_ring, int tx_flags, +static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, int count, u32 paylen, u8 hdr_len) { union ixgbe_adv_tx_desc *tx_desc = NULL; @@ -2672,21 +2652,24 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter, if (tx_flags & IXGBE_TX_FLAGS_VLAN) cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; + if (tx_flags & IXGBE_TX_FLAGS_CSUM) + olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM; + if (tx_flags & IXGBE_TX_FLAGS_TSO) { cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; - olinfo_status |= IXGBE_TXD_POPTS_TXSM << - IXGBE_ADVTXD_POPTS_SHIFT; - /* use index 1 context for tso */ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); if (tx_flags & IXGBE_TX_FLAGS_IPV4) - olinfo_status |= IXGBE_TXD_POPTS_IXSM << - IXGBE_ADVTXD_POPTS_SHIFT; + olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM; + + } - } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) - olinfo_status |= IXGBE_TXD_POPTS_TXSM << - IXGBE_ADVTXD_POPTS_SHIFT; + /* + * Check Context must be set if Tx switch is enabled, which it + * always is for case where virtual functions are running + */ + olinfo_status |= IXGBE_ADVTXD_CC; olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); @@ -2705,16 +2688,7 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter, tx_desc->read.cmd_type_len |= 
cpu_to_le32(txd_cmd); - /* - * Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only - * applicable for weak-ordered memory model archs, - * such as IA-64). - */ - wmb(); - tx_ring->next_to_use = i; - writel(i, adapter->hw.hw_addr + tx_ring->tail); } static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) @@ -2788,21 +2762,29 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) if (skb->protocol == htons(ETH_P_IP)) tx_flags |= IXGBE_TX_FLAGS_IPV4; - tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); + tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; } if (tso) - tx_flags |= IXGBE_TX_FLAGS_TSO; - else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) && - (skb->ip_summed == CHECKSUM_PARTIAL)) + tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; + else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags)) tx_flags |= IXGBE_TX_FLAGS_CSUM; - ixgbevf_tx_queue(adapter, tx_ring, tx_flags, - ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first), + ixgbevf_tx_queue(tx_ring, tx_flags, + ixgbevf_tx_map(tx_ring, skb, tx_flags, first), skb->len, hdr_len); + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED); -- cgit v1.2.3 From eb022d058f5ad48c45f1f31c36865d2c676aedc9 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 19 Jul 2012 00:18:05 +0000 Subject: ixgbevf: Fix multiple issues in ixgbevf_get/set_ringparam In ixgbevf_get_ringparam we could run into a NULL pointer dereference if the rings were not allocated when we attempted the call. To prevent that we can just access the tx/rx_ring_count values instead of attempting to access the rings to get the count. This change corrects a memory leak and memory corruption in ixgbevf_set_ringparam. The memory leak was due to us not freeing the resources from the ring before overwriting them. This change corrects the memory leak by making certain to call ixgbe_free_tx/rx_resources on the rings prior to freeing them. The memory corruption was because we were replacing the rings but not updating the q_vectors. It addresses the memory corruption by leaving the rings in place and instead just copying the contents of the new rings into the existing rings. 
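The corruption fix described above boils down to a copy-in-place pattern: build the resized rings in temporary storage, and only after every allocation succeeds copy their contents over the live ring structures, so anything that cached a pointer to a ring (the q_vectors in the driver) keeps pointing at valid memory. A minimal sketch under those assumptions follows; the ring struct and resize_rings() are hypothetical, not the driver's code.

    #include <stdlib.h>

    struct ring {
        unsigned int count;
        int *desc;                 /* stand-in for the descriptor memory */
    };

    static int resize_rings(struct ring *live, unsigned int n, unsigned int new_count)
    {
        struct ring *tmp = calloc(n, sizeof(*tmp));
        unsigned int i;

        if (!tmp)
            return -1;

        for (i = 0; i < n; i++) {
            tmp[i] = live[i];                       /* clone the live ring */
            tmp[i].count = new_count;
            tmp[i].desc = calloc(new_count, sizeof(int));
            if (!tmp[i].desc) {                     /* unwind; live rings untouched */
                while (i--)
                    free(tmp[i].desc);
                free(tmp);
                return -1;
            }
        }

        for (i = 0; i < n; i++) {                   /* all good: swap contents, not pointers */
            free(live[i].desc);
            live[i] = tmp[i];
        }
        free(tmp);
        return 0;
    }

    int main(void)
    {
        struct ring rings[2] = { { 512, calloc(512, sizeof(int)) },
                                 { 512, calloc(512, sizeof(int)) } };
        return resize_rings(rings, 2, 1024) ? 1 : 0;
    }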
Signed-off-by: Alexander Duyck Acked-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 153 +++++++++++++++------------ 1 file changed, 83 insertions(+), 70 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 2c3b20edd84d..8f2070439b59 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -284,13 +284,11 @@ static void ixgbevf_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbevf_ring *tx_ring = adapter->tx_ring; - struct ixgbevf_ring *rx_ring = adapter->rx_ring; ring->rx_max_pending = IXGBEVF_MAX_RXD; ring->tx_max_pending = IXGBEVF_MAX_TXD; - ring->rx_pending = rx_ring->count; - ring->tx_pending = tx_ring->count; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; } static int ixgbevf_set_ringparam(struct net_device *netdev, @@ -298,33 +296,28 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL; - int i, err = 0; u32 new_rx_count, new_tx_count; + int i, err = 0; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; - new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD); - new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD); - new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); - - new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD); - new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD); + new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD); + new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD); new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE); - if ((new_tx_count == adapter->tx_ring->count) && - (new_rx_count == adapter->rx_ring->count)) { - /* nothing to do */ + new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD); + new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE); + + /* if nothing to do return success */ + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) return 0; - } while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) - msleep(1); + usleep_range(1000, 2000); - /* - * If the adapter isn't up and running then just set the - * new parameters and scurry for the exits. 
- */ if (!netif_running(adapter->netdev)) { for (i = 0; i < adapter->num_tx_queues; i++) adapter->tx_ring[i].count = new_tx_count; @@ -335,78 +328,98 @@ static int ixgbevf_set_ringparam(struct net_device *netdev, goto clear_reset; } - tx_ring = kcalloc(adapter->num_tx_queues, - sizeof(struct ixgbevf_ring), GFP_KERNEL); - if (!tx_ring) { - err = -ENOMEM; - goto clear_reset; - } - - rx_ring = kcalloc(adapter->num_rx_queues, - sizeof(struct ixgbevf_ring), GFP_KERNEL); - if (!rx_ring) { - err = -ENOMEM; - goto err_rx_setup; - } - - ixgbevf_down(adapter); + if (new_tx_count != adapter->tx_ring_count) { + tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring)); + if (!tx_ring) { + err = -ENOMEM; + goto clear_reset; + } - memcpy(tx_ring, adapter->tx_ring, - adapter->num_tx_queues * sizeof(struct ixgbevf_ring)); - for (i = 0; i < adapter->num_tx_queues; i++) { - tx_ring[i].count = new_tx_count; - err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]); - if (err) { + for (i = 0; i < adapter->num_tx_queues; i++) { + /* clone ring and setup updated count */ + tx_ring[i] = adapter->tx_ring[i]; + tx_ring[i].count = new_tx_count; + err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]); + if (!err) + continue; while (i) { i--; ixgbevf_free_tx_resources(adapter, &tx_ring[i]); } - goto err_tx_ring_setup; + + vfree(tx_ring); + tx_ring = NULL; + + goto clear_reset; } } - memcpy(rx_ring, adapter->rx_ring, - adapter->num_rx_queues * sizeof(struct ixgbevf_ring)); - for (i = 0; i < adapter->num_rx_queues; i++) { - rx_ring[i].count = new_rx_count; - err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); - if (err) { + if (new_rx_count != adapter->rx_ring_count) { + rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring)); + if (!rx_ring) { + err = -ENOMEM; + goto clear_reset; + } + + for (i = 0; i < adapter->num_rx_queues; i++) { + /* clone ring and setup updated count */ + rx_ring[i] = adapter->rx_ring[i]; + rx_ring[i].count = new_rx_count; + err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]); + if (!err) + continue; while (i) { i--; ixgbevf_free_rx_resources(adapter, &rx_ring[i]); } - goto err_rx_ring_setup; + + vfree(rx_ring); + rx_ring = NULL; + + goto clear_reset; } } - /* - * Only switch to new rings if all the prior allocations - * and ring setups have succeeded. - */ - kfree(adapter->tx_ring); - adapter->tx_ring = tx_ring; - adapter->tx_ring_count = new_tx_count; - - kfree(adapter->rx_ring); - adapter->rx_ring = rx_ring; - adapter->rx_ring_count = new_rx_count; + /* bring interface down to prepare for update */ + ixgbevf_down(adapter); - /* success! 
*/ - ixgbevf_up(adapter); + /* Tx */ + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) { + ixgbevf_free_tx_resources(adapter, + &adapter->tx_ring[i]); + adapter->tx_ring[i] = tx_ring[i]; + } + adapter->tx_ring_count = new_tx_count; - goto clear_reset; + vfree(tx_ring); + tx_ring = NULL; + } -err_rx_ring_setup: - for(i = 0; i < adapter->num_tx_queues; i++) - ixgbevf_free_tx_resources(adapter, &tx_ring[i]); + /* Rx */ + if (rx_ring) { + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbevf_free_rx_resources(adapter, + &adapter->rx_ring[i]); + adapter->rx_ring[i] = rx_ring[i]; + } + adapter->rx_ring_count = new_rx_count; -err_tx_ring_setup: - kfree(rx_ring); + vfree(rx_ring); + rx_ring = NULL; + } -err_rx_setup: - kfree(tx_ring); + /* restore interface using new values */ + ixgbevf_up(adapter); clear_reset: + /* free Tx resources if Rx error is encountered */ + if (tx_ring) { + for (i = 0; i < adapter->num_tx_queues; i++) + ixgbevf_free_tx_resources(adapter, &tx_ring[i]); + vfree(tx_ring); + } + clear_bit(__IXGBEVF_RESETTING, &adapter->state); return err; } -- cgit v1.2.3 From 435b19f6210edc98e45c98f9d56eeca6c2140452 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 18 May 2012 06:34:08 +0000 Subject: ixgbe: Update configure virtualization to allow for multiple PF pools This change allows all pools from the default pool forward to be enabled via ixgbe_configure_virtualization. This is needed as we are planning to use queues belonging to adjacent pools for FCoE when SR-IOV and FCoE are both enabled. In addition, this patch contains some minor formatting changes as there were a few spots that seemed to be in need of some cleanup. Signed-off-by: Alexander Duyck Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 2b4b79178858..ea94fa24a1dd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3130,28 +3130,28 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 gcr_ext; - u32 vt_reg_bits; u32 reg_offset, vf_shift; - u32 vmdctl; + u32 gcr_ext, vmdctl; int i; if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return; vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN; - vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); - IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits); + vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; + vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; + vmdctl |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); vf_shift = adapter->num_vfs % 32; reg_offset = (adapter->num_vfs >= 32) ?
1 : 0; /* Enable only the PF's pool for Tx/Rx */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); - IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ @@ -3168,9 +3168,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) /* enable Tx loopback for VF/PF communication */ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); + /* Enable MAC Anti-Spoofing */ - hw->mac.ops.set_mac_anti_spoofing(hw, - (adapter->num_vfs != 0), + hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), adapter->num_vfs); /* For VFs that have spoof checking turned off */ for (i = 0; i < adapter->num_vfs; i++) { -- cgit v1.2.3 From 73079ea0414098ae83f341028434e04d63144ce2 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 14 Jul 2012 06:48:49 +0000 Subject: ixgbe: Add support for SR-IOV w/ DCB or RSS This change essentially makes it so that we can enable almost all of the features all at once. This patch allows for the combination of SR-IOV, DCB, and FCoE in the case of the x540. It also beefs up the SR-IOV by adding support for RSS to the PF. The testing matrix gets to be very complex for this patch as there are a number of different features and subsets for queueing options. I tried to narrow these down a bit by restricting the PF to only supporting 4TC DCB when it is enabled in addition to SR-IOV. Cc: Greg Rose Cc: John Fastabend Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 4 + drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 377 +++++++++++++++++++++++-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 37 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 52 +++- 4 files changed, 423 insertions(+), 47 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 5a75a9c8b813..67743aa7acef 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -284,6 +284,10 @@ struct ixgbe_ring_feature { u16 offset; /* offset to start of feature */ } ____cacheline_internodealigned_in_smp; +#define IXGBE_82599_VMDQ_8Q_MASK 0x78 +#define IXGBE_82599_VMDQ_4Q_MASK 0x7C +#define IXGBE_82599_VMDQ_2Q_MASK 0x7E + /* * FCoE requires that all Rx buffers be over 2200 bytes in length. Since * this is twice the size of a half page we need to double the page order diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 4c3822f04bb9..676e93f344f3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -29,6 +29,83 @@ #include "ixgbe_sriov.h" #ifdef CONFIG_IXGBE_DCB +/** + * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV + * @adapter: board private structure to initialize + * + * Cache the descriptor ring offsets for SR-IOV to the assigned rings. 
It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. + * + **/ +static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) +{ +#ifdef IXGBE_FCOE + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* IXGBE_FCOE */ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + int i; + u16 reg_idx; + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= tcs) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +#ifdef IXGBE_FCOE + /* nothing to do if FCoE is disabled */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return true; + + /* The work is already done if the FCoE ring is shared */ + if (fcoe->offset < tcs) + return true; + + /* The FCoE rings exist separately, we need to move their reg_idx */ + if (fcoe->indices) { + u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->rx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; + for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; + adapter->tx_ring[i]->reg_idx = reg_idx; + reg_idx++; + } + } + +#endif /* IXGBE_FCOE */ + return true; +} + /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, unsigned int *tx, unsigned int *rx) @@ -120,14 +197,61 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) * no other mapping is used. 
* */ -static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) +static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) { - adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; - adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; - if (adapter->num_vfs) - return true; - else +#ifdef IXGBE_FCOE + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +#endif /* IXGBE_FCOE */ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + int i; + u16 reg_idx; + + /* only proceed if VMDq is enabled */ + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) return false; + + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { +#ifdef IXGBE_FCOE + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + +#ifdef IXGBE_FCOE + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_rx_queues; i++, reg_idx++) + adapter->rx_ring[i]->reg_idx = reg_idx; + +#endif + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { +#ifdef IXGBE_FCOE + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +#endif + /* If we are greater than indices move to next pool */ + if ((reg_idx & rss->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +#ifdef IXGBE_FCOE + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_tx_queues; i++, reg_idx++) + adapter->tx_ring[i]->reg_idx = reg_idx; + +#endif + + return true; } /** @@ -169,30 +293,20 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) adapter->rx_ring[0]->reg_idx = 0; adapter->tx_ring[0]->reg_idx = 0; - if (ixgbe_cache_ring_sriov(adapter)) +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_cache_ring_dcb_sriov(adapter)) return; -#ifdef CONFIG_IXGBE_DCB if (ixgbe_cache_ring_dcb(adapter)) return; + #endif + if (ixgbe_cache_ring_sriov(adapter)) + return; ixgbe_cache_ring_rss(adapter); } -/** - * ixgbe_set_sriov_queues - Allocate queues for IOV use - * @adapter: board private structure to initialize - * - * IOV doesn't actually use anything, so just NAK the - * request for now and let the other queue routines - * figure out what to do. - */ -static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) -{ - return false; -} - #define IXGBE_RSS_16Q_MASK 0xF #define IXGBE_RSS_8Q_MASK 0x7 #define IXGBE_RSS_4Q_MASK 0x3 @@ -200,6 +314,109 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) #define IXGBE_RSS_DISABLED_MASK 0x0 #ifdef CONFIG_IXGBE_DCB +/** + * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * and VM pools where appropriate. Also assign queues based on DCB + * priorities and map accordingly.. 
+ * + **/ +static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) +{ + int i; + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; +#ifdef IXGBE_FCOE + u16 fcoe_i = 0; +#endif + u8 tcs = netdev_get_num_tc(adapter->netdev); + + /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* 16 pools w/ 8 TC per pool */ + if (tcs > 4) { + vmdq_i = min_t(u16, vmdq_i, 16); + vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; + /* 32 pools w/ 4 TC per pool */ + } else { + vmdq_i = min_t(u16, vmdq_i, 32); + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + } + +#ifdef IXGBE_FCOE + /* queues in the remaining pools are available for FCoE */ + fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; + +#endif + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* + * We do not support DCB, VMDq, and RSS all simultaneously + * so we will disable RSS since it is the lowest priority + */ + adapter->ring_feature[RING_F_RSS].indices = 1; + adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; + + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = tcs; + + adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_rx_queues = vmdq_i * tcs; + +#ifdef IXGBE_FCOE + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + + if (fcoe_i) { + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * tcs; + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } else if (tcs > 1) { + /* use queue belonging to FcoE TC */ + fcoe->indices = 1; + fcoe->offset = ixgbe_fcoe_get_tc(adapter); + } else { + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; + + fcoe->indices = 0; + fcoe->offset = 0; + } + } + +#endif /* IXGBE_FCOE */ + /* configure TC to queue mapping */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(adapter->netdev, i, 1, i); + + return true; +} + static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) { struct net_device *dev = adapter->netdev; @@ -261,6 +478,117 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) } #endif +/** + * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices + * @adapter: board private structure to initialize + * + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. 
+ * + **/ +static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) +{ + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = IXGBE_RSS_DISABLED_MASK; +#ifdef IXGBE_FCOE + u16 fcoe_i = 0; +#endif + + /* only proceed if SR-IOV is enabled */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return false; + + /* Add starting offset to total pool count */ + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; + + /* double check we are limited to maximum pools */ + vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); + + /* 64 pool mode with 2 queues per pool */ + if ((vmdq_i > 32) || (rss_i < 4)) { + vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; + rss_m = IXGBE_RSS_2Q_MASK; + rss_i = min_t(u16, rss_i, 2); + /* 32 pool mode with 4 queues per pool */ + } else { + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; + rss_m = IXGBE_RSS_4Q_MASK; + rss_i = 4; + } + +#ifdef IXGBE_FCOE + /* queues in the remaining pools are available for FCoE */ + fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); + +#endif + /* remove the starting offset from the pool count */ + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; + + /* limit RSS based on user input and save for later use */ + adapter->ring_feature[RING_F_RSS].indices = rss_i; + adapter->ring_feature[RING_F_RSS].mask = rss_m; + + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = rss_i; + + adapter->num_rx_queues = vmdq_i * rss_i; + adapter->num_tx_queues = vmdq_i * rss_i; + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +#ifdef IXGBE_FCOE + /* + * FCoE can use rings from adjacent buffers to allow RSS + * like behavior. To account for this we need to add the + * FCoE indices to the total ring count. 
+ */ + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *fcoe; + + fcoe = &adapter->ring_feature[RING_F_FCOE]; + + /* limit ourselves based on feature limits */ + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); + + if (vmdq_i > 1 && fcoe_i) { + /* reserve no more than number of CPUs */ + fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); + + /* alloc queues for FCoE separately */ + fcoe->indices = fcoe_i; + fcoe->offset = vmdq_i * rss_i; + } else { + /* merge FCoE queues with RSS queues */ + fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); + + /* limit indices to rss_i if MSI-X is disabled */ + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) + fcoe_i = rss_i; + + /* attempt to reserve some queues for just FCoE */ + fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); + fcoe->offset = fcoe_i - fcoe->indices; + + fcoe_i -= rss_i; + } + + /* add queues to adapter */ + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } + +#endif + return true; +} + /** * ixgbe_set_rss_queues - Allocate queues for RSS * @adapter: board private structure to initialize @@ -353,14 +681,17 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) adapter->num_rx_pools = adapter->num_rx_queues; adapter->num_rx_queues_per_pool = 1; - if (ixgbe_set_sriov_queues(adapter)) +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_set_dcb_sriov_queues(adapter)) return; -#ifdef CONFIG_IXGBE_DCB if (ixgbe_set_dcb_queues(adapter)) return; #endif + if (ixgbe_set_sriov_queues(adapter)) + return; + ixgbe_set_rss_queues(adapter); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ea94fa24a1dd..454e556307f2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3161,9 +3161,18 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) * Set up VF register offsets for selected VT Mode, * i.e. 
32 or 64 VFs for SR-IOV */ - gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); - gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; - gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; + switch (adapter->ring_feature[RING_F_VMDQ].mask) { + case IXGBE_82599_VMDQ_8Q_MASK: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_16; + break; + case IXGBE_82599_VMDQ_4Q_MASK: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_32; + break; + default: + gcr_ext = IXGBE_GCR_EXT_VT_MODE_64; + break; + } + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); /* enable Tx loopback for VF/PF communication */ @@ -3947,7 +3956,18 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { gpie &= ~IXGBE_GPIE_VTMODE_MASK; - gpie |= IXGBE_GPIE_VTMODE_64; + + switch (adapter->ring_feature[RING_F_VMDQ].mask) { + case IXGBE_82599_VMDQ_8Q_MASK: + gpie |= IXGBE_GPIE_VTMODE_16; + break; + case IXGBE_82599_VMDQ_4Q_MASK: + gpie |= IXGBE_GPIE_VTMODE_32; + break; + default: + gpie |= IXGBE_GPIE_VTMODE_64; + break; + } } /* Enable Thermal over heat sensor interrupt */ @@ -6674,11 +6694,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) return -EINVAL; } - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - e_err(drv, "Enable failed, SR-IOV enabled\n"); - return -EINVAL; - } - /* Hardware supports up to 8 traffic classes */ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || (hw->mac.type == ixgbe_mac_82598EB && @@ -7225,10 +7240,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | - IXGBE_FLAG_DCB_ENABLED); - #ifdef CONFIG_IXGBE_DCB netdev->dcbnl_ops = &dcbnl_ops; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index eb3f67c854a7..d2854434ad12 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -107,15 +107,21 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, "VF drivers to avoid spoofed packet errors\n"); } else { err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); + goto err_novfs; + } } - if (err) { - e_err(probe, "Failed to enable PCI sriov: %d\n", err); - goto err_novfs; - } - adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); + /* Enable VMDq flag so device will be set in VM mode */ + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; + if (!adapter->ring_feature[RING_F_VMDQ].limit) + adapter->ring_feature[RING_F_VMDQ].limit = 1; + adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; + num_vf_macvlans = hw->mac.num_rar_entries - (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); @@ -146,12 +152,39 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, * and memory allocated set up the mailbox parameters */ ixgbe_init_mbx_params_pf(hw); - memcpy(&hw->mbx.ops, ii->mbx_ops, - sizeof(hw->mbx.ops)); + memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); + + /* limit trafffic classes based on VFs enabled */ + if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && + (adapter->num_vfs < 16)) { + adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; + adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; + } else if (adapter->num_vfs < 32) { + adapter->dcb_cfg.num_tcs.pg_tcs = 4; + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + } else { + adapter->dcb_cfg.num_tcs.pg_tcs = 
1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + } + + /* We do not support RSS w/ SR-IOV */ + adapter->ring_feature[RING_F_RSS].limit = 1; /* Disable RSC when in SR-IOV mode */ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | IXGBE_FLAG2_RSC_ENABLED); + +#ifdef IXGBE_FCOE + /* + * When SR-IOV is enabled 82599 cannot support jumbo frames + * so we must disable FCoE because we cannot support FCoE MTU. + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED | + IXGBE_FLAG_FCOE_CAPABLE); +#endif + + /* enable spoof checking for all VFs */ for (i = 0; i < adapter->num_vfs; i++) adapter->vfinfo[i].spoofchk_enabled = true; return; @@ -171,7 +204,6 @@ err_novfs: void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - u32 gcr; u32 gpie; u32 vmdctl; int i; @@ -182,9 +214,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) #endif /* turn off device IOV mode */ - gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); - gcr &= ~(IXGBE_GCR_EXT_SRIOV); - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr); + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0); gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); gpie &= ~IXGBE_GPIE_VTMODE_MASK; IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); -- cgit v1.2.3 From fbe7ca7f9bb60fdec91cce6b52dd0c6dbac641f7 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 14 Jul 2012 05:42:36 +0000 Subject: ixgbe: Retire RSS enabled and capable flags All of our hardware supports RSS even if it is only for a single queue. So instead of toting around the RSS enable flag I am updating the code so that all devices are enabled and if we want to disable RSS it is indicated via the RSS mask. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2 -- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 4 ---- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 10 +------- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 29 ++++++------------------ 4 files changed, 8 insertions(+), 37 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 67743aa7acef..4ca10e6da482 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -446,8 +446,6 @@ struct ixgbe_adapter { #define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12) #define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13) #define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14) -#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16) -#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17) #define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18) #define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19) #define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 8e1be50af70a..4104ea25d818 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2245,10 +2245,6 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, { cmd->data = 0; - /* if RSS is disabled then report no hashing */ - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - return 0; - /* Report default options for RSS on ixgbe */ switch (cmd->flow_type) { case TCP_V4_FLOW: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 676e93f344f3..38d1b65777ad 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ 
-265,9 +265,6 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) { int i; - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - return false; - for (i = 0; i < adapter->num_rx_queues; i++) adapter->rx_ring[i]->reg_idx = i; for (i = 0; i < adapter->num_tx_queues; i++) @@ -602,11 +599,6 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) struct ixgbe_ring_feature *f; u16 rss_i; - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - return false; - } - /* set mask for 16 queue limit of RSS */ f = &adapter->ring_feature[RING_F_RSS]; rss_i = f->limit; @@ -1062,7 +1054,6 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) } adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { e_err(probe, "ATR is not supported while multiple " @@ -1073,6 +1064,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ixgbe_disable_sriov(adapter); + adapter->ring_feature[RING_F_RSS].limit = 1; ixgbe_set_num_queues(adapter); adapter->num_q_vectors = 1; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 454e556307f2..a3dc9657f572 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2891,9 +2891,6 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) int i, j; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) - rss_i = 1; - /* * Program table for at least 2 queues w/ SR-IOV so that VFs can * make full use of any rings they may have. We will use the @@ -2923,7 +2920,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); if (adapter->hw.mac.type == ixgbe_mac_82598EB) { - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) + if (adapter->ring_feature[RING_F_RSS].mask) mrqc = IXGBE_MRQC_RSSEN; } else { u8 tcs = netdev_get_num_tc(adapter->netdev); @@ -3102,6 +3099,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + int rss_i = adapter->ring_feature[RING_F_RSS].indices; int p; /* PSRTYPE must be initialized in non 82598 adapters */ @@ -3114,13 +3112,10 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) if (hw->mac.type == ixgbe_mac_82598EB) return; - if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { - int rss_i = adapter->ring_feature[RING_F_RSS].indices; - if (rss_i > 3) - psrtype |= 2 << 29; - else if (rss_i > 1) - psrtype |= 1 << 29; - } + if (rss_i > 3) + psrtype |= 2 << 29; + else if (rss_i > 1) + psrtype |= 1 << 29; for (p = 0; p < adapter->num_rx_pools; p++) IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), @@ -4408,7 +4403,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) /* Set capability flags */ rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); adapter->ring_feature[RING_F_RSS].limit = rss; - adapter->flags |= IXGBE_FLAG_RSS_ENABLED; switch (hw->mac.type) { case ixgbe_mac_82598EB: if (hw->device_id == IXGBE_DEV_ID_82598AT) @@ -6756,10 +6750,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); - /* return error if RXHASH is being enabled when RSS is not supported */ - if (!(adapter->flags 
& IXGBE_FLAG_RSS_ENABLED)) - features &= ~NETIF_F_RXHASH; - /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ if (!(features & NETIF_F_RXCSUM)) features &= ~NETIF_F_LRO; @@ -6802,7 +6792,7 @@ static int ixgbe_set_features(struct net_device *netdev, if (!(features & NETIF_F_NTUPLE)) { if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { /* turn off Flow Director, set ATR and reset */ - if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; need_reset = true; @@ -7294,11 +7284,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (err) goto err_sw_init; - if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) { - netdev->hw_features &= ~NETIF_F_RXHASH; - netdev->features &= ~NETIF_F_RXHASH; - } - /* WOL not supported for all devices */ adapter->wol = 0; hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); -- cgit v1.2.3 From a16a0d2fb8e0726f5d92572cc6ba5ba52f5cdcc1 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 19 May 2012 01:10:50 +0000 Subject: ixgbe: Cleanup holes in flags after removing several of them This change is just meant to defragment the flags as there are several hole that have been introduced since several features, or the flags for them, have been removed. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 50 ++++++++++++++++---------------- 1 file changed, 25 insertions(+), 25 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4ca10e6da482..f7f6fe2255da 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -433,33 +433,33 @@ struct ixgbe_adapter { * thus the additional *_CAPABLE flags. 
*/ u32 flags; -#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1) -#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2) -#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3) -#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4) -#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6) -#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7) -#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8) -#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9) -#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11) -#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12) -#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13) -#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14) -#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18) -#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19) -#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20) -#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22) -#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23) -#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24) -#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25) -#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26) -#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27) -#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28) -#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29) +#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) +#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) +#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) +#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) +#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) +#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6) +#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7) +#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) +#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9) +#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11) +#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12) +#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13) +#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14) +#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15) +#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16) +#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17) +#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18) +#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19) +#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20) +#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21) +#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22) +#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23) u32 flags2; -#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1) +#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) #define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) #define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) #define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3) -- cgit v1.2.3 From 8b0d2f9ed3d8e92feada7c5d70fa85be46e6f948 Mon Sep 17 00:00:00 2001 From: Bjørn Mork Date: Thu, 19 Jul 2012 06:28:40 +0000 Subject: net: e100: ucode is optional in some cases MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 9ac32e1b firmware: convert e100 driver to request_firmware() did a straight conversion of the in-driver ucode to external files. This introduced the possibility of the driver failing to enable an interface due to missing ucode. There was no evaluation of the importance of the ucode at the time. Based on comments in earlier versions of this driver, and in the source code for the FreeBSD fxp driver, we can assume that the ucode implements the "CPU Cycle Saver" feature on supported adapters. Although generally wanted, this is an optional feature. The ucode source is not available, preventing it from being included in free distributions. 
This creates unnecessary problems for the end users. Doing a network install based on a free distribution installer requires the user to download and insert the ucode into the installer. Making the ucode optional when possible improves the user experience and driver usability. The ucode for some adapters include a bugfix, making it essential. We continue to fail for these adapters unless the ucode is available. Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/e100.c | 40 ++++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index ada720b42ff6..535f94fac4a1 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1249,20 +1249,35 @@ static const struct firmware *e100_request_firmware(struct nic *nic) const struct firmware *fw = nic->fw; u8 timer, bundle, min_size; int err = 0; + bool required = false; /* do not load u-code for ICH devices */ if (nic->flags & ich) return NULL; - /* Search for ucode match against h/w revision */ - if (nic->mac == mac_82559_D101M) + /* Search for ucode match against h/w revision + * + * Based on comments in the source code for the FreeBSD fxp + * driver, the FIRMWARE_D102E ucode includes both CPUSaver and + * + * "fixes for bugs in the B-step hardware (specifically, bugs + * with Inline Receive)." + * + * So we must fail if it cannot be loaded. + * + * The other microcode files are only required for the optional + * CPUSaver feature. Nice to have, but no reason to fail. + */ + if (nic->mac == mac_82559_D101M) { fw_name = FIRMWARE_D101M; - else if (nic->mac == mac_82559_D101S) + } else if (nic->mac == mac_82559_D101S) { fw_name = FIRMWARE_D101S; - else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) + } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) { fw_name = FIRMWARE_D102E; - else /* No ucode on other devices */ + required = true; + } else { /* No ucode on other devices */ return NULL; + } /* If the firmware has not previously been loaded, request a pointer * to it. If it was previously loaded, we are reinitializing the @@ -1273,10 +1288,17 @@ static const struct firmware *e100_request_firmware(struct nic *nic) err = request_firmware(&fw, fw_name, &nic->pdev->dev); if (err) { - netif_err(nic, probe, nic->netdev, - "Failed to load firmware \"%s\": %d\n", - fw_name, err); - return ERR_PTR(err); + if (required) { + netif_err(nic, probe, nic->netdev, + "Failed to load firmware \"%s\": %d\n", + fw_name, err); + return ERR_PTR(err); + } else { + netif_info(nic, probe, nic->netdev, + "CPUSaver disabled. Needs \"%s\": %d\n", + fw_name, err); + return NULL; + } } /* Firmware should be precisely UCODE_SIZE (words) plus three bytes -- cgit v1.2.3 From 1d9c0bfd0f0470c8fb0033999f623d4eec7b1a2c Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:32:21 +0000 Subject: ixgbe: Use VMDq offset to indicate the default pool This change makes it so that we can use the VMDq ring feature offset value to determine the default pool instead of using num_vfs. The reason for this change is to avoid issues should we fail to allocate vfinfo but have pre-existing VFs. What should happen in this case is that num_vfs will go to 0, but the VMDq offset will contain the location of the first PF pool. 
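To make the rationale concrete, here is a minimal stand-alone sketch (not driver code; the struct layout, field names and the two-argument VMDQ_P form are simplified stand-ins invented for illustration) showing why the ring-feature offset still identifies the first PF pool after num_vfs has been forced back to 0:

#include <stdio.h>

struct ring_feature {
	unsigned int offset;		/* index of the first pool owned by the PF */
};

struct adapter {
	unsigned int num_vfs;		/* may be zeroed if vfinfo allocation fails */
	struct ring_feature vmdq;
};

#define VMDQ_P(a, p)	((p) + (a)->vmdq.offset)

int main(void)
{
	/* VFs still exist in hardware, but vfinfo allocation failed: num_vfs is 0 */
	struct adapter a = { .num_vfs = 0, .vmdq = { .offset = 8 } };

	printf("default pool via num_vfs: %u\n", a.num_vfs);		/* wrong: 0 */
	printf("default pool via offset : %u\n", VMDQ_P(&a, 0));	/* still 8 */
	return 0;
}
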
Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 33 ++++++++++++-------------- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 5 ++++ 3 files changed, 21 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index f7f6fe2255da..c2365005b545 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -113,7 +113,7 @@ #define IXGBE_MAX_VFTA_ENTRIES 128 #define MAX_EMULATION_MAC_ADDRS 16 #define IXGBE_MAX_PF_MACVLANS 15 -#define VMDQ_P(p) ((p) + adapter->num_vfs) +#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) #define IXGBE_82599_VF_DEVICE_ID 0x10ED #define IXGBE_X540_VF_DEVICE_ID 0x1515 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a3dc9657f572..f110e8868bc6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3118,7 +3118,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) psrtype |= 1 << 29; for (p = 0; p < adapter->num_rx_pools; p++) - IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p), + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), psrtype); } @@ -3135,12 +3135,12 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; - vmdctl |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT); + vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; vmdctl |= IXGBE_VT_CTL_REPLEN; IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); - vf_shift = adapter->num_vfs % 32; - reg_offset = (adapter->num_vfs >= 32) ? 1 : 0; + vf_shift = VMDQ_P(0) % 32; + reg_offset = (VMDQ_P(0) >= 32) ? 
1 : 0; /* Enable only the PF's pool for Tx/Rx */ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); @@ -3150,7 +3150,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ - hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs); + hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); /* * Set up VF register offsets for selected VT Mode, @@ -3310,10 +3310,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - int pool_ndx = adapter->num_vfs; /* add VID to filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true); + hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true); set_bit(vid, adapter->active_vlans); return 0; @@ -3323,10 +3322,9 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - int pool_ndx = adapter->num_vfs; /* remove VID from filter table */ - hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); + hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false); clear_bit(vid, adapter->active_vlans); return 0; @@ -3444,7 +3442,6 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - unsigned int vfn = adapter->num_vfs; unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS; int count = 0; @@ -3462,7 +3459,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev) if (!rar_entries) break; hw->mac.ops.set_rar(hw, rar_entries--, ha->addr, - vfn, IXGBE_RAH_AV); + VMDQ_P(0), IXGBE_RAH_AV); count++; } } @@ -3536,12 +3533,14 @@ void ixgbe_set_rx_mode(struct net_device *netdev) vmolr |= IXGBE_VMOLR_ROPE; } - if (adapter->num_vfs) { + if (adapter->num_vfs) ixgbe_restore_vf_multicasts(adapter); - vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) & + + if (hw->mac.type != ixgbe_mac_82598EB) { + vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) & ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE); - IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr); + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); } /* This is useful for sniffing bad packets. */ @@ -4120,8 +4119,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); /* reprogram the RAR[0] in case user changed it. 
*/ - hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, - IXGBE_RAH_AV); + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV); } /** @@ -6445,8 +6443,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); - hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs, - IXGBE_RAH_AV); + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d2854434ad12..c7d831d6e212 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -225,6 +225,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); IXGBE_WRITE_FLUSH(hw); + /* Disable VMDq flag so device will be set in VM mode */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; + adapter->ring_feature[RING_F_VMDQ].offset = 0; + /* take a breather then clean up driver data */ msleep(100); -- cgit v1.2.3 From d773d1310625be3b040b436178ad59a0af8888f1 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:32:26 +0000 Subject: ixgbe: Fix memory leak when SR-IOV VFs are direct assigned The VF driver had a memory leak that would occur if VFs were assigned to a guest. The amount of leak would vary with the number of VFs but could max out at about 14K per PF. To reproduce the leak all you would need to do is enable all the VFs on the first PF. Then start a loop of loading and unloading the driver with max_vfs=63 for the first port. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index c7d831d6e212..4f22668b6aea 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -208,6 +208,17 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) u32 vmdctl; int i; + /* set num VFs to 0 to prevent access to vfinfo */ + adapter->num_vfs = 0; + + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; + + /* free macvlan list */ + kfree(adapter->mv_list); + adapter->mv_list = NULL; + #ifdef CONFIG_PCI_IOV /* disable iov and allow time for transactions to clear */ pci_disable_sriov(adapter->pdev); @@ -238,11 +249,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) if (adapter->vfinfo[i].vfdev) pci_dev_put(adapter->vfinfo[i].vfdev); } - kfree(adapter->vfinfo); - kfree(adapter->mv_list); - adapter->vfinfo = NULL; - adapter->num_vfs = 0; adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; } -- cgit v1.2.3 From 1bf91cdc1bba94ea062a9147d924815c13f029f2 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:32:32 +0000 Subject: ixgbe: Drop references to deprecated pci_ DMA api and instead use dma_ API The networking side of the code had already been updated to use dma_ calls instead of the old pci_ calls. However it looks like the FCoE code was never updated. This change goes through and moves everything from the pci APIs to the dma APIs. 
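As a rough sketch of the API mapping this patch applies, the fragment below pairs each legacy pci_* call with its generic DMA equivalent; the example_* helpers and the pool size/alignment values are hypothetical, while the dma_pool_*/dma_map_sg routines are the standard kernel APIs being switched to (they take a struct device * instead of a struct pci_dev *):

#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* old: pci_pool_create(name, pdev, size, align, boundary) */
static struct dma_pool *example_pool_create(struct pci_dev *pdev)
{
	return dma_pool_create("example_ddp", &pdev->dev, 128, 16, PAGE_SIZE);
}

static void example_pool_use(struct pci_dev *pdev, struct dma_pool *pool,
			     struct scatterlist *sgl, int sgc)
{
	dma_addr_t handle;
	void *udl;
	int count;

	/* old: pci_map_sg(pdev, sgl, sgc, DMA_FROM_DEVICE) */
	count = dma_map_sg(&pdev->dev, sgl, sgc, DMA_FROM_DEVICE);

	/* old: pci_pool_alloc(pool, GFP_ATOMIC, &handle) */
	udl = dma_pool_alloc(pool, GFP_ATOMIC, &handle);
	if (udl)
		dma_pool_free(pool, udl, handle);	/* old: pci_pool_free() */

	if (count)
		dma_unmap_sg(&pdev->dev, sgl, sgc, DMA_FROM_DEVICE);

	dma_pool_destroy(pool);				/* old: pci_pool_destroy() */
}
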
Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 30 +++++++++++++-------------- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 4 ++-- 2 files changed, 17 insertions(+), 17 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index cc28c44a048c..9b0909ffd7a6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -104,10 +104,10 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) udelay(100); } if (ddp->sgl) - pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, + dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); if (ddp->pool) { - pci_pool_free(ddp->pool, ddp->udl, ddp->udp); + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); ddp->pool = NULL; } @@ -144,7 +144,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, unsigned int thislen = 0; u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; dma_addr_t addr = 0; - struct pci_pool *pool; + struct dma_pool *pool; unsigned int cpu; if (!netdev || !sgl) @@ -176,7 +176,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, ixgbe_fcoe_clear_ddp(ddp); /* setup dma from scsi command sgl */ - dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); + dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); if (dmacount == 0) { e_err(drv, "xid 0x%x DMA map error\n", xid); return 0; @@ -185,7 +185,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, /* alloc the udl from per cpu ddp pool */ cpu = get_cpu(); pool = *per_cpu_ptr(fcoe->pool, cpu); - ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp); + ddp->udl = dma_pool_alloc(pool, GFP_ATOMIC, &ddp->udp); if (!ddp->udl) { e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; @@ -293,11 +293,11 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, return 1; out_noddp_free: - pci_pool_free(pool, ddp->udl, ddp->udp); + dma_pool_free(pool, ddp->udl, ddp->udp); ixgbe_fcoe_clear_ddp(ddp); out_noddp_unmap: - pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE); + dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); put_cpu(); return 0; } @@ -409,7 +409,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, break; /* unmap the sg list when FCPRSP is received */ case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): - pci_unmap_sg(adapter->pdev, ddp->sgl, + dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); ddp->err = ddp_err; ddp->sgl = NULL; @@ -566,12 +566,12 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe) { unsigned int cpu; - struct pci_pool **pool; + struct dma_pool **pool; for_each_possible_cpu(cpu) { pool = per_cpu_ptr(fcoe->pool, cpu); if (*pool) - pci_pool_destroy(*pool); + dma_pool_destroy(*pool); } free_percpu(fcoe->pool); fcoe->pool = NULL; @@ -581,10 +581,10 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; unsigned int cpu; - struct pci_pool **pool; + struct dma_pool **pool; char pool_name[32]; - fcoe->pool = alloc_percpu(struct pci_pool *); + fcoe->pool = alloc_percpu(struct dma_pool *); if (!fcoe->pool) return; @@ -592,9 +592,9 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter) for_each_possible_cpu(cpu) { snprintf(pool_name, 32, 
"ixgbe_fcoe_ddp_%d", cpu); pool = per_cpu_ptr(fcoe->pool, cpu); - *pool = pci_pool_create(pool_name, - adapter->pdev, IXGBE_FCPTR_MAX, - IXGBE_FCPTR_ALIGN, PAGE_SIZE); + *pool = dma_pool_create(pool_name, &adapter->pdev->dev, + IXGBE_FCPTR_MAX, IXGBE_FCPTR_ALIGN, + PAGE_SIZE); if (!*pool) { e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); ixgbe_fcoe_ddp_pools_free(fcoe); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index 1dbed17c8107..0ef231a4579f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -62,11 +62,11 @@ struct ixgbe_fcoe_ddp { struct scatterlist *sgl; dma_addr_t udp; u64 *udl; - struct pci_pool *pool; + struct dma_pool *pool; }; struct ixgbe_fcoe { - struct pci_pool **pool; + struct dma_pool **pool; atomic_t refcnt; spinlock_t lock; struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; -- cgit v1.2.3 From 81faddefc7da7410059c036d8a5cea442c929d0a Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:32:37 +0000 Subject: ixgbe: Cleanup configuration of FCoE registers This change makes it so we always use the FCoE redirection table. We just set all 8 entries to the same value in the case of only having one queue for FCoE. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 58 ++++++++++++++------------- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 + 2 files changed, 32 insertions(+), 27 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 9b0909ffd7a6..a994570eb906 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -618,6 +618,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) struct ixgbe_fcoe *fcoe = &adapter->fcoe; struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; unsigned int cpu; + u32 etqf; if (!fcoe->pool) { spin_lock_init(&fcoe->lock); @@ -665,40 +666,43 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) } } - /* Enable L2 eth type filter for FCoE */ - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), - (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN)); - /* Enable L2 eth type filter for FIP */ - IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), - (ETH_P_FIP | IXGBE_ETQF_FILTER_EN)); - if (adapter->ring_feature[RING_F_FCOE].indices) { - /* Use multiple rx queues for FCoE by redirection table */ - for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { - fcoe_i = f->offset + i % f->indices; - fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; - fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); - } - IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); - IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); - } else { - /* Use single rx queue for FCoE */ - fcoe_i = f->offset; + /* Enable L2 EtherType filter for FCoE, necessary for FCoE Rx CRC */ + etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + etqf |= IXGBE_ETQF_POOL_ENABLE; + etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; + } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); + + /* Use one or more Rx queues for FCoE by redirection table */ + for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { + 
fcoe_i = f->offset + (i % f->indices); + fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; - IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0); - IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), - IXGBE_ETQS_QUEUE_EN | - (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); + IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); } - /* send FIP frames to the first FCoE queue */ - fcoe_i = f->offset; - fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); + + /* Enable L2 EtherType filter for FIP */ + etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + etqf |= IXGBE_ETQF_POOL_ENABLE; + etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT; + } + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); + + /* Send FIP frames to the first FCoE queue */ + fcoe_q = adapter->rx_ring[f->offset]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), IXGBE_ETQS_QUEUE_EN | (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); - IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO | + /* Configure FCoE Rx control */ + IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, + IXGBE_FCRXCTRL_FCCRCBO | (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); + return; out_pcpu_noddp_extra_buff_alloc_fail: free_percpu(fcoe->pcpu_noddp); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 7416d22ec227..482a33b0eb46 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -1452,6 +1452,7 @@ enum { #define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ #define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ #define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ +#define IXGBE_ETQF_POOL_SHIFT 20 #define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ #define IXGBE_ETQS_RX_QUEUE_SHIFT 16 -- cgit v1.2.3 From 5a1ee2704bff078bd58abde38266caa10fbcd714 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 17:14:28 +0000 Subject: ixgbe: Merge all FCoE percpu values into a single structure This change merges the 2 statistics values for noddp and noddp_ext_buff and the dma_pool into a single structure that can be allocated per CPU. The advantages to this are several fold. First we only need to do one alloc_percpu call now instead of 3, so that means less overhead for handling memory allocation failures. Secondly in the case of ixgbe_fcoe_ddp_setup we only need to call get_cpu once which makes things a bit cleaner since we can drop a put_cpu() from the exception path. 
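A short sketch of that consolidation, assuming a simplified per-CPU aggregate (struct example_ddp_pool and the helpers below are hypothetical stand-ins mirroring the ixgbe_fcoe_ddp_pool type introduced later in this patch):

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/dmapool.h>

struct example_ddp_pool {
	struct dma_pool *pool;
	u64 noddp;
	u64 noddp_ext_buff;
};

/* one alloc_percpu() replaces three separate per-CPU allocations */
static struct example_ddp_pool __percpu *example_ddp_alloc(void)
{
	return alloc_percpu(struct example_ddp_pool);
}

/* a single get_cpu()/per_cpu_ptr() lookup reaches the pool and both counters */
static void example_ddp_account(struct example_ddp_pool __percpu *pcpu)
{
	struct example_ddp_pool *dp = per_cpu_ptr(pcpu, get_cpu());

	if (!dp->pool)
		dp->noddp++;	/* count a DDP setup that had to be skipped */
	put_cpu();
}
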
Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 138 +++++++++++++------------- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 11 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 23 ++--- 3 files changed, 86 insertions(+), 86 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index a994570eb906..e7c463c7b6a1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -134,6 +134,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, struct ixgbe_hw *hw; struct ixgbe_fcoe *fcoe; struct ixgbe_fcoe_ddp *ddp; + struct ixgbe_fcoe_ddp_pool *ddp_pool; struct scatterlist *sg; unsigned int i, j, dmacount; unsigned int len; @@ -144,8 +145,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, unsigned int thislen = 0; u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; dma_addr_t addr = 0; - struct dma_pool *pool; - unsigned int cpu; if (!netdev || !sgl) return 0; @@ -162,11 +161,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, return 0; fcoe = &adapter->fcoe; - if (!fcoe->pool) { - e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); - return 0; - } - ddp = &fcoe->ddp[xid]; if (ddp->sgl) { e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", @@ -175,22 +169,32 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, } ixgbe_fcoe_clear_ddp(ddp); + + if (!fcoe->ddp_pool) { + e_warn(drv, "No ddp_pool resources allocated\n"); + return 0; + } + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); + if (!ddp_pool->pool) { + e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); + goto out_noddp; + } + /* setup dma from scsi command sgl */ dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); if (dmacount == 0) { e_err(drv, "xid 0x%x DMA map error\n", xid); - return 0; + goto out_noddp; } /* alloc the udl from per cpu ddp pool */ - cpu = get_cpu(); - pool = *per_cpu_ptr(fcoe->pool, cpu); - ddp->udl = dma_pool_alloc(pool, GFP_ATOMIC, &ddp->udp); + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp); if (!ddp->udl) { e_err(drv, "failed allocated ddp context\n"); goto out_noddp_unmap; } - ddp->pool = pool; + ddp->pool = ddp_pool->pool; ddp->sgl = sgl; ddp->sgc = sgc; @@ -201,7 +205,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, while (len) { /* max number of buffers allowed in one DDP context */ if (j >= IXGBE_BUFFCNT_MAX) { - *per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1; + ddp_pool->noddp++; goto out_noddp_free; } @@ -241,7 +245,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, */ if (lastsize == bufflen) { if (j >= IXGBE_BUFFCNT_MAX) { - *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1; + ddp_pool->noddp_ext_buff++; goto out_noddp_free; } @@ -293,11 +297,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, return 1; out_noddp_free: - dma_pool_free(pool, ddp->udl, ddp->udp); + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); ixgbe_fcoe_clear_ddp(ddp); out_noddp_unmap: dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); +out_noddp: put_cpu(); return 0; } @@ -563,44 +568,63 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, return 0; } +static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) +{ + struct ixgbe_fcoe_ddp_pool *ddp_pool; + + ddp_pool = 
per_cpu_ptr(fcoe->ddp_pool, cpu); + if (ddp_pool->pool) + dma_pool_destroy(ddp_pool->pool); + ddp_pool->pool = NULL; +} + static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe) { unsigned int cpu; - struct dma_pool **pool; - for_each_possible_cpu(cpu) { - pool = per_cpu_ptr(fcoe->pool, cpu); - if (*pool) - dma_pool_destroy(*pool); - } - free_percpu(fcoe->pool); - fcoe->pool = NULL; + for_each_possible_cpu(cpu) + ixgbe_fcoe_dma_pool_free(fcoe, cpu); + + free_percpu(fcoe->ddp_pool); + fcoe->ddp_pool = NULL; +} + +static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, + struct device *dev, + unsigned int cpu) +{ + struct ixgbe_fcoe_ddp_pool *ddp_pool; + struct dma_pool *pool; + char pool_name[32]; + + snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); + + pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, + IXGBE_FCPTR_ALIGN, PAGE_SIZE); + if (!pool) + return -ENOMEM; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + ddp_pool->pool = pool; + ddp_pool->noddp = 0; + ddp_pool->noddp_ext_buff = 0; + + return 0; } static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter) { struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct device *dev = &adapter->pdev->dev; unsigned int cpu; - struct dma_pool **pool; - char pool_name[32]; - fcoe->pool = alloc_percpu(struct dma_pool *); - if (!fcoe->pool) + fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); + if (!fcoe->ddp_pool) return; /* allocate pci pool for each cpu */ - for_each_possible_cpu(cpu) { - snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); - pool = per_cpu_ptr(fcoe->pool, cpu); - *pool = dma_pool_create(pool_name, &adapter->pdev->dev, - IXGBE_FCPTR_MAX, IXGBE_FCPTR_ALIGN, - PAGE_SIZE); - if (!*pool) { - e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); - ixgbe_fcoe_ddp_pools_free(fcoe); - return; - } - } + for_each_possible_cpu(cpu) + ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); } /** @@ -617,14 +641,13 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_fcoe *fcoe = &adapter->fcoe; struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - unsigned int cpu; u32 etqf; - if (!fcoe->pool) { + if (!fcoe->ddp_pool) { spin_lock_init(&fcoe->lock); ixgbe_fcoe_ddp_pools_alloc(adapter); - if (!fcoe->pool) { + if (!fcoe->ddp_pool) { e_err(drv, "failed to alloc percpu fcoe DDP pools\n"); return; } @@ -646,24 +669,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) e_err(drv, "failed to map extra DDP buffer\n"); goto out_extra_ddp_buffer; } - - /* Alloc per cpu mem to count the ddp alloc failure number */ - fcoe->pcpu_noddp = alloc_percpu(u64); - if (!fcoe->pcpu_noddp) { - e_err(drv, "failed to alloc noddp counter\n"); - goto out_pcpu_noddp_alloc_fail; - } - - fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64); - if (!fcoe->pcpu_noddp_ext_buff) { - e_err(drv, "failed to alloc noddp extra buff cnt\n"); - goto out_pcpu_noddp_extra_buff_alloc_fail; - } - - for_each_possible_cpu(cpu) { - *per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0; - *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0; - } } /* Enable L2 EtherType filter for FCoE, necessary for FCoE Rx CRC */ @@ -704,13 +709,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); return; -out_pcpu_noddp_extra_buff_alloc_fail: - free_percpu(fcoe->pcpu_noddp); -out_pcpu_noddp_alloc_fail: - dma_unmap_single(&adapter->pdev->dev, - fcoe->extra_ddp_buffer_dma, - IXGBE_FCBUFF_MIN, - DMA_FROM_DEVICE); out_extra_ddp_buffer: kfree(fcoe->extra_ddp_buffer); 
out_ddp_pools: @@ -730,18 +728,18 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) int i; struct ixgbe_fcoe *fcoe = &adapter->fcoe; - if (!fcoe->pool) + if (!fcoe->ddp_pool) return; for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) ixgbe_fcoe_ddp_put(adapter->netdev, i); + dma_unmap_single(&adapter->pdev->dev, fcoe->extra_ddp_buffer_dma, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); - free_percpu(fcoe->pcpu_noddp); - free_percpu(fcoe->pcpu_noddp_ext_buff); kfree(fcoe->extra_ddp_buffer); + ixgbe_fcoe_ddp_pools_free(fcoe); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index 0ef231a4579f..5d028739fe3f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -65,16 +65,21 @@ struct ixgbe_fcoe_ddp { struct dma_pool *pool; }; +/* per cpu variables */ +struct ixgbe_fcoe_ddp_pool { + struct dma_pool *pool; + u64 noddp; + u64 noddp_ext_buff; +}; + struct ixgbe_fcoe { - struct dma_pool **pool; + struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool; atomic_t refcnt; spinlock_t lock; struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; unsigned char *extra_ddp_buffer; dma_addr_t extra_ddp_buffer_dma; unsigned long mode; - u64 __percpu *pcpu_noddp; - u64 __percpu *pcpu_noddp_ext_buff; #ifdef CONFIG_IXGBE_DCB u8 up; #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f110e8868bc6..c66625945534 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -5052,11 +5052,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; -#ifdef IXGBE_FCOE - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - unsigned int cpu; - u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0; -#endif /* IXGBE_FCOE */ if (test_bit(__IXGBE_DOWN, &adapter->state) || test_bit(__IXGBE_RESETTING, &adapter->state)) @@ -5187,17 +5182,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); /* Add up per cpu counters for total ddp aloc fail */ - if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) { + if (adapter->fcoe.ddp_pool) { + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct ixgbe_fcoe_ddp_pool *ddp_pool; + unsigned int cpu; + u64 noddp = 0, noddp_ext_buff = 0; for_each_possible_cpu(cpu) { - fcoe_noddp_counts_sum += - *per_cpu_ptr(fcoe->pcpu_noddp, cpu); - fcoe_noddp_ext_buff_counts_sum += - *per_cpu_ptr(fcoe-> - pcpu_noddp_ext_buff, cpu); + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); + noddp += ddp_pool->noddp; + noddp_ext_buff += ddp_pool->noddp_ext_buff; } + hwstats->fcoe_noddp = noddp; + hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; } - hwstats->fcoe_noddp = fcoe_noddp_counts_sum; - hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum; #endif /* IXGBE_FCOE */ break; default: -- cgit v1.2.3 From 7c8ae65a6248518b2775a03129424a7e08fd058a Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:32:47 +0000 Subject: ixgbe: Make FCoE allocation and configuration closer to how rings work This patch changes the behavior of the FCoE configuration so that it is much closer to how the main body of the ixgbe driver works for ring allocation. The first piece is the ixgbe_fcoe_ddp_enable/disable calls. 
These allocate the percpu values and if successful set the fcoe_ddp_xid value indicating that we can support DDP. The next piece is the ixgbe_setup/free_ddp_resources calls. These are called on open/close and will allocate and free the DMA pools. Finally ixgbe_configure_fcoe is now just register configuration. It can go through and enable the registers for the FCoE redirection offload, and FIP configuration without any interference from the DDP pool allocation. The net result of all this is two fold. First it adds a certain amount of exception handling. So for example if ixgbe_setup_fcoe_resources fails we will actually generate an error in open and refuse to bring up the interface. Secondly it provides a much more graceful failure case than the previous model which would skip setting up the registers for FCoE on failure to allocate DDP resources leaving no Rx functionality enabled instead of just disabling DDP. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 3 +- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 228 ++++++++++++++------------ drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 43 +++-- 4 files changed, 154 insertions(+), 122 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index c2365005b545..5a286adc65c0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -691,7 +691,6 @@ extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, u8 *hdr_len); -extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter); extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb); @@ -700,6 +699,8 @@ extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, struct scatterlist *sgl, unsigned int sgc); extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); +extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); +extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter); extern int ixgbe_fcoe_enable(struct net_device *netdev); extern int ixgbe_fcoe_disable(struct net_device *netdev); #ifdef CONFIG_IXGBE_DCB diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index e7c463c7b6a1..e79ba3927344 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -578,17 +578,6 @@ static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) ddp_pool->pool = NULL; } -static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe) -{ - unsigned int cpu; - - for_each_possible_cpu(cpu) - ixgbe_fcoe_dma_pool_free(fcoe, cpu); - - free_percpu(fcoe->ddp_pool); - fcoe->ddp_pool = NULL; -} - static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, struct device *dev, unsigned int cpu) @@ -612,21 +601,6 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, return 0; } -static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter) -{ - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - struct device *dev = &adapter->pdev->dev; - unsigned int cpu; - - fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); - if 
(!fcoe->ddp_pool) - return; - - /* allocate pci pool for each cpu */ - for_each_possible_cpu(cpu) - ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); -} - /** * ixgbe_configure_fcoe - configures registers for fcoe at start * @adapter: ptr to ixgbe adapter @@ -637,39 +611,14 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter) */ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) { - int i, fcoe_q, fcoe_i; + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; + int i, fcoe_q, fcoe_i; u32 etqf; - if (!fcoe->ddp_pool) { - spin_lock_init(&fcoe->lock); - - ixgbe_fcoe_ddp_pools_alloc(adapter); - if (!fcoe->ddp_pool) { - e_err(drv, "failed to alloc percpu fcoe DDP pools\n"); - return; - } - - /* Extra buffer to be shared by all DDPs for HW work around */ - fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); - if (fcoe->extra_ddp_buffer == NULL) { - e_err(drv, "failed to allocated extra DDP buffer\n"); - goto out_ddp_pools; - } - - fcoe->extra_ddp_buffer_dma = - dma_map_single(&adapter->pdev->dev, - fcoe->extra_ddp_buffer, - IXGBE_FCBUFF_MIN, - DMA_FROM_DEVICE); - if (dma_mapping_error(&adapter->pdev->dev, - fcoe->extra_ddp_buffer_dma)) { - e_err(drv, "failed to map extra DDP buffer\n"); - goto out_extra_ddp_buffer; - } - } + /* leave registers unconfigued if FCoE is disabled */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return; /* Enable L2 EtherType filter for FCoE, necessary for FCoE Rx CRC */ etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; @@ -682,7 +631,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) /* Use one or more Rx queues for FCoE by redirection table */ for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { - fcoe_i = f->offset + (i % f->indices); + fcoe_i = fcoe->offset + (i % fcoe->indices); fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); @@ -698,7 +647,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf); /* Send FIP frames to the first FCoE queue */ - fcoe_q = adapter->rx_ring[f->offset]->reg_idx; + fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx; IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP), IXGBE_ETQS_QUEUE_EN | (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT)); @@ -707,40 +656,122 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO | (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT)); - - return; -out_extra_ddp_buffer: - kfree(fcoe->extra_ddp_buffer); -out_ddp_pools: - ixgbe_fcoe_ddp_pools_free(fcoe); } /** - * ixgbe_cleanup_fcoe - release all fcoe ddp context resources + * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources * @adapter : ixgbe adapter * * Cleans up outstanding ddp context resources * * Returns : none */ -void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) +void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) { - int i; struct ixgbe_fcoe *fcoe = &adapter->fcoe; + int cpu, i; + /* do nothing if no DDP pools were allocated */ if (!fcoe->ddp_pool) return; for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) ixgbe_fcoe_ddp_put(adapter->netdev, i); + for_each_possible_cpu(cpu) + ixgbe_fcoe_dma_pool_free(fcoe, cpu); + dma_unmap_single(&adapter->pdev->dev, fcoe->extra_ddp_buffer_dma, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); 
kfree(fcoe->extra_ddp_buffer); - ixgbe_fcoe_ddp_pools_free(fcoe); + fcoe->extra_ddp_buffer = NULL; + fcoe->extra_ddp_buffer_dma = 0; +} + +/** + * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources + * @adapter: ixgbe adapter + * + * Sets up ddp context resouces + * + * Returns : 0 indicates success or -EINVAL on failure + */ +int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct device *dev = &adapter->pdev->dev; + void *buffer; + dma_addr_t dma; + unsigned int cpu; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return 0; + + /* Extra buffer to be shared by all DDPs for HW work around */ + buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); + if (!buffer) { + e_err(drv, "failed to allocate extra DDP buffer\n"); + return -ENOMEM; + } + + dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma)) { + e_err(drv, "failed to map extra DDP buffer\n"); + kfree(buffer); + return -ENOMEM; + } + + fcoe->extra_ddp_buffer = buffer; + fcoe->extra_ddp_buffer_dma = dma; + + /* allocate pci pool for each cpu */ + for_each_possible_cpu(cpu) { + int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu); + if (!err) + continue; + + e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu); + ixgbe_free_fcoe_ddp_resources(adapter); + return -ENOMEM; + } + + return 0; +} + +static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) + return -EINVAL; + + fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool); + + if (!fcoe->ddp_pool) { + e_err(drv, "failed to allocate percpu DDP resources\n"); + return -ENOMEM; + } + + adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + + return 0; +} + +static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) +{ + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + + adapter->netdev->fcoe_ddp_xid = 0; + + if (!fcoe->ddp_pool) + return; + + free_percpu(fcoe->ddp_pool); + fcoe->ddp_pool = NULL; } /** @@ -753,40 +784,37 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) */ int ixgbe_fcoe_enable(struct net_device *netdev) { - int rc = -EINVAL; struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_fcoe *fcoe = &adapter->fcoe; + atomic_inc(&fcoe->refcnt); if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) - goto out_enable; + return -EINVAL; - atomic_inc(&fcoe->refcnt); if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - goto out_enable; + return -EINVAL; e_info(drv, "Enabling FCoE offload features.\n"); if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); - ixgbe_clear_interrupt_scheme(adapter); + /* Allocate per CPU memory to track DDP pools */ + ixgbe_fcoe_ddp_enable(adapter); + /* enable FCoE and notify stack */ adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE; - netdev->features |= NETIF_F_FCOE_CRC; - netdev->features |= NETIF_F_FSO; - netdev->features |= NETIF_F_FCOE_MTU; - netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + netdev->features |= NETIF_F_FSO | NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU; + netdev_features_change(netdev); + /* release existing queues and reallocate them */ + ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); - netdev_features_change(netdev); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); - rc = 0; -out_enable: - return rc; + return 0; } /** @@ -799,41 +827,37 @@ 
out_enable: */ int ixgbe_fcoe_disable(struct net_device *netdev) { - int rc = -EINVAL; struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE)) - goto out_disable; + if (!atomic_dec_and_test(&adapter->fcoe.refcnt)) + return -EINVAL; if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) - goto out_disable; - - if (!atomic_dec_and_test(&fcoe->refcnt)) - goto out_disable; + return -EINVAL; e_info(drv, "Disabling FCoE offload features.\n"); - netdev->features &= ~NETIF_F_FCOE_CRC; - netdev->features &= ~NETIF_F_FSO; - netdev->features &= ~NETIF_F_FCOE_MTU; - netdev->fcoe_ddp_xid = 0; - netdev_features_change(netdev); - if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); - ixgbe_clear_interrupt_scheme(adapter); + /* Free per CPU memory to track DDP pools */ + ixgbe_fcoe_ddp_disable(adapter); + + /* disable FCoE and notify stack */ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - adapter->ring_feature[RING_F_FCOE].indices = 0; - ixgbe_cleanup_fcoe(adapter); + netdev->features &= ~(NETIF_F_FCOE_CRC | + NETIF_F_FSO | + NETIF_F_FCOE_MTU); + + netdev_features_change(netdev); + + /* release existing queues and reallocate them */ + ixgbe_clear_interrupt_scheme(adapter); ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); - rc = 0; -out_disable: - return rc; + return 0; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index 5d028739fe3f..bf724da99375 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -77,7 +77,7 @@ struct ixgbe_fcoe { atomic_t refcnt; spinlock_t lock; struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; - unsigned char *extra_ddp_buffer; + void *extra_ddp_buffer; dma_addr_t extra_ddp_buffer_dma; unsigned long mode; #ifdef CONFIG_IXGBE_DCB diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c66625945534..e006c05580ec 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3807,12 +3807,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) ixgbe_set_rx_mode(adapter->netdev); ixgbe_restore_vlan(adapter); -#ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - ixgbe_configure_fcoe(adapter); - -#endif /* IXGBE_FCOE */ - switch (hw->mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: @@ -3842,6 +3836,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) ixgbe_configure_virtualization(adapter); +#ifdef IXGBE_FCOE + /* configure FCoE L2 filters, redirection table, and Rx control */ + ixgbe_configure_fcoe(adapter); + +#endif /* IXGBE_FCOE */ ixgbe_configure_tx(adapter); ixgbe_configure_rx(adapter); } @@ -4434,6 +4433,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) break; } +#ifdef IXGBE_FCOE + /* FCoE support exists, always init the FCoE lock */ + spin_lock_init(&adapter->fcoe.lock); + +#endif /* n-tuple support exists, always init our spinlock */ spin_lock_init(&adapter->fdir_perfect_lock); @@ -4662,7 +4666,11 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) goto err_setup_rx; } - return 0; +#ifdef IXGBE_FCOE + err = ixgbe_setup_fcoe_ddp_resources(adapter); + if (!err) +#endif + return 0; err_setup_rx: /* rewind the index freeing the rings as we go */ while (i--) @@ -4741,6 +4749,10 @@ static void 
ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) { int i; +#ifdef IXGBE_FCOE + ixgbe_free_fcoe_ddp_resources(adapter); + +#endif for (i = 0; i < adapter->num_rx_queues; i++) if (adapter->rx_ring[i]->desc) ixgbe_free_rx_resources(adapter->rx_ring[i]); @@ -7235,11 +7247,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; } - } - if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { - netdev->vlan_features |= NETIF_F_FCOE_CRC; - netdev->vlan_features |= NETIF_F_FSO; - netdev->vlan_features |= NETIF_F_FCOE_MTU; + + adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE; + + netdev->vlan_features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC | + NETIF_F_FCOE_MTU; } #endif /* IXGBE_FCOE */ if (pci_using_dac) { @@ -7436,12 +7449,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) ixgbe_sysfs_exit(adapter); #endif /* CONFIG_IXGBE_HWMON */ -#ifdef IXGBE_FCOE - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - ixgbe_cleanup_fcoe(adapter); - -#endif /* IXGBE_FCOE */ - /* remove the added san mac */ ixgbe_del_sanmac_netdev(netdev); -- cgit v1.2.3 From 7fa7c9dcadcff800a897232204e2128e92dd44cd Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:32:52 +0000 Subject: ixgbe: Correctly set SAN MAC RAR pool to default pool of PF This change corrects an issue in which an FCoE enabled adapter was always setting the FCoE SAN MAC MPSAR register to 0x1. This results in the first VF being assigned the SAN MAC address in the case of SR-IOV and as such is incorrect. To resolve this I am adding a new function that will update the SAN MAC pool address after reset. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 4 ++++ drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 25 +++++++++++++++++++++++++ drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 13 ++++++++++--- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 2 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 4 ++++ 6 files changed, 46 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index e7dddfd97cb9..50fc137501da 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -1025,6 +1025,9 @@ mac_reset_top: hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, hw->mac.san_addr, 0, IXGBE_RAH_AV); + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } @@ -2106,6 +2109,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .set_rar = &ixgbe_set_rar_generic, .clear_rar = &ixgbe_clear_rar_generic, .set_vmdq = &ixgbe_set_vmdq_generic, + .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, .clear_vmdq = &ixgbe_clear_vmdq_generic, .init_rx_addrs = &ixgbe_init_rx_addrs_generic, .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index bb7fde45c057..bbe9d45c8436 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -2847,6 +2847,31 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 
return 0; } +/** + * This function should only be involved in the IOV mode. + * In IOV mode, Default pool is next pool after the number of + * VFs advertized and not 0. + * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index] + * + * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) +{ + u32 rar = hw->mac.san_mac_rar_index; + + if (vmdq < 32) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + } else { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); + } + + return 0; +} + /** * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array * @hw: pointer to hardware structure diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 6222fdb3d3f1..d813d1188c36 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -85,6 +85,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index e006c05580ec..aa0155848d39 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4119,6 +4119,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) /* reprogram the RAR[0] in case user changed it. 
*/ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV); + + /* update SAN MAC vmdq pool selection */ + if (hw->mac.san_mac_rar_index) + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); } /** @@ -6509,12 +6513,15 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev) { int err = 0; struct ixgbe_adapter *adapter = netdev_priv(dev); - struct ixgbe_mac_info *mac = &adapter->hw.mac; + struct ixgbe_hw *hw = &adapter->hw; - if (is_valid_ether_addr(mac->san_addr)) { + if (is_valid_ether_addr(hw->mac.san_addr)) { rtnl_lock(); - err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN); + err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); rtnl_unlock(); + + /* update SAN MAC vmdq pool selection */ + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); } return err; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 482a33b0eb46..d26e07a8140f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2847,6 +2847,7 @@ struct ixgbe_mac_operations { s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); s32 (*clear_rar)(struct ixgbe_hw *, u32); s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32); s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); s32 (*init_rx_addrs)(struct ixgbe_hw *); s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *); @@ -2922,6 +2923,7 @@ struct ixgbe_mac_info { bool orig_link_settings_stored; bool autotry_restart; u8 flags; + u8 san_mac_rar_index; struct ixgbe_thermal_sensor_data thermal_sensor_data; }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index f90ec078ece2..de4da5219b71 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -156,6 +156,9 @@ mac_reset_top: hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, hw->mac.san_addr, 0, IXGBE_RAH_AV); + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; } @@ -832,6 +835,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = { .set_rar = &ixgbe_set_rar_generic, .clear_rar = &ixgbe_clear_rar_generic, .set_vmdq = &ixgbe_set_vmdq_generic, + .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, .clear_vmdq = &ixgbe_clear_vmdq_generic, .init_rx_addrs = &ixgbe_init_rx_addrs_generic, .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, -- cgit v1.2.3 From ef89e0a24ea97fc9209074a19cf60e63bba18c22 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 5 May 2012 05:32:58 +0000 Subject: ixgbe: Only enable anti-spoof on VF pools The current logic is enabling anti-spoof on all pools and then clearing anti-spoof on just the first PF pool. The correct approach is to only set anti-spoof on the VF pools and to leave all of the PF pools unchecked. This allows for items such as FCoE to use adjacent pools within the PF for transmit and receive queues without the traffic being blocked by this security feature. 
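To make the register layout explicit: the anti-spoof enables live in the PFVFSPOOF array, eight pools per register, and in SR-IOV mode the VF pools are the ones numbered below the PF's first pool. A condensed sketch of the logic the hunk below implements (the helper and parameter names here are illustrative; the macros are the ones used in the patch):

static void sketch_vf_only_antispoof(struct ixgbe_hw *hw, u32 spoof_bits, int pf)
{
	int pf_reg = pf / 8;	/* PFVFSPOOF register holding the PF pool's bit */
	int pf_bit = pf % 8;	/* position of the PF pool within that register */
	int j;

	/* registers entirely below the PF pool cover VF pools only */
	for (j = 0; j < pf_reg; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), spoof_bits);

	/* boundary register: keep only the bits below the PF pool */
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_reg),
			spoof_bits & ((1 << pf_bit) - 1));

	/* pools at or above the PF pool belong to the PF: leave them clear */
	for (j = pf_reg + 1; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
		IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}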
Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index bbe9d45c8436..90e41db3cb69 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3225,20 +3225,22 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) * PFVFSPOOF register array is size 8 with 8 bits assigned to * MAC anti-spoof enables in each register array element. */ - for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) + for (j = 0; j < pf_target_reg; j++) IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); - /* If not enabling anti-spoofing then done */ - if (!enable) - return; - /* * The PF should be allowed to spoof so that it can support - * emulation mode NICs. Reset the bit assigned to the PF + * emulation mode NICs. Do not set the bits assigned to the PF + */ + pfvfspoof &= (1 << pf_target_shift) - 1; + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); + + /* + * Remaining pools belong to the PF so they do not need to have + * anti-spoofing enabled. */ - pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg)); - pfvfspoof ^= (1 << pf_target_shift); - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof); + for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0); } /** -- cgit v1.2.3 From a58915c7ecba89bef0914664ecf87c2156c68630 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 25 May 2012 06:38:18 +0000 Subject: ixgbe: Enable FCoE FSO and CRC offloads based on CAPABLE instead of ENABLED flag Instead of only setting the FCOE segmentation offload and CRC offload flags if we enable FCoE, we could just set them always since there are no modifications needed to the hardware or adapter FCoE structure in order to use these features. The advantage to this is that if FCoE enablement fails, for example because SR-IOV was enabled on 82599, we will still have use of the FCoE segmentation offload and Tx/Rx CRC offloads which should still help to improve the FCoE performance. 
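The resulting feature-flag split, in outline (a simplified sketch of the policy described above; the exact hunks follow in the diff):

/* Set once at probe time whenever the MAC is FCoE capable. These
 * offloads need no extra driver resources, so they survive a failed
 * FCoE enablement (e.g. SR-IOV active on 82599).
 */
netdev->features |= NETIF_F_FSO | NETIF_F_FCOE_CRC;

/* Still toggled only by ixgbe_fcoe_enable()/ixgbe_fcoe_disable(),
 * since it depends on the FCoE queues and DDP context being set up.
 */
netdev->features |= NETIF_F_FCOE_MTU;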
Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 16 +++++++++------- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 5 ++++- 2 files changed, 13 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index e79ba3927344..ae73ef14fdf3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -616,11 +616,11 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) int i, fcoe_q, fcoe_i; u32 etqf; - /* leave registers unconfigued if FCoE is disabled */ - if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + /* Minimal functionality for FCoE requires at least CRC offloads */ + if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) return; - /* Enable L2 EtherType filter for FCoE, necessary for FCoE Rx CRC */ + /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */ etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN; if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { etqf |= IXGBE_ETQF_POOL_ENABLE; @@ -629,6 +629,10 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); + /* leave registers un-configured if FCoE is disabled */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return; + /* Use one or more Rx queues for FCoE by redirection table */ for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { fcoe_i = fcoe->offset + (i % fcoe->indices); @@ -804,7 +808,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev) /* enable FCoE and notify stack */ adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; - netdev->features |= NETIF_F_FSO | NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU; + netdev->features |= NETIF_F_FCOE_MTU; netdev_features_change(netdev); /* release existing queues and reallocate them */ @@ -844,9 +848,7 @@ int ixgbe_fcoe_disable(struct net_device *netdev) /* disable FCoE and notify stack */ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; - netdev->features &= ~(NETIF_F_FCOE_CRC | - NETIF_F_FSO | - NETIF_F_FCOE_MTU); + netdev->features &= ~NETIF_F_FCOE_MTU; netdev_features_change(netdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index aa0155848d39..c521007c54b3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6382,7 +6382,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, #ifdef IXGBE_FCOE /* setup tx offload for FCoE */ if ((protocol == __constant_htons(ETH_P_FCOE)) && - (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { + (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { tso = ixgbe_fso(tx_ring, first, &hdr_len); if (tso < 0) goto out_drop; @@ -7257,6 +7257,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE; + netdev->features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC; + netdev->vlan_features |= NETIF_F_FSO | NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU; -- cgit v1.2.3 From 61dc53341dd127372c71ee31b04ba0e8f70d3e8f Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Thu, 19 Jul 2012 21:02:08 +0000 Subject: ixgb: use PCI_VENDOR_ID_INTEL Use PCI_VENDOR_ID_INTEL from pci_ids.h instead of creating its own vendor ID #define. 
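The substitution is mechanical: with the driver-local defines gone, the device table simply uses the constants provided by pci_ids.h, roughly (sketch; the table name here is illustrative, the IDs are the ones ixgb already uses):

#include <linux/pci_ids.h>	/* PCI_VENDOR_ID_INTEL, PCI_VENDOR_ID_SUN */

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	/* required last entry */
	{0,}
};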
Signed-off-by: Jon Mason Cc: Jeff Kirsher Cc: Jesse Brandeburg Cc: Bruce Allan Cc: Carolyn Wyborny Cc: Don Skidmore Cc: Greg Rose Cc: Peter P Waskiewicz Jr Cc: Alex Duyck Cc: John Ronciak Acked-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgb/ixgb_hw.c | 5 +++-- drivers/net/ethernet/intel/ixgb/ixgb_ids.h | 5 ----- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 10 +++++----- 3 files changed, 8 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c index 99b69adb4a0f..bf9a220f71fb 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c @@ -32,6 +32,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include "ixgb_hw.h" #include "ixgb_ids.h" @@ -96,7 +97,7 @@ static u32 ixgb_mac_reset(struct ixgb_hw *hw) ASSERT(!(ctrl_reg & IXGB_CTRL0_RST)); #endif - if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) { + if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) { ctrl_reg = /* Enable interrupt from XFP and SerDes */ IXGB_CTRL1_GPI0_EN | IXGB_CTRL1_SDP6_DIR | @@ -271,7 +272,7 @@ ixgb_identify_phy(struct ixgb_hw *hw) } /* update phy type for sun specific board */ - if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) + if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) phy_type = ixgb_phy_type_bcm; return phy_type; diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h index 2a58847f46e8..32c1b302d791 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h +++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h @@ -33,11 +33,6 @@ ** The Device and Vendor IDs for 10 Gigabit MACs **********************************************************************/ -#define INTEL_VENDOR_ID 0x8086 -#define INTEL_SUBVENDOR_ID 0x8086 -#define SUN_VENDOR_ID 0x108E -#define SUN_SUBVENDOR_ID 0x108E - #define IXGB_DEVICE_ID_82597EX 0x1048 #define IXGB_DEVICE_ID_82597EX_SR 0x1A48 #define IXGB_DEVICE_ID_82597EX_LR 0x1B48 diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index aab649f8c5f0..d05fc95befc5 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -54,13 +54,13 @@ MODULE_PARM_DESC(copybreak, * Class, Class Mask, private data (not used) } */ static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = { - {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, + {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4, + {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, + {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR, + {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* required last entry */ @@ -195,7 +195,7 @@ ixgb_irq_enable(struct ixgb_adapter *adapter) { u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW | IXGB_INT_LSC; - if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID) + if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN) val |= IXGB_INT_GPI0; IXGB_WRITE_REG(&adapter->hw, IMS, val); IXGB_WRITE_FLUSH(&adapter->hw); -- cgit v1.2.3 From 36e90319f30e8d1f22cca9f3eb7d593b833ada17 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Thu, 19 Jul 2012 21:02:09 +0000 Subject: ixgbe: use PCI_VENDOR_ID_INTEL Use PCI_VENDOR_ID_INTEL 
from pci_ids.h instead of creating its own vendor ID #define. Signed-off-by: Jon Mason Cc: Jeff Kirsher Cc: Jesse Brandeburg Cc: Bruce Allan Cc: Carolyn Wyborny Cc: Don Skidmore Cc: Greg Rose Cc: Peter P Waskiewicz Jr Cc: Alex Duyck Cc: John Ronciak Acked-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 8 ++++---- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 3 --- 3 files changed, 6 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index a3dc9657f572..2e4523c7ab9e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7551,11 +7551,11 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, } /* Find the pci device of the offending VF */ - vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL); + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); while (vfdev) { if (vfdev->devfn == (req_id & 0xFF)) break; - vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, vfdev); } /* diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d2854434ad12..089468224e7e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -65,13 +65,13 @@ static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter) } vf_devfn = pdev->devfn + 0x80; - pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL); + pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); while (pvfdev) { if (pvfdev->devfn == vf_devfn && (pvfdev->bus->number >= pdev->bus->number)) vfs_found++; vf_devfn += 2; - pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, + pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, pvfdev); } @@ -518,11 +518,11 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) break; } - pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL); + pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); while (pvfdev) { if (pvfdev->devfn == thisvf_devfn) break; - pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, + pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, pvfdev); } if (pvfdev) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 7416d22ec227..a5ceea4d329a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -32,9 +32,6 @@ #include #include -/* Vendor ID */ -#define IXGBE_INTEL_VENDOR_ID 0x8086 - /* Device IDs */ #define IXGBE_DEV_ID_82598 0x10B6 #define IXGBE_DEV_ID_82598_BX 0x1508 -- cgit v1.2.3 From 99d744875d01d57d832b8dbfc36d9a1990d503b8 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 9 May 2012 08:09:25 +0000 Subject: ixgbe: Drop probe_vf and merge functionality into ixgbe_enable_sriov This is meant to fix a bug in which we were not checking for pre-existing VFs if we were not setting the max_vfs value at driver load. What happens now is that we always call ixgbe_enable_sriov and this checks for pre-existing VFs ore requested VFs prior to deciding on no SR-IOV. 
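With probe_vf gone, the probe path calls ixgbe_enable_sriov() unconditionally (under CONFIG_PCI_IOV) and the decision moves into the function itself, roughly (sketch of the check the hunks below rely on):

int pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);

/* nothing already enabled and nothing requested via max_vfs: no SR-IOV */
if (!pre_existing_vfs && !adapter->num_vfs)
	return;

/* otherwise reuse the pre-existing VFs, or clamp the request to 63
 * and call pci_enable_sriov() as before
 */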
Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 3 +-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 33 ++++++++------------------ drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 24 +++++++++++++------ 3 files changed, 28 insertions(+), 32 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 38d1b65777ad..3ff5aa801a6b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1061,8 +1061,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) } adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->atr_sample_rate = 0; - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - ixgbe_disable_sriov(adapter); + ixgbe_disable_sriov(adapter); adapter->ring_feature[RING_F_RSS].limit = 1; ixgbe_set_num_queues(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f4e53c1a7338..24f2b455a23f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -4490,6 +4490,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) hw->fc.send_xon = true; hw->fc.disable_fc_autoneg = false; +#ifdef CONFIG_PCI_IOV + /* assign number of SR-IOV VFs */ + if (hw->mac.type != ixgbe_mac_82598EB) + adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs; + +#endif /* enable itr by default in dynamic mode */ adapter->rx_itr_setting = 1; adapter->tx_itr_setting = 1; @@ -6942,26 +6948,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_fdb_dump = ixgbe_ndo_fdb_dump, }; -static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, - const struct ixgbe_info *ii) -{ -#ifdef CONFIG_PCI_IOV - struct ixgbe_hw *hw = &adapter->hw; - - if (hw->mac.type == ixgbe_mac_82598EB) - return; - - /* The 82599 supports up to 64 VFs per physical function - * but this implementation limits allocation to 63 so that - * basic networking resources are still available to the - * physical function. If the user requests greater thn - * 63 VFs then it is an error - reset to default of zero. - */ - adapter->num_vfs = (max_vfs > 63) ? 
0 : max_vfs; - ixgbe_enable_sriov(adapter, ii); -#endif /* CONFIG_PCI_IOV */ -} - /** * ixgbe_wol_supported - Check whether device supports WoL * @hw: hw specific details @@ -7206,8 +7192,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_sw_init; } - ixgbe_probe_vf(adapter, ii); +#ifdef CONFIG_PCI_IOV + ixgbe_enable_sriov(adapter, ii); +#endif netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | @@ -7411,8 +7399,7 @@ err_register: ixgbe_release_hw_control(adapter); ixgbe_clear_interrupt_scheme(adapter); err_sw_init: - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - ixgbe_disable_sriov(adapter); + ixgbe_disable_sriov(adapter); adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; iounmap(hw->hw_addr); err_ioremap: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index a825d4808cd2..593fdd54f9ab 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -82,7 +82,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, const struct ixgbe_info *ii) { struct ixgbe_hw *hw = &adapter->hw; - int err = 0; int num_vf_macvlans, i; struct vf_macvlans *mv_list; int pre_existing_vfs = 0; @@ -106,10 +105,21 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, "enabled for this device - Please reload all " "VF drivers to avoid spoofed packet errors\n"); } else { + int err; + /* + * The 82599 supports up to 64 VFs per physical function + * but this implementation limits allocation to 63 so that + * basic networking resources are still available to the + * physical function. If the user requests greater thn + * 63 VFs then it is an error - reset to default of zero. + */ + adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63); + err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); if (err) { e_err(probe, "Failed to enable PCI sriov: %d\n", err); - goto err_novfs; + adapter->num_vfs = 0; + return; } } @@ -193,11 +203,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, /* Oh oh */ e_err(probe, "Unable to allocate memory for VF Data Storage - " "SRIOV disabled\n"); - pci_disable_sriov(adapter->pdev); - -err_novfs: - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; - adapter->num_vfs = 0; + ixgbe_disable_sriov(adapter); } #endif /* #ifdef CONFIG_PCI_IOV */ @@ -219,6 +225,10 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) kfree(adapter->mv_list); adapter->mv_list = NULL; + /* if SR-IOV is already disabled then there is nothing to do */ + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return; + #ifdef CONFIG_PCI_IOV /* disable iov and allow time for transactions to clear */ pci_disable_sriov(adapter->pdev); -- cgit v1.2.3 From 9297127b9cdd8d30c829ef5fd28b7cc0323a7bcd Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 23 May 2012 02:58:40 +0000 Subject: ixgbe: Change how we check for pre-existing and assigned VFs This patch does two things. First it drops the unnecessary work of searching for enabled VFs when we first bring up the adapter and instead just uses pci_num_vf to determine how many VFs are enabled on the adapter. The second thing it does is drop the use of vfdev from the vf_data_storage structure. Instead we just search the entire system for a VF that has us as it's PF, and then if that VF is assigned we indicate that the VFs are assigned. This allows us to still check for assigned VFs even if the vfinfo allocation has failed, or vfinfo has been freed. 
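Both simplifications in outline (sketch; the full hunks are below):

/* 1) Counting enabled VFs no longer walks the bus by devfn arithmetic: */
pre_existing_vfs = pci_num_vf(adapter->pdev);

/* 2) "Are any of our VFs assigned?" is answered from the PCI core's own
 *    state rather than a cached vfdev pointer in vfinfo:
 */
if (vfdev->is_virtfn && vfdev->physfn == pdev &&
    (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
	return true;	/* owned by this PF and currently assigned to a guest */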
Signed-off-by: Alexander Duyck Acked-by: Greg Rose Tested-by: Phil Schmitt Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1 - drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8 +- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 129 +++++++++---------------- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1 - 4 files changed, 45 insertions(+), 94 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 5a286adc65c0..eb5928228670 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -130,7 +130,6 @@ struct vf_data_storage { u16 tx_rate; u16 vlan_count; u8 spoofchk_enabled; - struct pci_dev *vfdev; }; struct vf_macvlans { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 24f2b455a23f..9c42679ad83e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7452,13 +7452,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - if (!(ixgbe_check_vf_assignment(adapter))) - ixgbe_disable_sriov(adapter); - else - e_dev_warn("Unloading driver while VFs are assigned " - "- VFs will not be deallocated\n"); - } + ixgbe_disable_sriov(adapter); ixgbe_clear_interrupt_scheme(adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 593fdd54f9ab..47b2ce740ec1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -44,40 +44,6 @@ #include "ixgbe_sriov.h" #ifdef CONFIG_PCI_IOV -static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter) -{ - struct pci_dev *pdev = adapter->pdev; - struct pci_dev *pvfdev; - u16 vf_devfn = 0; - int device_id; - int vfs_found = 0; - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - device_id = IXGBE_DEV_ID_82599_VF; - break; - case ixgbe_mac_X540: - device_id = IXGBE_DEV_ID_X540_VF; - break; - default: - device_id = 0; - break; - } - - vf_devfn = pdev->devfn + 0x80; - pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); - while (pvfdev) { - if (pvfdev->devfn == vf_devfn && - (pvfdev->bus->number >= pdev->bus->number)) - vfs_found++; - vf_devfn += 2; - pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, - device_id, pvfdev); - } - - return vfs_found; -} - void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, const struct ixgbe_info *ii) { @@ -86,7 +52,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, struct vf_macvlans *mv_list; int pre_existing_vfs = 0; - pre_existing_vfs = ixgbe_find_enabled_vfs(adapter); + pre_existing_vfs = pci_num_vf(adapter->pdev); if (!pre_existing_vfs && !adapter->num_vfs) return; @@ -205,14 +171,46 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, "SRIOV disabled\n"); ixgbe_disable_sriov(adapter); } -#endif /* #ifdef CONFIG_PCI_IOV */ +static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *vfdev; + int dev_id; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + dev_id = IXGBE_DEV_ID_82599_VF; + break; + case ixgbe_mac_X540: + dev_id = IXGBE_DEV_ID_X540_VF; + break; + default: + return false; + } + + /* loop through all the VFs to see if we own any that are assigned */ 
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL); + while (vfdev) { + /* if we don't own it we don't care */ + if (vfdev->is_virtfn && vfdev->physfn == pdev) { + /* if it is assigned we cannot release it */ + if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) + return true; + } + + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev); + } + + return false; +} + +#endif /* #ifdef CONFIG_PCI_IOV */ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 gpie; u32 vmdctl; - int i; /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; @@ -230,6 +228,15 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) return; #ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (ixgbe_vfs_are_assigned(adapter)) { + e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + return; + /* disable iov and allow time for transactions to clear */ pci_disable_sriov(adapter->pdev); #endif @@ -254,12 +261,6 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) /* take a breather then clean up driver data */ msleep(100); - /* Release reference to VF devices */ - for (i = 0; i < adapter->num_vfs; i++) { - if (adapter->vfinfo[i].vfdev) - pci_dev_put(adapter->vfinfo[i].vfdev); - } - adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; } @@ -493,28 +494,11 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, return 0; } -int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter) -{ -#ifdef CONFIG_PCI_IOV - int i; - for (i = 0; i < adapter->num_vfs; i++) { - if (adapter->vfinfo[i].vfdev->dev_flags & - PCI_DEV_FLAGS_ASSIGNED) - return true; - } -#endif - return false; -} - int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) { unsigned char vf_mac_addr[6]; struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); unsigned int vfn = (event_mask & 0x3f); - struct pci_dev *pvfdev; - unsigned int device_id; - u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) | - (pdev->devfn & 1); bool enable = ((event_mask & 0x10000000U) != 0); @@ -527,31 +511,6 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) * for it later. 
*/ memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); - - switch (adapter->hw.mac.type) { - case ixgbe_mac_82599EB: - device_id = IXGBE_DEV_ID_82599_VF; - break; - case ixgbe_mac_X540: - device_id = IXGBE_DEV_ID_X540_VF; - break; - default: - device_id = 0; - break; - } - - pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL); - while (pvfdev) { - if (pvfdev->devfn == thisvf_devfn) - break; - pvfdev = pci_get_device(PCI_VENDOR_ID_INTEL, - device_id, pvfdev); - } - if (pvfdev) - adapter->vfinfo[vfn].vfdev = pvfdev; - else - e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n", - thisvf_devfn); } return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 2ab38d5fda92..1be1d30e4e78 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -42,7 +42,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); void ixgbe_disable_sriov(struct ixgbe_adapter *adapter); -int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter); #ifdef CONFIG_PCI_IOV void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, const struct ixgbe_info *ii); -- cgit v1.2.3 From 1c55ed768bb8b6aee0e1c88e963a429a3c14be07 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:33:06 +0000 Subject: ixgbevf: Add lock around mailbox ops to prevent simultaneous access This change adds a spinlock around the mailbox accesses to prevent simultaneous access to the mailboxes. Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Tested-by: Sibai Li Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 2 ++ drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 41 +++++++++++++++++++++-- 2 files changed, 41 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index e167d1bb6dea..66858b529f13 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -249,6 +249,8 @@ struct ixgbevf_adapter { bool link_up; struct work_struct watchdog_task; + + spinlock_t mbx_lock; }; enum ixbgevf_state_t { diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 2dc78d7e297a..7cb678d2d2a2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1120,9 +1120,14 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + spin_lock(&adapter->mbx_lock); + /* add VID to filter table */ if (hw->mac.ops.set_vfta) hw->mac.ops.set_vfta(hw, vid, 0, true); + + spin_unlock(&adapter->mbx_lock); + set_bit(vid, adapter->active_vlans); return 0; @@ -1133,9 +1138,14 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + spin_lock(&adapter->mbx_lock); + /* remove VID from filter table */ if (hw->mac.ops.set_vfta) hw->mac.ops.set_vfta(hw, vid, 0, false); + + spin_unlock(&adapter->mbx_lock); + clear_bit(vid, adapter->active_vlans); return 0; @@ -1190,11 +1200,15 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev) struct ixgbevf_adapter *adapter = netdev_priv(netdev); 
struct ixgbe_hw *hw = &adapter->hw; + spin_lock(&adapter->mbx_lock); + /* reprogram multicast list */ if (hw->mac.ops.update_mc_addr_list) hw->mac.ops.update_mc_addr_list(hw, netdev); ixgbevf_write_uc_addr_list(netdev); + + spin_unlock(&adapter->mbx_lock); } static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) @@ -1339,6 +1353,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) ixgbevf_configure_msix(adapter); + spin_lock(&adapter->mbx_lock); + if (hw->mac.ops.set_rar) { if (is_valid_ether_addr(hw->mac.addr)) hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); @@ -1350,6 +1366,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; hw->mbx.ops.write_posted(hw, msg, 2); + spin_unlock(&adapter->mbx_lock); + clear_bit(__IXGBEVF_DOWN, &adapter->state); ixgbevf_napi_enable_all(adapter); @@ -1562,11 +1580,15 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter) struct ixgbe_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; + spin_lock(&adapter->mbx_lock); + if (hw->mac.ops.reset_hw(hw)) hw_dbg(hw, "PF still resetting\n"); else hw->mac.ops.init_hw(hw); + spin_unlock(&adapter->mbx_lock); + if (is_valid_ether_addr(adapter->hw.mac.addr)) { memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); @@ -1893,6 +1915,9 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) adapter->netdev->addr_len); } + /* lock to protect mailbox accesses */ + spin_lock_init(&adapter->mbx_lock); + /* Enable dynamic interrupt throttling rates */ adapter->rx_itr_setting = 1; adapter->tx_itr_setting = 1; @@ -2032,8 +2057,16 @@ static void ixgbevf_watchdog_task(struct work_struct *work) * no LSC interrupt */ if (hw->mac.ops.check_link) { - if ((hw->mac.ops.check_link(hw, &link_speed, - &link_up, false)) != 0) { + s32 need_reset; + + spin_lock(&adapter->mbx_lock); + + need_reset = hw->mac.ops.check_link(hw, &link_speed, + &link_up, false); + + spin_unlock(&adapter->mbx_lock); + + if (need_reset) { adapter->link_up = link_up; adapter->link_speed = link_speed; netif_carrier_off(netdev); @@ -2813,9 +2846,13 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + spin_lock(&adapter->mbx_lock); + if (hw->mac.ops.set_rar) hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); + spin_unlock(&adapter->mbx_lock); + return 0; } -- cgit v1.2.3 From 9f19f31dd4903d9c6a7ce33740eadd2b6bdd8ce2 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 11 May 2012 08:33:32 +0000 Subject: ixgbevf: Add support for PCI error handling This change adds support for handling IO errors and slot resets. 
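For context, the PCI error-recovery core invokes these callbacks in sequence: error_detected (quiesce the device and request a reset), slot_reset (re-enable and re-initialize after the bus reset), then resume (restart traffic). The wiring itself is just an error-handler table hooked into the driver (sketch mirroring the handlers added below):

static struct pci_error_handlers ixgbevf_err_handler = {
	.error_detected	= ixgbevf_io_error_detected,
	.slot_reset	= ixgbevf_io_slot_reset,
	.resume		= ixgbevf_io_resume,
};

/* registered via the .err_handler field of struct pci_driver */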
Signed-off-by: Alexander Duyck Signed-off-by: Greg Rose Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 80 +++++++++++++++++++++++ 1 file changed, 80 insertions(+) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 7cb678d2d2a2..ccc801e99610 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3189,12 +3189,92 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +/** + * ixgbevf_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + ixgbevf_down(adapter); + + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * ixgbevf_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the ixgbevf_resume routine. + */ +static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + pci_set_master(pdev); + + ixgbevf_reset(adapter); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * ixgbevf_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the ixgbevf_resume routine. + */ +static void ixgbevf_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct ixgbevf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + ixgbevf_up(adapter); + + netif_device_attach(netdev); +} + +/* PCI Error Recovery (ERS) */ +static struct pci_error_handlers ixgbevf_err_handler = { + .error_detected = ixgbevf_io_error_detected, + .slot_reset = ixgbevf_io_slot_reset, + .resume = ixgbevf_io_resume, +}; + static struct pci_driver ixgbevf_driver = { .name = ixgbevf_driver_name, .id_table = ixgbevf_pci_tbl, .probe = ixgbevf_probe, .remove = __devexit_p(ixgbevf_remove), .shutdown = ixgbevf_shutdown, + .err_handler = &ixgbevf_err_handler }; /** -- cgit v1.2.3 From 39cb681b3bb4da17e74d48e553d1bb9a1b759aa5 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jun 2012 05:38:20 +0000 Subject: ixgbe: Fix handling of FDIR_HASH flag This change makes it so that we can use the atr_sample_rate to determine if we are capable of supporting ATR. 
The advantage to this approach is that it allows us to now determine the setting of the IXGBE_FLAG_FDIR_HASH_CAPABLE based on the queueing scheme, instead of the queueing scheme being based on the flag. Using this approach there are essentially 5 conditions that must be checked prior to trying to enable ATR: 1. Is SR-IOV disabled? 2. Are the number of TCs <= 1? 3. Is RSS queueing limit greater than 1? 4. Is atr_sample_rate set? 5. Is Flow Director perfect filtering disabled? If any of these conditions are enabled they should disable ATR filtering. Note that in the case of conditions 1 through 4 being met we will set things up for ATR queueing, however if test 5 fails we will still leave the queues allocated for use by perfect filters. The reason for this is to allow for us to switch back and forth between ntuple and ATR without needing to reallocate the descriptor rings. Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 22 +++++++----- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 48 ++++++++++++++++++--------- 2 files changed, 46 insertions(+), 24 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 3ff5aa801a6b..29a2a8528073 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -370,6 +370,9 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) adapter->ring_feature[RING_F_RSS].indices = 1; adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + adapter->num_rx_pools = vmdq_i; adapter->num_rx_queues_per_pool = tcs; @@ -450,6 +453,9 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) f->indices = rss_i; f->mask = rss_m; + /* disable ATR as it is not supported when multiple TCs are enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + #ifdef IXGBE_FCOE /* FCoE enabled queues require special configuration indexed * by feature specific indices and offset. Here we map FCoE @@ -606,16 +612,22 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) f->indices = rss_i; f->mask = IXGBE_RSS_16Q_MASK; + /* disable ATR by default, it will be configured below */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + /* * Use Flow Director in addition to RSS to ensure the best * distribution of flows across cores, even when an FDIR flow * isn't matched. */ - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + if (rss_i > 1 && adapter->atr_sample_rate) { f = &adapter->ring_feature[RING_F_FDIR]; f->indices = min_t(u16, num_online_cpus(), f->limit); rss_i = max_t(u16, rss_i, f->indices); + + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; } #ifdef IXGBE_FCOE @@ -1054,13 +1066,7 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) } adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { - e_err(probe, - "ATR is not supported while multiple " - "queues are disabled. 
Disabling Flow Director\n"); - } - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; - adapter->atr_sample_rate = 0; + ixgbe_disable_sriov(adapter); adapter->ring_feature[RING_F_RSS].limit = 1; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9c42679ad83e..7be35043b751 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2688,8 +2688,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, 32; /* PTHRESH = 32 */ /* reinitialize flowdirector state */ - if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && - adapter->atr_sample_rate) { + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { ring->atr_sample_rate = adapter->atr_sample_rate; ring->atr_count = 0; set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state); @@ -4419,7 +4418,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; /* Flow Director hash filters enabled */ - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->atr_sample_rate = 20; adapter->ring_feature[RING_F_FDIR].limit = IXGBE_MAX_FDIR_INDICES; @@ -6726,7 +6724,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) ixgbe_set_prio_tc_map(adapter); adapter->flags |= IXGBE_FLAG_DCB_ENABLED; - adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; if (adapter->hw.mac.type == ixgbe_mac_82598EB) { adapter->last_lfc_mode = adapter->hw.fc.requested_mode; @@ -6739,7 +6736,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; @@ -6808,20 +6804,40 @@ static int ixgbe_set_features(struct net_device *netdev, * Check if Flow Director n-tuple support was enabled or disabled. If * the state changed, we need to reset. 
*/ - if (!(features & NETIF_F_NTUPLE)) { - if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { - /* turn off Flow Director, set ATR and reset */ - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && - !(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - need_reset = true; - } - adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: /* turn off ATR, enable perfect filters and reset */ + if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) + need_reset = true; + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - need_reset = true; + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) + need_reset = true; + + adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + + /* We cannot enable ATR if SR-IOV is enabled */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + break; + + /* We cannot enable ATR if we have 2 or more traffic classes */ + if (netdev_get_num_tc(netdev) > 1) + break; + + /* We cannot enable ATR if RSS is disabled */ + if (adapter->ring_feature[RING_F_RSS].limit <= 1) + break; + + /* A sample rate of 0 indicates ATR disabled */ + if (!adapter->atr_sample_rate) + break; + + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + break; } if (features & NETIF_F_HW_VLAN_RX) -- cgit v1.2.3 From ce422606696f137e610fd0e677ec72ac33c17842 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Tue, 22 May 2012 02:17:49 +0000 Subject: ixgbevf: Fix namespace issue with ixgbe_write_eitr Make the function static to cleanup namespace. Signed-off-by: Greg Rose Tested-by: Phil Schmitt Tested-by: Sibai Li --- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 1 - drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 43 ++++++++++------------- 2 files changed, 19 insertions(+), 25 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 66858b529f13..98cadb0c4dab 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -286,7 +286,6 @@ extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *); extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter); -void ixgbevf_write_eitr(struct ixgbevf_q_vector *); extern int ethtool_ioctl(struct ifreq *ifr); extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index ccc801e99610..3f9841d619ad 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -540,6 +540,25 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) return 0; } +/** + * ixgbevf_write_eitr - write VTEITR register in hardware specific way + * @q_vector: structure containing interrupt and ring information + */ +static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) +{ + struct ixgbevf_adapter *adapter = q_vector->adapter; + struct ixgbe_hw *hw = &adapter->hw; + int v_idx = q_vector->v_idx; + u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; + + /* + * set the WDIS bit to not clear the timer bits and cause an + * immediate assertion of the interrupt 
+ */ + itr_reg |= IXGBE_EITR_CNT_WDIS; + + IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); +} /** * ixgbevf_configure_msix - Configure MSI-X hardware @@ -662,30 +681,6 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, ring_container->itr = itr_setting; } -/** - * ixgbevf_write_eitr - write VTEITR register in hardware specific way - * @q_vector: structure containing interrupt and ring information - * - * This function is made to be called by ethtool and by the driver - * when it needs to update VTEITR registers at runtime. Hardware - * specific quirks/differences are taken care of here. - */ -void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) -{ - struct ixgbevf_adapter *adapter = q_vector->adapter; - struct ixgbe_hw *hw = &adapter->hw; - int v_idx = q_vector->v_idx; - u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; - - /* - * set the WDIS bit to not clear the timer bits and cause an - * immediate assertion of the interrupt - */ - itr_reg |= IXGBE_EITR_CNT_WDIS; - - IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); -} - static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) { u32 new_itr = q_vector->itr; -- cgit v1.2.3 From 252562c207a850106d9d5b41a41d29f96c0530b7 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Thu, 24 May 2012 01:59:27 +0000 Subject: ixgbe: Reduce Rx header size to what is actually used The recent changes to netdev_alloc_skb actually make it so that the size of the buffer now actually has a more direct input on the truesize. So in order to make best use of the piece of a page we are allocated I am reducing the IXGBE_RX_HDR_SIZE to 256 so that our truesize will be reduced by 256 bytes as well. This should result in performance improvements since the number of uses per page should increase from 4 to 6 in the case of a 4K page. In addition we should see socket performance improvements due to the truesize dropping to less than 1K for buffers less than 256 bytes. Cc: Eric Dumazet Signed-off-by: Alexander Duyck Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 15 ++++++++------- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++-- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index eb5928228670..b9623e9ea895 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -77,17 +77,18 @@ #define IXGBE_MAX_FCPAUSE 0xFFFF /* Supported Rx Buffer Sizes */ -#define IXGBE_RXBUFFER_512 512 /* Used for packet split */ +#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ /* - * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we - * reserve 2 more, and skb_shared_info adds an additional 384 bytes more, - * this adds up to 512 bytes of extra data meaning the smallest allocation - * we could have is 1K. - * i.e. RXBUFFER_512 --> size-1024 slab + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we + * reserve 64 more, and skb_shared_info adds an additional 320 bytes more, + * this adds up to 448 bytes of extra data. + * + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. 
*/ -#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512 +#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7be35043b751..b376926af890 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1517,8 +1517,8 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, * 60 bytes if the skb->len is less than 60 for skb_pad. */ pull_len = skb_frag_size(frag); - if (pull_len > 256) - pull_len = ixgbe_get_headlen(va, pull_len); + if (pull_len > IXGBE_RX_HDR_SIZE) + pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); /* align pull length to size of long to optimize memcpy performance */ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); -- cgit v1.2.3 From b92ad72dea9925359e9dfa70c4cbf8db6f1b2d65 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 25 May 2012 01:45:38 +0000 Subject: ixgbe: Use num_tcs.pg_tcs as upper limit for TC when checking based on UP This change makes it so the function ixgbe_dcb_get_tc_from_up will use the num_tcs.pg_tcs to determine the starting value for determining a traffic class based on a user priority. The main motivation for this change is to address possible bad configurations in which more TCs worth of data are populated then there are actual TCs. By limiting this value we can at least make certain we are not providing a map with values that are out of range. As a result any user priorities that are setup in the configuration with a traffic class mapping higher than what the hardware supports will be reported as being on TC 0. Signed-off-by: Alexander Duyck Acked-by: Greg Rose Tested-by: Stephen Ko Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c index 5442b359141e..9bc17c0cb972 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c @@ -232,18 +232,22 @@ u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) { struct tc_configuration *tc_config = &cfg->tc_config[0]; u8 prio_mask = 1 << up; - u8 tc; + u8 tc = cfg->num_tcs.pg_tcs; + + /* If tc is 0 then DCB is likely not enabled or supported */ + if (!tc) + goto out; /* - * Test for TCs 7 through 1 and report the first match we find. If + * Test from maximum TC to 1 and report the first match we find. If * we find no match we can assume that the TC is 0 since the TC must * be set for all user priorities */ - for (tc = MAX_TRAFFIC_CLASS - 1; tc; tc--) { + for (tc--; tc; tc--) { if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap) break; } - +out: return tc; } -- cgit v1.2.3 From 95447461fa9ae3d879c84c1d2f2f8da2fdcd8f34 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Thu, 31 May 2012 12:42:26 +0000 Subject: ixgbe: fix RAR entry counting for generic and fdb_add() Do RAR entry accounting correctly so that errors are reported and promisc mode is set correctly when the number of entries exceeds the hardware limits. This can happen with many macvlan devices attached to the PF or by adding many fdb entries in SR-IOV modes. 
The commit also includes a small refactor of fdb_add() to avoid having so many nested if/else statements after adding a check for the number of RAR entries. The max entries for the PF is currently 16; we allow 15 additional entries to account for the defined MAC. Signed-off-by: John Fastabend Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b376926af890..84370e78a0fb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3441,14 +3441,18 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS; + unsigned int rar_entries = hw->mac.num_rar_entries - 1; int count = 0; + /* In SR-IOV mode significantly less RAR entries are available */ + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + rar_entries = IXGBE_MAX_PF_MACVLANS - 1; + /* return ENOMEM indicating insufficient memory for addresses */ if (netdev_uc_count(netdev) > rar_entries) return -ENOMEM; - if (!netdev_uc_empty(netdev) && rar_entries) { + if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; /* return error if we do not support writing to RAR table */ if (!hw->mac.ops.set_rar) @@ -6861,7 +6865,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, u16 flags) { struct ixgbe_adapter *adapter = netdev_priv(dev); - int err = -EOPNOTSUPP; + int err; + + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return -EOPNOTSUPP; if (ndm->ndm_state & NUD_PERMANENT) { pr_info("%s: FDB only supports static addresses\n", @@ -6869,13 +6876,17 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, return -EINVAL; } - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { - if (is_unicast_ether_addr(addr)) + if (is_unicast_ether_addr(addr)) { + u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS; + + if (netdev_uc_count(dev) < rar_uc_entries) err = dev_uc_add_excl(dev, addr); - else if (is_multicast_ether_addr(addr)) - err = dev_mc_add_excl(dev, addr); else - err = -EINVAL; + err = -ENOMEM; + } else if (is_multicast_ether_addr(addr)) { + err = dev_mc_add_excl(dev, addr); + } else { + err = -EINVAL; } /* Only return duplicate errors if NLM_F_EXCL is set */ -- cgit v1.2.3 From 3f4a6f009f6319d2fb7f6fe654264e1eabe0e50b Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 5 Jun 2012 05:58:52 +0000 Subject: ixgbe: remove extra unused queues in DCB + FCoE case With DCB and FCoE configured, extra queues may be allocated and never used. After this patch we calculate the max correctly.
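As a rough illustration of the sizing this commit aims for, the sketch below models the upper-bound calculation in plain C: compute a DCB-capped count and a non-DCB count separately, then take the larger of the two instead of unconditionally multiplying by the number of traffic classes. The MODEL_* limits are stand-in values chosen for the example, not the driver's defines, and model_indices() is an invented name.

#include <stdio.h>

/* Stand-in limits for illustration; the driver uses its own defines. */
#define MODEL_MAX_TRAFFIC_CLASS  8
#define MODEL_MAX_RSS_INDICES   16 /* assumed 82598-style cap */
#define MODEL_MAX_FDIR_INDICES  64 /* assumed 82599-style cap */
#define MODEL_MAX_FCOE_INDICES   8

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

/* Model of the queue sizing: cap the DCB and non-DCB counts separately and
 * keep the larger of the two, rather than always multiplying by the TC count. */
static unsigned int model_indices(unsigned int cpus, int is_82598, int dcb_built_in, int fcoe_capable)
{
        unsigned int cap = is_82598 ? MODEL_MAX_RSS_INDICES : MODEL_MAX_FDIR_INDICES;
        unsigned int dcb_max = 0;
        unsigned int indices = cpus;

        if (dcb_built_in)
                dcb_max = min_u(cpus * MODEL_MAX_TRAFFIC_CLASS, cap);

        indices = min_u(indices, cap);
        if (fcoe_capable)
                indices += min_u(cpus, MODEL_MAX_FCOE_INDICES);

        return max_u(dcb_max, indices);
}

int main(void)
{
        printf("82599-style, 16 CPUs, DCB + FCoE: %u queues\n", model_indices(16, 0, 1, 1));
        return 0;
}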
Signed-off-by: John Fastabend Tested-by: Phil Schmitt Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 84370e78a0fb..bc7e79b92b63 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7048,6 +7048,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, int i, err, pci_using_dac; u8 part_str[IXGBE_PBANUM_LENGTH]; unsigned int indices = num_possible_cpus(); + unsigned int dcb_max = 0; #ifdef IXGBE_FCOE u16 device_caps; #endif @@ -7097,15 +7098,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, pci_save_state(pdev); #ifdef CONFIG_IXGBE_DCB - indices *= MAX_TRAFFIC_CLASS; + if (ii->mac == ixgbe_mac_82598EB) + dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, + IXGBE_MAX_RSS_INDICES); + else + dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS, + IXGBE_MAX_FDIR_INDICES); #endif if (ii->mac == ixgbe_mac_82598EB) -#ifdef CONFIG_IXGBE_DCB - indices = min_t(unsigned int, indices, MAX_TRAFFIC_CLASS * 4); -#else indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES); -#endif else indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); @@ -7113,6 +7115,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, indices += min_t(unsigned int, num_possible_cpus(), IXGBE_MAX_FCOE_INDICES); #endif + indices = max_t(unsigned int, dcb_max, indices); netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); if (!netdev) { err = -ENOMEM; -- cgit v1.2.3 From b6dfd939fdc249fcf8cd7b8006f76239b33eb581 Mon Sep 17 00:00:00 2001 From: Don Skidmore Date: Wed, 11 Jul 2012 07:17:42 +0000 Subject: ixgbe: add support for new 82599 device This patch adds support for a new 82599 device that supports WoL. 
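WoL support here is keyed off the board's PCI subsystem device ID, so enabling a new board amounts to adding its ID to an existing switch statement. The standalone sketch below models that check; the two ID values are the ones visible in the hunks that follow, while the function and macro names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Subsystem IDs as they appear in the hunks below; everything else is a sketch. */
#define MODEL_SUBDEV_ID_82599_SFP  0x11A9
#define MODEL_SUBDEV_ID_82599_RNDC 0x1F72

static bool model_wol_supported(unsigned int subsystem_device_id)
{
        switch (subsystem_device_id) {
        case MODEL_SUBDEV_ID_82599_SFP:
        case MODEL_SUBDEV_ID_82599_RNDC: /* the board added by this patch */
                return true;
        default:
                return false;
        }
}

int main(void)
{
        printf("subdevice 0x1F72 WoL capable: %d\n", model_wol_supported(0x1F72));
        return 0;
}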
Signed-off-by: Don Skidmore Tested-by: Phil Schmitt Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index bc7e79b92b63..30699815451c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7001,6 +7001,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, if (hw->bus.func != 0) break; case IXGBE_SUBDEV_ID_82599_SFP: + case IXGBE_SUBDEV_ID_82599_RNDC: is_wol_supported = 1; break; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index fe0a19d91d4a..400f86a31174 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -54,6 +54,7 @@ #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a #define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 #define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 #define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 #define IXGBE_DEV_ID_82599_SFP_EM 0x1507 #define IXGBE_DEV_ID_82599_SFP_SF2 0x154D -- cgit v1.2.3 From b724e9f2505513df1455b03efea166f7dfa0f7d5 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 17 Jul 2012 01:20:28 +0000 Subject: ixgbe: Use 1TC DCB instead of disabling DCB for MSI and legacy interrupts This change makes it so that we can use 1TC DCB in the case of MSI and legacy interrupts. The advantage to this is that it allows us to fully support FCoE w/ DCB instead of having to drop to link flow control only when using these interrupt modes. 
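In outline, the fallback works like this: when MSI-X vectors cannot be acquired, keep DCB configured but collapse it to a single traffic class and a single queue rather than clearing the DCB state entirely, so PFC/FCoE setup can still proceed. The sketch below is a simplified model of that decision; the structure and field names are invented and do not mirror struct ixgbe_adapter.

#include <stdio.h>

/* Invented, simplified adapter state for illustration only. */
struct model_adapter {
        int requested_tcs;  /* traffic classes requested by the user */
        int dcb_flag;       /* 1 while the DCB feature flag is set */
        int pg_tcs;         /* traffic classes actually configured */
        int rss_limit;      /* RSS queue limit */
};

/* Falling back from MSI-X to MSI/legacy: keep DCB usable in 1-TC form so
 * PFC/FCoE configuration still works, instead of disabling DCB outright. */
static void model_msi_fallback(struct model_adapter *a)
{
        if (a->requested_tcs > 1) {
                /* More than one TC cannot be serviced by a single vector. */
                a->requested_tcs = 1;
                a->dcb_flag = 0;
        }
        a->pg_tcs = 1;    /* single traffic class */
        a->rss_limit = 1; /* single queue */
}

int main(void)
{
        struct model_adapter a = { 4, 1, 8, 16 };

        model_msi_fallback(&a);
        printf("tcs=%d dcb_flag=%d pg_tcs=%d rss_limit=%d\n",
               a.requested_tcs, a.dcb_flag, a.pg_tcs, a.rss_limit);
        return 0;
}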
Cc: John Fastabend Signed-off-by: Alexander Duyck Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 18 +++++++++++++++++- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 ------ 2 files changed, 17 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 29a2a8528073..17ecbcedd548 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1065,11 +1065,27 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) return; } - adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + /* disable DCB if number of TCs exceeds 1 */ + if (netdev_get_num_tc(adapter->netdev) > 1) { + e_err(probe, "num TCs exceeds number of queues - disabling DCB\n"); + netdev_reset_tc(adapter->netdev); + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + adapter->hw.fc.requested_mode = adapter->last_lfc_mode; + + adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; + } + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + + /* disable SR-IOV */ ixgbe_disable_sriov(adapter); + /* disable RSS */ adapter->ring_feature[RING_F_RSS].limit = 1; + ixgbe_set_num_queues(adapter); adapter->num_q_vectors = 1; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 30699815451c..3b6784cf134a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6703,12 +6703,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) struct ixgbe_adapter *adapter = netdev_priv(dev); struct ixgbe_hw *hw = &adapter->hw; - /* Multiple traffic classes requires multiple queues */ - if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { - e_err(drv, "Enable failed, needs MSI-X\n"); - return -EINVAL; - } - /* Hardware supports up to 8 traffic classes */ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || (hw->mac.type == ixgbe_mac_82598EB && -- cgit v1.2.3 From 76886596921dd0e058f7f0a16de6151629390d15 Mon Sep 17 00:00:00 2001 From: "Akeem G. Abodunrin" Date: Tue, 17 Jul 2012 04:51:18 +0000 Subject: igb: reset PHY in the link_up process to recover PHY setting after power down. There was a previous patch to resolve an issue with the 82576 losing its PHY settings after PHY power down. However, that previous implementation triggered a speed mismatch and occasional link loss. Now, this patch resolves both the initial PHY setting and speed mismatch issues. Signed-off-by: Akeem G.
Abodunrin Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 8adeca9787ca..1050411e7ca3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1500,11 +1500,12 @@ static void igb_configure(struct igb_adapter *adapter) **/ void igb_power_up_link(struct igb_adapter *adapter) { + igb_reset_phy(&adapter->hw); + if (adapter->hw.phy.media_type == e1000_media_type_copper) igb_power_up_phy_copper(&adapter->hw); else igb_power_up_serdes_link_82575(&adapter->hw); - igb_reset_phy(&adapter->hw); } /** -- cgit v1.2.3 From e10df2c63b5500ac32a250bb54f55b764abacf0d Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Sun, 22 Jul 2012 07:15:40 +0000 Subject: e1000: advertise transmit time stamping This driver now offers software transmit time stamping, so it should advertise that fact via ethtool. Compile tested only. Signed-off-by: Richard Cochran Cc: Willem de Bruijn Cc: Jeff Kirsher Cc: e1000-devel@lists.sourceforge.net Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 3103f0b6bf5e..736a7d987db5 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1851,6 +1851,7 @@ static const struct ethtool_ops e1000_ethtool_ops = { .get_sset_count = e1000_get_sset_count, .get_coalesce = e1000_get_coalesce, .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, }; void e1000_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3 From 7b1115e017569219eccbcad2bc48b81e6b98df29 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Sun, 22 Jul 2012 07:15:41 +0000 Subject: e1000e: advertise transmit time stamping This driver now offers software transmit time stamping, so it should advertise that fact via ethtool. Compile tested only. Signed-off-by: Richard Cochran Cc: Willem de Bruijn Cc: Jeff Kirsher Cc: e1000-devel@lists.sourceforge.net Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/e1000e/ethtool.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 105d554ea9db..0349e2478df8 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -2061,6 +2061,7 @@ static const struct ethtool_ops e1000_ethtool_ops = { .get_coalesce = e1000_get_coalesce, .set_coalesce = e1000_set_coalesce, .get_rxnfc = e1000_get_rxnfc, + .get_ts_info = ethtool_op_get_ts_info, }; void e1000e_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3 From d47e12d63e745f667a977226c3f59ae14521592d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 22 Jul 2012 12:36:41 -0700 Subject: ixgbe: Fix build with PCI_IOV enabled. Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet/intel') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 47b2ce740ec1..4fea8716ab64 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -236,7 +236,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) if (ixgbe_vfs_are_assigned(adapter)) { e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n"); return; - + } /* disable iov and allow time for transactions to clear */ pci_disable_sriov(adapter->pdev); #endif -- cgit v1.2.3
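For readers skimming the one-character fix above: the added brace closes the VF-assigned guard so the warning-and-return path stays conditional and pci_disable_sriov() still runs in the normal case. A userspace model of the intended control flow, with invented names, is sketched below.

#include <stdbool.h>
#include <stdio.h>

/* Userspace model of the control flow the added brace restores: bail out
 * before touching SR-IOV state while VFs are still assigned to guests. */
static void model_disable_sriov(bool vfs_are_assigned)
{
        if (vfs_are_assigned) {
                fprintf(stderr, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
                return;
        } /* <-- the closing brace the patch above adds */

        printf("pci_disable_sriov() would run here\n");
}

int main(void)
{
        model_disable_sriov(true);
        model_disable_sriov(false);
        return 0;
}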