Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Makefile                       |   1
-rw-r--r--  drivers/net/acenic.c                       |  45
-rw-r--r--  drivers/net/acenic.h                       |   6
-rw-r--r--  drivers/net/bonding/bond_main.c            |   8
-rw-r--r--  drivers/net/bonding/bond_sysfs.c           | 133
-rw-r--r--  drivers/net/forcedeth.c                    |  16
-rw-r--r--  drivers/net/gianfar.c                      |   6
-rw-r--r--  drivers/net/ifb.c                          |   2
-rw-r--r--  drivers/net/macvlan.c                      |   2
-rw-r--r--  drivers/net/tg3.c                          | 287
-rw-r--r--  drivers/net/tg3.h                          |   9
-rw-r--r--  drivers/net/tun.c                          |   1
-rw-r--r--  drivers/net/usb/asix.c                     |   7
-rw-r--r--  drivers/net/veth.c                         |   2
-rw-r--r--  drivers/net/wan/hdlc_fr.c                  |   5
-rw-r--r--  drivers/net/wireless/airo.c                |   1
-rw-r--r--  drivers/net/wireless/b43/Kconfig           |   2
-rw-r--r--  drivers/net/wireless/b43/bus.c             |   2
-rw-r--r--  drivers/net/wireless/b43/main.c            |   5
-rw-r--r--  drivers/net/wireless/hostap/hostap_main.c  |   1
20 files changed, 318 insertions, 223 deletions
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b7622c3745fa..e1eca2ab505e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -282,6 +282,7 @@ obj-$(CONFIG_USB_HSO) += usb/
obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
obj-$(CONFIG_USB_IPHETH) += usb/
+obj-$(CONFIG_USB_CDC_PHONET) += usb/
obj-$(CONFIG_WLAN) += wireless/
obj-$(CONFIG_NET_TULIP) += tulip/
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 536038b22710..31798f5f5d06 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -1502,13 +1502,13 @@ static int __devinit ace_init(struct net_device *dev)
* firmware to wipe the ring without re-initializing it.
*/
if (!test_and_set_bit(0, &ap->std_refill_busy))
- ace_load_std_rx_ring(ap, RX_RING_SIZE);
+ ace_load_std_rx_ring(dev, RX_RING_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
ap->name);
if (ap->version >= 2) {
if (!test_and_set_bit(0, &ap->mini_refill_busy))
- ace_load_mini_rx_ring(ap, RX_MINI_SIZE);
+ ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
else
printk(KERN_ERR "%s: Someone is busy refilling "
"the RX mini ring\n", ap->name);
@@ -1584,9 +1584,10 @@ static void ace_watchdog(struct net_device *data)
}
-static void ace_tasklet(unsigned long dev)
+static void ace_tasklet(unsigned long arg)
{
- struct ace_private *ap = netdev_priv((struct net_device *)dev);
+ struct net_device *dev = (struct net_device *) arg;
+ struct ace_private *ap = netdev_priv(dev);
int cur_size;
cur_size = atomic_read(&ap->cur_rx_bufs);
@@ -1595,7 +1596,7 @@ static void ace_tasklet(unsigned long dev)
#ifdef DEBUG
printk("refilling buffers (current %i)\n", cur_size);
#endif
- ace_load_std_rx_ring(ap, RX_RING_SIZE - cur_size);
+ ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size);
}
if (ap->version >= 2) {
@@ -1606,7 +1607,7 @@ static void ace_tasklet(unsigned long dev)
printk("refilling mini buffers (current %i)\n",
cur_size);
#endif
- ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
+ ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size);
}
}
@@ -1616,7 +1617,7 @@ static void ace_tasklet(unsigned long dev)
#ifdef DEBUG
printk("refilling jumbo buffers (current %i)\n", cur_size);
#endif
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size);
}
ap->tasklet_pending = 0;
}
@@ -1642,8 +1643,9 @@ static void ace_dump_trace(struct ace_private *ap)
* done only before the device is enabled, thus no interrupts are
* generated and by the interrupt handler/tasklet handler.
*/
-static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
+static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
{
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
@@ -1657,11 +1659,10 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
- skb = dev_alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN);
+ skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_STD_BUFSIZE,
@@ -1705,8 +1706,9 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
}
-static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
+static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
{
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
@@ -1718,11 +1720,10 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
- skb = dev_alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN);
+ skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_MINI_BUFSIZE,
@@ -1762,8 +1763,9 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
* Load the jumbo rx ring, this may happen at any time if the MTU
* is changed to a value > 1500.
*/
-static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
+static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
{
+ struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
short i, idx;
@@ -1774,11 +1776,10 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
struct rx_desc *rd;
dma_addr_t mapping;
- skb = dev_alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN);
+ skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
if (!skb)
break;
- skb_reserve(skb, NET_IP_ALIGN);
mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
offset_in_page(skb->data),
ACE_JUMBO_BUFSIZE,
@@ -2196,7 +2197,7 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
#ifdef DEBUG
printk("low on std buffers %i\n", cur_size);
#endif
- ace_load_std_rx_ring(ap,
+ ace_load_std_rx_ring(dev,
RX_RING_SIZE - cur_size);
} else
run_tasklet = 1;
@@ -2212,7 +2213,8 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
printk("low on mini buffers %i\n",
cur_size);
#endif
- ace_load_mini_rx_ring(ap, RX_MINI_SIZE - cur_size);
+ ace_load_mini_rx_ring(dev,
+ RX_MINI_SIZE - cur_size);
} else
run_tasklet = 1;
}
@@ -2228,7 +2230,8 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
printk("low on jumbo buffers %i\n",
cur_size);
#endif
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE - cur_size);
+ ace_load_jumbo_rx_ring(dev,
+ RX_JUMBO_SIZE - cur_size);
} else
run_tasklet = 1;
}
@@ -2267,7 +2270,7 @@ static int ace_open(struct net_device *dev)
if (ap->jumbo &&
!test_and_set_bit(0, &ap->jumbo_refill_busy))
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
if (dev->flags & IFF_PROMISC) {
cmd.evt = C_SET_PROMISC_MODE;
@@ -2575,7 +2578,7 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
"support\n", dev->name);
ap->jumbo = 1;
if (!test_and_set_bit(0, &ap->jumbo_refill_busy))
- ace_load_jumbo_rx_ring(ap, RX_JUMBO_SIZE);
+ ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);
ace_set_rxtx_parms(dev, 1);
}
} else {
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index f67dc9b0eb80..51c486cfbb8c 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -766,9 +766,9 @@ static inline void ace_unmask_irq(struct net_device *dev)
* Prototypes
*/
static int ace_init(struct net_device *dev);
-static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs);
-static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs);
-static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs);
+static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs);
+static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs);
+static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs);
static irqreturn_t ace_interrupt(int irq, void *dev_id);
static int ace_load_firmware(struct net_device *dev);
static int ace_open(struct net_device *dev);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 02842d05c11f..38a83acd502e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1557,8 +1557,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_dev->type != ARPHRD_ETHER)
bond_setup_by_slave(bond_dev, slave_dev);
- else
+ else {
ether_setup(bond_dev);
+ bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ }
netdev_bonding_change(bond_dev,
NETDEV_POST_TYPE_CHANGE);
@@ -4330,7 +4332,7 @@ static void bond_setup(struct net_device *bond_dev)
bond_dev->tx_queue_len = 0;
bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
bond_dev->priv_flags |= IFF_BONDING;
- bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
/* At first, we block adding VLANs. That's the only way to
* prevent problems that occur when adding VLANs over an
@@ -4691,7 +4693,7 @@ static int bond_check_params(struct bond_params *params)
/* miimon and arp_interval not set, we need one so things
* work as expected, see bonding.txt for details
*/
- pr_warning("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
+ pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
}
if (primary && !USES_PRIMARY(bond_mode)) {
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b60835f58650..2dfb4bf90087 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1025,6 +1025,7 @@ static ssize_t bonding_store_primary(struct device *d,
int i;
struct slave *slave;
struct bonding *bond = to_bond(d);
+ char ifname[IFNAMSIZ];
if (!rtnl_trylock())
return restart_syscall();
@@ -1035,32 +1036,33 @@ static ssize_t bonding_store_primary(struct device *d,
if (!USES_PRIMARY(bond->params.mode)) {
pr_info("%s: Unable to set primary slave; %s is in mode %d\n",
bond->dev->name, bond->dev->name, bond->params.mode);
- } else {
- bond_for_each_slave(bond, slave, i) {
- if (strnicmp
- (slave->dev->name, buf,
- strlen(slave->dev->name)) == 0) {
- pr_info("%s: Setting %s as primary slave.\n",
- bond->dev->name, slave->dev->name);
- bond->primary_slave = slave;
- strcpy(bond->params.primary, slave->dev->name);
- bond_select_active_slave(bond);
- goto out;
- }
- }
+ goto out;
+ }
- /* if we got here, then we didn't match the name of any slave */
+ sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
- if (strlen(buf) == 0 || buf[0] == '\n') {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
- bond->primary_slave = NULL;
- bond_select_active_slave(bond);
- } else {
- pr_info("%s: Unable to set %.*s as primary slave as it is not a slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
+ /* check to see if we are clearing primary */
+ if (!strlen(ifname) || buf[0] == '\n') {
+ pr_info("%s: Setting primary slave to None.\n",
+ bond->dev->name);
+ bond->primary_slave = NULL;
+ bond_select_active_slave(bond);
+ goto out;
+ }
+
+ bond_for_each_slave(bond, slave, i) {
+ if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
+ pr_info("%s: Setting %s as primary slave.\n",
+ bond->dev->name, slave->dev->name);
+ bond->primary_slave = slave;
+ strcpy(bond->params.primary, slave->dev->name);
+ bond_select_active_slave(bond);
+ goto out;
}
}
+
+ pr_info("%s: Unable to set %.*s as primary slave.\n",
+ bond->dev->name, (int)strlen(buf) - 1, buf);
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
@@ -1195,6 +1197,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
struct slave *old_active = NULL;
struct slave *new_active = NULL;
struct bonding *bond = to_bond(d);
+ char ifname[IFNAMSIZ];
if (!rtnl_trylock())
return restart_syscall();
@@ -1203,56 +1206,62 @@ static ssize_t bonding_store_active_slave(struct device *d,
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
- if (!USES_PRIMARY(bond->params.mode))
+ if (!USES_PRIMARY(bond->params.mode)) {
pr_info("%s: Unable to change active slave; %s is in mode %d\n",
bond->dev->name, bond->dev->name, bond->params.mode);
- else {
- bond_for_each_slave(bond, slave, i) {
- if (strnicmp
- (slave->dev->name, buf,
- strlen(slave->dev->name)) == 0) {
- old_active = bond->curr_active_slave;
- new_active = slave;
- if (new_active == old_active) {
- /* do nothing */
- pr_info("%s: %s is already the current active slave.\n",
+ goto out;
+ }
+
+ sscanf(buf, "%16s", ifname); /* IFNAMSIZ */
+
+ /* check to see if we are clearing active */
+ if (!strlen(ifname) || buf[0] == '\n') {
+ pr_info("%s: Clearing current active slave.\n",
+ bond->dev->name);
+ bond->curr_active_slave = NULL;
+ bond_select_active_slave(bond);
+ goto out;
+ }
+
+ bond_for_each_slave(bond, slave, i) {
+ if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
+ old_active = bond->curr_active_slave;
+ new_active = slave;
+ if (new_active == old_active) {
+ /* do nothing */
+ pr_info("%s: %s is already the current"
+ " active slave.\n",
+ bond->dev->name,
+ slave->dev->name);
+ goto out;
+ }
+ else {
+ if ((new_active) &&
+ (old_active) &&
+ (new_active->link == BOND_LINK_UP) &&
+ IS_UP(new_active->dev)) {
+ pr_info("%s: Setting %s as active"
+ " slave.\n",
bond->dev->name,
slave->dev->name);
- goto out;
+ bond_change_active_slave(bond,
+ new_active);
}
else {
- if ((new_active) &&
- (old_active) &&
- (new_active->link == BOND_LINK_UP) &&
- IS_UP(new_active->dev)) {
- pr_info("%s: Setting %s as active slave.\n",
- bond->dev->name,
- slave->dev->name);
- bond_change_active_slave(bond, new_active);
- }
- else {
- pr_info("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
- bond->dev->name,
- slave->dev->name,
- slave->dev->name);
- }
- goto out;
+ pr_info("%s: Could not set %s as"
+ " active slave; either %s is"
+ " down or the link is down.\n",
+ bond->dev->name,
+ slave->dev->name,
+ slave->dev->name);
}
+ goto out;
}
}
-
- /* if we got here, then we didn't match the name of any slave */
-
- if (strlen(buf) == 0 || buf[0] == '\n') {
- pr_info("%s: Setting active slave to None.\n",
- bond->dev->name);
- bond->primary_slave = NULL;
- bond_select_active_slave(bond);
- } else {
- pr_info("%s: Unable to set %.*s as active slave as it is not a slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
- }
}
+
+ pr_info("%s: Unable to set %.*s as active slave.\n",
+ bond->dev->name, (int)strlen(buf) - 1, buf);
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index e64cd9ceac3f..e55df308a3af 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2764,7 +2764,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
prefetch(skb->data);
vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
- if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
+
+ /*
+ * There's need to check for NETIF_F_HW_VLAN_RX here.
+ * Even if vlan rx accel is disabled,
+ * NV_RX3_VLAN_TAG_PRESENT is pseudo randomly set.
+ */
+ if (dev->features & NETIF_F_HW_VLAN_RX &&
+ vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
u16 vid = vlanflags & NV_RX3_VLAN_TAG_MASK;
__vlan_hwaccel_put_tag(skb, vid);
@@ -5331,15 +5338,16 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM;
- dev->features |= dev->hw_features;
}
np->vlanctl_bits = 0;
if (id->driver_data & DEV_HAS_VLAN) {
np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
- dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
+ dev->hw_features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
}
+ dev->features |= dev->hw_features;
+
np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5607,6 +5615,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
goto out_error;
}
+ nv_vlan_mode(dev, dev->features);
+
netif_carrier_off(dev);
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 835cd2588148..2659daad783d 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -388,12 +388,8 @@ static void gfar_init_mac(struct net_device *ndev)
if (priv->hwts_rx_en)
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
- /* keep vlan related bits if it's enabled */
- if (ndev->features & NETIF_F_HW_VLAN_TX)
- rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
-
if (ndev->features & NETIF_F_HW_VLAN_RX)
- tctrl |= TCTRL_VLINS;
+ rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 6e82dd32e806..46b5f5fd686b 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -183,7 +183,7 @@ static void ifb_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
random_ether_addr(dev->dev_addr);
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index ba631fcece34..05172c39a0ce 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -572,7 +572,7 @@ void macvlan_common_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->netdev_ops = &macvlan_netdev_ops;
dev->destructor = free_netdev;
dev->header_ops = &macvlan_hard_header_ops,
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 803576568154..dc3fbf61910b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -190,6 +190,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
+#define TG3_TX_BD_DMA_MAX 4096
#define TG3_RAW_IP_ALIGN 2
@@ -4824,7 +4825,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
txq = netdev_get_tx_queue(tp->dev, index);
while (sw_idx != hw_idx) {
- struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
+ struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
struct sk_buff *skb = ri->skb;
int i, tx_bug = 0;
@@ -4840,6 +4841,12 @@ static void tg3_tx(struct tg3_napi *tnapi)
ri->skb = NULL;
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
sw_idx = NEXT_TX(sw_idx);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -4851,6 +4858,13 @@ static void tg3_tx(struct tg3_napi *tnapi)
dma_unmap_addr(ri, mapping),
skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
+
+ while (ri->fragmented) {
+ ri->fragmented = false;
+ sw_idx = NEXT_TX(sw_idx);
+ ri = &tnapi->tx_buffers[sw_idx];
+ }
+
sw_idx = NEXT_TX(sw_idx);
}
@@ -5901,40 +5915,100 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
#endif
}
-static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
- dma_addr_t mapping, int len, u32 flags,
- u32 mss_and_is_end)
+static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
+ dma_addr_t mapping, u32 len, u32 flags,
+ u32 mss, u32 vlan)
+{
+ txbd->addr_hi = ((u64) mapping >> 32);
+ txbd->addr_lo = ((u64) mapping & 0xffffffff);
+ txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
+ txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
+}
+
+static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
+ dma_addr_t map, u32 len, u32 flags,
+ u32 mss, u32 vlan)
{
- struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
- int is_end = (mss_and_is_end & 0x1);
- u32 mss = (mss_and_is_end >> 1);
- u32 vlan_tag = 0;
+ struct tg3 *tp = tnapi->tp;
+ bool hwbug = false;
+
+ if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
+ hwbug = 1;
+
+ if (tg3_4g_overflow_test(map, len))
+ hwbug = 1;
+
+ if (tg3_40bit_overflow_test(tp, map, len))
+ hwbug = 1;
+
+ if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+ u32 tmp_flag = flags & ~TXD_FLAG_END;
+ while (len > TG3_TX_BD_DMA_MAX) {
+ u32 frag_len = TG3_TX_BD_DMA_MAX;
+ len -= TG3_TX_BD_DMA_MAX;
+
+ if (len) {
+ tnapi->tx_buffers[*entry].fragmented = true;
+ /* Avoid the 8byte DMA problem */
+ if (len <= 8) {
+ len += TG3_TX_BD_DMA_MAX / 2;
+ frag_len = TG3_TX_BD_DMA_MAX / 2;
+ }
+ } else
+ tmp_flag = flags;
+
+ if (*budget) {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ frag_len, tmp_flag, mss, vlan);
+ (*budget)--;
+ *entry = NEXT_TX(*entry);
+ } else {
+ hwbug = 1;
+ break;
+ }
+
+ map += frag_len;
+ }
- if (is_end)
- flags |= TXD_FLAG_END;
- if (flags & TXD_FLAG_VLAN) {
- vlan_tag = flags >> 16;
- flags &= 0xffff;
+ if (len) {
+ if (*budget) {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ len, flags, mss, vlan);
+ (*budget)--;
+ *entry = NEXT_TX(*entry);
+ } else {
+ hwbug = 1;
+ }
+ }
+ } else {
+ tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+ len, flags, mss, vlan);
+ *entry = NEXT_TX(*entry);
}
- vlan_tag |= (mss << TXD_MSS_SHIFT);
- txd->addr_hi = ((u64) mapping >> 32);
- txd->addr_lo = ((u64) mapping & 0xffffffff);
- txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
- txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
+ return hwbug;
}
-static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
- struct sk_buff *skb, int last)
+static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
int i;
- u32 entry = tnapi->tx_prod;
- struct ring_info *txb = &tnapi->tx_buffers[entry];
+ struct sk_buff *skb;
+ struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
+
+ skb = txb->skb;
+ txb->skb = NULL;
pci_unmap_single(tnapi->tp->pdev,
dma_unmap_addr(txb, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
+
+ while (txb->fragmented) {
+ txb->fragmented = false;
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+ }
+
for (i = 0; i < last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -5944,18 +6018,24 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
pci_unmap_page(tnapi->tp->pdev,
dma_unmap_addr(txb, mapping),
frag->size, PCI_DMA_TODEVICE);
+
+ while (txb->fragmented) {
+ txb->fragmented = false;
+ entry = NEXT_TX(entry);
+ txb = &tnapi->tx_buffers[entry];
+ }
}
}
/* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
struct sk_buff *skb,
- u32 base_flags, u32 mss)
+ u32 *entry, u32 *budget,
+ u32 base_flags, u32 mss, u32 vlan)
{
struct tg3 *tp = tnapi->tp;
struct sk_buff *new_skb;
dma_addr_t new_addr = 0;
- u32 entry = tnapi->tx_prod;
int ret = 0;
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
@@ -5976,24 +6056,22 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
PCI_DMA_TODEVICE);
/* Make sure the mapping succeeded */
if (pci_dma_mapping_error(tp->pdev, new_addr)) {
- ret = -1;
dev_kfree_skb(new_skb);
-
- /* Make sure new skb does not cross any 4G boundaries.
- * Drop the packet if it does.
- */
- } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
- pci_unmap_single(tp->pdev, new_addr, new_skb->len,
- PCI_DMA_TODEVICE);
ret = -1;
- dev_kfree_skb(new_skb);
} else {
- tnapi->tx_buffers[entry].skb = new_skb;
- dma_unmap_addr_set(&tnapi->tx_buffers[entry],
+ base_flags |= TXD_FLAG_END;
+
+ tnapi->tx_buffers[*entry].skb = new_skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
mapping, new_addr);
- tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
- base_flags, 1 | (mss << 1));
+ if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
+ new_skb->len, base_flags,
+ mss, vlan)) {
+ tg3_tx_skb_unmap(tnapi, *entry, 0);
+ dev_kfree_skb(new_skb);
+ ret = -1;
+ }
}
}
@@ -6051,7 +6129,8 @@ tg3_tso_bug_end:
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
- u32 len, entry, base_flags, mss;
+ u32 len, entry, base_flags, mss, vlan = 0;
+ u32 budget;
int i = -1, would_hit_hwbug;
dma_addr_t mapping;
struct tg3_napi *tnapi;
@@ -6063,12 +6142,14 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (tg3_flag(tp, ENABLE_TSS))
tnapi++;
+ budget = tg3_tx_avail(tnapi);
+
/* We are running in BH disabled context with netif_tx_lock
* and TX reclaim runs via tp->napi.poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
- if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
+ if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
if (!netif_tx_queue_stopped(txq)) {
netif_tx_stop_queue(txq);
@@ -6153,9 +6234,12 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (vlan_tx_tag_present(skb))
- base_flags |= (TXD_FLAG_VLAN |
- (vlan_tx_tag_get(skb) << 16));
+#ifdef BCM_KERNEL_SUPPORTS_8021Q
+ if (vlan_tx_tag_present(skb)) {
+ base_flags |= TXD_FLAG_VLAN;
+ vlan = vlan_tx_tag_get(skb);
+ }
+#endif
if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
!mss && skb->len > VLAN_ETH_FRAME_LEN)
@@ -6174,25 +6258,23 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
would_hit_hwbug = 0;
- if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
- would_hit_hwbug = 1;
-
- if (tg3_4g_overflow_test(mapping, len))
- would_hit_hwbug = 1;
-
- if (tg3_40bit_overflow_test(tp, mapping, len))
- would_hit_hwbug = 1;
-
if (tg3_flag(tp, 5701_DMA_BUG))
would_hit_hwbug = 1;
- tg3_set_txd(tnapi, entry, mapping, len, base_flags,
- (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
-
- entry = NEXT_TX(entry);
+ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
+ ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
+ mss, vlan))
+ would_hit_hwbug = 1;
/* Now loop through additional data fragments, and queue them. */
if (skb_shinfo(skb)->nr_frags > 0) {
+ u32 tmp_mss = mss;
+
+ if (!tg3_flag(tp, HW_TSO_1) &&
+ !tg3_flag(tp, HW_TSO_2) &&
+ !tg3_flag(tp, HW_TSO_3))
+ tmp_mss = 0;
+
last = skb_shinfo(skb)->nr_frags - 1;
for (i = 0; i <= last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -6209,39 +6291,25 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pci_dma_mapping_error(tp->pdev, mapping))
goto dma_error;
- if (tg3_flag(tp, SHORT_DMA_BUG) &&
- len <= 8)
+ if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
+ len, base_flags |
+ ((i == last) ? TXD_FLAG_END : 0),
+ tmp_mss, vlan))
would_hit_hwbug = 1;
-
- if (tg3_4g_overflow_test(mapping, len))
- would_hit_hwbug = 1;
-
- if (tg3_40bit_overflow_test(tp, mapping, len))
- would_hit_hwbug = 1;
-
- if (tg3_flag(tp, HW_TSO_1) ||
- tg3_flag(tp, HW_TSO_2) ||
- tg3_flag(tp, HW_TSO_3))
- tg3_set_txd(tnapi, entry, mapping, len,
- base_flags, (i == last)|(mss << 1));
- else
- tg3_set_txd(tnapi, entry, mapping, len,
- base_flags, (i == last));
-
- entry = NEXT_TX(entry);
}
}
if (would_hit_hwbug) {
- tg3_skb_error_unmap(tnapi, skb, i);
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/
- if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
+ entry = tnapi->tx_prod;
+ budget = tg3_tx_avail(tnapi);
+ if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
+ base_flags, mss, vlan))
goto out_unlock;
-
- entry = NEXT_TX(tnapi->tx_prod);
}
skb_tx_timestamp(skb);
@@ -6269,7 +6337,7 @@ out_unlock:
return NETDEV_TX_OK;
dma_error:
- tg3_skb_error_unmap(tnapi, skb, i);
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
dev_kfree_skb(skb);
tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
return NETDEV_TX_OK;
@@ -6602,35 +6670,13 @@ static void tg3_free_rings(struct tg3 *tp)
if (!tnapi->tx_buffers)
continue;
- for (i = 0; i < TG3_TX_RING_SIZE; ) {
- struct ring_info *txp;
- struct sk_buff *skb;
- unsigned int k;
-
- txp = &tnapi->tx_buffers[i];
- skb = txp->skb;
+ for (i = 0; i < TG3_TX_RING_SIZE; i++) {
+ struct sk_buff *skb = tnapi->tx_buffers[i].skb;
- if (skb == NULL) {
- i++;
+ if (!skb)
continue;
- }
-
- pci_unmap_single(tp->pdev,
- dma_unmap_addr(txp, mapping),
- skb_headlen(skb),
- PCI_DMA_TODEVICE);
- txp->skb = NULL;
- i++;
-
- for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
- txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
- pci_unmap_page(tp->pdev,
- dma_unmap_addr(txp, mapping),
- skb_shinfo(skb)->frags[k].size,
- PCI_DMA_TODEVICE);
- i++;
- }
+ tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
dev_kfree_skb_any(skb);
}
@@ -6762,9 +6808,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
*/
if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
(i && tg3_flag(tp, ENABLE_TSS))) {
- tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
- TG3_TX_RING_SIZE,
- GFP_KERNEL);
+ tnapi->tx_buffers = kzalloc(
+ sizeof(struct tg3_tx_ring_info) *
+ TG3_TX_RING_SIZE, GFP_KERNEL);
if (!tnapi->tx_buffers)
goto err_out;
@@ -8360,7 +8406,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Program the jumbo buffer descriptor ring control
* blocks on those devices that have them.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
(tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
@@ -11204,6 +11250,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
+ u32 budget;
struct sk_buff *skb, *rx_skb;
u8 *tx_data;
dma_addr_t map;
@@ -11363,6 +11410,10 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
return -EIO;
}
+ val = tnapi->tx_prod;
+ tnapi->tx_buffers[val].skb = skb;
+ dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
+
tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
rnapi->coal_now);
@@ -11370,8 +11421,13 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
- tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
- base_flags, (mss << 1) | 1);
+ budget = tg3_tx_avail(tnapi);
+ if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
+ base_flags | TXD_FLAG_END, mss, 0)) {
+ tnapi->tx_buffers[val].skb = NULL;
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
tnapi->tx_prod++;
@@ -11394,7 +11450,7 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
break;
}
- pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
+ tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
dev_kfree_skb(skb);
if (tx_idx != tnapi->tx_prod)
@@ -13817,7 +13873,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, 5705_PLUS);
/* Determine TSO capabilities */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
; /* Do nothing. HW bug. */
else if (tg3_flag(tp, 57765_PLUS))
tg3_flag_set(tp, HW_TSO_3);
@@ -13880,11 +13936,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, 5755_PLUS))
tg3_flag_set(tp, SHORT_DMA_BUG);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ tg3_flag_set(tp, 4K_FIFO_LIMIT);
+
if (tg3_flag(tp, 5717_PLUS))
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
+ tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
tg3_flag_set(tp, USE_JUMBO_BDFLAG);
if (!tg3_flag(tp, 5705_PLUS) ||
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 691539ba17b3..2ea456dd5880 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2652,6 +2652,12 @@ struct ring_info {
DEFINE_DMA_UNMAP_ADDR(mapping);
};
+struct tg3_tx_ring_info {
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ bool fragmented;
+};
+
struct tg3_link_config {
/* Describes what we're trying to get. */
u32 advertising;
@@ -2816,7 +2822,7 @@ struct tg3_napi {
u32 last_tx_cons;
u32 prodmbox;
struct tg3_tx_buffer_desc *tx_ring;
- struct ring_info *tx_buffers;
+ struct tg3_tx_ring_info *tx_buffers;
dma_addr_t status_mapping;
dma_addr_t rx_rcb_mapping;
@@ -2899,6 +2905,7 @@ enum TG3_FLAGS {
TG3_FLAG_57765_PLUS,
TG3_FLAG_APE_HAS_NCSI,
TG3_FLAG_5717_PLUS,
+ TG3_FLAG_4K_FIFO_LIMIT,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 9a6b3824da14..71f3d1a35b74 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -528,6 +528,7 @@ static void tun_net_init(struct net_device *dev)
dev->netdev_ops = &tap_netdev_ops;
/* Ethernet TAP Device */
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
random_ether_addr(dev->dev_addr);
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 52502883523e..c5c4b4def7fb 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -314,12 +314,11 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, 4);
while (skb->len > 0) {
- if ((short)(header & 0x0000ffff) !=
- ~((short)((header & 0xffff0000) >> 16))) {
+ if ((header & 0x07ff) != ((~header >> 16) & 0x07ff))
netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
- }
+
/* get the packet length */
- size = (u16) (header & 0x0000ffff);
+ size = (u16) (header & 0x000007ff);
if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
u8 alignment = (unsigned long)skb->data & 0x3;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 7f78db7bd68d..5b23767ea817 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -263,6 +263,8 @@ static void veth_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index b25c9229a6a9..eb2028187fbe 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1074,9 +1074,10 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
used = pvc_is_used(pvc);
- if (type == ARPHRD_ETHER)
+ if (type == ARPHRD_ETHER) {
dev = alloc_netdev(0, "pvceth%d", ether_setup);
- else
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ } else
dev = alloc_netdev(0, "pvc%d", pvc_setup);
if (!dev) {
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 55cf71fbffe3..e1b3e3c134fd 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2823,6 +2823,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
dev->wireless_data = &ai->wireless_data;
dev->irq = irq;
dev->base_addr = port;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
SET_NETDEV_DEV(dev, dmdev);
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index d2293dcc117f..3cab843afb05 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -28,7 +28,7 @@ config B43
config B43_BCMA
bool "Support for BCMA bus"
- depends on B43 && BCMA && BROKEN
+ depends on B43 && BCMA
default y
config B43_SSB
diff --git a/drivers/net/wireless/b43/bus.c b/drivers/net/wireless/b43/bus.c
index 64c3f65ff8c0..05f6c7bff6ab 100644
--- a/drivers/net/wireless/b43/bus.c
+++ b/drivers/net/wireless/b43/bus.c
@@ -244,10 +244,12 @@ void b43_bus_set_wldev(struct b43_bus_dev *dev, void *wldev)
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
bcma_set_drvdata(dev->bdev, wldev);
+ break;
#endif
#ifdef CONFIG_B43_SSB
case B43_BUS_SSB:
ssb_set_drvdata(dev->sdev, wldev);
+ break;
#endif
}
}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 032d46674f6b..26f1ab840cc7 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5350,6 +5350,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
{
struct b43_wl *wl = ssb_get_devtypedata(sdev);
struct b43_wldev *wldev = ssb_get_drvdata(sdev);
+ struct b43_bus_dev *dev = wldev->dev;
/* We must cancel any work here before unregistering from ieee80211,
* as the ieee80211 unreg will destroy the workqueue. */
@@ -5365,14 +5366,14 @@ static void b43_ssb_remove(struct ssb_device *sdev)
ieee80211_unregister_hw(wl->hw);
}
- b43_one_core_detach(wldev->dev);
+ b43_one_core_detach(dev);
if (list_empty(&wl->devlist)) {
b43_leds_unregister(wl);
/* Last core on the chip unregistered.
* We can destroy common struct b43_wl.
*/
- b43_wireless_exit(wldev->dev, wl);
+ b43_wireless_exit(dev, wl);
}
}
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index d5084829c9e5..89a116fba1de 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -855,6 +855,7 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
iface = netdev_priv(dev);
ether_setup(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
/* kernel callbacks */
if (iface) {