Diffstat (limited to 'drivers/net')
211 files changed, 12971 insertions, 4469 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 0c2bd806950e..707ab7bd4ea5 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -107,8 +107,6 @@ config MII or internal device. It is safe to say Y or M here even if your ethernet card lacks MII. -source "drivers/ieee802154/Kconfig" - config IFB tristate "Intermediate Functional Block support" depends on NET_CLS_ACT @@ -290,6 +288,8 @@ source "drivers/net/wimax/Kconfig" source "drivers/net/wan/Kconfig" +source "drivers/net/ieee802154/Kconfig" + config XEN_NETDEV_FRONTEND tristate "Xen network device frontend driver" depends on XEN diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 3d375ca128a6..b682a1de7be8 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -53,6 +53,7 @@ obj-$(CONFIG_SUNGEM_PHY) += sungem_phy.o obj-$(CONFIG_WAN) += wan/ obj-$(CONFIG_WLAN) += wireless/ obj-$(CONFIG_WIMAX) += wimax/ +obj-$(CONFIG_IEEE802154) += ieee802154/ obj-$(CONFIG_VMXNET3) += vmxnet3/ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 545c09ed9079..cff6f023c03a 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -996,9 +996,7 @@ static int __init cops_module_init(void) printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", cardname); cops_dev = cops_probe(-1); - if (IS_ERR(cops_dev)) - return PTR_ERR(cops_dev); - return 0; + return PTR_RET(cops_dev); } static void __exit cops_module_exit(void) diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 0910dce3996d..b5782cdf0bca 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -1243,9 +1243,7 @@ static int __init ltpc_module_init(void) "ltpc: Autoprobing is not recommended for modules\n"); dev_ltpc = ltpc_probe(); - if (IS_ERR(dev_ltpc)) - return PTR_ERR(dev_ltpc); - return 0; + return PTR_RET(dev_ltpc); } module_init(ltpc_module_init); #endif diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6fae5f3ec7f6..7858c58df4a3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -398,7 +398,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; - if (unlikely(netpoll_tx_running(slave_dev))) + if (unlikely(netpoll_tx_running(bond->dev))) bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); else dev_queue_xmit(skb); @@ -1120,10 +1120,10 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); - netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER); + call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev); if (should_notify_peers) - netdev_bonding_change(bond->dev, - NETDEV_NOTIFY_PEERS); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, + bond->dev); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); @@ -1235,12 +1235,12 @@ static inline int slave_enable_netpoll(struct slave *slave) struct netpoll *np; int err = 0; - np = kzalloc(sizeof(*np), GFP_KERNEL); + np = kzalloc(sizeof(*np), GFP_ATOMIC); err = -ENOMEM; if (!np) goto out; - err = __netpoll_setup(np, slave->dev); + err = __netpoll_setup(np, slave->dev, GFP_ATOMIC); if (err) { kfree(np); goto out; @@ -1257,9 +1257,7 @@ static inline void slave_disable_netpoll(struct slave *slave) return; slave->np = NULL; - 
synchronize_rcu_bh(); - __netpoll_cleanup(np); - kfree(np); + __netpoll_free_rcu(np); } static inline bool slave_dev_support_netpoll(struct net_device *slave_dev) { @@ -1292,7 +1290,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev) read_unlock(&bond->lock); } -static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp) { struct bonding *bond = netdev_priv(dev); struct slave *slave; @@ -1560,8 +1558,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_dev->name, bond_dev->type, slave_dev->type); - res = netdev_bonding_change(bond_dev, - NETDEV_PRE_TYPE_CHANGE); + res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, + bond_dev); res = notifier_to_errno(res); if (res) { pr_err("%s: refused to change device type\n", @@ -1581,8 +1579,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; } - netdev_bonding_change(bond_dev, - NETDEV_POST_TYPE_CHANGE); + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, + bond_dev); } } else if (bond_dev->type != slave_dev->type) { pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n", @@ -1943,7 +1941,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) } block_netpoll_tx(); - netdev_bonding_change(bond_dev, NETDEV_RELEASE); + call_netdevice_notifiers(NETDEV_RELEASE, bond_dev); write_lock_bh(&bond->lock); slave = bond_get_slave_by_dev(bond, slave_dev); @@ -2586,7 +2584,7 @@ re_arm: read_unlock(&bond->lock); return; } - netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); rtnl_unlock(); } } @@ -2813,12 +2811,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work) arp_work.work); struct slave *slave, *oldcurrent; int do_failover = 0; - int delta_in_ticks; + int delta_in_ticks, extra_ticks; int i; read_lock(&bond->lock); delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval); + extra_ticks = delta_in_ticks / 2; if (bond->slave_cnt == 0) goto re_arm; @@ -2841,10 +2840,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work) if (slave->link != BOND_LINK_UP) { if (time_in_range(jiffies, trans_start - delta_in_ticks, - trans_start + delta_in_ticks) && + trans_start + delta_in_ticks + extra_ticks) && time_in_range(jiffies, slave->dev->last_rx - delta_in_ticks, - slave->dev->last_rx + delta_in_ticks)) { + slave->dev->last_rx + delta_in_ticks + extra_ticks)) { slave->link = BOND_LINK_UP; bond_set_active_slave(slave); @@ -2874,10 +2873,10 @@ void bond_loadbalance_arp_mon(struct work_struct *work) */ if (!time_in_range(jiffies, trans_start - delta_in_ticks, - trans_start + 2 * delta_in_ticks) || + trans_start + 2 * delta_in_ticks + extra_ticks) || !time_in_range(jiffies, slave->dev->last_rx - delta_in_ticks, - slave->dev->last_rx + 2 * delta_in_ticks)) { + slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) { slave->link = BOND_LINK_DOWN; bond_set_backup_slave(slave); @@ -2935,6 +2934,14 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) struct slave *slave; int i, commit = 0; unsigned long trans_start; + int extra_ticks; + + /* All the time comparisons below need some extra time. Otherwise, on + * fast networks the ARP probe/reply may arrive within the same jiffy + * as it was sent. 
Then, the next time the ARP monitor is run, one + * arp_interval will already have passed in the comparisons. + */ + extra_ticks = delta_in_ticks / 2; bond_for_each_slave(bond, slave, i) { slave->new_link = BOND_LINK_NOCHANGE; @@ -2942,7 +2949,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) if (slave->link != BOND_LINK_UP) { if (time_in_range(jiffies, slave_last_rx(bond, slave) - delta_in_ticks, - slave_last_rx(bond, slave) + delta_in_ticks)) { + slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) { slave->new_link = BOND_LINK_UP; commit++; @@ -2958,7 +2965,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) */ if (time_in_range(jiffies, slave->jiffies - delta_in_ticks, - slave->jiffies + 2 * delta_in_ticks)) + slave->jiffies + 2 * delta_in_ticks + extra_ticks)) continue; /* @@ -2978,7 +2985,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) !bond->current_arp_slave && !time_in_range(jiffies, slave_last_rx(bond, slave) - delta_in_ticks, - slave_last_rx(bond, slave) + 3 * delta_in_ticks)) { + slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) { slave->new_link = BOND_LINK_DOWN; commit++; @@ -2994,10 +3001,10 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks) if (bond_is_active_slave(slave) && (!time_in_range(jiffies, trans_start - delta_in_ticks, - trans_start + 2 * delta_in_ticks) || + trans_start + 2 * delta_in_ticks + extra_ticks) || !time_in_range(jiffies, slave_last_rx(bond, slave) - delta_in_ticks, - slave_last_rx(bond, slave) + 2 * delta_in_ticks))) { + slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) { slave->new_link = BOND_LINK_DOWN; commit++; @@ -3029,7 +3036,7 @@ static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks) if ((!bond->curr_active_slave && time_in_range(jiffies, trans_start - delta_in_ticks, - trans_start + delta_in_ticks)) || + trans_start + delta_in_ticks + delta_in_ticks / 2)) || bond->curr_active_slave != slave) { slave->link = BOND_LINK_UP; if (bond->current_arp_slave) { @@ -3205,7 +3212,7 @@ re_arm: read_unlock(&bond->lock); return; } - netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS); + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); rtnl_unlock(); } } @@ -3354,56 +3361,93 @@ static struct notifier_block bond_netdev_notifier = { /*---------------------------- Hashing Policies -----------------------------*/ /* + * Hash for the output device based upon layer 2 data + */ +static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) +{ + struct ethhdr *data = (struct ethhdr *)skb->data; + + if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto)) + return (data->h_dest[5] ^ data->h_source[5]) % count; + + return 0; +} + +/* * Hash for the output device based upon layer 2 and layer 3 data. 
If - * the packet is not IP mimic bond_xmit_hash_policy_l2() + * the packet is not IP, fall back on bond_xmit_hash_policy_l2() */ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count) { struct ethhdr *data = (struct ethhdr *)skb->data; - struct iphdr *iph = ip_hdr(skb); - - if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph; + struct ipv6hdr *ipv6h; + u32 v6hash; + __be32 *s, *d; + + if (skb->protocol == htons(ETH_P_IP) && + skb_network_header_len(skb) >= sizeof(*iph)) { + iph = ip_hdr(skb); return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ (data->h_dest[5] ^ data->h_source[5])) % count; + } else if (skb->protocol == htons(ETH_P_IPV6) && + skb_network_header_len(skb) >= sizeof(*ipv6h)) { + ipv6h = ipv6_hdr(skb); + s = &ipv6h->saddr.s6_addr32[0]; + d = &ipv6h->daddr.s6_addr32[0]; + v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); + v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8); + return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count; } - return (data->h_dest[5] ^ data->h_source[5]) % count; + return bond_xmit_hash_policy_l2(skb, count); } /* * Hash for the output device based upon layer 3 and layer 4 data. If * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is - * altogether not IP, mimic bond_xmit_hash_policy_l2() + * altogether not IP, fall back on bond_xmit_hash_policy_l2() */ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count) { - struct ethhdr *data = (struct ethhdr *)skb->data; - struct iphdr *iph = ip_hdr(skb); - __be16 *layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); - int layer4_xor = 0; - - if (skb->protocol == htons(ETH_P_IP)) { + u32 layer4_xor = 0; + struct iphdr *iph; + struct ipv6hdr *ipv6h; + __be32 *s, *d; + __be16 *layer4hdr; + + if (skb->protocol == htons(ETH_P_IP) && + skb_network_header_len(skb) >= sizeof(*iph)) { + iph = ip_hdr(skb); if (!ip_is_fragment(iph) && (iph->protocol == IPPROTO_TCP || - iph->protocol == IPPROTO_UDP)) { - layer4_xor = ntohs((*layer4hdr ^ *(layer4hdr + 1))); + iph->protocol == IPPROTO_UDP) && + (skb_headlen(skb) - skb_network_offset(skb) >= + iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) { + layer4hdr = (__be16 *)((u32 *)iph + iph->ihl); + layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); } return (layer4_xor ^ ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count; - + } else if (skb->protocol == htons(ETH_P_IPV6) && + skb_network_header_len(skb) >= sizeof(*ipv6h)) { + ipv6h = ipv6_hdr(skb); + if ((ipv6h->nexthdr == IPPROTO_TCP || + ipv6h->nexthdr == IPPROTO_UDP) && + (skb_headlen(skb) - skb_network_offset(skb) >= + sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) { + layer4hdr = (__be16 *)(ipv6h + 1); + layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1)); + } + s = &ipv6h->saddr.s6_addr32[0]; + d = &ipv6h->daddr.s6_addr32[0]; + layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]); + layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^ + (layer4_xor >> 8); + return layer4_xor % count; } - return (data->h_dest[5] ^ data->h_source[5]) % count; -} - -/* - * Hash for the output device based upon layer 2 data - */ -static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count) -{ - struct ethhdr *data = (struct ethhdr *)skb->data; - - return (data->h_dest[5] ^ data->h_source[5]) % count; + return bond_xmit_hash_policy_l2(skb, count); } /*-------------------------- Device entry points ----------------------------*/ diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index 4c538e388655..2c4a21f98442 100644 --- 
a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -34,6 +34,7 @@ #include <linux/if_ether.h> #include <linux/list.h> #include <linux/io.h> +#include <linux/pm_runtime.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -45,6 +46,9 @@ #define IF_ENUM_REG_LEN 11 #define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN) +/* control extension register D_CAN specific */ +#define CONTROL_EX_PDR BIT(8) + /* control register */ #define CONTROL_TEST BIT(7) #define CONTROL_CCE BIT(6) @@ -64,6 +68,7 @@ #define TEST_BASIC BIT(2) /* status register */ +#define STATUS_PDA BIT(10) #define STATUS_BOFF BIT(7) #define STATUS_EWARN BIT(6) #define STATUS_EPASS BIT(5) @@ -163,6 +168,9 @@ /* minimum timeout for checking BUSY status */ #define MIN_TIMEOUT_VALUE 6 +/* Wait for ~1 sec for INIT bit */ +#define INIT_WAIT_MS 1000 + /* napi related */ #define C_CAN_NAPI_WEIGHT C_CAN_MSG_OBJ_RX_NUM @@ -201,6 +209,30 @@ static const struct can_bittiming_const c_can_bittiming_const = { .brp_inc = 1, }; +static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv) +{ + if (priv->device) + pm_runtime_enable(priv->device); +} + +static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv) +{ + if (priv->device) + pm_runtime_disable(priv->device); +} + +static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv) +{ + if (priv->device) + pm_runtime_get_sync(priv->device); +} + +static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv) +{ + if (priv->device) + pm_runtime_put_sync(priv->device); +} + static inline int get_tx_next_msg_obj(const struct c_can_priv *priv) { return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) + @@ -673,11 +705,15 @@ static int c_can_get_berr_counter(const struct net_device *dev, unsigned int reg_err_counter; struct c_can_priv *priv = netdev_priv(dev); + c_can_pm_runtime_get_sync(priv); + reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> ERR_CNT_REC_SHIFT; bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; + c_can_pm_runtime_put_sync(priv); + return 0; } @@ -1053,11 +1089,13 @@ static int c_can_open(struct net_device *dev) int err; struct c_can_priv *priv = netdev_priv(dev); + c_can_pm_runtime_get_sync(priv); + /* open the can device */ err = open_candev(dev); if (err) { netdev_err(dev, "failed to open can device\n"); - return err; + goto exit_open_fail; } /* register interrupt handler */ @@ -1079,6 +1117,8 @@ static int c_can_open(struct net_device *dev) exit_irq_fail: close_candev(dev); +exit_open_fail: + c_can_pm_runtime_put_sync(priv); return err; } @@ -1091,6 +1131,7 @@ static int c_can_close(struct net_device *dev) c_can_stop(dev); free_irq(dev->irq, dev); close_candev(dev); + c_can_pm_runtime_put_sync(priv); return 0; } @@ -1119,6 +1160,77 @@ struct net_device *alloc_c_can_dev(void) } EXPORT_SYMBOL_GPL(alloc_c_can_dev); +#ifdef CONFIG_PM +int c_can_power_down(struct net_device *dev) +{ + u32 val; + unsigned long time_out; + struct c_can_priv *priv = netdev_priv(dev); + + if (!(dev->flags & IFF_UP)) + return 0; + + WARN_ON(priv->type != BOSCH_D_CAN); + + /* set PDR value so the device goes to power down mode */ + val = priv->read_reg(priv, C_CAN_CTRL_EX_REG); + val |= CONTROL_EX_PDR; + priv->write_reg(priv, C_CAN_CTRL_EX_REG, val); + + /* Wait for the PDA bit to get set */ + time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS); + while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) && + time_after(time_out, jiffies)) + 
cpu_relax(); + + if (time_after(jiffies, time_out)) + return -ETIMEDOUT; + + c_can_stop(dev); + + c_can_pm_runtime_put_sync(priv); + + return 0; +} +EXPORT_SYMBOL_GPL(c_can_power_down); + +int c_can_power_up(struct net_device *dev) +{ + u32 val; + unsigned long time_out; + struct c_can_priv *priv = netdev_priv(dev); + + if (!(dev->flags & IFF_UP)) + return 0; + + WARN_ON(priv->type != BOSCH_D_CAN); + + c_can_pm_runtime_get_sync(priv); + + /* Clear PDR and INIT bits */ + val = priv->read_reg(priv, C_CAN_CTRL_EX_REG); + val &= ~CONTROL_EX_PDR; + priv->write_reg(priv, C_CAN_CTRL_EX_REG, val); + val = priv->read_reg(priv, C_CAN_CTRL_REG); + val &= ~CONTROL_INIT; + priv->write_reg(priv, C_CAN_CTRL_REG, val); + + /* Wait for the PDA bit to get clear */ + time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS); + while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) && + time_after(time_out, jiffies)) + cpu_relax(); + + if (time_after(jiffies, time_out)) + return -ETIMEDOUT; + + c_can_start(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(c_can_power_up); +#endif + void free_c_can_dev(struct net_device *dev) { free_candev(dev); @@ -1133,10 +1245,19 @@ static const struct net_device_ops c_can_netdev_ops = { int register_c_can_dev(struct net_device *dev) { + struct c_can_priv *priv = netdev_priv(dev); + int err; + + c_can_pm_runtime_enable(priv); + dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &c_can_netdev_ops; - return register_candev(dev); + err = register_candev(dev); + if (err) + c_can_pm_runtime_disable(priv); + + return err; } EXPORT_SYMBOL_GPL(register_c_can_dev); @@ -1148,6 +1269,8 @@ void unregister_c_can_dev(struct net_device *dev) c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); unregister_candev(dev); + + c_can_pm_runtime_disable(priv); } EXPORT_SYMBOL_GPL(unregister_c_can_dev); diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index 01a7049ab990..e5ed41dafa1b 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -24,6 +24,7 @@ enum reg { C_CAN_CTRL_REG = 0, + C_CAN_CTRL_EX_REG, C_CAN_STS_REG, C_CAN_ERR_CNT_REG, C_CAN_BTR_REG, @@ -104,6 +105,7 @@ static const u16 reg_map_c_can[] = { static const u16 reg_map_d_can[] = { [C_CAN_CTRL_REG] = 0x00, + [C_CAN_CTRL_EX_REG] = 0x02, [C_CAN_STS_REG] = 0x04, [C_CAN_ERR_CNT_REG] = 0x08, [C_CAN_BTR_REG] = 0x0C, @@ -143,8 +145,9 @@ static const u16 reg_map_d_can[] = { }; enum c_can_dev_id { - C_CAN_DEVTYPE, - D_CAN_DEVTYPE, + BOSCH_C_CAN_PLATFORM, + BOSCH_C_CAN, + BOSCH_D_CAN, }; /* c_can private data structure */ @@ -152,6 +155,7 @@ struct c_can_priv { struct can_priv can; /* must be the first member */ struct napi_struct napi; struct net_device *dev; + struct device *device; int tx_object; int current_status; int last_status; @@ -164,6 +168,7 @@ struct c_can_priv { unsigned int tx_echo; void *priv; /* for board-specific data */ u16 irqstatus; + enum c_can_dev_id type; }; struct net_device *alloc_c_can_dev(void); @@ -171,4 +176,9 @@ void free_c_can_dev(struct net_device *dev); int register_c_can_dev(struct net_device *dev); void unregister_c_can_dev(struct net_device *dev); +#ifdef CONFIG_PM +int c_can_power_up(struct net_device *dev); +int c_can_power_down(struct net_device *dev); +#endif + #endif /* C_CAN_H */ diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index 1011146ea513..3d7830bcd2bf 100644 --- a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -120,10 +120,10 @@ static int __devinit 
c_can_pci_probe(struct pci_dev *pdev, /* Configure CAN type */ switch (c_can_pci_data->type) { - case C_CAN_DEVTYPE: + case BOSCH_C_CAN: priv->regs = reg_map_c_can; break; - case D_CAN_DEVTYPE: + case BOSCH_D_CAN: priv->regs = reg_map_d_can; priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; break; @@ -192,7 +192,7 @@ static void __devexit c_can_pci_remove(struct pci_dev *pdev) } static struct c_can_pci_data c_can_sta2x11= { - .type = C_CAN_DEVTYPE, + .type = BOSCH_C_CAN, .reg_align = C_CAN_REG_ALIGN_32, .freq = 52000000, /* 52 Mhz */ }; diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 6ff7ad006c30..ee1416132aba 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -30,6 +30,9 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/consumer.h> #include <linux/can/dev.h> @@ -65,17 +68,58 @@ static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv, writew(val, priv->base + 2 * priv->regs[index]); } +static struct platform_device_id c_can_id_table[] = { + [BOSCH_C_CAN_PLATFORM] = { + .name = KBUILD_MODNAME, + .driver_data = BOSCH_C_CAN, + }, + [BOSCH_C_CAN] = { + .name = "c_can", + .driver_data = BOSCH_C_CAN, + }, + [BOSCH_D_CAN] = { + .name = "d_can", + .driver_data = BOSCH_D_CAN, + }, { + } +}; + +static const struct of_device_id c_can_of_table[] = { + { .compatible = "bosch,c_can", .data = &c_can_id_table[BOSCH_C_CAN] }, + { .compatible = "bosch,d_can", .data = &c_can_id_table[BOSCH_D_CAN] }, + { /* sentinel */ }, +}; + static int __devinit c_can_plat_probe(struct platform_device *pdev) { int ret; void __iomem *addr; struct net_device *dev; struct c_can_priv *priv; + const struct of_device_id *match; const struct platform_device_id *id; + struct pinctrl *pinctrl; struct resource *mem; int irq; struct clk *clk; + if (pdev->dev.of_node) { + match = of_match_device(c_can_of_table, &pdev->dev); + if (!match) { + dev_err(&pdev->dev, "Failed to find matching dt id\n"); + ret = -EINVAL; + goto exit; + } + id = match->data; + } else { + id = platform_get_device_id(pdev); + } + + pinctrl = devm_pinctrl_get_select_default(&pdev->dev); + if (IS_ERR(pinctrl)) + dev_warn(&pdev->dev, + "failed to configure pins from driver\n"); + /* get the appropriate clk */ clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { @@ -114,9 +158,8 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) } priv = netdev_priv(dev); - id = platform_get_device_id(pdev); switch (id->driver_data) { - case C_CAN_DEVTYPE: + case BOSCH_C_CAN: priv->regs = reg_map_c_can; switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) { case IORESOURCE_MEM_32BIT: @@ -130,7 +173,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) break; } break; - case D_CAN_DEVTYPE: + case BOSCH_D_CAN: priv->regs = reg_map_d_can; priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; @@ -143,8 +186,10 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev) dev->irq = irq; priv->base = addr; + priv->device = &pdev->dev; priv->can.clock.freq = clk_get_rate(clk); priv->priv = clk; + priv->type = id->driver_data; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); @@ -195,27 +240,75 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev) return 0; } -static const struct platform_device_id c_can_id_table[] = { - { - 
.name = KBUILD_MODNAME, - .driver_data = C_CAN_DEVTYPE, - }, { - .name = "c_can", - .driver_data = C_CAN_DEVTYPE, - }, { - .name = "d_can", - .driver_data = D_CAN_DEVTYPE, - }, { +#ifdef CONFIG_PM +static int c_can_suspend(struct platform_device *pdev, pm_message_t state) +{ + int ret; + struct net_device *ndev = platform_get_drvdata(pdev); + struct c_can_priv *priv = netdev_priv(ndev); + + if (priv->type != BOSCH_D_CAN) { + dev_warn(&pdev->dev, "Not supported\n"); + return 0; } -}; + + if (netif_running(ndev)) { + netif_stop_queue(ndev); + netif_device_detach(ndev); + } + + ret = c_can_power_down(ndev); + if (ret) { + netdev_err(ndev, "failed to enter power down mode\n"); + return ret; + } + + priv->can.state = CAN_STATE_SLEEPING; + + return 0; +} + +static int c_can_resume(struct platform_device *pdev) +{ + int ret; + struct net_device *ndev = platform_get_drvdata(pdev); + struct c_can_priv *priv = netdev_priv(ndev); + + if (priv->type != BOSCH_D_CAN) { + dev_warn(&pdev->dev, "Not supported\n"); + return 0; + } + + ret = c_can_power_up(ndev); + if (ret) { + netdev_err(ndev, "Still in power down mode\n"); + return ret; + } + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + if (netif_running(ndev)) { + netif_device_attach(ndev); + netif_start_queue(ndev); + } + + return 0; +} +#else +#define c_can_suspend NULL +#define c_can_resume NULL +#endif static struct platform_driver c_can_plat_driver = { .driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, + .of_match_table = of_match_ptr(c_can_of_table), }, .probe = c_can_plat_probe, .remove = __devexit_p(c_can_plat_remove), + .suspend = c_can_suspend, + .resume = c_can_resume, .id_table = c_can_id_table, }; diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index a580db29e503..26e7129332ab 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c @@ -83,6 +83,11 @@ #define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n)) #define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 
0x90 : 0x94) #define INSTRUCTION_RESET 0xC0 +#define RTS_TXB0 0x01 +#define RTS_TXB1 0x02 +#define RTS_TXB2 0x04 +#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07)) + /* MPC251x registers */ #define CANSTAT 0x0e @@ -397,6 +402,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, int tx_buf_idx) { + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); u32 sid, eid, exide, rtr; u8 buf[SPI_TRANSFER_BUF_LEN]; @@ -418,7 +424,10 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc; memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc); mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx); - mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ); + + /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */ + priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx); + mcp251x_spi_trans(priv->spi, 1); } static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 06adf881ea24..524ef96dc24d 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -181,7 +181,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, if (!clock_name || !strcmp(clock_name, "sys")) { sys_clk = clk_get(&ofdev->dev, "sys_clk"); - if (!sys_clk) { + if (IS_ERR(sys_clk)) { dev_err(&ofdev->dev, "couldn't get sys_clk\n"); goto exit_unmap; } @@ -204,7 +204,7 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev, if (clocksrc < 0) { ref_clk = clk_get(&ofdev->dev, "ref_clk"); - if (!ref_clk) { + if (IS_ERR(ref_clk)) { dev_err(&ofdev->dev, "couldn't get ref_clk\n"); goto exit_unmap; } diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 4c4f33d482d2..25011dbe1b96 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -156,8 +156,13 @@ static void set_normal_mode(struct net_device *dev) } /* set chip to normal mode */ - priv->write_reg(priv, REG_MOD, 0x00); + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + priv->write_reg(priv, REG_MOD, MOD_LOM); + else + priv->write_reg(priv, REG_MOD, 0x00); + udelay(10); + status = priv->read_reg(priv, REG_MOD); } @@ -310,7 +315,10 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb, can_put_echo_skb(skb, dev, 0); - sja1000_write_cmdreg(priv, CMD_TR); + if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) + sja1000_write_cmdreg(priv, CMD_TR | CMD_AT); + else + sja1000_write_cmdreg(priv, CMD_TR); return NETDEV_TX_OK; } @@ -505,10 +513,18 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) netdev_warn(dev, "wakeup interrupt\n"); if (isrc & IRQ_TI) { - /* transmission complete interrupt */ - stats->tx_bytes += priv->read_reg(priv, REG_FI) & 0xf; - stats->tx_packets++; - can_get_echo_skb(dev, 0); + /* transmission buffer released */ + if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT && + !(status & SR_TCS)) { + stats->tx_errors++; + can_free_echo_skb(dev, 0); + } else { + /* transmission complete */ + stats->tx_bytes += + priv->read_reg(priv, REG_FI) & 0xf; + stats->tx_packets++; + can_get_echo_skb(dev, 0); + } netif_wake_queue(dev); } if (isrc & IRQ_RI) { @@ -605,7 +621,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv) priv->can.do_set_mode = sja1000_set_mode; priv->can.do_get_berr_counter = sja1000_get_berr_counter; priv->can.ctrlmode_supported = 
CAN_CTRLMODE_3_SAMPLES | - CAN_CTRLMODE_BERR_REPORTING; + CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_ONE_SHOT; spin_lock_init(&priv->cmdreg_lock); diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index 4f50145f6483..662c5f7eb0c5 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev) priv = netdev_priv(dev); dev->irq = res_irq->start; - priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED); + priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; + if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE) + priv->irq_flags |= IRQF_SHARED; priv->reg_base = addr; /* The CAN clock frequency is half the oscillator clock frequency */ priv->can.clock.freq = pdata->osc_freq / 2; diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c index 310596175676..b595d3422b9f 100644 --- a/drivers/net/can/softing/softing_fw.c +++ b/drivers/net/can/softing/softing_fw.c @@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card, const uint8_t *mem, *end, *dat; uint16_t type, len; uint32_t addr; - uint8_t *buf = NULL; + uint8_t *buf = NULL, *new_buf; int buflen = 0; int8_t type_end = 0; @@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card, if (len > buflen) { /* align buflen */ buflen = (len + (1024-1)) & ~(1024-1); - buf = krealloc(buf, buflen, GFP_KERNEL); - if (!buf) { + new_buf = krealloc(buf, buflen, GFP_KERNEL); + if (!new_buf) { ret = -ENOMEM; goto failed; } + buf = new_buf; } /* verify record data */ memcpy_fromio(buf, &dpram[addr + offset], len); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index d2f91f737871..c4643c400d46 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -53,7 +53,7 @@ static struct peak_usb_adapter *peak_usb_adapters_list[] = { * dump memory */ #define DUMP_WIDTH 16 -void dump_mem(char *prompt, void *p, int l) +void pcan_dump_mem(char *prompt, void *p, int l) { pr_info("%s dumping %s (%d bytes):\n", PCAN_USB_DRIVER_NAME, prompt ? 
prompt : "memory", l); @@ -203,9 +203,9 @@ static void peak_usb_read_bulk_callback(struct urb *urb) if (dev->state & PCAN_USB_STATE_STARTED) { err = dev->adapter->dev_decode_buf(dev, urb); if (err) - dump_mem("received usb message", - urb->transfer_buffer, - urb->transfer_buffer_length); + pcan_dump_mem("received usb message", + urb->transfer_buffer, + urb->transfer_buffer_length); } } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h index 4c775b620be2..c8e5e91d7cb5 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h @@ -131,7 +131,7 @@ struct peak_usb_device { struct peak_usb_device *next_siblings; }; -void dump_mem(char *prompt, void *p, int l); +void pcan_dump_mem(char *prompt, void *p, int l); /* common timestamp management */ void peak_usb_init_time_ref(struct peak_time_ref *time_ref, diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 629c4ba5d49d..e1626d92511a 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -292,8 +292,8 @@ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev, if (!rec_len) { netdev_err(dev->netdev, "got unprocessed record in msg\n"); - dump_mem("rcvd rsp msg", pum->u.rec_buffer, - actual_length); + pcan_dump_mem("rcvd rsp msg", pum->u.rec_buffer, + actual_length); break; } @@ -756,8 +756,8 @@ static int pcan_usb_pro_decode_buf(struct peak_usb_device *dev, struct urb *urb) fail: if (err) - dump_mem("received msg", - urb->transfer_buffer, urb->actual_length); + pcan_dump_mem("received msg", + urb->transfer_buffer, urb->actual_length); return err; } diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index f0c8bd54ce29..021d69c5d9bc 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1712,7 +1712,7 @@ e100_set_network_leds(int active) static void e100_netpoll(struct net_device* netdev) { - e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL); + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev); } #endif diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index a11af5cc4844..e4ff38949112 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -89,15 +89,6 @@ source "drivers/net/ethernet/marvell/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" - -config MIPS_SIM_NET - tristate "MIPS simulator Network device" - depends on MIPS_SIM - ---help--- - The MIPSNET device is a simple Ethernet network device which is - emulated by the MIPS Simulator. - If you are not using a MIPSsim or are unsure, say N. 
- source "drivers/net/ethernet/myricom/Kconfig" config FEALNX diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 878ad32b93f2..d4473072654a 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -40,7 +40,6 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ -obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/ obj-$(CONFIG_FEALNX) += fealnx.o obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 77bcd4cb4ffb..6d1a24acb77e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1278,7 +1278,7 @@ struct bnx2x { #define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT) #define BNX2X_FW_RX_ALIGN_END \ - max(1UL << BNX2X_RX_ALIGN_SHIFT, \ + max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) @@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params { continue; \ else -#define for_each_napi_rx_queue(bp, var) \ - for ((var) = 0; (var) < bp->num_napi_queues; (var)++) - /* Skip OOO FP */ #define for_each_tx_queue(bp, var) \ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e879e19eb0d6..ca8048757c84 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) */ bnx2x_setup_tc(bp->dev, bp->max_cos); + /* Add all NAPI objects */ + bnx2x_add_all_napi(bp); bnx2x_napi_enable(bp); /* set pf load just before approaching the MCP */ @@ -2281,7 +2283,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* Wait for all pending SP commands to complete */ if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) { BNX2X_ERR("Timeout waiting for SP elements to complete\n"); - bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); return -EBUSY; } @@ -2329,7 +2331,7 @@ load_error0: } /* must be called with rtnl_lock */ -int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) { int i; bool global = false; @@ -2391,7 +2393,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Cleanup the chip if needed */ if (unload_mode != UNLOAD_RECOVERY) - bnx2x_chip_cleanup(bp, unload_mode); + bnx2x_chip_cleanup(bp, unload_mode, keep_link); else { /* Send the UNLOAD_REQUEST to the MCP */ bnx2x_send_unload_req(bp, unload_mode); @@ -2408,12 +2410,14 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); /* Release IRQs */ bnx2x_free_irq(bp); /* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); + bnx2x_send_unload_done(bp, false); } /* @@ -3764,7 +3768,7 @@ int bnx2x_reload_if_running(struct net_device *dev) if (unlikely(!netif_running(dev))) return 0; - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); return bnx2x_nic_load(bp, LOAD_NORMAL); } @@ -3961,7 +3965,7 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(dev); - bnx2x_nic_unload(bp, 
UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); bnx2x_set_power_state(bp, pci_choose_state(pdev, state)); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index dfa757e74296..9c5ea6c5b4c7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -83,8 +83,9 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode); * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. * * @bp: driver handle + * @keep_link: true iff link should be kept up */ -void bnx2x_send_unload_done(struct bnx2x *bp); +void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link); /** * bnx2x_config_rss_pf - configure RSS parameters in a PF. @@ -153,6 +154,14 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode); void bnx2x_link_set(struct bnx2x *bp); /** + * bnx2x_force_link_reset - Forces link reset, and put the PHY + * in reset as well. + * + * @bp: driver handle + */ +void bnx2x_force_link_reset(struct bnx2x *bp); + +/** * bnx2x_link_test - query link status. * * @bp: driver handle @@ -312,12 +321,13 @@ void bnx2x_set_num_queues(struct bnx2x *bp); * * @bp: driver handle * @unload_mode: COMMON, PORT, FUNCTION + * @keep_link: true iff link should be kept up. * * - Cleanup MAC configuration. * - Closes clients. * - etc. */ -void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode); +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link); /** * bnx2x_acquire_hw_lock - acquire HW lock. @@ -446,7 +456,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl); bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err); /* dev_close main block */ -int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); +int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link); /* dev_open main block */ int bnx2x_nic_load(struct bnx2x *bp, int load_mode); @@ -710,17 +720,15 @@ static inline u16 bnx2x_tx_avail(struct bnx2x *bp, prod = txdata->tx_bd_prod; cons = txdata->tx_bd_cons; - /* NUM_TX_RINGS = number of "next-page" entries - It will be used as a threshold */ - used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; + used = SUB_S16(prod, cons); #ifdef BNX2X_STOP_ON_ERROR WARN_ON(used < 0); - WARN_ON(used > bp->tx_ring_size); - WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL); + WARN_ON(used > txdata->tx_ring_size); + WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL); #endif - return (s16)(bp->tx_ring_size) - used; + return (s16)(txdata->tx_ring_size) - used; } static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata) @@ -792,7 +800,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) bp->num_napi_queues = bp->num_queues; /* Add NAPI objects */ - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, BNX2X_NAPI_WEIGHT); } @@ -801,7 +809,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp) { int i; - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_del(&bnx2x_fp(bp, i, napi)); } @@ -1088,6 +1096,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp, txdata->txq_index = txq_index; txdata->tx_cons_sb = tx_cons_sb; txdata->parent_fp = fp; + txdata->tx_ring_size = IS_FCOE_FP(fp) ? 
MAX_TX_AVAIL : bp->tx_ring_size; DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n", txdata->cid, txdata->txq_index); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index 3e4cff9b1ebe..b926f58e983b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -401,11 +401,11 @@ static const struct reg_addr reg_addrs[] = { { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE }, { 0x78000, 8192, RI_E3E3B0_OFFLINE }, - { 0x85000, 3, RI_ALL_ONLINE }, - { 0x8501c, 7, RI_ALL_ONLINE }, - { 0x85048, 1, RI_ALL_ONLINE }, - { 0x85200, 32, RI_ALL_ONLINE }, - { 0xb0000, 16384, RI_E1H_ONLINE }, + { 0x85000, 3, RI_ALL_OFFLINE }, + { 0x8501c, 7, RI_ALL_OFFLINE }, + { 0x85048, 1, RI_ALL_OFFLINE }, + { 0x85200, 32, RI_ALL_OFFLINE }, + { 0xb0000, 16384, RI_E1H_OFFLINE }, { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2E3E3B0_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE }, @@ -581,17 +581,12 @@ static const struct reg_addr reg_addrs[] = { { 0x140188, 3, RI_E1E1HE2E3_ONLINE }, { 0x140194, 13, RI_ALL_ONLINE }, { 0x140200, 6, RI_E1E1HE2E3_ONLINE }, - { 0x140220, 4, RI_E2E3_ONLINE }, - { 0x140240, 4, RI_E2E3_ONLINE }, { 0x140260, 4, RI_E2E3_ONLINE }, { 0x140280, 4, RI_E2E3_ONLINE }, - { 0x1402a0, 4, RI_E2E3_ONLINE }, - { 0x1402c0, 4, RI_E2E3_ONLINE }, { 0x1402e0, 2, RI_E2E3_ONLINE }, { 0x1402e8, 2, RI_E2E3E3B0_ONLINE }, { 0x1402f0, 9, RI_E2E3_ONLINE }, { 0x140314, 44, RI_E3B0_ONLINE }, - { 0x1403d0, 70, RI_E3B0_ONLINE }, { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE }, { 0x14c000, 4, RI_E1E1H_ONLINE }, @@ -704,7 +699,6 @@ static const struct reg_addr reg_addrs[] = { { 0x180398, 1, RI_E2E3E3B0_ONLINE }, { 0x1803a0, 5, RI_E2E3E3B0_ONLINE }, { 0x1803b4, 2, RI_E3E3B0_ONLINE }, - { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE }, { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, @@ -800,9 +794,9 @@ static const struct reg_addr reg_addrs[] = { { 0x1b905c, 1, RI_E3E3B0_ONLINE }, { 0x1b9064, 1, RI_E3B0_ONLINE }, { 0x1b9080, 10, RI_E3B0_ONLINE }, - { 0x1b9400, 14, RI_E2E3E3B0_ONLINE }, - { 0x1b943c, 19, RI_E2E3E3B0_ONLINE }, - { 0x1b9490, 10, RI_E2E3E3B0_ONLINE }, + { 0x1b9400, 14, RI_E2E3E3B0_OFFLINE }, + { 0x1b943c, 19, RI_E2E3E3B0_OFFLINE }, + { 0x1b9490, 10, RI_E2E3E3B0_OFFLINE }, { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE }, @@ -814,7 +808,6 @@ static const struct reg_addr reg_addrs[] = { { 0x200398, 1, RI_E2E3E3B0_ONLINE }, { 0x2003a0, 1, RI_E2E3E3B0_ONLINE }, { 0x2003a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE }, @@ -921,7 +914,6 @@ static const struct reg_addr reg_addrs[] = { { 0x280398, 1, RI_E2E3E3B0_ONLINE }, { 0x2803a0, 1, RI_E2E3E3B0_ONLINE }, { 0x2803a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE }, @@ -1031,7 +1023,6 @@ static const struct reg_addr reg_addrs[] = { { 0x300398, 1, RI_E2E3E3B0_ONLINE }, { 0x3003a0, 1, RI_E2E3E3B0_ONLINE }, { 0x3003a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE }, { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 
fc4e0e3885b0..a19c9e088278 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -775,7 +775,7 @@ static void bnx2x_get_regs(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); struct dump_hdr dump_hdr = {0}; - regs->version = 0; + regs->version = 1; memset(p, 0, regs->len); if (!netif_running(bp->dev)) @@ -905,6 +905,7 @@ static int bnx2x_nway_reset(struct net_device *dev) if (netif_running(dev)) { bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_force_link_reset(bp); bnx2x_link_set(bp); } @@ -1587,6 +1588,12 @@ static int bnx2x_set_pauseparam(struct net_device *dev, bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO; } + bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_NONE; + if (epause->rx_pause) + bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX; + + if (epause->tx_pause) + bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX; } DP(BNX2X_MSG_ETHTOOL, @@ -1600,7 +1607,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev, return 0; } -static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = { +static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = { "register_test (offline) ", "memory_test (offline) ", "int_loopback_test (offline)", @@ -1647,7 +1654,7 @@ static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata) return -EOPNOTSUPP; } - eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]); + eee_cfg = bp->link_vars.eee_status; edata->supported = bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >> @@ -1684,7 +1691,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) return -EOPNOTSUPP; } - eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]); + eee_cfg = bp->link_vars.eee_status; if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) { DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n"); @@ -1733,6 +1740,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata) /* Restart link to propogate changes */ if (netif_running(dev)) { bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_force_link_reset(bp); bnx2x_link_set(bp); } @@ -2257,7 +2265,7 @@ static int bnx2x_test_ext_loopback(struct bnx2x *bp) if (!netif_running(bp->dev)) return BNX2X_EXT_LOOPBACK_FAILED; - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT); if (rc) { DP(BNX2X_MSG_ETHTOOL, @@ -2408,7 +2416,7 @@ static void bnx2x_self_test(struct net_device *dev, link_up = bp->link_vars.link_up; - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); rc = bnx2x_nic_load(bp, LOAD_DIAG); if (rc) { etest->flags |= ETH_TEST_FL_FAILED; @@ -2440,7 +2448,7 @@ static void bnx2x_self_test(struct net_device *dev, etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; } - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, false); /* restore input for TX port IF */ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); @@ -2528,7 +2536,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnx2x *bp = netdev_priv(dev); - int i, j, k, offset, start; + int i, j, k, start; char queue_name[MAX_QUEUE_NAME_LEN+1]; switch (stringset) { @@ -2564,13 +2572,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) start = 0; else start = 4; - for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp)); - i++, j++) { - offset = 
sprintf(buf+32*i, "%s", - bnx2x_tests_str_arr[j]); - *(buf+offset) = '\0'; - } - break; + memcpy(buf, bnx2x_tests_str_arr + start, + ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp)); } } @@ -2888,11 +2891,9 @@ static void bnx2x_get_channels(struct net_device *dev, */ static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) { - bnx2x_del_all_napi(bp); bnx2x_disable_msi(bp); BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; bnx2x_set_int_mode(bp); - bnx2x_add_all_napi(bp); } /** @@ -2936,7 +2937,7 @@ static int bnx2x_set_channels(struct net_device *dev, bnx2x_change_num_queues(bp, channels->combined_count); return 0; } - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_change_num_queues(bp, channels->combined_count); return bnx2x_nic_load(bp, LOAD_NORMAL); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 76b6e65790f8..c795cfc5a545 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1286,6 +1286,9 @@ struct drv_func_mb { #define DRV_MSG_CODE_SET_MF_BW_MIN_MASK 0x00ff0000 #define DRV_MSG_CODE_SET_MF_BW_MAX_MASK 0xff000000 + #define DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET 0x00000002 + + #define DRV_MSG_CODE_LOAD_REQ_WITH_LFA 0x0000100a u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_DRV_LOAD_COMMON 0x10100000 @@ -1909,6 +1912,54 @@ struct lldp_local_mib { }; /***END OF DCBX STRUCTURES DECLARATIONS***/ +/***********************************************************/ +/* Elink section */ +/***********************************************************/ +#define SHMEM_LINK_CONFIG_SIZE 2 +struct shmem_lfa { + u32 req_duplex; + #define REQ_DUPLEX_PHY0_MASK 0x0000ffff + #define REQ_DUPLEX_PHY0_SHIFT 0 + #define REQ_DUPLEX_PHY1_MASK 0xffff0000 + #define REQ_DUPLEX_PHY1_SHIFT 16 + u32 req_flow_ctrl; + #define REQ_FLOW_CTRL_PHY0_MASK 0x0000ffff + #define REQ_FLOW_CTRL_PHY0_SHIFT 0 + #define REQ_FLOW_CTRL_PHY1_MASK 0xffff0000 + #define REQ_FLOW_CTRL_PHY1_SHIFT 16 + u32 req_line_speed; /* Also determine AutoNeg */ + #define REQ_LINE_SPD_PHY0_MASK 0x0000ffff + #define REQ_LINE_SPD_PHY0_SHIFT 0 + #define REQ_LINE_SPD_PHY1_MASK 0xffff0000 + #define REQ_LINE_SPD_PHY1_SHIFT 16 + u32 speed_cap_mask[SHMEM_LINK_CONFIG_SIZE]; + u32 additional_config; + #define REQ_FC_AUTO_ADV_MASK 0x0000ffff + #define REQ_FC_AUTO_ADV0_SHIFT 0 + #define NO_LFA_DUE_TO_DCC_MASK 0x00010000 + u32 lfa_sts; + #define LFA_LINK_FLAP_REASON_OFFSET 0 + #define LFA_LINK_FLAP_REASON_MASK 0x000000ff + #define LFA_LINK_DOWN 0x1 + #define LFA_LOOPBACK_ENABLED 0x2 + #define LFA_DUPLEX_MISMATCH 0x3 + #define LFA_MFW_IS_TOO_OLD 0x4 + #define LFA_LINK_SPEED_MISMATCH 0x5 + #define LFA_FLOW_CTRL_MISMATCH 0x6 + #define LFA_SPEED_CAP_MISMATCH 0x7 + #define LFA_DCC_LFA_DISABLED 0x8 + #define LFA_EEE_MISMATCH 0x9 + + #define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8 + #define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00 + + #define LINK_FLAP_COUNT_OFFSET 16 + #define LINK_FLAP_COUNT_MASK 0x00ff0000 + + #define LFA_FLAGS_MASK 0xff000000 + #define SHMEM_LFA_DONT_CLEAR_STAT (1<<24) +}; + struct ncsi_oem_fcoe_features { u32 fcoe_features1; #define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK 0x0000FFFF diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index f4beb46c4709..bcc112b82831 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -321,6 +321,127 @@ 
static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits) return val; } +/* + * bnx2x_check_lfa - This function checks if link reinitialization is required, + * or link flap can be avoided. + * + * @params: link parameters + * Returns 0 if Link Flap Avoidance conditions are met otherwise, the failed + * condition code. + */ +static int bnx2x_check_lfa(struct link_params *params) +{ + u32 link_status, cfg_idx, lfa_mask, cfg_size; + u32 cur_speed_cap_mask, cur_req_fc_auto_adv, additional_config; + u32 saved_val, req_val, eee_status; + struct bnx2x *bp = params->bp; + + additional_config = + REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + + /* NOTE: must be first condition checked - + * to verify DCC bit is cleared in any case! + */ + if (additional_config & NO_LFA_DUE_TO_DCC_MASK) { + DP(NETIF_MSG_LINK, "No LFA due to DCC flap after clp exit\n"); + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config), + additional_config & ~NO_LFA_DUE_TO_DCC_MASK); + return LFA_DCC_LFA_DISABLED; + } + + /* Verify that link is up */ + link_status = REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, + port_mb[params->port].link_status)); + if (!(link_status & LINK_STATUS_LINK_UP)) + return LFA_LINK_DOWN; + + /* Verify that loopback mode is not set */ + if (params->loopback_mode) + return LFA_LOOPBACK_ENABLED; + + /* Verify that MFW supports LFA */ + if (!params->lfa_base) + return LFA_MFW_IS_TOO_OLD; + + if (params->num_phys == 3) { + cfg_size = 2; + lfa_mask = 0xffffffff; + } else { + cfg_size = 1; + lfa_mask = 0xffff; + } + + /* Compare Duplex */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex)); + req_val = params->req_duplex[0] | (params->req_duplex[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Duplex mismatch %x vs. %x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_DUPLEX_MISMATCH; + } + /* Compare Flow Control */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl)); + req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Flow control mismatch %x vs. %x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_FLOW_CTRL_MISMATCH; + } + /* Compare Link Speed */ + saved_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_line_speed)); + req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16); + if ((saved_val & lfa_mask) != (req_val & lfa_mask)) { + DP(NETIF_MSG_LINK, "Link speed mismatch %x vs. %x\n", + (saved_val & lfa_mask), (req_val & lfa_mask)); + return LFA_LINK_SPEED_MISMATCH; + } + + for (cfg_idx = 0; cfg_idx < cfg_size; cfg_idx++) { + cur_speed_cap_mask = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx])); + + if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) { + DP(NETIF_MSG_LINK, "Speed Cap mismatch %x vs. %x\n", + cur_speed_cap_mask, + params->speed_cap_mask[cfg_idx]); + return LFA_SPEED_CAP_MISMATCH; + } + } + + cur_req_fc_auto_adv = + REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)) & + REQ_FC_AUTO_ADV_MASK; + + if ((u16)cur_req_fc_auto_adv != params->req_fc_auto_adv) { + DP(NETIF_MSG_LINK, "Flow Ctrl AN mismatch %x vs. 
%x\n", + cur_req_fc_auto_adv, params->req_fc_auto_adv); + return LFA_FLOW_CTRL_MISMATCH; + } + + eee_status = REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); + + if (((eee_status & SHMEM_EEE_LPI_REQUESTED_BIT) ^ + (params->eee_mode & EEE_MODE_ENABLE_LPI)) || + ((eee_status & SHMEM_EEE_REQUESTED_BIT) ^ + (params->eee_mode & EEE_MODE_ADV_LPI))) { + DP(NETIF_MSG_LINK, "EEE mismatch %x vs. %x\n", params->eee_mode, + eee_status); + return LFA_EEE_MISMATCH; + } + + /* LFA conditions are met */ + return 0; +} /******************************************************************/ /* EPIO/GPIO section */ /******************************************************************/ @@ -1307,93 +1428,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos) } /******************************************************************/ -/* EEE section */ -/******************************************************************/ -static u8 bnx2x_eee_has_cap(struct link_params *params) -{ - struct bnx2x *bp = params->bp; - - if (REG_RD(bp, params->shmem2_base) <= - offsetof(struct shmem2_region, eee_status[params->port])) - return 0; - - return 1; -} - -static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer) -{ - switch (nvram_mode) { - case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED: - *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME; - break; - case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE: - *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME; - break; - case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY: - *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME; - break; - default: - *idle_timer = 0; - break; - } - - return 0; -} - -static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode) -{ - switch (idle_timer) { - case EEE_MODE_NVRAM_BALANCED_TIME: - *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED; - break; - case EEE_MODE_NVRAM_AGGRESSIVE_TIME: - *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE; - break; - case EEE_MODE_NVRAM_LATENCY_TIME: - *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY; - break; - default: - *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED; - break; - } - - return 0; -} - -static u32 bnx2x_eee_calc_timer(struct link_params *params) -{ - u32 eee_mode, eee_idle; - struct bnx2x *bp = params->bp; - - if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) { - if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { - /* time value in eee_mode --> used directly*/ - eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK; - } else { - /* hsi value in eee_mode --> time */ - if (bnx2x_eee_nvram_to_time(params->eee_mode & - EEE_MODE_NVRAM_MASK, - &eee_idle)) - return 0; - } - } else { - /* hsi values in nvram --> time*/ - eee_mode = ((REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_feature_config[params->port]. - eee_power_mode)) & - PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> - PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); - - if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle)) - return 0; - } - - return eee_idle; -} - - -/******************************************************************/ /* PFC section */ /******************************************************************/ static void bnx2x_update_pfc_xmac(struct link_params *params, @@ -1606,16 +1640,23 @@ static void bnx2x_set_xumac_nig(struct link_params *params, NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en); } -static void bnx2x_umac_disable(struct link_params *params) +static void bnx2x_set_umac_rxtx(struct link_params *params, u8 en) { u32 umac_base = params->port ? 
GRCBASE_UMAC1 : GRCBASE_UMAC0; + u32 val; struct bnx2x *bp = params->bp; if (!(REG_RD(bp, MISC_REG_RESET_REG_2) & (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port))) return; - + val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG); + if (en) + val |= (UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); + else + val &= ~(UMAC_COMMAND_CONFIG_REG_TX_ENA | + UMAC_COMMAND_CONFIG_REG_RX_ENA); /* Enable/Disable RX and TX */ - REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, 0); + REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); } static void bnx2x_umac_enable(struct link_params *params, @@ -1671,6 +1712,16 @@ static void bnx2x_umac_enable(struct link_params *params, REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); udelay(50); + /* Configure UMAC for EEE */ + if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) { + DP(NETIF_MSG_LINK, "configured UMAC for EEE\n"); + REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, + UMAC_UMAC_EEE_CTRL_REG_EEE_EN); + REG_WR(bp, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11); + } else { + REG_WR(bp, umac_base + UMAC_REG_UMAC_EEE_CTRL, 0x0); + } + /* Set MAC address for source TX Pause/PFC frames (under SW reset) */ REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0, ((params->mac_addr[2] << 24) | @@ -1766,11 +1817,12 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) } -static void bnx2x_xmac_disable(struct link_params *params) +static void bnx2x_set_xmac_rxtx(struct link_params *params, u8 en) { u8 port = params->port; struct bnx2x *bp = params->bp; u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0; + u32 val; if (REG_RD(bp, MISC_REG_RESET_REG_2) & MISC_REGISTERS_RESET_REG_2_XMAC) { @@ -1784,7 +1836,12 @@ static void bnx2x_xmac_disable(struct link_params *params) REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, (pfc_ctrl | (1<<1))); DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port); - REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0); + val = REG_RD(bp, xmac_base + XMAC_REG_CTRL); + if (en) + val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + else + val &= ~(XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN); + REG_WR(bp, xmac_base + XMAC_REG_CTRL, val); } } @@ -2529,16 +2586,6 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status) port_mb[params->port].link_status), link_status); } -static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status) -{ - struct bnx2x *bp = params->bp; - - if (bnx2x_eee_has_cap(params)) - REG_WR(bp, params->shmem2_base + - offsetof(struct shmem2_region, - eee_status[params->port]), eee_status); -} - static void bnx2x_update_pfc_nig(struct link_params *params, struct link_vars *vars, struct bnx2x_nig_brb_pfc_port_params *nig_params) @@ -2667,9 +2714,11 @@ int bnx2x_update_pfc(struct link_params *params, return bnx2x_status; DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); - if (CHIP_IS_E3(bp)) - bnx2x_update_pfc_xmac(params, vars, 0); - else { + + if (CHIP_IS_E3(bp)) { + if (vars->mac_type == MAC_TYPE_XMAC) + bnx2x_update_pfc_xmac(params, vars, 0); + } else { val = REG_RD(bp, MISC_REG_RESET_REG_2); if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) @@ -2825,16 +2874,18 @@ static int bnx2x_bmac2_enable(struct link_params *params, static int bnx2x_bmac_enable(struct link_params *params, struct link_vars *vars, - u8 is_lb) + u8 is_lb, u8 reset_bmac) { int rc = 0; u8 port = params->port; struct bnx2x *bp = params->bp; u32 val; /* Reset and unreset the BigMac */ - REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 
<< port)); - usleep_range(1000, 2000); + if (reset_bmac) { + REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); + usleep_range(1000, 2000); + } REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); @@ -2866,37 +2917,28 @@ static int bnx2x_bmac_enable(struct link_params *params, return rc; } -static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) +static void bnx2x_set_bmac_rx(struct bnx2x *bp, u32 chip_id, u8 port, u8 en) { u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM; u32 wb_data[2]; u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4); + if (CHIP_IS_E2(bp)) + bmac_addr += BIGMAC2_REGISTER_BMAC_CONTROL; + else + bmac_addr += BIGMAC_REGISTER_BMAC_CONTROL; /* Only if the bmac is out of reset */ if (REG_RD(bp, MISC_REG_RESET_REG_2) & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) && nig_bmac_enable) { - - if (CHIP_IS_E2(bp)) { - /* Clear Rx Enable bit in BMAC_CONTROL register */ - REG_RD_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); - wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; - REG_WR_DMAE(bp, bmac_addr + - BIGMAC2_REGISTER_BMAC_CONTROL, - wb_data, 2); - } else { - /* Clear Rx Enable bit in BMAC_CONTROL register */ - REG_RD_DMAE(bp, bmac_addr + - BIGMAC_REGISTER_BMAC_CONTROL, - wb_data, 2); + /* Clear Rx Enable bit in BMAC_CONTROL register */ + REG_RD_DMAE(bp, bmac_addr, wb_data, 2); + if (en) + wb_data[0] |= BMAC_CONTROL_RX_ENABLE; + else wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE; - REG_WR_DMAE(bp, bmac_addr + - BIGMAC_REGISTER_BMAC_CONTROL, - wb_data, 2); - } + REG_WR_DMAE(bp, bmac_addr, wb_data, 2); usleep_range(1000, 2000); } } @@ -3231,6 +3273,245 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, EMAC_MDIO_STATUS_10MB); return rc; } + +/******************************************************************/ +/* EEE section */ +/******************************************************************/ +static u8 bnx2x_eee_has_cap(struct link_params *params) +{ + struct bnx2x *bp = params->bp; + + if (REG_RD(bp, params->shmem2_base) <= + offsetof(struct shmem2_region, eee_status[params->port])) + return 0; + + return 1; +} + +static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer) +{ + switch (nvram_mode) { + case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED: + *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE: + *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME; + break; + case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY: + *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME; + break; + default: + *idle_timer = 0; + break; + } + + return 0; +} + +static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode) +{ + switch (idle_timer) { + case EEE_MODE_NVRAM_BALANCED_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED; + break; + case EEE_MODE_NVRAM_AGGRESSIVE_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE; + break; + case EEE_MODE_NVRAM_LATENCY_TIME: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY; + break; + default: + *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED; + break; + } + + return 0; +} + +static u32 bnx2x_eee_calc_timer(struct link_params *params) +{ + u32 eee_mode, eee_idle; + struct bnx2x *bp = params->bp; + + if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) { + if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { + /* time value in eee_mode --> used directly*/ + eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK; 
+ } else { + /* hsi value in eee_mode --> time */ + if (bnx2x_eee_nvram_to_time(params->eee_mode & + EEE_MODE_NVRAM_MASK, + &eee_idle)) + return 0; + } + } else { + /* hsi values in nvram --> time */ + eee_mode = ((REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_feature_config[params->port]. + eee_power_mode)) & + PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >> + PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT); + + if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle)) + return 0; + } + + return eee_idle; +} + +static int bnx2x_eee_set_timers(struct link_params *params, + struct link_vars *vars) +{ + u32 eee_idle = 0, eee_mode; + struct bnx2x *bp = params->bp; + + eee_idle = bnx2x_eee_calc_timer(params); + + if (eee_idle) { + REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), + eee_idle); + } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) && + (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) && + (params->eee_mode & EEE_MODE_OUTPUT_TIME)) { + DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n"); + return -EINVAL; + } + + vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT); + if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { + /* eee_idle in 1us units --> eee_status in 16us units */ + eee_idle >>= 4; + vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) | + SHMEM_EEE_TIME_OUTPUT_BIT; + } else { + if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode)) + return -EINVAL; + vars->eee_status |= eee_mode; + } + + return 0; +} + +static int bnx2x_eee_initial_config(struct link_params *params, + struct link_vars *vars, u8 mode) +{ + vars->eee_status |= ((u32) mode) << SHMEM_EEE_SUPPORTED_SHIFT; + + /* Propagate params' bits --> vars (for migration exposure) */ + if (params->eee_mode & EEE_MODE_ENABLE_LPI) + vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT; + + if (params->eee_mode & EEE_MODE_ADV_LPI) + vars->eee_status |= SHMEM_EEE_REQUESTED_BIT; + else + vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT; + + return bnx2x_eee_set_timers(params, vars); +} + +static int bnx2x_eee_disable(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + + /* Make certain LPI is disabled */ + REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x0); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + + return 0; +} + +static int bnx2x_eee_advertise(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, u8 modes) +{ + struct bnx2x *bp = params->bp; + u16 val = 0; + + /* Mask events preventing LPI generation */ + REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20); + + if (modes & SHMEM_EEE_10G_ADV) { + DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n"); + val |= 0x8; + } + if (modes & SHMEM_EEE_1G_ADV) { + DP(NETIF_MSG_LINK, "Advertise 1GBase-T EEE\n"); + val |= 0x4; + } + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, val); + + vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; + vars->eee_status |= (modes << SHMEM_EEE_ADV_STATUS_SHIFT); + + return 0; +} + +static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status) +{ + struct bnx2x *bp = params->bp; + + if (bnx2x_eee_has_cap(params)) + REG_WR(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port]), eee_status); +} + +static void bnx2x_eee_an_resolve(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars) +{ + struct 
bnx2x *bp = params->bp; + u16 adv = 0, lp = 0; + u32 lp_adv = 0; + u8 neg = 0; + + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, &adv); + bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LP_EEE_ADV, &lp); + + if (lp & 0x2) { + lp_adv |= SHMEM_EEE_100M_ADV; + if (adv & 0x2) { + if (vars->line_speed == SPEED_100) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 100M\n"); + } + } + if (lp & 0x14) { + lp_adv |= SHMEM_EEE_1G_ADV; + if (adv & 0x14) { + if (vars->line_speed == SPEED_1000) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 1G\n"); + } + } + if (lp & 0x68) { + lp_adv |= SHMEM_EEE_10G_ADV; + if (adv & 0x68) { + if (vars->line_speed == SPEED_10000) + neg = 1; + DP(NETIF_MSG_LINK, "EEE negotiated - 10G\n"); + } + } + + vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK; + vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT); + + if (neg) { + DP(NETIF_MSG_LINK, "EEE is active\n"); + vars->eee_status |= SHMEM_EEE_ACTIVE_BIT; + } + +} + /******************************************************************/ /* BSC access functions from E3 */ /******************************************************************/ @@ -3752,6 +4033,19 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, * init configuration, and set/clear SGMII flag. Internal * phy init is done purely in phy_init stage. */ + +static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy, + struct link_params *params) +{ + struct bnx2x *bp = params->bp; + + DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n"); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c); + bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC5, 0xc000); +} + static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { @@ -4011,13 +4305,7 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8080); - /* Enable LPI pass through */ - DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n"); - bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_EEE_COMBO_CONTROL0, - 0x7c); - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC5, 0xc000); + bnx2x_warpcore_set_lpi_passthrough(phy, params); /* 10G XFI Full Duplex */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, @@ -4114,6 +4402,8 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13)); + bnx2x_warpcore_set_lpi_passthrough(phy, params); + if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) { /* SGMII Autoneg */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@ -4407,7 +4697,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, "serdes_net_if = 0x%x\n", vars->line_speed, serdes_net_if); bnx2x_set_aer_mmd(params, phy); - + bnx2x_warpcore_reset_lane(bp, phy, 1); vars->phy_flags |= PHY_XGXS_FLAG; if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) || (phy->req_line_speed && @@ -4716,6 +5006,10 @@ void bnx2x_link_status_update(struct link_params *params, vars->link_status = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, port_mb[port].link_status)); + if (bnx2x_eee_has_cap(params)) + vars->eee_status = REG_RD(bp, params->shmem2_base + + offsetof(struct shmem2_region, + eee_status[params->port])); vars->phy_flags = PHY_XGXS_FLAG; bnx2x_sync_link(params, vars); @@ -5432,7 +5726,7 @@ static int bnx2x_get_link_speed_duplex(struct 
bnx2x_phy *phy, switch (speed_mask) { case GP_STATUS_10M: vars->line_speed = SPEED_10; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_10TFD; else vars->link_status |= LINK_10THD; @@ -5440,7 +5734,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, case GP_STATUS_100M: vars->line_speed = SPEED_100; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_100TXFD; else vars->link_status |= LINK_100TXHD; @@ -5449,7 +5743,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, case GP_STATUS_1G: case GP_STATUS_1G_KX: vars->line_speed = SPEED_1000; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_1000TFD; else vars->link_status |= LINK_1000THD; @@ -5457,7 +5751,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, case GP_STATUS_2_5G: vars->line_speed = SPEED_2500; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_2500TFD; else vars->link_status |= LINK_2500THD; @@ -5531,6 +5825,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy, if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { if (SINGLE_MEDIA_DIRECT(params)) { + vars->duplex = duplex; bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status); if (phy->req_line_speed == SPEED_AUTO_NEG) bnx2x_xgxs_an_resolve(phy, params, vars, @@ -5625,6 +5920,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, LINK_STATUS_PARALLEL_DETECTION_USED; } bnx2x_ext_phy_resolve_fc(phy, params, vars); + vars->duplex = duplex; } } @@ -6526,25 +6822,21 @@ static int bnx2x_update_link_down(struct link_params *params, usleep_range(10000, 20000); /* Reset BigMac/Xmac */ if (CHIP_IS_E1x(bp) || - CHIP_IS_E2(bp)) { - bnx2x_bmac_rx_disable(bp, params->port); - REG_WR(bp, GRCBASE_MISC + - MISC_REGISTERS_RESET_REG_2_CLEAR, - (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - } + CHIP_IS_E2(bp)) + bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); + if (CHIP_IS_E3(bp)) { /* Prevent LPI Generation by chip */ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); - REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0); REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2), 0); vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | SHMEM_EEE_ACTIVE_BIT); bnx2x_update_mng_eee(params, vars->eee_status); - bnx2x_xmac_disable(params); - bnx2x_umac_disable(params); + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); } return 0; @@ -6596,7 +6888,7 @@ static int bnx2x_update_link_up(struct link_params *params, if ((CHIP_IS_E1x(bp) || CHIP_IS_E2(bp))) { if (link_10g) { - if (bnx2x_bmac_enable(params, vars, 0) == + if (bnx2x_bmac_enable(params, vars, 0, 1) == -ESRCH) { DP(NETIF_MSG_LINK, "Found errors on BMAC\n"); vars->link_up = 0; @@ -7203,6 +7495,22 @@ static void bnx2x_8073_set_pause_cl37(struct link_params *params, msleep(500); } +static void bnx2x_8073_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + /* Enable LASI */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); + break; + } +} + static int bnx2x_8073_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -7223,12 +7531,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 
MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); - /* Enable LASI */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0004); - + bnx2x_8073_specific_func(phy, params, PHY_INIT); bnx2x_8073_set_pause_cl37(params, phy, vars); bnx2x_cl45_read(bp, phy, @@ -8263,7 +8566,7 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, u32 action) { struct bnx2x *bp = params->bp; - + u16 val; switch (action) { case DISABLE_TX: bnx2x_sfp_set_transmitter(params, phy, 0); @@ -8272,6 +8575,40 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy, if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) bnx2x_sfp_set_transmitter(params, phy, 1); break; + case PHY_INIT: + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, + (1<<2) | (1<<5)); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, + 0); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x0006); + /* Make MOD_ABS give interrupt on change */ + bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_OPT_CTRL, + &val); + val |= (1<<12); + if (phy->flags & FLAGS_NOC) + val |= (3<<5); + /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 + * status, which reflects SFP+ module over-current + */ + if (!(phy->flags & FLAGS_NOC)) + val &= 0xff8f; /* Reset bits 4-6 */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, + val); + + /* Set 2-wire transfer rate of SFP+ module EEPROM + * to 100kHz, since some DACs (direct attached cables) do + * not work at 400kHz. + */ + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, + 0xa001); + break; default: DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n", action); @@ -9054,28 +9391,15 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, struct link_vars *vars) { u32 tx_en_mode; - u16 tmp1, val, mod_abs, tmp2; - u16 rx_alarm_ctrl_val; - u16 lasi_ctrl_val; + u16 tmp1, mod_abs, tmp2; struct bnx2x *bp = params->bp; /* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */ bnx2x_wait_reset_complete(bp, phy, params); - rx_alarm_ctrl_val = (1<<2) | (1<<5) ; - /* Should be 0x6 to enable XS on Tx side. 
*/ - lasi_ctrl_val = 0x0006; DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); - /* Enable LASI */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, - rx_alarm_ctrl_val); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL, - 0); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val); + bnx2x_8727_specific_func(phy, params, PHY_INIT); /* Initially configure MOD_ABS to interrupt when module is * present (bit 8) */ @@ -9091,25 +9415,9 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); - /* Enable/Disable PHY transmitter output */ bnx2x_set_disable_pmd_transmit(params, phy, 0); - /* Make MOD_ABS give interrupt on change */ - bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, - &val); - val |= (1<<12); - if (phy->flags & FLAGS_NOC) - val |= (3<<5); - - /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0 - * status which reflect SFP+ module over-current - */ - if (!(phy->flags & FLAGS_NOC)) - val &= 0xff8f; /* Reset bits 4-6 */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val); - bnx2x_8727_power_module(bp, phy, 1); bnx2x_cl45_read(bp, phy, @@ -9119,13 +9427,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); bnx2x_8727_config_speed(phy, params); - /* Set 2-wire transfer rate of SFP+ module EEPROM - * to 100Khz since some DACs(direct attached cables) do - * not work at 400Khz. - */ - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR, - 0xa001); + /* Set TX PreEmphasis if needed */ if ((params->feature_config_flags & @@ -9554,6 +9856,29 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp, 0xFFFB, 0xFFFD); } +static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + switch (action) { + case PHY_INIT: + if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { + /* Save spirom version */ + bnx2x_save_848xx_spirom_version(phy, bp, params->port); + } + /* This phy uses the NIG latch mechanism since link indication + * arrives through its LED4 and not via its LASI signal, so we + * get steady signal instead of clear on read + */ + bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, + 1 << NIG_LATCH_BC_ENABLE_MI_INT); + + bnx2x_848xx_set_led(bp, phy); + break; + } +} + static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -9561,22 +9886,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val; - if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { - /* Save spirom version */ - bnx2x_save_848xx_spirom_version(phy, bp, params->port); - } - /* This phy uses the NIG latch mechanism since link indication - * arrives through its LED4 and not via its LASI signal, so we - * get steady signal instead of clear on read - */ - bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, - 1 << NIG_LATCH_BC_ENABLE_MI_INT); - + bnx2x_848xx_specific_func(phy, params, PHY_INIT); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000); - bnx2x_848xx_set_led(bp, phy); - /* set 1000 speed advertisement */ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL, @@ -9883,39 +10196,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy, return 0; }
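A recurring refactor in the hunks above is worth spelling out: per-PHY init side effects (LASI enables, MOD_ABS interrupt setup, LED latch configuration, the EEPROM two-wire rate) move out of the config_init routines and behind each PHY's phy_specific_func callback, keyed by the new PHY_INIT action. That indirection is what later lets bnx2x_avoid_link_flap() replay only those side effects by walking params->phy[] and invoking the hook, instead of performing a full link reset. Below is a minimal, self-contained sketch of the dispatch shape; the types and bodies (sketch_phy, the printf placeholders) are simplified illustrations, not the driver's real code:

/* Hedged sketch of the PHY_INIT callback dispatch introduced above. */
#include <stdio.h>

enum phy_action { DISABLE_TX = 1, ENABLE_TX = 2, PHY_INIT = 3 };

struct sketch_phy;
typedef void (*phy_specific_func_t)(struct sketch_phy *phy,
				    unsigned int action);

struct sketch_phy {
	const char *name;
	phy_specific_func_t phy_specific_func;
};

static void sketch_8727_specific_func(struct sketch_phy *phy,
				      unsigned int action)
{
	switch (action) {
	case PHY_INIT:
		/* In the driver this is where the LASI registers, the
		 * MOD_ABS interrupt and the 100kHz two-wire EEPROM rate
		 * would be programmed. */
		printf("%s: PHY_INIT side effects\n", phy->name);
		break;
	case DISABLE_TX:
	case ENABLE_TX:
		printf("%s: tx action %u\n", phy->name, action);
		break;
	}
}

int main(void)
{
	struct sketch_phy phys[] = {
		{ "8727", sketch_8727_specific_func },
		{ "848xx", sketch_8727_specific_func }, /* one hook per PHY */
	};
	unsigned int i;

	/* What bnx2x_avoid_link_flap() effectively does: re-run only the
	 * PHY_INIT hook for each populated PHY instead of a full reset. */
	for (i = 0; i < sizeof(phys) / sizeof(phys[0]); i++)
		if (phys[i].phy_specific_func)
			phys[i].phy_specific_func(&phys[i], PHY_INIT);
	return 0;
}

The same callbacks are then wired into the phy_8073, phy_84823, phy_84833 and phy_54618se descriptor tables further down, replacing their NULL phy_specific_func entries; the EEE timer helper removed just below was likewise folded into the common bnx2x_eee_set_timers() earlier in this file.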
-static int bnx2x_8483x_eee_timers(struct link_params *params, - struct link_vars *vars) -{ - u32 eee_idle = 0, eee_mode; - struct bnx2x *bp = params->bp; - - eee_idle = bnx2x_eee_calc_timer(params); - - if (eee_idle) { - REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2), - eee_idle); - } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) && - (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) && - (params->eee_mode & EEE_MODE_OUTPUT_TIME)) { - DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n"); - return -EINVAL; - } - - vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT); - if (params->eee_mode & EEE_MODE_OUTPUT_TIME) { - /* eee_idle in 1u --> eee_status in 16u */ - eee_idle >>= 4; - vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) | - SHMEM_EEE_TIME_OUTPUT_BIT; - } else { - if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode)) - return -EINVAL; - vars->eee_status |= eee_mode; - } - - return 0; -} - static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -9926,10 +10206,6 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); - /* Make Certain LPI is disabled */ - REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); - REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0); - /* Prevent Phy from working in EEE and advertising it */ rc = bnx2x_84833_cmd_hdlr(phy, params, PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); @@ -9938,10 +10214,7 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, return rc; } - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0); - vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; - - return 0; + return bnx2x_eee_disable(phy, params, vars); } static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, @@ -9952,8 +10225,6 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 cmd_args = 1; - DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n"); - rc = bnx2x_84833_cmd_hdlr(phy, params, PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); if (rc) { @@ -9961,15 +10232,7 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, return rc; } - bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8); - - /* Mask events preventing LPI generation */ - REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20); - - vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK; - vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT); - - return 0; + return bnx2x_eee_advertise(phy, params, vars, SHMEM_EEE_10G_ADV); } #define PHY84833_CONSTANT_LATENCY 1193 @@ -10101,22 +10364,10 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, MDIO_84833_TOP_CFG_FW_REV, &val); /* Configure EEE support */ - if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) { - phy->flags |= FLAGS_EEE_10GBT; - vars->eee_status |= SHMEM_EEE_10G_ADV << - SHMEM_EEE_SUPPORTED_SHIFT; - /* Propogate params' bits --> vars (for migration exposure) */ - if (params->eee_mode & EEE_MODE_ENABLE_LPI) - vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT; - else - vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT; - - if (params->eee_mode & EEE_MODE_ADV_LPI) - vars->eee_status |= SHMEM_EEE_REQUESTED_BIT; - else - vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT; - - rc = bnx2x_8483x_eee_timers(params, vars); + if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && + (val != MDIO_84833_TOP_CFG_FW_NO_EEE) && + bnx2x_eee_has_cap(params)) { + rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV); 
if (rc) { DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); bnx2x_8483x_disable_eee(phy, params, vars); @@ -10135,7 +10386,6 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, return rc; } } else { - phy->flags &= ~FLAGS_EEE_10GBT; vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; } @@ -10274,29 +10524,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; /* Determine if EEE was negotiated */ - if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { - u32 eee_shmem = 0; - - bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, - MDIO_AN_REG_EEE_ADV, &val1); - bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, - MDIO_AN_REG_LP_EEE_ADV, &val2); - if ((val1 & val2) & 0x8) { - DP(NETIF_MSG_LINK, "EEE negotiated\n"); - vars->eee_status |= SHMEM_EEE_ACTIVE_BIT; - } - - if (val2 & 0x12) - eee_shmem |= SHMEM_EEE_100M_ADV; - if (val2 & 0x4) - eee_shmem |= SHMEM_EEE_1G_ADV; - if (val2 & 0x68) - eee_shmem |= SHMEM_EEE_10G_ADV; - - vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK; - vars->eee_status |= (eee_shmem << - SHMEM_EEE_LP_ADV_STATUS_SHIFT); - } + if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) + bnx2x_eee_an_resolve(phy, params, vars); } return link_up; @@ -10565,6 +10794,35 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy, /******************************************************************/ /* 54618SE PHY SECTION */ /******************************************************************/ +static void bnx2x_54618se_specific_func(struct bnx2x_phy *phy, + struct link_params *params, + u32 action) +{ + struct bnx2x *bp = params->bp; + u16 temp; + switch (action) { + case PHY_INIT: + /* Configure LED4: set to INTR (0x6). */ + /* Accessing shadow register 0xe. */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_LED_SEL2); + bnx2x_cl22_read(bp, phy, + MDIO_REG_GPHY_SHADOW, + &temp); + temp &= ~(0xf << 4); + temp |= (0x6 << 4); + bnx2x_cl22_write(bp, phy, + MDIO_REG_GPHY_SHADOW, + MDIO_REG_GPHY_SHADOW_WR_ENA | temp); + /* Configure INTR based on link status change. */ + bnx2x_cl22_write(bp, phy, + MDIO_REG_INTR_MASK, + ~MDIO_REG_INTR_MASK_LINK_STATUS); + break; + } +} + static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -10602,24 +10860,8 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, /* Wait for GPHY to reset */ msleep(50); - /* Configure LED4: set to INTR (0x6). */ - /* Accessing shadow register 0xe. */ - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_SHADOW, - MDIO_REG_GPHY_SHADOW_LED_SEL2); - bnx2x_cl22_read(bp, phy, - MDIO_REG_GPHY_SHADOW, - &temp); - temp &= ~(0xf << 4); - temp |= (0x6 << 4); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_SHADOW, - MDIO_REG_GPHY_SHADOW_WR_ENA | temp); - /* Configure INTR based on link status change. */ - bnx2x_cl22_write(bp, phy, - MDIO_REG_INTR_MASK, - ~MDIO_REG_INTR_MASK_LINK_STATUS); + bnx2x_54618se_specific_func(phy, params, PHY_INIT); /* Flip the signal detect polarity (set 0x1c.0x1e[8]). 
*/ bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_SHADOW, @@ -10724,28 +10966,52 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Setting 10M force\n"); } - /* Check if we should turn on Auto-GrEEEn */ - bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp); - if (temp == MDIO_REG_GPHY_ID_54618SE) { - if (params->feature_config_flags & - FEATURE_CONFIG_AUTOGREEEN_ENABLED) { - temp = 6; - DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); + if ((phy->flags & FLAGS_EEE) && bnx2x_eee_has_cap(params)) { + int rc; + + bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS, + MDIO_REG_GPHY_EXP_ACCESS_TOP | + MDIO_REG_GPHY_EXP_TOP_2K_BUF); + bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, &temp); + temp &= 0xfffe; + bnx2x_cl22_write(bp, phy, MDIO_REG_GPHY_EXP_ACCESS_GATE, temp); + + rc = bnx2x_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV); + if (rc) { + DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); + bnx2x_eee_disable(phy, params, vars); + } else if ((params->eee_mode & EEE_MODE_ADV_LPI) && + (phy->req_duplex == DUPLEX_FULL) && + (bnx2x_eee_calc_timer(params) || + !(params->eee_mode & EEE_MODE_ENABLE_LPI))) { + /* Need to advertise EEE only when requested, + * and either no LPI assertion was requested, + * or it was requested and a valid timer was set. + * Also notice full duplex is required for EEE. + */ + bnx2x_eee_advertise(phy, params, vars, + SHMEM_EEE_1G_ADV); } else { - temp = 0; - DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n"); + DP(NETIF_MSG_LINK, "Don't Advertise 1GBase-T EEE\n"); + bnx2x_eee_disable(phy, params, vars); + } + } else { + vars->eee_status &= ~SHMEM_EEE_1G_ADV << + SHMEM_EEE_SUPPORTED_SHIFT; + + if (phy->flags & FLAGS_EEE) { + /* Handle legacy auto-grEEEn */ + if (params->feature_config_flags & + FEATURE_CONFIG_AUTOGREEEN_ENABLED) { + temp = 6; + DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n"); + } else { + temp = 0; + DP(NETIF_MSG_LINK, "Don't Adv. EEE\n"); + } + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_AN_REG_EEE_ADV, temp); } - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - MDIO_REG_GPHY_EEE_ADV); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, - (0x1 << 14) | MDIO_AN_DEVAD); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - temp); } bnx2x_cl22_write(bp, phy, @@ -10892,29 +11158,6 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n", vars->line_speed); - /* Report whether EEE is resolved. 
*/ - bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val); - if (val == MDIO_REG_GPHY_ID_54618SE) { - if (vars->link_status & - LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) - val = 0; - else { - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, - MDIO_AN_DEVAD); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - MDIO_REG_GPHY_EEE_RESOLVED); - bnx2x_cl22_write(bp, phy, - MDIO_REG_GPHY_CL45_ADDR_REG, - (0x1 << 14) | MDIO_AN_DEVAD); - bnx2x_cl22_read(bp, phy, - MDIO_REG_GPHY_CL45_DATA_REG, - &val); - } - DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val); - } - bnx2x_ext_phy_resolve_fc(phy, params, vars); if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { @@ -10944,6 +11187,10 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, if (val & (1<<11)) vars->link_status |= LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE; + + if ((phy->flags & FLAGS_EEE) && + bnx2x_eee_has_cap(params)) + bnx2x_eee_an_resolve(phy, params, vars); } } return link_up; @@ -11349,7 +11596,7 @@ static struct bnx2x_phy phy_8073 = { .format_fw_ver = (format_fw_ver_t)bnx2x_format_ver, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)NULL, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_8073_specific_func }; static struct bnx2x_phy phy_8705 = { .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705, @@ -11542,7 +11789,7 @@ static struct bnx2x_phy phy_84823 = { .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func }; static struct bnx2x_phy phy_84833 = { @@ -11551,8 +11798,7 @@ static struct bnx2x_phy phy_84833 = { .def_md_devad = 0, .flags = (FLAGS_FAN_FAILURE_DET_REQ | FLAGS_REARM_LATCH_SIGNAL | - FLAGS_TX_ERROR_CHECK | - FLAGS_EEE_10GBT), + FLAGS_TX_ERROR_CHECK), .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, .mdio_ctrl = 0, @@ -11578,7 +11824,7 @@ static struct bnx2x_phy phy_84833 = { .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func }; static struct bnx2x_phy phy_54618se = { @@ -11612,7 +11858,7 @@ static struct bnx2x_phy phy_54618se = { .format_fw_ver = (format_fw_ver_t)NULL, .hw_reset = (hw_reset_t)NULL, .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led, - .phy_specific_func = (phy_specific_func_t)NULL + .phy_specific_func = (phy_specific_func_t)bnx2x_54618se_specific_func }; /*****************************************************************/ /* */ @@ -11858,6 +12104,8 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp, case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54616: case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE: *phy = phy_54618se; + if (phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) + phy->flags |= FLAGS_EEE; break; case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101: *phy = phy_7101; @@ -12137,7 +12385,7 @@ void bnx2x_init_bmac_loopback(struct link_params *params, bnx2x_xgxs_deassert(params); /* set bmac loopback */ - bnx2x_bmac_enable(params, vars, 1); + bnx2x_bmac_enable(params, vars, 1, 1); REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); } @@ -12229,7 +12477,7 @@ void bnx2x_init_xgxs_loopback(struct link_params *params, if 
(USES_WARPCORE(bp)) bnx2x_xmac_enable(params, vars, 0); else - bnx2x_bmac_enable(params, vars, 0); + bnx2x_bmac_enable(params, vars, 0, 1); } if (params->loopback_mode == LOOPBACK_XGXS) { @@ -12254,8 +12502,161 @@ void bnx2x_init_xgxs_loopback(struct link_params *params, bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed); } +static void bnx2x_set_rx_filter(struct link_params *params, u8 en) +{ + struct bnx2x *bp = params->bp; + u8 val = en * 0x1F; + + /* Open the gate between the NIG to the BRB */ + if (!CHIP_IS_E1x(bp)) + val |= en * 0x20; + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + params->port*4, val); + + if (!CHIP_IS_E1(bp)) { + REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + params->port*4, + en*0x3); + } + + REG_WR(bp, (params->port ? NIG_REG_LLH1_BRB1_NOT_MCP : + NIG_REG_LLH0_BRB1_NOT_MCP), en); +} +static int bnx2x_avoid_link_flap(struct link_params *params, + struct link_vars *vars) +{ + u32 phy_idx; + u32 dont_clear_stat, lfa_sts; + struct bnx2x *bp = params->bp; + + /* Sync the link parameters */ + bnx2x_link_status_update(params, vars); + + /* + * The module verification was already done by the previous link owner, + * so this call is meant only to get a warning message + */ + + for (phy_idx = INT_PHY; phy_idx < params->num_phys; phy_idx++) { + struct bnx2x_phy *phy = &params->phy[phy_idx]; + if (phy->phy_specific_func) { + DP(NETIF_MSG_LINK, "Calling PHY specific func\n"); + phy->phy_specific_func(phy, params, PHY_INIT); + } + if ((phy->media_type == ETH_PHY_SFPP_10G_FIBER) || + (phy->media_type == ETH_PHY_SFP_1G_FIBER) || + (phy->media_type == ETH_PHY_DA_TWINAX)) + bnx2x_verify_sfp_module(phy, params); + } + lfa_sts = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, + lfa_sts)); + + dont_clear_stat = lfa_sts & SHMEM_LFA_DONT_CLEAR_STAT; + + /* Re-enable the NIG/MAC */ + if (CHIP_IS_E3(bp)) { + if (!dont_clear_stat) { + REG_WR(bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_CLEAR, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + REG_WR(bp, GRCBASE_MISC + + MISC_REGISTERS_RESET_REG_2_SET, + (MISC_REGISTERS_RESET_REG_2_MSTAT0 << + params->port)); + } + if (vars->line_speed < SPEED_10000) + bnx2x_umac_enable(params, vars, 0); + else + bnx2x_xmac_enable(params, vars, 0); + } else { + if (vars->line_speed < SPEED_10000) + bnx2x_emac_enable(params, vars, 0); + else + bnx2x_bmac_enable(params, vars, 0, !dont_clear_stat); + } + + /* Increment LFA count */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_AVOIDANCE_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_AVOIDANCE_COUNT_MASK) >> + LINK_FLAP_AVOIDANCE_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_AVOIDANCE_COUNT_OFFSET)); + /* Clear link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + + /* Disable NIG DRAIN */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + + /* Enable interrupts */ + bnx2x_link_int_enable(params); + return 0; +} + +static void bnx2x_cannot_avoid_link_flap(struct link_params *params, + struct link_vars *vars, + int lfa_status) +{ + u32 lfa_sts, cfg_idx, tmp_val; + struct bnx2x *bp = params->bp; + + bnx2x_link_reset(params, vars, 1); + + if (!params->lfa_base) + return; + /* Store the new link parameters */ + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_duplex), + params->req_duplex[0] | (params->req_duplex[1] << 16)); + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, req_flow_ctrl), + params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16)); + + REG_WR(bp, params->lfa_base + 
offsetof(struct shmem_lfa, req_line_speed), + params->req_line_speed[0] | (params->req_line_speed[1] << 16)); + + for (cfg_idx = 0; cfg_idx < SHMEM_LINK_CONFIG_SIZE; cfg_idx++) { + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, + speed_cap_mask[cfg_idx]), + params->speed_cap_mask[cfg_idx]); + } + + tmp_val = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config)); + tmp_val &= ~REQ_FC_AUTO_ADV_MASK; + tmp_val |= params->req_fc_auto_adv; + + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, additional_config), tmp_val); + + lfa_sts = REG_RD(bp, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts)); + + /* Clear the "Don't Clear Statistics" bit, and set reason */ + lfa_sts &= ~SHMEM_LFA_DONT_CLEAR_STAT; + + /* Set link flap reason */ + lfa_sts &= ~LFA_LINK_FLAP_REASON_MASK; + lfa_sts |= ((lfa_status & LFA_LINK_FLAP_REASON_MASK) << + LFA_LINK_FLAP_REASON_OFFSET); + + /* Increment link flap counter */ + lfa_sts = ((lfa_sts & ~LINK_FLAP_COUNT_MASK) | + (((((lfa_sts & LINK_FLAP_COUNT_MASK) >> + LINK_FLAP_COUNT_OFFSET) + 1) & 0xff) + << LINK_FLAP_COUNT_OFFSET)); + REG_WR(bp, params->lfa_base + + offsetof(struct shmem_lfa, lfa_sts), lfa_sts); + /* Proceed with regular link initialization */ +} + int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) { + int lfa_status; struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "Phy Initialization started\n"); DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n", @@ -12270,6 +12671,19 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; vars->mac_type = MAC_TYPE_NONE; vars->phy_flags = 0; + /* Driver opens NIG-BRB filters */ + bnx2x_set_rx_filter(params, 1); + /* Check if link flap can be avoided */ + lfa_status = bnx2x_check_lfa(params); + + if (lfa_status == 0) { + DP(NETIF_MSG_LINK, "Link Flap Avoidance in progress\n"); + return bnx2x_avoid_link_flap(params, vars); + } + + DP(NETIF_MSG_LINK, "Cannot avoid link flap lfa_status=0x%x\n", + lfa_status); + bnx2x_cannot_avoid_link_flap(params, vars, lfa_status); /* Disable attentions */ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, @@ -12352,13 +12766,12 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); } - /* Stop BigMac rx */ - if (!CHIP_IS_E3(bp)) - bnx2x_bmac_rx_disable(bp, port); - else { - bnx2x_xmac_disable(params); - bnx2x_umac_disable(params); - } + if (!CHIP_IS_E3(bp)) { + bnx2x_set_bmac_rx(bp, params->chip_id, port, 0); + } else { + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); + } /* Disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); @@ -12416,6 +12829,56 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, vars->phy_flags = 0; return 0; } +int bnx2x_lfa_reset(struct link_params *params, + struct link_vars *vars) +{ + struct bnx2x *bp = params->bp; + vars->link_up = 0; + vars->phy_flags = 0; + if (!params->lfa_base) + return bnx2x_link_reset(params, vars, 1); + /* + * Activate NIG drain so that during this time the device won't send + * anything while it is unable to respond. + */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); + + /* + * Close gracefully the gate from BMAC to NIG such that no half packets + * are passed. 
+ */ + if (!CHIP_IS_E3(bp)) + bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 0); + + if (CHIP_IS_E3(bp)) { + bnx2x_set_xmac_rxtx(params, 0); + bnx2x_set_umac_rxtx(params, 0); + } + /* Wait 10ms for the pipe to clean up */ + usleep_range(10000, 20000); + + /* Clean the NIG-BRB using the network filters in a way that will + * not cut a packet in the middle. + */ + bnx2x_set_rx_filter(params, 0); + + /* + * Re-open the gate between the BMAC and the NIG, after verifying the + * gate to the BRB is closed, otherwise packets may arrive at the + * firmware before the driver has initialized it. The target is to achieve + * minimum management protocol down time. + */ + if (!CHIP_IS_E3(bp)) + bnx2x_set_bmac_rx(bp, params->chip_id, params->port, 1); + + if (CHIP_IS_E3(bp)) { + bnx2x_set_xmac_rxtx(params, 1); + bnx2x_set_umac_rxtx(params, 1); + } + /* Disable NIG drain */ + REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); + return 0; +} /****************************************************************************/ /* Common function */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 51cac8130051..9165b89a4b19 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -155,7 +155,7 @@ struct bnx2x_phy { #define FLAGS_DUMMY_READ (1<<9) #define FLAGS_MDC_MDIO_WA_B0 (1<<10) #define FLAGS_TX_ERROR_CHECK (1<<12) -#define FLAGS_EEE_10GBT (1<<13) +#define FLAGS_EEE (1<<13) /* preemphasis values for the rx side */ u16 rx_preemphasis[4]; @@ -216,6 +216,7 @@ struct bnx2x_phy { phy_specific_func_t phy_specific_func; #define DISABLE_TX 1 #define ENABLE_TX 2 +#define PHY_INIT 3 }; /* Inputs parameters to the CLC */ @@ -304,6 +305,8 @@ struct link_params { struct bnx2x *bp; u16 req_fc_auto_adv; /* Should be set to TX / BOTH when req_flow_ctrl is set to AUTO */ + u16 rsrv1; + u32 lfa_base; }; /* Output parameters */ @@ -356,7 +359,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars); to 0 */ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, u8 reset_ext_phy); - +int bnx2x_lfa_reset(struct link_params *params, struct link_vars *vars); /* bnx2x_link_update should be called upon link interrupt */ int bnx2x_link_update(struct link_params *params, struct link_vars *vars); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index dd451c3dd83d..7a9157052c7c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -2171,7 +2171,6 @@ void bnx2x_link_set(struct bnx2x *bp) { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp); @@ -2184,12 +2183,19 @@ static void bnx2x__link_reset(struct bnx2x *bp) { if (!BP_NOMCP(bp)) { bnx2x_acquire_phy_lock(bp); - bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_lfa_reset(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp); } else BNX2X_ERR("Bootcode is missing - can not reset link\n"); } +void bnx2x_force_link_reset(struct bnx2x *bp) +{ + bnx2x_acquire_phy_lock(bp); + bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1); + bnx2x_release_phy_lock(bp); +} + u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes) { u8 rc = 0; @@ -4041,20 +4047,6 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) return val != 0; } -/* - * 
Reset the load status for the current engine. - */ -static void bnx2x_clear_load_status(struct bnx2x *bp) -{ - u32 val; - u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : - BNX2X_PATH0_LOAD_CNT_MASK); - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); - val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); -} - static void _print_next_block(int idx, const char *blk) { pr_cont("%s%s", idx ? ", " : "", blk); @@ -6771,7 +6763,6 @@ static int bnx2x_init_hw_port(struct bnx2x *bp) u32 low, high; u32 val; - bnx2x__link_reset(bp); DP(NETIF_MSG_HW, "starting port init port %d\n", port); @@ -7575,8 +7566,14 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, } rc = bnx2x_config_vlan_mac(bp, &ramrod_param); - if (rc < 0) + + if (rc == -EEXIST) { + DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); + /* do not treat adding same MAC as error */ + rc = 0; + } else if (rc < 0) BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del")); + return rc; } @@ -8258,12 +8255,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP. * * @bp: driver handle + * @keep_link: true iff link should be kept up */ -void bnx2x_send_unload_done(struct bnx2x *bp) +void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link) { + u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0; + /* Report UNLOAD_DONE to MCP */ if (!BP_NOMCP(bp)) - bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param); } static int bnx2x_func_wait_started(struct bnx2x *bp) @@ -8332,7 +8332,7 @@ static int bnx2x_func_wait_started(struct bnx2x *bp) return 0; } -void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) +void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) { int port = BP_PORT(bp); int i, rc = 0; @@ -8441,6 +8441,8 @@ unload_error: /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); /* Release IRQs */ bnx2x_free_irq(bp); @@ -8452,7 +8454,7 @@ unload_error: /* Report UNLOAD_DONE to MCP */ - bnx2x_send_unload_done(bp); + bnx2x_send_unload_done(bp, keep_link); } void bnx2x_disable_close_the_gate(struct bnx2x *bp) @@ -8864,7 +8866,8 @@ int bnx2x_leader_reset(struct bnx2x *bp) * driver is owner of the HW */ if (!global && !BP_NOMCP(bp)) { - load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0); + load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, + DRV_MSG_CODE_LOAD_REQ_WITH_LFA); if (!load_code) { BNX2X_ERR("MCP response failure, aborting\n"); rc = -EAGAIN; @@ -8970,7 +8973,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp) /* Stop the driver */ /* If interface has been removed - break */ - if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY)) + if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false)) return; bp->recovery_state = BNX2X_RECOVERY_WAIT; @@ -9136,7 +9139,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) bp->sp_rtnl_state = 0; smp_mb(); - bnx2x_nic_unload(bp, UNLOAD_NORMAL); + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); bnx2x_nic_load(bp, LOAD_NORMAL); goto sp_rtnl_exit; @@ -9322,7 +9325,8 @@ static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp) { - u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); + u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, + 
DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET); if (!rc) { BNX2X_ERR("MCP response failure, aborting\n"); return -EBUSY; @@ -9384,32 +9388,24 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp) return rc; } -static bool __devinit bnx2x_can_flr(struct bnx2x *bp) -{ - int pos; - u32 cap; - struct pci_dev *dev = bp->pdev; - - pos = pci_pcie_cap(dev); - if (!pos) - return false; - - pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap); - if (!(cap & PCI_EXP_DEVCAP_FLR)) - return false; - - return true; -} - static int __devinit bnx2x_do_flr(struct bnx2x *bp) { int i, pos; u16 status; struct pci_dev *dev = bp->pdev; - /* probe the capability first */ - if (bnx2x_can_flr(bp)) - return -ENOTTY; + + if (CHIP_IS_E1x(bp)) { + BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); + return -EINVAL; + } + + /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ + if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { + BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", + bp->common.bc_ver); + return -EINVAL; + } pos = pci_pcie_cap(dev); if (!pos) @@ -9429,12 +9425,8 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp) "transaction is not cleared; proceeding with reset anyway\n"); clear: - if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { - BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", - bp->common.bc_ver); - return -EINVAL; - } + BNX2X_DEV_INFO("Initiating FLR\n"); bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); return 0; @@ -9454,8 +9446,21 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp) * the one required, then FLR will be sufficient to clean any residue * left by previous driver */ - if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp)) - return bnx2x_do_flr(bp); + rc = bnx2x_test_firmware_version(bp, false); + + if (!rc) { + /* fw version is good */ + BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n"); + rc = bnx2x_do_flr(bp); + } + + if (!rc) { + /* FLR was performed */ + BNX2X_DEV_INFO("FLR successful\n"); + return 0; + } + + BNX2X_DEV_INFO("Could not FLR\n"); /* Close the MCP request, return failure*/ rc = bnx2x_prev_mcp_done(bp); @@ -10305,13 +10310,11 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) dev_info.port_hw_config[port]. fcoe_wwn_node_name_lower); } else if (!IS_MF_SD(bp)) { - u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); - /* * Read the WWN info only if the FCoE feature is enabled for * this function. 
*/ - if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) bnx2x_get_ext_wwn_info(bp, func); } else if (IS_MF_FCOE_SD(bp)) @@ -11016,7 +11019,7 @@ static int bnx2x_close(struct net_device *dev) struct bnx2x *bp = netdev_priv(dev); /* Unload the driver, release IRQs */ - bnx2x_nic_unload(bp, UNLOAD_CLOSE); + bnx2x_nic_unload(bp, UNLOAD_CLOSE, false); /* Power off */ bnx2x_set_power_state(bp, PCI_D3hot); @@ -11084,7 +11087,14 @@ static int bnx2x_set_uc_list(struct bnx2x *bp) netdev_for_each_uc_addr(ha, dev) { rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, BNX2X_UC_LIST_MAC, &ramrod_flags); - if (rc < 0) { + if (rc == -EEXIST) { + DP(BNX2X_MSG_SP, + "Failed to schedule ADD operations: %d\n", rc); + /* do not treat adding same MAC as error */ + rc = 0; + + } else if (rc < 0) { + BNX2X_ERR("Failed to schedule ADD operations: %d\n", rc); return rc; @@ -11242,10 +11252,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void poll_bnx2x(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); + int i; - disable_irq(bp->pdev->irq); - bnx2x_interrupt(bp->pdev->irq, dev); - enable_irq(bp->pdev->irq); + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + } } #endif @@ -11427,9 +11439,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, if (!chip_is_e1x) REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); - /* Reset the load counter */ - bnx2x_clear_load_status(bp); - dev->watchdog_timeo = TX_TIMEOUT; dev->netdev_ops = &bnx2x_netdev_ops; @@ -11915,9 +11924,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, */ bnx2x_set_int_mode(bp); - /* Add all NAPI objects */ - bnx2x_add_all_napi(bp); - rc = register_netdev(dev); if (rc) { dev_err(&pdev->dev, "Cannot register net device\n"); @@ -11992,9 +11998,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) unregister_netdev(dev); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - /* Power on: we can't let PCI layer write to us while we are in D3 */ bnx2x_set_power_state(bp, PCI_D0); @@ -12041,6 +12044,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) bnx2x_tx_disable(bp); bnx2x_netif_stop(bp, 0); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); del_timer_sync(&bp->timer); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index 28a0bcfe61ff..1b1999d34c71 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -4949,6 +4949,10 @@ #define UMAC_COMMAND_CONFIG_REG_SW_RESET (0x1<<13) #define UMAC_COMMAND_CONFIG_REG_TX_ENA (0x1<<0) #define UMAC_REG_COMMAND_CONFIG 0x8 +/* [RW 16] This is the duration for which MAC must wait to go back to ACTIVE + * state from LPI state when it receives packet for transmission. The + * decrement unit is 1 micro-second. */ +#define UMAC_REG_EEE_WAKE_TIMER 0x6c /* [RW 32] Register Bit 0 refers to Bit 16 of the MAC address; Bit 1 refers * to bit 17 of the MAC address etc. */ #define UMAC_REG_MAC_ADDR0 0xc @@ -4958,6 +4962,8 @@ /* [RW 14] Defines a 14-Bit maximum frame length used by the MAC receive * logic to check frames. 
*/ #define UMAC_REG_MAXFR 0x14 +#define UMAC_REG_UMAC_EEE_CTRL 0x64 +#define UMAC_UMAC_EEE_CTRL_REG_EEE_EN (0x1<<3) /* [RW 8] The event id for aggregated interrupt 0 */ #define USDM_REG_AGG_INT_EVENT_0 0xc4038 #define USDM_REG_AGG_INT_EVENT_1 0xc403c @@ -6992,6 +6998,7 @@ The other bits are reserved and should be zero */ /* BCM84833 only */ #define MDIO_84833_TOP_CFG_FW_REV 0x400f #define MDIO_84833_TOP_CFG_FW_EEE 0x10b1 +#define MDIO_84833_TOP_CFG_FW_NO_EEE 0x1f81 #define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a #define MDIO_84833_SUPER_ISOLATE 0x8000 /* These are mailbox register set used by 84833. */ @@ -7160,10 +7167,11 @@ The other bits are reserved and should be zero */ #define MDIO_REG_GPHY_ID_54618SE 0x5cd5 #define MDIO_REG_GPHY_CL45_ADDR_REG 0xd #define MDIO_REG_GPHY_CL45_DATA_REG 0xe -#define MDIO_REG_GPHY_EEE_ADV 0x3c -#define MDIO_REG_GPHY_EEE_1G (0x1 << 2) -#define MDIO_REG_GPHY_EEE_100 (0x1 << 1) #define MDIO_REG_GPHY_EEE_RESOLVED 0x803e +#define MDIO_REG_GPHY_EXP_ACCESS_GATE 0x15 +#define MDIO_REG_GPHY_EXP_ACCESS 0x17 +#define MDIO_REG_GPHY_EXP_ACCESS_TOP 0xd00 +#define MDIO_REG_GPHY_EXP_TOP_2K_BUF 0x40 #define MDIO_REG_GPHY_AUX_STATUS 0x19 #define MDIO_REG_INTR_STATUS 0x1a #define MDIO_REG_INTR_MASK 0x1b diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 62f754bd0dfe..5a5fbf57c4b4 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -229,8 +229,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp, */ list_add_tail(&spacer.link, &o->pending_comp); mb(); - list_del(&elem->link); - list_add_tail(&elem->link, &o->pending_comp); + list_move_tail(&elem->link, &o->pending_comp); list_del(&spacer.link); } else break; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 332db64dd5be..348ed02d3c69 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -39,14 +39,39 @@ static inline long bnx2x_hilo(u32 *hiref) #endif } -static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) +static inline u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) { - u16 res = sizeof(struct host_port_stats) >> 2; + u16 res = 0; - /* if PFC stats are not supported by the MFW, don't DMA them */ - if (!(bp->flags & BC_SUPPORTS_PFC_STATS)) - res -= (sizeof(u32)*4) >> 2; + /* 'newest' convention - shmem2 contains the size of the port stats */ + if (SHMEM2_HAS(bp, sizeof_port_stats)) { + u32 size = SHMEM2_RD(bp, sizeof_port_stats); + if (size) + res = size; + /* prevent newer BC from causing buffer overflow */ + if (res > sizeof(struct host_port_stats)) + res = sizeof(struct host_port_stats); + } + + /* Older convention - all BCs support the port stats' fields up until + * the 'not_used' field + */ + if (!res) { + res = offsetof(struct host_port_stats, not_used) + 4; + + /* if PFC stats are supported by the MFW, DMA them as well */ + if (bp->flags & BC_SUPPORTS_PFC_STATS) { + res += offsetof(struct host_port_stats, + pfc_frames_rx_lo) - + offsetof(struct host_port_stats, + pfc_frames_tx_hi) + 4; + } + } + + res >>= 2; + + WARN_ON(res > 2 * DMAE_LEN32_RD_MAX); return res; } @@ -101,6 +126,11 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) if (CHIP_REV_IS_SLOW(bp)) return; + /* Update MCP's statistics if possible */ + if (bp->func_stx) + memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, + sizeof(bp->func_stats)); + /* loader */ if (bp->executer_idx) { int 
loader_idx = PMF_DMAE_C(bp); @@ -128,8 +158,6 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) } else if (bp->func_stx) { *stats_comp = 0; - memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, - sizeof(bp->func_stats)); bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); } } @@ -1151,9 +1179,11 @@ static void bnx2x_stats_update(struct bnx2x *bp) if (bp->port.pmf) bnx2x_hw_stats_update(bp); - if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { - BNX2X_ERR("storm stats were not updated for 3 times\n"); - bnx2x_panic(); + if (bnx2x_storm_stats_update(bp)) { + if (bp->stats_pending++ == 3) { + BNX2X_ERR("storm stats were not updated for 3 times\n"); + bnx2x_panic(); + } return; } diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 3b4fc61f24cf..2107d79d69b3 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -823,10 +823,8 @@ static void cnic_free_context(struct cnic_dev *dev) } } -static void __cnic_free_uio(struct cnic_uio_dev *udev) +static void __cnic_free_uio_rings(struct cnic_uio_dev *udev) { - uio_unregister_device(&udev->cnic_uinfo); - if (udev->l2_buf) { dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size, udev->l2_buf, udev->l2_buf_map); @@ -839,6 +837,14 @@ static void __cnic_free_uio(struct cnic_uio_dev *udev) udev->l2_ring = NULL; } +} + +static void __cnic_free_uio(struct cnic_uio_dev *udev) +{ + uio_unregister_device(&udev->cnic_uinfo); + + __cnic_free_uio_rings(udev); + pci_dev_put(udev->pdev); kfree(udev); } @@ -862,6 +868,8 @@ static void cnic_free_resc(struct cnic_dev *dev) if (udev) { udev->dev = NULL; cp->udev = NULL; + if (udev->uio_dev == -1) + __cnic_free_uio_rings(udev); } cnic_free_context(dev); @@ -996,6 +1004,34 @@ static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info, return 0; } +static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages) +{ + struct cnic_local *cp = udev->dev->cnic_priv; + + if (udev->l2_ring) + return 0; + + udev->l2_ring_size = pages * BCM_PAGE_SIZE; + udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, + &udev->l2_ring_map, + GFP_KERNEL | __GFP_COMP); + if (!udev->l2_ring) + return -ENOMEM; + + udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; + udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); + udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, + &udev->l2_buf_map, + GFP_KERNEL | __GFP_COMP); + if (!udev->l2_buf) { + __cnic_free_uio_rings(udev); + return -ENOMEM; + } + + return 0; + +} + static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) { struct cnic_local *cp = dev->cnic_priv; @@ -1005,6 +1041,11 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) list_for_each_entry(udev, &cnic_udev_list, list) { if (udev->pdev == dev->pcidev) { udev->dev = dev; + if (__cnic_alloc_uio_rings(udev, pages)) { + udev->dev = NULL; + read_unlock(&cnic_dev_lock); + return -ENOMEM; + } cp->udev = udev; read_unlock(&cnic_dev_lock); return 0; @@ -1020,20 +1061,9 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) udev->dev = dev; udev->pdev = dev->pcidev; - udev->l2_ring_size = pages * BCM_PAGE_SIZE; - udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size, - &udev->l2_ring_map, - GFP_KERNEL | __GFP_COMP); - if (!udev->l2_ring) - goto err_udev; - udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; - udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); - udev->l2_buf = 
dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size, - &udev->l2_buf_map, - GFP_KERNEL | __GFP_COMP); - if (!udev->l2_buf) - goto err_dma; + if (__cnic_alloc_uio_rings(udev, pages)) + goto err_udev; write_lock(&cnic_dev_lock); list_add(&udev->list, &cnic_udev_list); @@ -1044,9 +1074,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages) cp->udev = udev; return 0; - err_dma: - dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size, - udev->l2_ring, udev->l2_ring_map); + err_udev: kfree(udev); return -ENOMEM; @@ -1260,7 +1288,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) if (ret) goto error; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { + if (CNIC_SUPPORTS_FCOE(cp)) { ret = cnic_alloc_kcq(dev, &cp->kcq2, true); if (ret) goto error; @@ -1275,6 +1303,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) if (ret) goto error; + if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) + return 0; + cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk; cp->l2_rx_ring_size = 15; @@ -3050,6 +3081,22 @@ static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) IGU_INT_DISABLE, 0); } +static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx) +{ + struct cnic_local *cp = dev->cnic_priv; + + cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx, + IGU_INT_ENABLE, 1); +} + +static void cnic_arm_bnx2x_e2_msix(struct cnic_dev *dev, u32 idx) +{ + struct cnic_local *cp = dev->cnic_priv; + + cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx, + IGU_INT_ENABLE, 1); +} + static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) { u32 last_status = *info->status_idx_ptr; @@ -3086,9 +3133,8 @@ static void cnic_service_bnx2x_bh(unsigned long data) CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); - if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { - cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, - status_idx, IGU_INT_ENABLE, 1); + if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE) { + cp->arm_int(dev, status_idx); break; } @@ -5308,7 +5354,7 @@ static void cnic_stop_hw(struct cnic_dev *dev) /* Need to wait for the ring shutdown event to complete * before clearing the CNIC_UP flag. 
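The cnic refactoring above decouples the UIO ring buffers from the cnic_dev lifetime: __cnic_alloc_uio_rings() returns early if the rings already exist (so a re-probed device reuses them), frees the ring on a partial failure, and the shutdown path in the hunk that follows must now tolerate cp->udev being NULL. A stripped-down sketch of that allocate-or-reuse pattern, using generic names rather than the actual cnic structures:

#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Sketch of the idempotent allocate-or-reuse pattern used above;
 * the struct and names are generic stand-ins, not the cnic ones.
 */
struct two_bufs {
	struct device *dev;
	void *ring, *buf;
	dma_addr_t ring_map, buf_map;
	size_t ring_size, buf_size;
};

static int two_bufs_alloc(struct two_bufs *tb)
{
	if (tb->ring)			/* already allocated: reuse */
		return 0;

	tb->ring = dma_alloc_coherent(tb->dev, tb->ring_size,
				      &tb->ring_map, GFP_KERNEL);
	if (!tb->ring)
		return -ENOMEM;

	tb->buf = dma_alloc_coherent(tb->dev, tb->buf_size,
				     &tb->buf_map, GFP_KERNEL);
	if (!tb->buf) {			/* partial failure: undo */
		dma_free_coherent(tb->dev, tb->ring_size,
				  tb->ring, tb->ring_map);
		tb->ring = NULL;
		return -ENOMEM;
	}
	return 0;
}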
*/ - while (cp->udev->uio_dev != -1 && i < 15) { + while (cp->udev && cp->udev->uio_dev != -1 && i < 15) { msleep(100); i++; } @@ -5473,8 +5519,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) cdev->max_iscsi_conn = ethdev->max_iscsi_conn; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) && - !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) + if (CNIC_SUPPORTS_FCOE(cp)) cdev->max_fcoe_conn = ethdev->max_fcoe_conn; if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) @@ -5492,10 +5537,13 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) cp->stop_cm = cnic_cm_stop_bnx2x_hw; cp->enable_int = cnic_enable_bnx2x_int; cp->disable_int_sync = cnic_disable_bnx2x_int_sync; - if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) + if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) { cp->ack_int = cnic_ack_bnx2x_e2_msix; - else + cp->arm_int = cnic_arm_bnx2x_e2_msix; + } else { cp->ack_int = cnic_ack_bnx2x_msix; + cp->arm_int = cnic_arm_bnx2x_msix; + } cp->close_conn = cnic_close_bnx2x_conn; return cdev; } diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h index 30328097f516..148604c3fa0c 100644 --- a/drivers/net/ethernet/broadcom/cnic.h +++ b/drivers/net/ethernet/broadcom/cnic.h @@ -334,6 +334,7 @@ struct cnic_local { void (*enable_int)(struct cnic_dev *); void (*disable_int_sync)(struct cnic_dev *); void (*ack_int)(struct cnic_dev *); + void (*arm_int)(struct cnic_dev *, u32 index); void (*close_conn)(struct cnic_sock *, u32 opcode); }; @@ -474,6 +475,10 @@ struct bnx2x_bd_chain_next { MAX_STAT_COUNTER_ID_E1)) #endif +#define CNIC_SUPPORTS_FCOE(cp) \ + (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \ + !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE)) + #define CNIC_RAMROD_TMO (HZ / 4) #endif diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h index 5cb88881bba1..2e92c348083e 100644 --- a/drivers/net/ethernet/broadcom/cnic_if.h +++ b/drivers/net/ethernet/broadcom/cnic_if.h @@ -14,8 +14,8 @@ #include "bnx2x/bnx2x_mfw_req.h" -#define CNIC_MODULE_VERSION "2.5.12" -#define CNIC_MODULE_RELDATE "June 29, 2012" +#define CNIC_MODULE_VERSION "2.5.13" +#define CNIC_MODULE_RELDATE "Sep 07, 2012" #define CNIC_ULP_RDMA 0 #define CNIC_ULP_ISCSI 1 diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index b441f33258e7..ce1eac529470 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -3268,6 +3268,7 @@ bnad_pci_probe(struct pci_dev *pdev, * Output : using_dac = 1 for 64 bit DMA * = 0 for 32 bit DMA */ + using_dac = false; err = bnad_pci_init(bnad, pdev, &using_dac); if (err) goto unlock_mutex; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ec2dafe8ae5b..745a1f53361f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -67,12 +67,12 @@ enum { }; enum { - MEMWIN0_APERTURE = 65536, - MEMWIN0_BASE = 0x30000, + MEMWIN0_APERTURE = 2048, + MEMWIN0_BASE = 0x1b800, MEMWIN1_APERTURE = 32768, MEMWIN1_BASE = 0x28000, - MEMWIN2_APERTURE = 2048, - MEMWIN2_BASE = 0x1b800, + MEMWIN2_APERTURE = 65536, + MEMWIN2_BASE = 0x30000, }; enum dev_master { @@ -211,6 +211,9 @@ struct tp_err_stats { struct tp_params { unsigned int ntxchan; /* # of Tx channels */ unsigned int tre; /* log2 of core clocks per TP tick */ + + uint32_t dack_re; /* DACK timer resolution */ + unsigned short tx_modq[NCHAN]; /* channel 
to modulation queue map */ }; struct vpd_params { @@ -315,6 +318,10 @@ enum { /* adapter flags */ USING_MSI = (1 << 1), USING_MSIX = (1 << 2), FW_OK = (1 << 4), + RSS_TNLALLLOOKUP = (1 << 5), + USING_SOFT_PARAMS = (1 << 6), + MASTER_PF = (1 << 7), + FW_OFLD_CONN = (1 << 9), }; struct rx_sw_desc; @@ -467,6 +474,11 @@ struct sge { u16 rdma_rxq[NCHAN]; u16 timer_val[SGE_NTIMERS]; u8 counter_val[SGE_NCOUNTERS]; + u32 fl_pg_order; /* large page allocation size */ + u32 stat_len; /* length of status page at ring end */ + u32 pktshift; /* padding between CPL & packet data */ + u32 fl_align; /* response queue message alignment */ + u32 fl_starve_thres; /* Free List starvation threshold */ unsigned int starve_thres; u8 idma_state[2]; unsigned int egr_start; @@ -511,6 +523,8 @@ struct adapter { struct net_device *port[MAX_NPORTS]; u8 chan_map[NCHAN]; /* channel -> port map */ + unsigned int l2t_start; + unsigned int l2t_end; struct l2t_data *l2t; void *uld_handle[CXGB4_ULD_MAX]; struct list_head list_node; @@ -619,7 +633,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, struct net_device *dev, unsigned int iqid); irqreturn_t t4_sge_intr_msix(int irq, void *cookie); -void t4_sge_init(struct adapter *adap); +int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); extern int dbfifo_int_thresh; @@ -638,6 +652,14 @@ static inline unsigned int us_to_core_ticks(const struct adapter *adap, return (us * adap->params.vpd.cclk) / 1000; } +static inline unsigned int core_ticks_to_us(const struct adapter *adapter, + unsigned int ticks) +{ + /* add Core Clock / 2 to round ticks to nearest uS */ + return ((ticks * 1000 + adapter->params.vpd.cclk/2) / + adapter->params.vpd.cclk); +} + void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, u32 val); @@ -656,6 +678,9 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); } +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx); void t4_intr_enable(struct adapter *adapter); void t4_intr_disable(struct adapter *adapter); int t4_slow_intr_handler(struct adapter *adapter); @@ -664,8 +689,12 @@ int t4_wait_dev_ready(struct adapter *adap); int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc); int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); +int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, + __be32 *buf); int t4_seeprom_wp(struct adapter *adapter, bool enable); +int get_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); +unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_check_fw_version(struct adapter *adapter); int t4_prep_adapter(struct adapter *adapter); int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); @@ -680,6 +709,8 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); +void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, + unsigned int mask, unsigned int val); void t4_tp_get_tcp_stats(struct adapter 
*adap, struct tp_tcp_stats *v4, struct tp_tcp_stats *v6); void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, @@ -695,6 +726,16 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, int t4_fw_bye(struct adapter *adap, unsigned int mbox); int t4_early_init(struct adapter *adap, unsigned int mbox); int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); +int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force); +int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset); +int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, + const u8 *fw_data, unsigned int size, int force); +int t4_fw_config_file(struct adapter *adap, unsigned int mbox, + unsigned int mtype, unsigned int maddr, + u32 *finiver, u32 *finicsum, u32 *cfcsum); +int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size); +int t4_fw_initialize(struct adapter *adap, unsigned int mbox); int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, u32 *val); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 5ed49af23d6a..94b784610319 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -78,28 +78,45 @@ */ #define MAX_SGE_TIMERVAL 200U -#ifdef CONFIG_PCI_IOV -/* - * Virtual Function provisioning constants. We need two extra Ingress Queues - * with Interrupt capability to serve as the VF's Firmware Event Queue and - * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free - * Lists associated with them). For each Ethernet/Control Egress Queue and - * for each Free List, we need an Egress Context. - */ enum { + /* + * Physical Function provisioning constants. + */ + PFRES_NVI = 4, /* # of Virtual Interfaces */ + PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */ + PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr + */ + PFRES_NEQ = 256, /* # of egress queues */ + PFRES_NIQ = 0, /* # of ingress queues */ + PFRES_TC = 0, /* PCI-E traffic class */ + PFRES_NEXACTF = 128, /* # of exact MPS filters */ + + PFRES_R_CAPS = FW_CMD_CAP_PF, + PFRES_WX_CAPS = FW_CMD_CAP_PF, + +#ifdef CONFIG_PCI_IOV + /* + * Virtual Function provisioning constants. We need two extra Ingress + * Queues with Interrupt capability to serve as the VF's Firmware + * Event Queue and Forwarded Interrupt Queue (when using MSI mode) -- + * neither will have Free Lists associated with them). For each + * Ethernet/Control Egress Queue and for each Free List, we need an + * Egress Context. 
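To see the arithmetic behind the VF constants that follow: with VFRES_NQSETS = 2 "Queue Sets", a VF needs 2 + 2 = 4 interrupt-capable ingress queues (one per Queue Set plus the Firmware Event Queue and the Forwarded Interrupt Queue) and 2 * 2 = 4 egress contexts (an Ethernet/Control EQ plus a Free List per Queue Set). Restated as a tiny illustration; the enum just below is the authoritative encoding:

/* Illustration only: how the VF ingress/egress counts derive from
 * the number of Queue Sets, mirroring the enum below.
 */
enum { NQSETS = 2 };			/* "Queue Sets" per VF */
enum {
	NIQFLINT = NQSETS + 2,		/* + FW event q + fwd intr q = 4 */
	NEQ	 = NQSETS * 2,		/* eth/ctrl EQ + FL per set = 4  */
};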
+ */ VFRES_NPORTS = 1, /* # of "ports" per VF */ VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */ VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */ VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */ VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */ - VFRES_NIQ = 0, /* # of non-fl/int ingress queues */ VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */ + VFRES_NIQ = 0, /* # of non-fl/int ingress queues */ VFRES_TC = 0, /* PCI-E traffic class */ VFRES_NEXACTF = 16, /* # of exact MPS filters */ VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT, VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF, +#endif }; /* @@ -146,7 +163,6 @@ static unsigned int pfvfres_pmask(struct adapter *adapter, } /*NOTREACHED*/ } -#endif enum { MAX_TXQ_ENTRIES = 16384, @@ -193,6 +209,7 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = { }; #define FW_FNAME "cxgb4/t4fw.bin" +#define FW_CFNAME "cxgb4/t4-config.txt" MODULE_DESCRIPTION(DRV_DESC); MODULE_AUTHOR("Chelsio Communications"); @@ -201,6 +218,28 @@ MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); MODULE_FIRMWARE(FW_FNAME); +/* + * Normally we're willing to become the firmware's Master PF but will be happy + * if another PF has already become the Master and initialized the adapter. + * Setting "force_init" will cause this driver to forcibly establish itself as + * the Master PF and initialize the adapter. + */ +static uint force_init; + +module_param(force_init, uint, 0644); +MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter"); + +/* + * Normally if the firmware we connect to has Configuration File support, we + * use that and only fall back to the old Driver-based initialization if the + * Configuration File fails for some reason. If force_old_init is set, then + * we'll always use the old Driver-based initialization sequence. + */ +static uint force_old_init; + +module_param(force_old_init, uint, 0644); +MODULE_PARM_DESC(force_old_init, "Force old initialization sequence"); + static int dflt_msg_enable = DFLT_MSG_ENABLE; module_param(dflt_msg_enable, int, 0644); @@ -236,6 +275,20 @@ module_param_array(intr_cnt, uint, NULL, 0644); MODULE_PARM_DESC(intr_cnt, "thresholds 1..3 for queue interrupt packet counters"); +/* + * Normally we tell the chip to deliver Ingress Packets into our DMA buffers + * offset by 2 bytes in order to have the IP headers line up on 4-byte + * boundaries. This is a requirement for many architectures which will throw + * a machine check fault if an attempt is made to access one of the 4-byte IP + * header fields on a non-4-byte boundary. And it's a major performance issue + * even on some architectures which allow it like some implementations of the + * x86 ISA. However, some architectures don't mind this and for some very + * edge-case performance sensitive applications (like forwarding large volumes + * of small packets), setting this DMA offset to 0 will decrease the number of + * PCI-E Bus transfers enough to measurably affect performance. + */ +static int rx_dma_offset = 2; + static bool vf_acls; #ifdef CONFIG_PCI_IOV @@ -248,6 +301,30 @@ module_param_array(num_vf, uint, NULL, 0644); MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); #endif +/* + * The filter TCAM has a fixed portion and a variable portion. The fixed + * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP + * ports. 
The variable portion is 36 bits which can include things like Exact + * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits), + * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would + * far exceed the 36-bit budget for this "compressed" header portion of the + * filter. Thus, we have a scarce resource which must be carefully managed. + * + * By default we set this up to mostly match the set of filter matching + * capabilities of T3 but with accommodations for some of T4's more + * interesting features: + * + * { IP Fragment (1), MPS Match Type (3), IP Protocol (8), + * [Inner] VLAN (17), Port (3), FCoE (1) } + */ +enum { + TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC, + TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT, + TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT, +}; + +static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT; + static struct dentry *cxgb4_debugfs_root; static LIST_HEAD(adapter_list); @@ -852,11 +929,25 @@ static int upgrade_fw(struct adapter *adap) */ if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || vers > adap->params.fw_vers) { - ret = -t4_load_fw(adap, fw->data, fw->size); + dev_info(dev, "upgrading firmware ...\n"); + ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, + /*force=*/false); if (!ret) - dev_info(dev, "firmware upgraded to version %pI4 from " - FW_FNAME "\n", &hdr->fw_ver); + dev_info(dev, "firmware successfully upgraded to " + FW_FNAME " (%d.%d.%d.%d)\n", + FW_HDR_FW_VER_MAJOR_GET(vers), + FW_HDR_FW_VER_MINOR_GET(vers), + FW_HDR_FW_VER_MICRO_GET(vers), + FW_HDR_FW_VER_BUILD_GET(vers)); + else + dev_err(dev, "firmware upgrade failed! err=%d\n", -ret); + } else { + /* + * Tell our caller that we didn't upgrade the firmware. + */ + ret = -EINVAL; } + out: release_firmware(fw); return ret; } @@ -2470,8 +2561,8 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, else delta = size - hw_pidx + pidx; wmb(); - t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), - V_QID(qid) | V_PIDX(delta)); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(qid) | PIDX(delta)); } out: return ret; @@ -2579,8 +2670,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) else delta = q->size - hw_pidx + q->db_pidx; wmb(); - t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), - V_QID(q->cntxt_id) | V_PIDX(delta)); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(delta)); } out: q->db_disabled = 0; @@ -2617,9 +2708,9 @@ static void process_db_full(struct work_struct *work) notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); drain_db_fifo(adap, dbfifo_drain_delay); - t4_set_reg_field(adap, A_SGE_INT_ENABLE3, - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT); + t4_set_reg_field(adap, SGE_INT_ENABLE3, + DBFIFO_HP_INT | DBFIFO_LP_INT, + DBFIFO_HP_INT | DBFIFO_LP_INT); notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); } @@ -2639,8 +2730,8 @@ static void process_db_drop(struct work_struct *work) void t4_db_full(struct adapter *adap) { - t4_set_reg_field(adap, A_SGE_INT_ENABLE3, - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT, 0); + t4_set_reg_field(adap, SGE_INT_ENABLE3, + DBFIFO_HP_INT | DBFIFO_LP_INT, 0); queue_work(workq, &adap->db_full_task); } @@ -3076,6 +3167,10 @@ static void setup_memwin(struct adapter *adap) t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), (bar0 + MEMWIN2_BASE) | BIR(0) | WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); +} + +static void setup_memwin_rdma(struct adapter *adap) +{ if (adap->vres.ocq.size) { unsigned int start, 
sz_kb; @@ -3155,6 +3250,488 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) /* * Phase 0 of initialization: contact FW, obtain config, perform basic init. + * + * If the firmware we're dealing with has Configuration File support, then + * we use that to perform all configuration + */ + +/* + * Tweak configuration based on module parameters, etc. Most of these have + * defaults assigned to them by Firmware Configuration Files (if we're using + * them) but need to be explicitly set if we're using hard-coded + * initialization. But even in the case of using Firmware Configuration + * Files, we'd like to expose the ability to change these via module + * parameters so these are essentially common tweaks/settings for + * Configuration Files and hard-coded initialization ... + */ +static int adap_init0_tweaks(struct adapter *adapter) +{ + /* + * Fix up various Host-Dependent Parameters like Page Size, Cache + * Line Size, etc. The firmware default is for a 4KB Page Size and + * 64B Cache Line Size ... + */ + t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES); + + /* + * Process module parameters which affect early initialization. + */ + if (rx_dma_offset != 2 && rx_dma_offset != 0) { + dev_err(&adapter->pdev->dev, + "Ignoring illegal rx_dma_offset=%d, using 2\n", + rx_dma_offset); + rx_dma_offset = 2; + } + t4_set_reg_field(adapter, SGE_CONTROL, + PKTSHIFT_MASK, + PKTSHIFT(rx_dma_offset)); + + /* + * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux + * adds the pseudo header itself. + */ + t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG, + CSUM_HAS_PSEUDO_HDR, 0); + + return 0; +} + +/* + * Attempt to initialize the adapter via a Firmware Configuration File. + */ +static int adap_init0_config(struct adapter *adapter, int reset) +{ + struct fw_caps_config_cmd caps_cmd; + const struct firmware *cf; + unsigned long mtype = 0, maddr = 0; + u32 finiver, finicsum, cfcsum; + int ret, using_flash; + + /* + * Reset device if necessary. + */ + if (reset) { + ret = t4_fw_reset(adapter, adapter->mbox, + PIORSTMODE | PIORST); + if (ret < 0) + goto bye; + } + + /* + * If we have a T4 configuration file under /lib/firmware/cxgb4/, + * then use that. Otherwise, use the configuration file stored + * in the adapter flash ... + */ + ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev); + if (ret < 0) { + using_flash = 1; + mtype = FW_MEMTYPE_CF_FLASH; + maddr = t4_flash_cfg_addr(adapter); + } else { + u32 params[7], val[7]; + + using_flash = 0; + if (cf->size >= FLASH_CFG_MAX_SIZE) + ret = -ENOMEM; + else { + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); + ret = t4_query_params(adapter, adapter->mbox, + adapter->fn, 0, 1, params, val); + if (ret == 0) { + /* + * For t4_memory_write() below addresses and + * sizes have to be in terms of multiples of 4 + * bytes. So, if the Configuration File isn't + * a multiple of 4 bytes in length we'll have + * to write that out separately since we can't + * guarantee that the bytes following the + * residual byte in the buffer returned by + * request_firmware() are zeroed out ... 
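The residual handling the comment above describes (and the code just below implements) is plain masking arithmetic: split the image at the last 4-byte boundary, then build one final word out of the leftover bytes plus zero padding. A self-contained illustration of that arithmetic, independent of the t4_memory_write() details (the 1027-byte size is made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t file_size = 1027;		 /* hypothetical cf->size */
	size_t resid = file_size & 0x3;		 /* 3 trailing bytes */
	size_t size = file_size & ~(size_t)0x3;	 /* 1024 aligned bytes */
	uint8_t data[1028] = { 0 };		 /* stand-in for cf->data */
	union { uint32_t word; uint8_t buf[4]; } last;

	memcpy(last.buf, data + size, resid);	 /* copy residual bytes */
	memset(last.buf + resid, 0, 4 - resid);	 /* zero the padding */
	printf("write %zu aligned bytes, then one padded word (resid=%zu, word=%#x)\n",
	       size, resid, (unsigned)last.word);
	return 0;
}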
+ */ + size_t resid = cf->size & 0x3; + size_t size = cf->size & ~0x3; + __be32 *data = (__be32 *)cf->data; + + mtype = FW_PARAMS_PARAM_Y_GET(val[0]); + maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16; + + ret = t4_memory_write(adapter, mtype, maddr, + size, data); + if (ret == 0 && resid != 0) { + union { + __be32 word; + char buf[4]; + } last; + int i; + + last.word = data[size >> 2]; + for (i = resid; i < 4; i++) + last.buf[i] = 0; + ret = t4_memory_write(adapter, mtype, + maddr + size, + 4, &last.word); + } + } + } + + release_firmware(cf); + if (ret) + goto bye; + } + + /* + * Issue a Capability Configuration command to the firmware to get it + * to parse the Configuration File. We don't use t4_fw_config_file() + * because we want the ability to modify various features after we've + * processed the configuration file ... + */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = + htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + caps_cmd.retval_len16 = + htonl(FW_CAPS_CONFIG_CMD_CFVALID | + FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | + FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | + FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + if (ret < 0) + goto bye; + + finiver = ntohl(caps_cmd.finiver); + finicsum = ntohl(caps_cmd.finicsum); + cfcsum = ntohl(caps_cmd.cfcsum); + if (finicsum != cfcsum) + dev_warn(adapter->pdev_dev, "Configuration File checksum "\ + "mismatch: [fini] csum=%#x, computed csum=%#x\n", + finicsum, cfcsum); + + /* + * If we're a pure NIC driver then disable all offloading facilities. + * This will allow the firmware to optimize aspects of the hardware + * configuration which will result in improved performance. + */ + caps_cmd.ofldcaps = 0; + caps_cmd.iscsicaps = 0; + caps_cmd.rdmacaps = 0; + caps_cmd.fcoecaps = 0; + + /* + * And now tell the firmware to use the configuration we just loaded. + */ + caps_cmd.op_to_write = + htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE); + caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + NULL); + if (ret < 0) + goto bye; + + /* + * Tweak configuration based on system architecture, module + * parameters, etc. + */ + ret = adap_init0_tweaks(adapter); + if (ret < 0) + goto bye; + + /* + * And finally tell the firmware to initialize itself using the + * parameters from the Configuration File. + */ + ret = t4_fw_initialize(adapter, adapter->mbox); + if (ret < 0) + goto bye; + + /* + * Return successfully and note that we're operating with parameters + * not supplied by the driver, rather than from hard-wired + * initialization constants buried in the driver. + */ + adapter->flags |= USING_SOFT_PARAMS; + dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ + "Configuration File %s, version %#x, computed checksum %#x\n", + (using_flash + ? "in device FLASH" + : "/lib/firmware/" FW_CFNAME), + finiver, cfcsum); + return 0; + + /* + * Something bad happened. Return the error ... (If the "error" + * is that there's no Configuration File on the adapter we don't + * want to issue a warning since this is fairly common.) + */ +bye: + if (ret != -ENOENT) + dev_warn(adapter->pdev_dev, "Configuration file error %d\n", + -ret); + return ret; +} + +/* + * Attempt to initialize the adapter via hard-coded, driver supplied + * parameters ...
+ */ +static int adap_init0_no_config(struct adapter *adapter, int reset) +{ + struct sge *s = &adapter->sge; + struct fw_caps_config_cmd caps_cmd; + u32 v; + int i, ret; + + /* + * Reset device if necessary + */ + if (reset) { + ret = t4_fw_reset(adapter, adapter->mbox, + PIORSTMODE | PIORST); + if (ret < 0) + goto bye; + } + + /* + * Get device capabilities and select which we'll be using. + */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_READ); + caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + if (ret < 0) + goto bye; + +#ifndef CONFIG_CHELSIO_T4_OFFLOAD + /* + * If we're a pure NIC driver then disable all offloading facilities. + * This will allow the firmware to optimize aspects of the hardware + * configuration which will result in improved performance. + */ + caps_cmd.ofldcaps = 0; + caps_cmd.iscsicaps = 0; + caps_cmd.rdmacaps = 0; + caps_cmd.fcoecaps = 0; +#endif + + if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { + if (!vf_acls) + caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); + else + caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM); + } else if (vf_acls) { + dev_err(adapter->pdev_dev, "virtualization ACLs not supported"); + goto bye; + } + caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_WRITE); + ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), + NULL); + if (ret < 0) + goto bye; + + /* + * Tweak configuration based on system architecture, module + * parameters, etc. + */ + ret = adap_init0_tweaks(adapter); + if (ret < 0) + goto bye; + + /* + * Select RSS Global Mode we want to use. We use "Basic Virtual" + * mode which maps each Virtual Interface to its own section of + * the RSS Table and we turn on all map and hash enables ... + */ + adapter->flags |= RSS_TNLALLLOOKUP; + ret = t4_config_glbl_rss(adapter, adapter->mbox, + FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, + FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | + FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | + ((adapter->flags & RSS_TNLALLLOOKUP) ? + FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0)); + if (ret < 0) + goto bye; + + /* + * Set up our own fundamental resource provisioning ... + */ + ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0, + PFRES_NEQ, PFRES_NETHCTRL, + PFRES_NIQFLINT, PFRES_NIQ, + PFRES_TC, PFRES_NVI, + FW_PFVF_CMD_CMASK_MASK, + pfvfres_pmask(adapter, adapter->fn, 0), + PFRES_NEXACTF, + PFRES_R_CAPS, PFRES_WX_CAPS); + if (ret < 0) + goto bye; + + /* + * Perform low level SGE initialization. We need to do this before we + * send the firmware the INITIALIZE command because that will cause + * any other PF Drivers which are waiting for the Master + * Initialization to proceed forward. + */ + for (i = 0; i < SGE_NTIMERS - 1; i++) + s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL); + s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; + s->counter_val[0] = 1; + for (i = 1; i < SGE_NCOUNTERS; i++) + s->counter_val[i] = min(intr_cnt[i - 1], + THRESHOLD_0_GET(THRESHOLD_0_MASK)); + t4_sge_init(adapter); + +#ifdef CONFIG_PCI_IOV + /* + * Provision resource limits for Virtual Functions. We currently + * grant them all the same static resource limits except for the Port + * Access Rights Mask which we're assigning based on the PF. All of + * the static provisioning stuff for both the PF and VF really needs + * to be managed in a persistent manner for each device which the + * firmware controls. 
+ */ + { + int pf, vf; + + for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) { + if (num_vf[pf] <= 0) + continue; + + /* VF numbering starts at 1! */ + for (vf = 1; vf <= num_vf[pf]; vf++) { + ret = t4_cfg_pfvf(adapter, adapter->mbox, + pf, vf, + VFRES_NEQ, VFRES_NETHCTRL, + VFRES_NIQFLINT, VFRES_NIQ, + VFRES_TC, VFRES_NVI, + FW_PFVF_CMD_CMASK_GET( + FW_PFVF_CMD_CMASK_MASK), + pfvfres_pmask( + adapter, pf, vf), + VFRES_NEXACTF, + VFRES_R_CAPS, VFRES_WX_CAPS); + if (ret < 0) + dev_warn(adapter->pdev_dev, + "failed to "\ + "provision pf/vf=%d/%d; " + "err=%d\n", pf, vf, ret); + } + } + } +#endif + + /* + * Set up the default filter mode. Later we'll want to implement this + * via a firmware command, etc. ... This needs to be done before the + * firmware initialization command ... If the selected set of fields + * isn't equal to the default value, we'll need to make sure that the + * field selections will fit in the 36-bit budget. + */ + if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) { + int i, bits = 0; + + for (i = TP_VLAN_PRI_MAP_FIRST; i <= TP_VLAN_PRI_MAP_LAST; i++) + switch (tp_vlan_pri_map & (1 << i)) { + case 0: + /* compressed filter field not enabled */ + break; + case FCOE_MASK: + bits += 1; + break; + case PORT_MASK: + bits += 3; + break; + case VNIC_ID_MASK: + bits += 17; + break; + case VLAN_MASK: + bits += 17; + break; + case TOS_MASK: + bits += 8; + break; + case PROTOCOL_MASK: + bits += 8; + break; + case ETHERTYPE_MASK: + bits += 16; + break; + case MACMATCH_MASK: + bits += 9; + break; + case MPSHITTYPE_MASK: + bits += 3; + break; + case FRAGMENTATION_MASK: + bits += 1; + break; + } + + if (bits > 36) { + dev_err(adapter->pdev_dev, + "tp_vlan_pri_map=%#x needs %d bits > 36;"\ + " using %#x\n", tp_vlan_pri_map, bits, + TP_VLAN_PRI_MAP_DEFAULT); + tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT; + } + } + v = tp_vlan_pri_map; + t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA, + &v, 1, TP_VLAN_PRI_MAP); + + /* + * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order + * to support any of the compressed filter fields above. Newer + * versions of the firmware do this automatically but it doesn't hurt + * to set it here. Meanwhile, we do _not_ need to set Lookup Every + * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets + * since the firmware automatically turns this on and off when we have + * a non-zero number of filters active (since it does have a + * performance impact). + */ + if (tp_vlan_pri_map) + t4_set_reg_field(adapter, TP_GLOBAL_CONFIG, + FIVETUPLELOOKUP_MASK, + FIVETUPLELOOKUP_MASK); + + /* + * Tweak some settings. + */ + t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) | + RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) | + PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) | + KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9)); + + /* + * Get basic stuff going by issuing the Firmware Initialize command. + * Note that this _must_ be after all PFVF commands ... + */ + ret = t4_fw_initialize(adapter, adapter->mbox); + if (ret < 0) + goto bye; + + /* + * Return successfully! + */ + dev_info(adapter->pdev_dev, "Successfully configured using built-in "\ + "driver parameters\n"); + return 0; + + /* + * Something bad happened. Return the error ... + */ +bye: + return ret; +} + +/* + * Phase 0 of initialization: contact FW, obtain config, perform basic init.
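The bit-budget switch a few lines up simply sums the field widths listed in the filter TCAM comment earlier. For the default selection { IP Fragment (1), MPS Match Type (3), IP Protocol (8), [Inner] VLAN (17), Port (3), FCoE (1) } the total is 1 + 3 + 8 + 17 + 3 + 1 = 33 bits, comfortably inside the 36-bit budget. The same check as a runnable illustration -- field widths are copied from that comment; nothing here is driver code:

#include <stdio.h>

struct field { const char *name; int width; };

int main(void)
{
	/* the default TP_VLAN_PRI_MAP selection, widths per the comment */
	static const struct field sel[] = {
		{ "FCoE", 1 }, { "Port", 3 }, { "[Inner] VLAN", 17 },
		{ "IP Protocol", 8 }, { "MPS Match Type", 3 },
		{ "IP Fragment", 1 },
	};
	int bits = 0;
	size_t i;

	for (i = 0; i < sizeof(sel) / sizeof(sel[0]); i++)
		bits += sel[i].width;
	printf("%d bits used of 36 available: %s\n", bits,
	       bits <= 36 ? "fits" : "too many fields selected");
	return 0;
}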
*/ static int adap_init0(struct adapter *adap) { @@ -3162,72 +3739,216 @@ static int adap_init0(struct adapter *adap) u32 v, port_vec; enum dev_state state; u32 params[7], val[7]; - struct fw_caps_config_cmd c; - - ret = t4_check_fw_version(adap); - if (ret == -EINVAL || ret > 0) { - if (upgrade_fw(adap) >= 0) /* recache FW version */ - ret = t4_check_fw_version(adap); - } - if (ret < 0) - return ret; + int reset = 1, j; - /* contact FW, request master */ - ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state); + /* + * Contact FW, advertising Master capability (and potentially forcing + * ourselves as the Master PF if our module parameter force_init is + * set). + */ + ret = t4_fw_hello(adap, adap->mbox, adap->fn, + force_init ? MASTER_MUST : MASTER_MAY, + &state); if (ret < 0) { dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", ret); return ret; } + if (ret == adap->mbox) + adap->flags |= MASTER_PF; + if (force_init && state == DEV_STATE_INIT) + state = DEV_STATE_UNINIT; - /* reset device */ - ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST); - if (ret < 0) - goto bye; - - for (v = 0; v < SGE_NTIMERS - 1; v++) - adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL); - adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; - adap->sge.counter_val[0] = 1; - for (v = 1; v < SGE_NCOUNTERS; v++) - adap->sge.counter_val[v] = min(intr_cnt[v - 1], - THRESHOLD_3_MASK); -#define FW_PARAM_DEV(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + /* + * If we're the Master PF Driver and the device is uninitialized, + * then let's consider upgrading the firmware ... (We always want + * to check the firmware version number in order to A. get it for + * later reporting and B. to warn if the currently loaded firmware + * is excessively mismatched relative to the driver.) + */ + ret = t4_check_fw_version(adap); + if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { + if (ret == -EINVAL || ret > 0) { + if (upgrade_fw(adap) >= 0) { + /* + * Note that the chip was reset as part of the + * firmware upgrade so we don't reset it again + * below and grab the new firmware version. + */ + reset = 0; + ret = t4_check_fw_version(adap); + } + } + if (ret < 0) + return ret; + } - params[0] = FW_PARAM_DEV(CCLK); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val); + /* + * Grab VPD parameters. This should be done after we establish a + * connection to the firmware since some of the VPD parameters + * (notably the Core Clock frequency) are retrieved via requests to + * the firmware. On the other hand, we need these fairly early on + * so we do this right after getting ahold of the firmware. + */ + ret = get_vpd_params(adap, &adap->params.vpd); if (ret < 0) goto bye; - adap->params.vpd.cclk = val[0]; - ret = adap_init1(adap, &c); + /* + * Find out what ports are available to us. Note that we need to do + * this before calling adap_init0_no_config() since it needs nports + * and portvec ... + */ + v = + FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec); if (ret < 0) goto bye; + adap->params.nports = hweight32(port_vec); + adap->params.portvec = port_vec; + + /* + * If the firmware is initialized already (and we're not forcing a + * master initialization), note that we're living with existing + * adapter parameters. Otherwise, it's time to try initializing the + * adapter ... 
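One detail from the PORTVEC query a few lines up is worth making concrete: the parameter comes back as a bitmap with one bit set per physical port, so hweight32() -- a population count -- yields nports directly. An illustrative standalone version (the 0x5 value is made up, meaning ports 0 and 2 are present; __builtin_popcount is the GCC/Clang builtin standing in for the kernel helper):

#include <stdio.h>

/* userspace stand-in for the kernel's hweight32() */
static int hweight32(unsigned int w)
{
	return __builtin_popcount(w);
}

int main(void)
{
	unsigned int port_vec = 0x5;	/* illustrative: ports 0 and 2 */

	printf("portvec=%#x -> nports=%d\n", port_vec, hweight32(port_vec));
	return 0;
}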
+ */ + if (state == DEV_STATE_INIT) { + dev_info(adap->pdev_dev, "Coming up as %s: "\ + "Adapter already initialized\n", + adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); + adap->flags |= USING_SOFT_PARAMS; + } else { + dev_info(adap->pdev_dev, "Coming up as MASTER: "\ + "Initializing adapter\n"); + + /* + * If the firmware doesn't support Configuration + * Files warn user and exit, + */ + if (ret < 0) + dev_warn(adap->pdev_dev, "Firmware doesn't support " + "configuration file.\n"); + if (force_old_init) + ret = adap_init0_no_config(adap, reset); + else { + /* + * Find out whether we're dealing with a version of + * the firmware which has configuration file support. + */ + params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, + params, val); + + /* + * If the firmware doesn't support Configuration + * Files, use the old Driver-based, hard-wired + * initialization. Otherwise, try using the + * Configuration File support and fall back to the + * Driver-based initialization if there's no + * Configuration File found. + */ + if (ret < 0) + ret = adap_init0_no_config(adap, reset); + else { + /* + * The firmware provides us with a memory + * buffer where we can load a Configuration + * File from the host if we want to override + * the Configuration File in flash. + */ + + ret = adap_init0_config(adap, reset); + if (ret == -ENOENT) { + dev_info(adap->pdev_dev, + "No Configuration File present " + "on adapter. Using hard-wired " + "configuration parameters.\n"); + ret = adap_init0_no_config(adap, reset); + } + } + } + if (ret < 0) { + dev_err(adap->pdev_dev, + "could not initialize adapter, error %d\n", + -ret); + goto bye; + } + } + + /* + * If we're living with non-hard-coded parameters (either from a + * Firmware Configuration File or values programmed by a different PF + * Driver), give the SGE code a chance to pull in anything that it + * needs ... Note that this must be called after we retrieve our VPD + * parameters in order to know how to convert core ticks to seconds. + */ + if (adap->flags & USING_SOFT_PARAMS) { + ret = t4_sge_init(adap); + if (ret < 0) + goto bye; + } + + /* + * Grab some of our basic fundamental operating parameters. 
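Every query that follows uses the same t4_query_params() idiom: pack up to seven 32-bit parameter mnemonics into params[], make one mailbox round trip, and read the answers out of val[] at the matching indices. A schematic of the calling convention, mirroring the calls below rather than adding anything new:

/* Schematic of the query idiom used throughout adap_init0(): one
 * mailbox command answers up to seven parameters at once.
 */
u32 params[7], val[7];
int ret;

params[0] = FW_PARAM_PFVF(L2T_START);	/* slot 0 asks for the L2T start */
params[1] = FW_PARAM_PFVF(L2T_END);	/* slot 1 asks for the L2T end   */
ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
if (ret == 0) {
	adap->l2t_start = val[0];	/* answers arrive in slot order */
	adap->l2t_end = val[1];
}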
+ */ +#define FW_PARAM_DEV(param) \ + (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) + #define FW_PARAM_PFVF(param) \ - (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ - FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \ - FW_PARAMS_PARAM_Y(adap->fn)) + FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \ + FW_PARAMS_PARAM_Y(0) | \ + FW_PARAMS_PARAM_Z(0) - params[0] = FW_PARAM_DEV(PORTVEC); + params[0] = FW_PARAM_PFVF(EQ_START); params[1] = FW_PARAM_PFVF(L2T_START); params[2] = FW_PARAM_PFVF(L2T_END); params[3] = FW_PARAM_PFVF(FILTER_START); params[4] = FW_PARAM_PFVF(FILTER_END); params[5] = FW_PARAM_PFVF(IQFLINT_START); - params[6] = FW_PARAM_PFVF(EQ_START); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val); if (ret < 0) goto bye; - port_vec = val[0]; + adap->sge.egr_start = val[0]; + adap->l2t_start = val[1]; + adap->l2t_end = val[2]; adap->tids.ftid_base = val[3]; adap->tids.nftids = val[4] - val[3] + 1; adap->sge.ingr_start = val[5]; - adap->sge.egr_start = val[6]; - if (c.ofldcaps) { + /* query params related to active filter region */ + params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); + params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); + /* If an Active filter region is provisioned we enable establishing + * offload connections through firmware work requests + */ + if ((val[0] != val[1]) && (ret >= 0)) { + adap->flags |= FW_OFLD_CONN; + adap->tids.aftid_base = val[0]; + adap->tids.aftid_end = val[1]; + } + +#ifdef CONFIG_CHELSIO_T4_OFFLOAD + /* + * Get device capabilities so we can determine what resources we need + * to manage. + */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | FW_CMD_READ); + caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), + &caps_cmd); + if (ret < 0) + goto bye; + + if (caps_cmd.ofldcaps) { /* query offload-related parameters */ params[0] = FW_PARAM_DEV(NTID); params[1] = FW_PARAM_PFVF(SERVER_START); @@ -3235,28 +3956,55 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(TDDP_START); params[4] = FW_PARAM_PFVF(TDDP_END); params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, - val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, + params, val); if (ret < 0) goto bye; adap->tids.ntids = val[0]; adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); adap->tids.stid_base = val[1]; adap->tids.nstids = val[2] - val[1] + 1; + /* + * Set up the server filter region. Divide the available filter + * region into two parts. Regular filters get 1/3rd and server + * filters get 2/3rd part. This is only enabled if the workaround + * path is enabled. + * 1. For regular filters. + * 2. Server filter: These are special filters which are used + * to redirect SYN packets to offload queue.
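Worked numbers for the 1/3 : 2/3 split that the code just below performs, using a hypothetical firmware grant of 192 filter IDs at base 0 -- regular filters keep [0, 63] and server filters take [64, 191]:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* hypothetical firmware grant: 192 filter IDs at base 0 */
	unsigned int ftid_base = 0, nftids = 192;
	unsigned int sftid_base, nsftids;

	sftid_base = ftid_base + DIV_ROUND_UP(nftids, 3);	/* 64  */
	nsftids = nftids - DIV_ROUND_UP(nftids, 3);		/* 128 */
	nftids = sftid_base - ftid_base;			/* 64  */
	printf("regular: [%u, %u]  server: [%u, %u]\n",
	       ftid_base, ftid_base + nftids - 1,
	       sftid_base, sftid_base + nsftids - 1);
	return 0;
}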
+ */ + if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { + adap->tids.sftid_base = adap->tids.ftid_base + + DIV_ROUND_UP(adap->tids.nftids, 3); + adap->tids.nsftids = adap->tids.nftids - + DIV_ROUND_UP(adap->tids.nftids, 3); + adap->tids.nftids = adap->tids.sftid_base - + adap->tids.ftid_base; + } adap->vres.ddp.start = val[3]; adap->vres.ddp.size = val[4] - val[3] + 1; adap->params.ofldq_wr_cred = val[5]; + + params[0] = FW_PARAM_PFVF(ETHOFLD_START); + params[1] = FW_PARAM_PFVF(ETHOFLD_END); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, + params, val); + if ((val[0] != val[1]) && (ret >= 0)) { + adap->tids.uotid_base = val[0]; + adap->tids.nuotids = val[1] - val[0] + 1; + } + adap->params.offload = 1; } - if (c.rdmacaps) { + if (caps_cmd.rdmacaps) { params[0] = FW_PARAM_PFVF(STAG_START); params[1] = FW_PARAM_PFVF(STAG_END); params[2] = FW_PARAM_PFVF(RQ_START); params[3] = FW_PARAM_PFVF(RQ_END); params[4] = FW_PARAM_PFVF(PBL_START); params[5] = FW_PARAM_PFVF(PBL_END); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, - val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, + params, val); if (ret < 0) goto bye; adap->vres.stag.start = val[0]; @@ -3272,8 +4020,7 @@ static int adap_init0(struct adapter *adap) params[3] = FW_PARAM_PFVF(CQ_END); params[4] = FW_PARAM_PFVF(OCQ_START); params[5] = FW_PARAM_PFVF(OCQ_END); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params, - val); + ret = t4_query_params(adap, 0, 0, 0, 6, params, val); if (ret < 0) goto bye; adap->vres.qp.start = val[0]; @@ -3283,11 +4030,11 @@ static int adap_init0(struct adapter *adap) adap->vres.ocq.start = val[4]; adap->vres.ocq.size = val[5] - val[4] + 1; } - if (c.iscsicaps) { + if (caps_cmd.iscsicaps) { params[0] = FW_PARAM_PFVF(ISCSI_START); params[1] = FW_PARAM_PFVF(ISCSI_END); - ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params, - val); + ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, + params, val); if (ret < 0) goto bye; adap->vres.iscsi.start = val[0]; @@ -3295,63 +4042,33 @@ static int adap_init0(struct adapter *adap) } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV +#endif /* CONFIG_CHELSIO_T4_OFFLOAD */ - adap->params.nports = hweight32(port_vec); - adap->params.portvec = port_vec; - adap->flags |= FW_OK; - - /* These are finalized by FW initialization, load their values now */ + /* + * These are finalized by FW initialization, load their values now. + */ v = t4_read_reg(adap, TP_TIMER_RESOLUTION); adap->params.tp.tre = TIMERRESOLUTION_GET(v); + adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v); t4_read_mtu_tbl(adap, adap->params.mtus, NULL); t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, adap->params.b_wnd); -#ifdef CONFIG_PCI_IOV - /* - * Provision resource limits for Virtual Functions. We currently - * grant them all the same static resource limits except for the Port - * Access Rights Mask which we're assigning based on the PF. All of - * the static provisioning stuff for both the PF and VF really needs - * to be managed in a persistent manner for each device which the - * firmware controls. - */ - { - int pf, vf; - - for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) { - if (num_vf[pf] <= 0) - continue; + /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */ + for (j = 0; j < NCHAN; j++) + adap->params.tp.tx_modq[j] = j; - /* VF numbering starts at 1! 
*/ - for (vf = 1; vf <= num_vf[pf]; vf++) { - ret = t4_cfg_pfvf(adap, adap->fn, pf, vf, - VFRES_NEQ, VFRES_NETHCTRL, - VFRES_NIQFLINT, VFRES_NIQ, - VFRES_TC, VFRES_NVI, - FW_PFVF_CMD_CMASK_MASK, - pfvfres_pmask(adap, pf, vf), - VFRES_NEXACTF, - VFRES_R_CAPS, VFRES_WX_CAPS); - if (ret < 0) - dev_warn(adap->pdev_dev, "failed to " - "provision pf/vf=%d/%d; " - "err=%d\n", pf, vf, ret); - } - } - } -#endif - - setup_memwin(adap); + adap->flags |= FW_OK; return 0; /* - * If a command timed out or failed with EIO FW does not operate within - * its spec or something catastrophic happened to HW/FW, stop issuing - * commands. + * Something bad happened. If a command timed out or failed with EIO, + * the FW is not operating within its spec or something catastrophic + * happened to HW/FW; stop issuing commands. */ -bye: if (ret != -ETIMEDOUT && ret != -EIO) - t4_fw_bye(adap, adap->fn); +bye: + if (ret != -ETIMEDOUT && ret != -EIO) + t4_fw_bye(adap, adap->mbox); return ret; } @@ -3814,7 +4531,9 @@ static int __devinit init_one(struct pci_dev *pdev, err = t4_prep_adapter(adapter); if (err) goto out_unmap_bar; + setup_memwin(adapter); err = adap_init0(adapter); + setup_memwin_rdma(adapter); if (err) goto out_unmap_bar; @@ -3956,8 +4675,11 @@ static void __devexit remove_one(struct pci_dev *pdev) { struct adapter *adapter = pci_get_drvdata(pdev); +#ifdef CONFIG_PCI_IOV pci_disable_sriov(pdev); +#endif + if (adapter) { int i; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index d79980c5fc63..1b899fea1a91 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -100,6 +100,8 @@ struct tid_info { unsigned int nftids; unsigned int ftid_base; + unsigned int aftid_base; + unsigned int aftid_end; spinlock_t atid_lock ____cacheline_aligned_in_smp; union aopen_entry *afree; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index d49933ed551f..3ecc087d732d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -68,9 +68,6 @@ */ #define RX_PKT_SKB_LEN 512 -/* Ethernet header padding prepended to RX_PKTs */ -#define RX_PKT_PAD 2 - /* * Max number of Tx descriptors we clean up at a time. Should be modest as * freeing skbs isn't cheap and it happens while holding locks. We just need @@ -137,13 +134,6 @@ */ #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN -enum { - /* packet alignment in FL buffers */ - FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES, - /* egress status entry size */ - STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64 -}; - struct tx_sw_desc { /* SW state per Tx descriptor */ struct sk_buff *skb; struct ulptx_sgl *sgl; @@ -155,16 +145,57 @@ struct rx_sw_desc { /* SW state per Rx descriptor */ }; /* - * The low bits of rx_sw_desc.dma_addr have special meaning. + * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb + * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs. + * We could easily support more but there doesn't seem to be much need for + * that ...
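The sizing formula defined just below is easiest to read with numbers plugged in: an Rx buffer must cover the DMA offset (pktshift), an Ethernet header, a VLAN tag and the MTU, rounded up to the Free List alignment. With the illustrative values pktshift = 2 and fl_align = 64 that gives 2 + 14 + 4 + 1500 = 1520 -> 1536 bytes for the small size and 2 + 14 + 4 + 9000 = 9020 -> 9024 for the large one; both inputs actually come from hardware/firmware state, so treat these as examples only:

#include <stdio.h>

#define ETH_HLEN  14
#define VLAN_HLEN 4
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* a: power of 2 */

static unsigned int fl_mtu_bufsize(unsigned int pktshift,
				   unsigned int fl_align, unsigned int mtu)
{
	return ALIGN(pktshift + ETH_HLEN + VLAN_HLEN + mtu, fl_align);
}

int main(void)
{
	/* pktshift = 2 and fl_align = 64 are illustrative defaults */
	printf("small: %u\n", fl_mtu_bufsize(2, 64, 1500));	/* 1536 */
	printf("large: %u\n", fl_mtu_bufsize(2, 64, 9000));	/* 9024 */
	return 0;
}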
+ */ +#define FL_MTU_SMALL 1500 +#define FL_MTU_LARGE 9000 + +static inline unsigned int fl_mtu_bufsize(struct adapter *adapter, + unsigned int mtu) +{ + struct sge *s = &adapter->sge; + + return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align); +} + +#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL) +#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE) + +/* + * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses + * these to specify the buffer size as an index into the SGE Free List Buffer + * Size register array. We also use bit 4, set when the buffer has been unmapped + * for DMA, but this is of course never sent to the hardware and is only used + * to prevent double unmappings. All of the above requires that the Free List + * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are + * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal + * Free List Buffer alignment is 32 bytes, this works out for us ... */ enum { - RX_LARGE_BUF = 1 << 0, /* buffer is larger than PAGE_SIZE */ - RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */ + RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */ + RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */ + RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */ + + /* + * XXX We shouldn't depend on being able to use these indices. + * XXX Especially when some other Master PF has initialized the + * XXX adapter or we use the Firmware Configuration File. We + * XXX should really search through the Host Buffer Size register + * XXX array for the appropriately sized buffer indices. + */ + RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */ + RX_LARGE_PG_BUF = 0x1, /* large (FL_PG_ORDER) page buffer */ + + RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */ + RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */ }; static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d) { - return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF); + return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS; } static inline bool is_buf_mapped(const struct rx_sw_desc *d) @@ -392,14 +423,35 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, } } -static inline int get_buf_size(const struct rx_sw_desc *d) +static inline int get_buf_size(struct adapter *adapter, + const struct rx_sw_desc *d) { -#if FL_PG_ORDER > 0 - return (d->dma_addr & RX_LARGE_BUF) ?
(PAGE_SIZE << FL_PG_ORDER) : - PAGE_SIZE; -#else - return PAGE_SIZE; -#endif + struct sge *s = &adapter->sge; + unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; + int buf_size; + + switch (rx_buf_size_idx) { + case RX_SMALL_PG_BUF: + buf_size = PAGE_SIZE; + break; + + case RX_LARGE_PG_BUF: + buf_size = PAGE_SIZE << s->fl_pg_order; + break; + + case RX_SMALL_MTU_BUF: + buf_size = FL_MTU_SMALL_BUFSIZE(adapter); + break; + + case RX_LARGE_MTU_BUF: + buf_size = FL_MTU_LARGE_BUFSIZE(adapter); + break; + + default: + BUG_ON(1); + } + + return buf_size; } /** @@ -418,7 +470,8 @@ static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) if (is_buf_mapped(d)) dma_unmap_page(adap->pdev_dev, get_buf_addr(d), - get_buf_size(d), PCI_DMA_FROMDEVICE); + get_buf_size(adap, d), + PCI_DMA_FROMDEVICE); put_page(d->page); d->page = NULL; if (++q->cidx == q->size) @@ -444,7 +497,7 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) if (is_buf_mapped(d)) dma_unmap_page(adap->pdev_dev, get_buf_addr(d), - get_buf_size(d), PCI_DMA_FROMDEVICE); + get_buf_size(adap, d), PCI_DMA_FROMDEVICE); d->page = NULL; if (++q->cidx == q->size) q->cidx = 0; @@ -485,6 +538,7 @@ static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg, static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) { + struct sge *s = &adap->sge; struct page *pg; dma_addr_t mapping; unsigned int cred = q->avail; @@ -493,25 +547,27 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp |= __GFP_NOWARN | __GFP_COLD; -#if FL_PG_ORDER > 0 + if (s->fl_pg_order == 0) + goto alloc_small_pages; + /* * Prefer large buffers */ while (n) { - pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER); + pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order); if (unlikely(!pg)) { q->large_alloc_failed++; break; /* fall back to single pages */ } mapping = dma_map_page(adap->pdev_dev, pg, 0, - PAGE_SIZE << FL_PG_ORDER, + PAGE_SIZE << s->fl_pg_order, PCI_DMA_FROMDEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { - __free_pages(pg, FL_PG_ORDER); + __free_pages(pg, s->fl_pg_order); goto out; /* do not try small pages for this error */ } - mapping |= RX_LARGE_BUF; + mapping |= RX_LARGE_PG_BUF; *d++ = cpu_to_be64(mapping); set_rx_sw_desc(sd, pg, mapping); @@ -525,8 +581,8 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, } n--; } -#endif +alloc_small_pages: while (n--) { pg = __skb_alloc_page(gfp, NULL); if (unlikely(!pg)) { @@ -769,8 +825,8 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) wmb(); /* write descriptors before telling HW */ spin_lock(&q->db_lock); if (!q->db_disabled) { - t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), - V_QID(q->cntxt_id) | V_PIDX(n)); + t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), + QID(q->cntxt_id) | PIDX(n)); } q->db_pidx = q->pidx; spin_unlock(&q->db_lock); @@ -1519,6 +1575,8 @@ static noinline int handle_trace_pkt(struct adapter *adap, static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, const struct cpl_rx_pkt *pkt) { + struct adapter *adapter = rxq->rspq.adap; + struct sge *s = &adapter->sge; int ret; struct sk_buff *skb; @@ -1529,8 +1587,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, return; } - copy_frags(skb, gl, RX_PKT_PAD); - skb->len = gl->tot_len - RX_PKT_PAD; + copy_frags(skb, gl, s->pktshift); + skb->len = gl->tot_len - s->pktshift; skb->data_len = skb->len; skb->truesize += skb->data_len; skb->ip_summed 
= CHECKSUM_UNNECESSARY; @@ -1566,6 +1624,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, struct sk_buff *skb; const struct cpl_rx_pkt *pkt; struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + struct sge *s = &q->adap->sge; if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) return handle_trace_pkt(q->adap, si); @@ -1585,7 +1644,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, return 0; } - __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ + __skb_pull(skb, s->pktshift); /* remove ethernet header padding */ skb->protocol = eth_type_trans(skb, q->netdev); skb_record_rx_queue(skb, q->idx); if (skb->dev->features & NETIF_F_RXHASH) @@ -1696,6 +1755,8 @@ static int process_responses(struct sge_rspq *q, int budget) int budget_left = budget; const struct rsp_ctrl *rc; struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + struct adapter *adapter = q->adap; + struct sge *s = &adapter->sge; while (likely(budget_left)) { rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); @@ -1722,7 +1783,7 @@ static int process_responses(struct sge_rspq *q, int budget) /* gather packet fragments */ for (frags = 0, fp = si.frags; ; frags++, fp++) { rsd = &rxq->fl.sdesc[rxq->fl.cidx]; - bufsz = get_buf_size(rsd); + bufsz = get_buf_size(adapter, rsd); fp->page = rsd->page; fp->offset = q->offset; fp->size = min(bufsz, len); @@ -1747,7 +1808,7 @@ static int process_responses(struct sge_rspq *q, int budget) si.nfrags = frags + 1; ret = q->handler(q, q->cur_desc, &si); if (likely(ret == 0)) - q->offset += ALIGN(fp->size, FL_ALIGN); + q->offset += ALIGN(fp->size, s->fl_align); else restore_rx_bufs(&si, &rxq->fl, frags); } else if (likely(rsp_type == RSP_TYPE_CPL)) { @@ -1983,6 +2044,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, { int ret, flsz = 0; struct fw_iq_cmd c; + struct sge *s = &adap->sge; struct port_info *pi = netdev_priv(dev); /* Size needs to be multiple of 16, including status entry. 
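The get_buf_size() switch above works because free-list DMA addresses are aligned well past two bits, so the driver can stash a small buffer-type index in the low-order bits of the address it hands to hardware. A standalone sketch of that encoding (not part of the patch; the real RX_BUF_SIZE mask and index values live in sge.c):

#include <stdio.h>

#define RX_BUF_SIZE     0x3ULL          /* assumed low-order-bits mask */
#define RX_LARGE_PG_BUF 0x1ULL          /* buffer-type index, as above */

int main(void)
{
        unsigned long long mapping = 0x12340000ULL; /* aligned DMA address */

        mapping |= RX_LARGE_PG_BUF;     /* tag the address with the type */
        printf("type=%llu addr=0x%llx\n",
               mapping & RX_BUF_SIZE, mapping & ~RX_BUF_SIZE);
        return 0;
}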
*/ @@ -2015,11 +2077,11 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, fl->size = roundup(fl->size, 8); fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), sizeof(struct rx_sw_desc), &fl->addr, - &fl->sdesc, STAT_LEN, NUMA_NO_NODE); + &fl->sdesc, s->stat_len, NUMA_NO_NODE); if (!fl->desc) goto fl_nomem; - flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc); + flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN | FW_IQ_CMD_FL0FETCHRO(1) | FW_IQ_CMD_FL0DATARO(1) | @@ -2096,14 +2158,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, { int ret, nentries; struct fw_eq_eth_cmd c; + struct sge *s = &adap->sge; struct port_info *pi = netdev_priv(dev); /* Add status entries */ - nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, sizeof(struct tx_desc), sizeof(struct tx_sw_desc), - &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN, + &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, netdev_queue_numa_node_read(netdevq)); if (!txq->q.desc) return -ENOMEM; @@ -2149,10 +2212,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, { int ret, nentries; struct fw_eq_ctrl_cmd c; + struct sge *s = &adap->sge; struct port_info *pi = netdev_priv(dev); /* Add status entries */ - nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); txq->q.desc = alloc_ring(adap->pdev_dev, nentries, sizeof(struct tx_desc), 0, &txq->q.phys_addr, @@ -2200,14 +2264,15 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, { int ret, nentries; struct fw_eq_ofld_cmd c; + struct sge *s = &adap->sge; struct port_info *pi = netdev_priv(dev); /* Add status entries */ - nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, sizeof(struct tx_desc), sizeof(struct tx_sw_desc), - &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN, + &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, NUMA_NO_NODE); if (!txq->q.desc) return -ENOMEM; @@ -2251,8 +2316,10 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, static void free_txq(struct adapter *adap, struct sge_txq *q) { + struct sge *s = &adap->sge; + dma_free_coherent(adap->pdev_dev, - q->size * sizeof(struct tx_desc) + STAT_LEN, + q->size * sizeof(struct tx_desc) + s->stat_len, q->desc, q->phys_addr); q->cntxt_id = 0; q->sdesc = NULL; @@ -2262,6 +2329,7 @@ static void free_txq(struct adapter *adap, struct sge_txq *q) static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl) { + struct sge *s = &adap->sge; unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; @@ -2276,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, if (fl) { free_rx_bufs(adap, fl, fl->avail); - dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN, + dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, fl->desc, fl->addr); kfree(fl->sdesc); fl->sdesc = NULL; @@ -2408,18 +2476,112 @@ void t4_sge_stop(struct adapter *adap) * Performs SGE initialization needed every time after a chip reset. * We do not initialize any of the queues here, instead the driver * top-level must request them individually. 
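The alloc_ring() calls above consistently append a status page of s->stat_len bytes to each ring, expressed as extra descriptor-sized entries. A small sketch of that arithmetic, assuming the usual 64-byte tx_desc and a hypothetical 1024-entry ring:

#include <stdio.h>

struct tx_desc { unsigned char flit[64]; }; /* 64-byte hardware descriptor */

int main(void)
{
        unsigned int stat_len = 128; /* 128 if EGRSTATUSPAGESIZE set, else 64 */
        unsigned int size = 1024;    /* hypothetical requested ring size */
        unsigned int nentries;

        /* The status page is appended as whole descriptor-sized slots: */
        nentries = size + stat_len / (unsigned int)sizeof(struct tx_desc);
        printf("nentries = %u\n", nentries); /* 1026 */
        return 0;
}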
+ * + * Called in two different modes: + * + * 1. Perform actual hardware initialization and record hard-coded + * parameters which were used. This gets used when we're the + * Master PF and the Firmware Configuration File support didn't + * work for some reason. + * + * 2. We're not the Master PF or initialization was performed with + * a Firmware Configuration File. In this case we need to grab + * any of the SGE operating parameters that we need to have in + * order to do our job and make sure we can live with them ... */ -void t4_sge_init(struct adapter *adap) + +static int t4_sge_init_soft(struct adapter *adap) { - unsigned int i, v; struct sge *s = &adap->sge; - unsigned int fl_align_log = ilog2(FL_ALIGN); + u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu; + u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5; + u32 ingress_rx_threshold; - t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK | - INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE, - INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) | - RXPKTCPLMODE | - (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0)); + /* + * Verify that CPL messages are going to the Ingress Queue for + * process_responses() and that only packet data is going to the + * Free Lists. + */ + if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) != + RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) { + dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); + return -EINVAL; + } + + /* + * Validate the Host Buffer Register Array indices that we want to + * use ... + * + * XXX Note that we should really read through the Host Buffer Size + * XXX register array and find the indices of the Buffer Sizes which + * XXX meet our needs! + */ + #define READ_FL_BUF(x) \ + t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32)) + + fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF); + fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF); + fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); + fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); + + #undef READ_FL_BUF + + if (fl_small_pg != PAGE_SIZE || + (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg || + (fl_large_pg & (fl_large_pg-1)) != 0))) { + dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", + fl_small_pg, fl_large_pg); + return -EINVAL; + } + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; + + if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) || + fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { + dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", + fl_small_mtu, fl_large_mtu); + return -EINVAL; + } + + /* + * Retrieve our RX interrupt holdoff timer values and counter + * threshold values from the SGE parameters. 
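t4_sge_init_soft() above recovers fl_pg_order from the large-page buffer size the firmware programmed, after checking that it is a power of two above PAGE_SIZE. A standalone sketch of the derivation, assuming 4KB base pages and a 64KB register value (ilog2_u32 stands in for the kernel's ilog2):

#include <stdio.h>

/* Stand-in for the kernel's ilog2() on a power of two. */
static unsigned int ilog2_u32(unsigned int x)
{
        unsigned int r = 0;

        while (x >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned int page_shift = 12;     /* assumed 4KB base pages */
        unsigned int fl_large_pg = 65536; /* value read back from the register */

        if (fl_large_pg & (fl_large_pg - 1))
                return 1; /* must be a power of two, as checked above */
        printf("fl_pg_order = %u\n", ilog2_u32(fl_large_pg) - page_shift); /* 4 */
        return 0;
}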
+ */ + timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1); + timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3); + timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5); + s->timer_val[0] = core_ticks_to_us(adap, + TIMERVALUE0_GET(timer_value_0_and_1)); + s->timer_val[1] = core_ticks_to_us(adap, + TIMERVALUE1_GET(timer_value_0_and_1)); + s->timer_val[2] = core_ticks_to_us(adap, + TIMERVALUE2_GET(timer_value_2_and_3)); + s->timer_val[3] = core_ticks_to_us(adap, + TIMERVALUE3_GET(timer_value_2_and_3)); + s->timer_val[4] = core_ticks_to_us(adap, + TIMERVALUE4_GET(timer_value_4_and_5)); + s->timer_val[5] = core_ticks_to_us(adap, + TIMERVALUE5_GET(timer_value_4_and_5)); + + ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD); + s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold); + s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold); + s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold); + s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold); + + return 0; +} + +static int t4_sge_init_hard(struct adapter *adap) +{ + struct sge *s = &adap->sge; + + /* + * Set up our basic SGE mode to deliver CPL messages to our Ingress + * Queue and Packet Data to the Free List. + */ + t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK, + RXPKTCPLMODE_MASK); /* * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows @@ -2433,13 +2595,24 @@ void t4_sge_init(struct adapter *adap) t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP, F_ENABLE_DROP); - for (i = v = 0; i < 32; i += 4) - v |= (PAGE_SHIFT - 10) << i; - t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v); - t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE); -#if FL_PG_ORDER > 0 - t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER); -#endif + /* + * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by + * t4_fixup_host_params(). + */ + s->fl_pg_order = FL_PG_ORDER; + if (s->fl_pg_order) + t4_write_reg(adap, + SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32), + PAGE_SIZE << FL_PG_ORDER); + t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32), + FL_MTU_SMALL_BUFSIZE(adap)); + t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32), + FL_MTU_LARGE_BUFSIZE(adap)); + + /* + * Note that the SGE Ingress Packet Count Interrupt Threshold and + * Timer Holdoff values must be supplied by our caller. + */ t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD, THRESHOLD_0(s->counter_val[0]) | THRESHOLD_1(s->counter_val[1]) | @@ -2449,14 +2622,54 @@ void t4_sge_init(struct adapter *adap) TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) | TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1]))); t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3, - TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) | - TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3]))); + TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) | + TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3]))); t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5, - TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) | - TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5]))); + TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) | + TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5]))); + + return 0; +} + +int t4_sge_init(struct adapter *adap) +{ + struct sge *s = &adap->sge; + u32 sge_control; + int ret; + + /* + * Ingress Padding Boundary and Egress Status Page Size are set up by + * t4_fixup_host_params(). 
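The timer_val[] handling above converts TIMERVALUE register fields between core-clock ticks and microseconds via core_ticks_to_us()/us_to_core_ticks(). A sketch of the arithmetic, assuming cclk is the core clock in kHz (the patch later defaults vpd.cclk to 50000, i.e. 50 MHz) and ignoring the kernel helpers' exact rounding:

#include <stdio.h>

/* cclk_khz is assumed to be the core clock in kHz; the real kernel
 * helpers round slightly differently. */
static unsigned int us_to_core_ticks(unsigned int cclk_khz, unsigned int us)
{
        return (us * cclk_khz) / 1000;
}

static unsigned int core_ticks_to_us(unsigned int cclk_khz, unsigned int ticks)
{
        return (ticks * 1000) / cclk_khz;
}

int main(void)
{
        unsigned int cclk = 50000;
        unsigned int ticks = us_to_core_ticks(cclk, 100);

        printf("100 us = %u ticks = %u us\n",
               ticks, core_ticks_to_us(cclk, ticks)); /* 5000 ticks, 100 us */
        return 0;
}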
+ */ + sge_control = t4_read_reg(adap, SGE_CONTROL); + s->pktshift = PKTSHIFT_GET(sge_control); + s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64; + s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) + + X_INGPADBOUNDARY_SHIFT); + + if (adap->flags & USING_SOFT_PARAMS) + ret = t4_sge_init_soft(adap); + else + ret = t4_sge_init_hard(adap); + if (ret < 0) + return ret; + + /* + * A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) + */ + s->fl_starve_thres + = EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1; + setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); s->starve_thres = core_ticks_per_usec(adap) * 1000000; /* 1 s */ s->idma_state[0] = s->idma_state[1] = 0; spin_lock_init(&s->intrq_lock); + + return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index fa947dfa4c30..ab732b378c4f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -120,6 +120,28 @@ static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, } } +/** + * t4_write_indirect - write indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect addresses + * @data_reg: register holding the value for the indirect registers + * @vals: values to write + * @nregs: how many indirect registers to write + * @start_idx: address of first indirect register to write + * + * Writes a sequential block of registers that are accessed indirectly + * through an address/data register pair. + */ +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx++); + t4_write_reg(adap, data_reg, *vals++); + } +} + /* * Get the reply to a mailbox command and store it in @rpl in big-endian order. */ @@ -330,6 +352,143 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) return 0; } +/* + * t4_mem_win_rw - read/write memory through PCIE memory window + * @adap: the adapter + * @addr: address of first byte requested + * @data: MEMWIN0_APERTURE bytes of data containing the requested address + * @dir: direction of transfer 1 => read, 0 => write + * + * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a + * MEMWIN0_APERTURE-byte-aligned address that covers the requested + * address @addr. + */ +static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) +{ + int i; + + /* + * Setup offset into PCIE memory window. Address must be a + * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to + * ensure that changes propagate before we attempt to use the new + * values.) 
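t4_sge_init() above no longer hard-codes pktshift, stat_len and fl_align; it decodes them from whatever SGE_CONTROL already contains. A standalone decode of a made-up register snapshot, using the PKTSHIFT/EGRSTATUSPAGESIZE masks from this patch (the INGPADBOUNDARY layout is an assumption here):

#include <stdio.h>

#define PKTSHIFT_MASK          0x00001c00U /* from t4_regs.h in this patch */
#define PKTSHIFT_SHIFT         10
#define EGRSTATUSPAGESIZE_MASK 0x00020000U
#define INGPADBOUNDARY_MASK    0x00000070U /* assumed: field in bits 4..6 */
#define INGPADBOUNDARY_SHIFT   4
#define X_INGPADBOUNDARY_SHIFT 5

int main(void)
{
        unsigned int sge_control = 0x00020822U; /* made-up register snapshot */
        unsigned int pktshift = (sge_control & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT;
        unsigned int stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
        unsigned int fl_align = 1U << (((sge_control & INGPADBOUNDARY_MASK) >>
                                        INGPADBOUNDARY_SHIFT) +
                                       X_INGPADBOUNDARY_SHIFT);

        printf("pktshift=%u stat_len=%u fl_align=%u\n",
               pktshift, stat_len, fl_align); /* 2, 128, 128 */
        return 0;
}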
+ */ + t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, + addr & ~(MEMWIN0_APERTURE - 1)); + t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); + + /* Collecting data 4 bytes at a time up to MEMWIN0_APERTURE */ + for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) { + if (dir) + *data++ = t4_read_reg(adap, (MEMWIN0_BASE + i)); + else + t4_write_reg(adap, (MEMWIN0_BASE + i), *data++); + } + + return 0; +} + +/** + * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window + * @adap: the adapter + * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC + * @addr: address within indicated memory type + * @len: amount of memory to transfer + * @buf: host memory buffer + * @dir: direction of transfer 1 => read, 0 => write + * + * Reads/writes an [almost] arbitrary memory region in the firmware: the + * firmware memory address, length and host buffer must be aligned on + * 32-bit boundaries. The memory is transferred as a raw byte sequence + * from/to the firmware's memory. If this memory contains data + * structures which contain multi-byte integers, it's the caller's + * responsibility to perform appropriate byte order conversions. + */ +static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len, + __be32 *buf, int dir) +{ + u32 pos, start, end, offset, memoffset; + int ret; + + /* + * Argument sanity checks ... + */ + if ((addr & 0x3) || (len & 0x3)) + return -EINVAL; + + /* + * Offset into the region of memory which is being accessed + * MEM_EDC0 = 0 + * MEM_EDC1 = 1 + * MEM_MC = 2 + */ + memoffset = (mtype * (5 * 1024 * 1024)); + + /* Determine the PCIE_MEM_ACCESS_OFFSET */ + addr = addr + memoffset; + + /* + * The underlying EDC/MC read routines read MEMWIN0_APERTURE bytes + * at a time so we need to round down the start and round up the end. + * We'll start copying out of the first line at (addr - start) a word + * at a time. + */ + start = addr & ~(MEMWIN0_APERTURE-1); + end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1); + offset = (addr - start)/sizeof(__be32); + + for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) { + __be32 data[MEMWIN0_APERTURE/sizeof(__be32)]; + + /* + * If we're writing, copy the data from the caller's memory + * buffer + */ + if (!dir) { + /* + * If we're doing a partial write, then we need to do + * a read-modify-write ... + */ + if (offset || len < MEMWIN0_APERTURE) { + ret = t4_mem_win_rw(adap, pos, data, 1); + if (ret) + return ret; + } + while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) && + len > 0) { + data[offset++] = *buf++; + len -= sizeof(__be32); + } + } + + /* + * Transfer a block of memory and bail if there's an error. + */ + ret = t4_mem_win_rw(adap, pos, data, dir); + if (ret) + return ret; + + /* + * If we're reading, copy the data into the caller's memory + * buffer. + */ + if (dir) + while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) && + len > 0) { + *buf++ = data[offset++]; + len -= sizeof(__be32); + } + } + + return 0; +} + +int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, + __be32 *buf) +{ + return t4_memory_rw(adap, mtype, addr, len, buf, 0); +} + #define EEPROM_STAT_ADDR 0x7bfc #define VPD_BASE 0 #define VPD_LEN 512 @@ -355,8 +514,9 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable) * * Reads card parameters stored in VPD EEPROM. 
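t4_memory_rw() above rounds the transfer down/up to whole MEMWIN0_APERTURE windows and starts copying at a word offset within the first window. The alignment arithmetic in isolation, with an assumed 2KB aperture:

#include <stdio.h>

#define APERTURE 2048U /* assumed MEMWIN0_APERTURE-style window size, bytes */

int main(void)
{
        unsigned int addr = 0x1234, len = 0x40; /* both 32-bit aligned */
        unsigned int start = addr & ~(APERTURE - 1);
        unsigned int end = (addr + len + APERTURE - 1) & ~(APERTURE - 1);
        unsigned int offset = (addr - start) / 4; /* first word in window */

        printf("start=0x%x end=0x%x offset=%u windows=%u\n",
               start, end, offset, (end - start) / APERTURE);
        return 0;
}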
*/ -static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) +int get_vpd_params(struct adapter *adapter, struct vpd_params *p) { + u32 cclk_param, cclk_val; int i, ret; int ec, sn; u8 vpd[VPD_LEN], csum; @@ -418,6 +578,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); strim(p->sn); + + /* + * Ask firmware for the Core Clock since it knows how to translate the + * Reference Clock ('V2') VPD field into a Core Clock value ... + */ + cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + ret = t4_query_params(adapter, adapter->mbox, 0, 0, + 1, &cclk_param, &cclk_val); + if (ret) + return ret; + p->cclk = cclk_val; + return 0; } @@ -718,6 +891,77 @@ static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) } /** + * t4_flash_cfg_addr - return the address of the flash configuration file + * @adapter: the adapter + * + * Return the address within the flash where the Firmware Configuration + * File is stored. + */ +unsigned int t4_flash_cfg_addr(struct adapter *adapter) +{ + if (adapter->params.sf_size == 0x100000) + return FLASH_FPGA_CFG_START; + else + return FLASH_CFG_START; +} + +/** + * t4_load_cfg - download config file + * @adap: the adapter + * @cfg_data: the cfg text file to write + * @size: text file size + * + * Write the supplied config text file to the card's serial flash. + */ +int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) +{ + int ret, i, n; + unsigned int addr; + unsigned int flash_cfg_start_sec; + unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; + + addr = t4_flash_cfg_addr(adap); + flash_cfg_start_sec = addr / SF_SEC_SIZE; + + if (size > FLASH_CFG_MAX_SIZE) { + dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n", + FLASH_CFG_MAX_SIZE); + return -EFBIG; + } + + i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */ + sf_sec_size); + ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, + flash_cfg_start_sec + i - 1); + /* + * If size == 0 then we're simply erasing the FLASH sectors associated + * with the on-adapter Firmware Configuration File. + */ + if (ret || size == 0) + goto out; + + /* this will write to the flash up to SF_PAGE_SIZE at a time */ + for (i = 0; i < size; i += SF_PAGE_SIZE) { + if ((size - i) < SF_PAGE_SIZE) + n = size - i; + else + n = SF_PAGE_SIZE; + ret = t4_write_flash(adap, addr, n, cfg_data); + if (ret) + goto out; + + addr += SF_PAGE_SIZE; + cfg_data += SF_PAGE_SIZE; + } + +out: + if (ret) + dev_err(adap->pdev_dev, "config file %s failed %d\n", + (size == 0 ? 
"clear" : "download"), ret); + return ret; +} + +/** * t4_load_fw - download firmware * @adap: the adapter * @fw_data: the firmware image to write @@ -1018,9 +1262,9 @@ static void sge_intr_handler(struct adapter *adapter) { ERR_INVALID_CIDX_INC, "SGE GTS CIDX increment too large", -1, 0 }, { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 }, - { F_DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, - { F_DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, - { F_ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, + { DBFIFO_LP_INT, NULL, -1, 0, t4_db_full }, + { DBFIFO_HP_INT, NULL, -1, 0, t4_db_full }, + { ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped }, { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0, "SGE IQID > 1023 received CPL for FL", -1, 0 }, { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1, @@ -1520,7 +1764,7 @@ void t4_intr_enable(struct adapter *adapter) ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 | ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO | ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR | - F_DBFIFO_HP_INT | F_DBFIFO_LP_INT | + DBFIFO_HP_INT | DBFIFO_LP_INT | EGRESS_SIZE_ERR); t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK); t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf); @@ -1717,6 +1961,23 @@ void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) } /** + * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register + * @adap: the adapter + * @addr: the indirect TP register address + * @mask: specifies the field within the register to modify + * @val: new value for the field + * + * Sets a field of an indirect TP register to the given value. + */ +void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, + unsigned int mask, unsigned int val) +{ + t4_write_reg(adap, TP_PIO_ADDR, addr); + val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask; + t4_write_reg(adap, TP_PIO_DATA, val); +} + +/** * init_cong_ctrl - initialize congestion control parameters * @a: the alpha values for congestion control * @b: the beta values for congestion control @@ -2000,9 +2261,9 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, struct fw_ldst_cmd c; memset(&c, 0, sizeof(c)); - c.op_to_addrspace = htonl(V_FW_CMD_OP(FW_LDST_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_WRITE | - V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE)); + c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST | + FW_CMD_WRITE | + FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE)); c.cycles_to_len16 = htonl(FW_LEN16(c)); c.u.addrval.addr = htonl(addr); c.u.addrval.val = htonl(val); @@ -2033,8 +2294,8 @@ int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) if ((addr & 3) || (len + off) > MEMWIN0_APERTURE) return -EINVAL; - t4_write_reg(adap, A_PCIE_MEM_ACCESS_OFFSET, addr & ~15); - t4_read_reg(adap, A_PCIE_MEM_ACCESS_OFFSET); + t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15); + t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); for (i = 0; i < len; i += 4) *data++ = t4_read_reg(adap, (MEMWIN0_BASE + off + i)); @@ -2102,39 +2363,129 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, } /** - * t4_fw_hello - establish communication with FW - * @adap: the adapter - * @mbox: mailbox to use for the FW command - * @evt_mbox: mailbox to receive async FW events - * @master: specifies the caller's willingness to be the device master - * @state: returns the current device state + * t4_fw_hello - establish communication with FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @evt_mbox: mailbox to receive async FW events + * 
@master: specifies the caller's willingness to be the device master + * @state: returns the current device state (if non-NULL) * - * Issues a command to establish communication with FW. + * Issues a command to establish communication with FW. Returns either + * an error (negative integer) or the mailbox of the Master PF. */ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, enum dev_master master, enum dev_state *state) { int ret; struct fw_hello_cmd c; + u32 v; + unsigned int master_mbox; + int retries = FW_CMD_HELLO_RETRIES; +retry: + memset(&c, 0, sizeof(c)); INIT_CMD(c, HELLO, WRITE); c.err_to_mbasyncnot = htonl( FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | - FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) | - FW_HELLO_CMD_MBASYNCNOT(evt_mbox)); + FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : + FW_HELLO_CMD_MBMASTER_MASK) | + FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | + FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) | + FW_HELLO_CMD_CLEARINIT); + /* + * Issue the HELLO command to the firmware. If it's not successful + * but indicates that we got a "busy" or "timeout" condition, retry + * the HELLO until we exhaust our retry limit. + */ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); - if (ret == 0 && state) { - u32 v = ntohl(c.err_to_mbasyncnot); - if (v & FW_HELLO_CMD_INIT) - *state = DEV_STATE_INIT; - else if (v & FW_HELLO_CMD_ERR) + if (ret < 0) { + if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) + goto retry; + return ret; + } + + v = ntohl(c.err_to_mbasyncnot); + master_mbox = FW_HELLO_CMD_MBMASTER_GET(v); + if (state) { + if (v & FW_HELLO_CMD_ERR) *state = DEV_STATE_ERR; + else if (v & FW_HELLO_CMD_INIT) + *state = DEV_STATE_INIT; else *state = DEV_STATE_UNINIT; } - return ret; + + /* + * If we're not the Master PF then we need to wait around for the + * Master PF Driver to finish setting up the adapter. + * + * Note that we also do this wait if we're a non-Master-capable PF and + * there is no current Master PF; a Master PF may show up momentarily + * and we wouldn't want to fail pointlessly. (This can happen when an + * OS loads lots of different drivers rapidly at the same time). In + * this case, the Master PF returned by the firmware will be + * FW_PCIE_FW_MASTER_MASK so the test below will work ... + */ + if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 && + master_mbox != mbox) { + int waiting = FW_CMD_HELLO_TIMEOUT; + + /* + * Wait for the firmware to either indicate an error or + * initialized state. If we see either of these we bail out + * and report the issue to the caller. If we exhaust the + * "hello timeout" and we haven't exhausted our retries, try + * again. Otherwise bail with a timeout error. + */ + for (;;) { + u32 pcie_fw; + + msleep(50); + waiting -= 50; + + /* + * If neither Error nor Initialized is indicated + * by the firmware, keep waiting till we exhaust our + * timeout ... and then retry if we haven't exhausted + * our retries ... + */ + pcie_fw = t4_read_reg(adap, MA_PCIE_FW); + if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) { + if (waiting <= 0) { + if (retries-- > 0) + goto retry; + + return -ETIMEDOUT; + } + continue; + } + + /* + * We either have an Error or Initialized condition; + * report errors preferentially. 
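A caller of the reworked t4_fw_hello() gets back the Master PF's mailbox, with the all-ones field value meaning no master has been selected yet. A sketch of the decode using the FW_HELLO_CMD_MBMASTER macros this patch adds (the reply word is made up):

#include <stdio.h>

#define FW_HELLO_CMD_MBMASTER_SHIFT 24 /* as defined in t4fw_api.h below */
#define FW_HELLO_CMD_MBMASTER_MASK  0xfU
#define FW_HELLO_CMD_MBMASTER_GET(x) \
        (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK)

int main(void)
{
        /* Made-up reply word; firmware echoes the Master PF mailbox here. */
        unsigned int err_to_mbasyncnot = 4U << FW_HELLO_CMD_MBMASTER_SHIFT;
        unsigned int master_mbox = FW_HELLO_CMD_MBMASTER_GET(err_to_mbasyncnot);

        if (master_mbox == FW_HELLO_CMD_MBMASTER_MASK)
                printf("no Master PF selected yet, poll PCIE_FW\n");
        else
                printf("Master PF mailbox = %u\n", master_mbox); /* 4 */
        return 0;
}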
+ */ + if (state) { + if (pcie_fw & FW_PCIE_FW_ERR) + *state = DEV_STATE_ERR; + else if (pcie_fw & FW_PCIE_FW_INIT) + *state = DEV_STATE_INIT; + } + + /* + * If we arrived before a Master PF was selected and + * a valid Master PF has since shown up, grab its + * identity for our caller. + */ + if (master_mbox == FW_PCIE_FW_MASTER_MASK && + (pcie_fw & FW_PCIE_FW_MASTER_VLD)) + master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw); + break; + } + } + + return master_mbox; } /** @@ -2186,6 +2537,334 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) } /** + * t4_fw_halt - issue a reset/halt to FW and put uP into RESET + * @adap: the adapter + * @mbox: mailbox to use for the FW RESET command (if desired) + * @force: force uP into RESET even if FW RESET command fails + * + * Issues a RESET command to firmware (if desired) with a HALT indication + * and then puts the microprocessor into RESET state. The RESET command + * will only be issued if a legitimate mailbox is provided (mbox <= + * FW_PCIE_FW_MASTER_MASK). + * + * This is generally used in order for the host to safely manipulate the + * adapter without fear of conflicting with whatever the firmware might + * be doing. The only way out of this state is to RESTART the firmware + * ... + */ +int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) +{ + int ret = 0; + + /* + * If a legitimate mailbox is provided, issue a RESET command + * with a HALT indication. + */ + if (mbox <= FW_PCIE_FW_MASTER_MASK) { + struct fw_reset_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, RESET, WRITE); + c.val = htonl(PIORST | PIORSTMODE); + c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U)); + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + } + + /* + * Normally we won't complete the operation if the firmware RESET + * command fails, but if our caller insists we'll go ahead and put the + * uP into RESET. This can be useful if the firmware is hung or even + * missing ... We'll have to take the risk of putting the uP into + * RESET without the cooperation of firmware in that case. + * + * We also force the firmware's HALT flag to be on in case we bypassed + * the firmware RESET command above or we're dealing with old firmware + * which doesn't have the HALT capability. This will serve as a flag + * for the incoming firmware to know that it's coming out of a HALT + * rather than a RESET ... if it's new enough to understand that ... + */ + if (ret == 0 || force) { + t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST); + t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, + FW_PCIE_FW_HALT); + } + + /* + * And we always return the result of the firmware RESET command + * even when we force the uP into RESET ... + */ + return ret; +} + +/** + * t4_fw_restart - restart the firmware by taking the uP out of RESET + * @adap: the adapter + * @mbox: mailbox to use for the FW RESET command (if desired) + * @reset: if we want to do a RESET to restart things + * + * Restart firmware previously halted by t4_fw_halt(). On successful + * return the previous PF Master remains as the new PF Master and there + * is no need to issue a new HELLO command, etc. + * + * We do this in two ways: + * + * 1. If we're dealing with newer firmware we'll simply want to take + * the chip's microprocessor out of RESET. This will cause the + * firmware to start up from its start vector. And then we'll loop + * until the firmware indicates it's started again (PCIE_FW.HALT + * reset to 0) or we time out. + * + * 2. 
If we're dealing with older firmware then we'll need to RESET + * the chip since older firmware won't recognize the PCIE_FW.HALT + * flag and automatically RESET itself on startup. + */ +int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) +{ + if (reset) { + /* + * Since we're directing the RESET instead of the firmware + * doing it automatically, we need to clear the PCIE_FW.HALT + * bit. + */ + t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0); + + /* + * If we've been given a valid mailbox, first try to get the + * firmware to do the RESET. If that works, great and we can + * return success. Otherwise, if we haven't been given a + * valid mailbox or the RESET command failed, fall back to + * hitting the chip with a hammer. + */ + if (mbox <= FW_PCIE_FW_MASTER_MASK) { + t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); + msleep(100); + if (t4_fw_reset(adap, mbox, + PIORST | PIORSTMODE) == 0) + return 0; + } + + t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE); + msleep(2000); + } else { + int ms; + + t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); + for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { + if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT)) + return 0; + msleep(100); + ms += 100; + } + return -ETIMEDOUT; + } + return 0; +} + +/** + * t4_fw_upgrade - perform all of the steps necessary to upgrade FW + * @adap: the adapter + * @mbox: mailbox to use for the FW RESET command (if desired) + * @fw_data: the firmware image to write + * @size: image size + * @force: force upgrade even if firmware doesn't cooperate + * + * Perform all of the steps necessary for upgrading an adapter's + * firmware image. Normally this requires the cooperation of the + * existing firmware in order to halt all existing activities + * but if an invalid mailbox token is passed in we skip that step + * (though we'll still put the adapter microprocessor into RESET in + * that case). + * + * On successful return the new firmware will have been loaded and + * the adapter will have been fully RESET losing all previous setup + * state. On unsuccessful return the adapter may be completely hosed ... + * positive errno indicates that the adapter is ~probably~ intact, a + * negative errno indicates that things are looking bad ... + */ +int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, + const u8 *fw_data, unsigned int size, int force) +{ + const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; + int reset, ret; + + ret = t4_fw_halt(adap, mbox, force); + if (ret < 0 && !force) + return ret; + + ret = t4_load_fw(adap, fw_data, size); + if (ret < 0) + return ret; + + /* + * Older versions of the firmware don't understand the new + * PCIE_FW.HALT flag and so won't know to perform a RESET when they + * restart. So for newly loaded older firmware we'll have to do the + * RESET for it so it starts up on a clean slate. We can tell if + * the newly loaded firmware will handle this right by checking + * its header flags to see if it advertises the capability. 
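The `reset` decision at the tail of t4_fw_upgrade() hinges on a single header flag: images that advertise FW_HDR_FLAGS_RESET_HALT can restart themselves out of a HALT, while older ones need a driver-driven chip RESET. The check in isolation (a userspace sketch; fw_hdr_sketch is a stand-in for the real struct fw_hdr):

#include <arpa/inet.h> /* htonl/ntohl */
#include <stdio.h>

#define FW_HDR_FLAGS_RESET_HALT 0x00000001U /* from t4fw_api.h below */

struct fw_hdr_sketch { unsigned int flags; }; /* stand-in for struct fw_hdr */

int main(void)
{
        struct fw_hdr_sketch hdr = { .flags = htonl(0) }; /* old image */
        int reset = ((ntohl(hdr.flags) & FW_HDR_FLAGS_RESET_HALT) == 0);

        printf("driver must RESET the chip: %s\n", reset ? "yes" : "no");
        return 0;
}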
+ */ + reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); + return t4_fw_restart(adap, mbox, reset); +} + + +/** + * t4_fw_config_file - set up an adapter via a Configuration File + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @mtype: the memory type where the Configuration File is located + * @maddr: the memory address where the Configuration File is located + * @finiver: return value for CF [fini] version + * @finicsum: return value for CF [fini] checksum + * @cfcsum: return value for CF computed checksum + * + * Issue a command to get the firmware to process the Configuration + * File located at the specified mtype/maddr. If the Configuration + * File is processed successfully and return value pointers are + * provided, the Configuration File "[fini]" section version and + * checksum values will be returned along with the computed checksum. + * It's up to the caller to decide how it wants to respond to the + * checksums not matching, but it's recommended that a prominent warning + * be emitted in order to help people rapidly identify changed or + * corrupted Configuration Files. + * + * Also note that it's possible to modify things like "niccaps", + * "toecaps", etc. between processing the Configuration File and telling + * the firmware to use the new configuration. Callers which want to + * do this will need to "hand-roll" their own CAPS_CONFIGS commands for + * Configuration Files. + */ +int t4_fw_config_file(struct adapter *adap, unsigned int mbox, + unsigned int mtype, unsigned int maddr, + u32 *finiver, u32 *finicsum, u32 *cfcsum) +{ + struct fw_caps_config_cmd caps_cmd; + int ret; + + /* + * Tell the firmware to process the indicated Configuration File. + * If there are no errors and the caller has provided return value + * pointers for the [fini] section version, checksum and computed + * checksum, pass those back to the caller. + */ + memset(&caps_cmd, 0, sizeof(caps_cmd)); + caps_cmd.op_to_write = + htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_READ); + caps_cmd.retval_len16 = + htonl(FW_CAPS_CONFIG_CMD_CFVALID | + FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | + FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | + FW_LEN16(caps_cmd)); + ret = t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), &caps_cmd); + if (ret < 0) + return ret; + + if (finiver) + *finiver = ntohl(caps_cmd.finiver); + if (finicsum) + *finicsum = ntohl(caps_cmd.finicsum); + if (cfcsum) + *cfcsum = ntohl(caps_cmd.cfcsum); + + /* + * And now tell the firmware to use the configuration we just loaded. + */ + caps_cmd.op_to_write = + htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE); + caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd)); + return t4_wr_mbox(adap, mbox, &caps_cmd, sizeof(caps_cmd), NULL); +} + +/** + * t4_fixup_host_params - fix up host-dependent parameters + * @adap: the adapter + * @page_size: the host's Base Page Size + * @cache_line_size: the host's Cache Line Size + * + * Various registers in T4 contain values which are dependent on the + * host's Base Page and Cache Line Sizes. This function will fix all of + * those registers with the appropriate values as passed in ... + */ +int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size) +{ + unsigned int page_shift = fls(page_size) - 1; + unsigned int sge_hps = page_shift - 10; + unsigned int stat_len = cache_line_size > 64 ? 128 : 64; + unsigned int fl_align = cache_line_size < 32 ? 
32 : cache_line_size; + unsigned int fl_align_log = fls(fl_align) - 1; + + t4_write_reg(adap, SGE_HOST_PAGE_SIZE, + HOSTPAGESIZEPF0(sge_hps) | + HOSTPAGESIZEPF1(sge_hps) | + HOSTPAGESIZEPF2(sge_hps) | + HOSTPAGESIZEPF3(sge_hps) | + HOSTPAGESIZEPF4(sge_hps) | + HOSTPAGESIZEPF5(sge_hps) | + HOSTPAGESIZEPF6(sge_hps) | + HOSTPAGESIZEPF7(sge_hps)); + + t4_set_reg_field(adap, SGE_CONTROL, + INGPADBOUNDARY(INGPADBOUNDARY_MASK) | + EGRSTATUSPAGESIZE_MASK, + INGPADBOUNDARY(fl_align_log - 5) | + EGRSTATUSPAGESIZE(stat_len != 64)); + + /* + * Adjust various SGE Free List Host Buffer Sizes. + * + * This is something of a crock since we're using fixed indices into + * the array which are also known by the sge.c code and the T4 + * Firmware Configuration File. We need to come up with a much better + * approach to managing this array. For now, the first four entries + * are: + * + * 0: Host Page Size + * 1: 64KB + * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode) + * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode) + * + * For the single-MTU buffers in unpacked mode we need to include + * space for the SGE Control Packet Shift, 14 byte Ethernet header, + * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet + * Padding boundary. All of these are accommodated in the Factory + * Default Firmware Configuration File but we need to adjust it for + * this host's cache line size. + */ + t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size); + t4_write_reg(adap, SGE_FL_BUFFER_SIZE2, + (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1) + & ~(fl_align-1)); + t4_write_reg(adap, SGE_FL_BUFFER_SIZE3, + (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1) + & ~(fl_align-1)); + + t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12)); + + return 0; +} + +/** + * t4_fw_initialize - ask FW to initialize the device + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * + * Issues a command to FW to partially initialize the device. This + * performs initialization that generally doesn't depend on user input. + */ +int t4_fw_initialize(struct adapter *adap, unsigned int mbox) +{ + struct fw_initialize_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, INITIALIZE, WRITE); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** * t4_query_params - query FW or device parameters * @adap: the adapter * @mbox: mailbox to use for the FW command @@ -2837,10 +3516,6 @@ int __devinit t4_prep_adapter(struct adapter *adapter) return ret; } - ret = get_vpd_params(adapter, &adapter->params.vpd); - if (ret < 0) - return ret; - init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); /* @@ -2848,6 +3523,7 @@ int __devinit t4_prep_adapter(struct adapter *adapter) */ adapter->params.nports = 1; adapter->params.portvec = 1; + adapter->params.vpd.cclk = 50000; return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index c26b455f37de..f534ed7e10e9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -58,6 +58,7 @@ enum { enum { SF_PAGE_SIZE = 256, /* serial flash page size */ + SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ }; enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */ @@ -137,4 +138,83 @@ struct rsp_ctrl { #define QINTR_CNT_EN 0x1 #define QINTR_TIMER_IDX(x) ((x) << 1) #define QINTR_TIMER_IDX_GET(x) (((x) >> 1) & 0x7) + +/* + * Flash layout. 
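The SGE_FL_BUFFER_SIZE2/3 updates in t4_fixup_host_params() above round the firmware-provided MTU buffer sizes up to the host cache line with the usual power-of-two trick. The same arithmetic in isolation, with an assumed 128-byte cache line:

#include <stdio.h>

/* Round x up to a multiple of align; align must be a power of two. */
static unsigned int round_up_pow2(unsigned int x, unsigned int align)
{
        return (x + align - 1) & ~(align - 1);
}

int main(void)
{
        unsigned int fl_align = 128; /* assumed host cache line size */

        /* e.g. a 1500-byte-MTU buffer size read from SGE_FL_BUFFER_SIZE2 */
        printf("%u -> %u\n", 1566, round_up_pow2(1566, fl_align)); /* 1664 */
        return 0;
}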
+ */ +#define FLASH_START(start) ((start) * SF_SEC_SIZE) +#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE) + +enum { + /* + * Various Expansion-ROM boot images, etc. + */ + FLASH_EXP_ROM_START_SEC = 0, + FLASH_EXP_ROM_NSECS = 6, + FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC), + FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS), + + /* + * iSCSI Boot Firmware Table (iBFT) and other driver-related + * parameters ... + */ + FLASH_IBFT_START_SEC = 6, + FLASH_IBFT_NSECS = 1, + FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC), + FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS), + + /* + * Boot configuration data. + */ + FLASH_BOOTCFG_START_SEC = 7, + FLASH_BOOTCFG_NSECS = 1, + FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC), + FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS), + + /* + * Location of firmware image in FLASH. + */ + FLASH_FW_START_SEC = 8, + FLASH_FW_NSECS = 8, + FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), + FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), + + /* + * iSCSI persistent/crash information. + */ + FLASH_ISCSI_CRASH_START_SEC = 29, + FLASH_ISCSI_CRASH_NSECS = 1, + FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC), + FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS), + + /* + * FCoE persistent/crash information. + */ + FLASH_FCOE_CRASH_START_SEC = 30, + FLASH_FCOE_CRASH_NSECS = 1, + FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC), + FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS), + + /* + * Location of Firmware Configuration File in FLASH. Since the FPGA + * "FLASH" is smaller we need to store the Configuration File in a + * different location -- which will overlap the end of the firmware + * image if firmware ever gets that large ... + */ + FLASH_CFG_START_SEC = 31, + FLASH_CFG_NSECS = 1, + FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC), + FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS), + + FLASH_FPGA_CFG_START_SEC = 15, + FLASH_FPGA_CFG_START = FLASH_START(FLASH_FPGA_CFG_START_SEC), + + /* + * Sectors 32-63 are reserved for FLASH failover. 
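The FLASH_START()/FLASH_MAX_SIZE() helpers reduce the layout above to pure sector arithmetic; for instance, the firmware image region (sectors 8-15) works out as follows:

#include <stdio.h>

#define SF_SEC_SIZE (64 * 1024) /* serial flash sector size, as above */
#define FLASH_START(start)    ((start) * SF_SEC_SIZE)
#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)

int main(void)
{
        /* Firmware image region: sectors 8..15 per the layout above. */
        printf("FW start=0x%x max size=0x%x\n",
               FLASH_START(8), FLASH_MAX_SIZE(8)); /* 0x80000, 0x80000 */
        return 0;
}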
+ */ +}; + +#undef FLASH_START +#undef FLASH_MAX_SIZE + #endif /* __T4_HW_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 111fc323f155..a1a8b57200f6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -86,10 +86,17 @@ #define CIDXINC_SHIFT 0 #define CIDXINC(x) ((x) << CIDXINC_SHIFT) +#define X_RXPKTCPLMODE_SPLIT 1 +#define X_INGPADBOUNDARY_SHIFT 5 + #define SGE_CONTROL 0x1008 #define DCASYSTYPE 0x00080000U -#define RXPKTCPLMODE 0x00040000U -#define EGRSTATUSPAGESIZE 0x00020000U +#define RXPKTCPLMODE_MASK 0x00040000U +#define RXPKTCPLMODE_SHIFT 18 +#define RXPKTCPLMODE(x) ((x) << RXPKTCPLMODE_SHIFT) +#define EGRSTATUSPAGESIZE_MASK 0x00020000U +#define EGRSTATUSPAGESIZE_SHIFT 17 +#define EGRSTATUSPAGESIZE(x) ((x) << EGRSTATUSPAGESIZE_SHIFT) #define PKTSHIFT_MASK 0x00001c00U #define PKTSHIFT_SHIFT 10 #define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) @@ -108,6 +115,35 @@ #define GLOBALENABLE 0x00000001U #define SGE_HOST_PAGE_SIZE 0x100c + +#define HOSTPAGESIZEPF7_MASK 0x0000000fU +#define HOSTPAGESIZEPF7_SHIFT 28 +#define HOSTPAGESIZEPF7(x) ((x) << HOSTPAGESIZEPF7_SHIFT) + +#define HOSTPAGESIZEPF6_MASK 0x0000000fU +#define HOSTPAGESIZEPF6_SHIFT 24 +#define HOSTPAGESIZEPF6(x) ((x) << HOSTPAGESIZEPF6_SHIFT) + +#define HOSTPAGESIZEPF5_MASK 0x0000000fU +#define HOSTPAGESIZEPF5_SHIFT 20 +#define HOSTPAGESIZEPF5(x) ((x) << HOSTPAGESIZEPF5_SHIFT) + +#define HOSTPAGESIZEPF4_MASK 0x0000000fU +#define HOSTPAGESIZEPF4_SHIFT 16 +#define HOSTPAGESIZEPF4(x) ((x) << HOSTPAGESIZEPF4_SHIFT) + +#define HOSTPAGESIZEPF3_MASK 0x0000000fU +#define HOSTPAGESIZEPF3_SHIFT 12 +#define HOSTPAGESIZEPF3(x) ((x) << HOSTPAGESIZEPF3_SHIFT) + +#define HOSTPAGESIZEPF2_MASK 0x0000000fU +#define HOSTPAGESIZEPF2_SHIFT 8 +#define HOSTPAGESIZEPF2(x) ((x) << HOSTPAGESIZEPF2_SHIFT) + +#define HOSTPAGESIZEPF1_MASK 0x0000000fU +#define HOSTPAGESIZEPF1_SHIFT 4 +#define HOSTPAGESIZEPF1(x) ((x) << HOSTPAGESIZEPF1_SHIFT) + #define HOSTPAGESIZEPF0_MASK 0x0000000fU #define HOSTPAGESIZEPF0_SHIFT 0 #define HOSTPAGESIZEPF0(x) ((x) << HOSTPAGESIZEPF0_SHIFT) @@ -155,6 +191,8 @@ #define SGE_INT_ENABLE3 0x1040 #define SGE_FL_BUFFER_SIZE0 0x1044 #define SGE_FL_BUFFER_SIZE1 0x1048 +#define SGE_FL_BUFFER_SIZE2 0x104c +#define SGE_FL_BUFFER_SIZE3 0x1050 #define SGE_INGRESS_RX_THRESHOLD 0x10a0 #define THRESHOLD_0_MASK 0x3f000000U #define THRESHOLD_0_SHIFT 24 @@ -173,6 +211,12 @@ #define THRESHOLD_3(x) ((x) << THRESHOLD_3_SHIFT) #define THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT) +#define SGE_CONM_CTRL 0x1094 +#define EGRTHRESHOLD_MASK 0x00003f00U +#define EGRTHRESHOLDshift 8 +#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift) +#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift) + #define SGE_TIMER_VALUE_0_AND_1 0x10b8 #define TIMERVALUE0_MASK 0xffff0000U #define TIMERVALUE0_SHIFT 16 @@ -184,64 +228,54 @@ #define TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT) #define SGE_TIMER_VALUE_2_AND_3 0x10bc +#define TIMERVALUE2_MASK 0xffff0000U +#define TIMERVALUE2_SHIFT 16 +#define TIMERVALUE2(x) ((x) << TIMERVALUE2_SHIFT) +#define TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT) +#define TIMERVALUE3_MASK 0x0000ffffU +#define TIMERVALUE3_SHIFT 0 +#define TIMERVALUE3(x) ((x) << TIMERVALUE3_SHIFT) +#define TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT) + #define SGE_TIMER_VALUE_4_AND_5 0x10c0 +#define TIMERVALUE4_MASK 0xffff0000U +#define 
TIMERVALUE4_SHIFT 16 +#define TIMERVALUE4(x) ((x) << TIMERVALUE4_SHIFT) +#define TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT) +#define TIMERVALUE5_MASK 0x0000ffffU +#define TIMERVALUE5_SHIFT 0 +#define TIMERVALUE5(x) ((x) << TIMERVALUE5_SHIFT) +#define TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT) + #define SGE_DEBUG_INDEX 0x10cc #define SGE_DEBUG_DATA_HIGH 0x10d0 #define SGE_DEBUG_DATA_LOW 0x10d4 #define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 -#define S_LP_INT_THRESH 12 -#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH) #define S_HP_INT_THRESH 28 +#define M_HP_INT_THRESH 0xfU #define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH) +#define M_HP_COUNT 0x7ffU +#define S_HP_COUNT 16 +#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT) +#define S_LP_INT_THRESH 12 +#define M_LP_INT_THRESH 0xfU +#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH) +#define M_LP_COUNT 0x7ffU +#define S_LP_COUNT 0 +#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT) #define A_SGE_DBFIFO_STATUS 0x10a4 #define S_ENABLE_DROP 13 #define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP) #define F_ENABLE_DROP V_ENABLE_DROP(1U) -#define A_SGE_DOORBELL_CONTROL 0x10a8 - -#define A_SGE_CTXT_CMD 0x11fc -#define A_SGE_DBQ_CTXT_BADDR 0x1084 - -#define A_SGE_PF_KDOORBELL 0x0 - -#define S_QID 15 -#define V_QID(x) ((x) << S_QID) - -#define S_PIDX 0 -#define V_PIDX(x) ((x) << S_PIDX) - -#define M_LP_COUNT 0x7ffU -#define S_LP_COUNT 0 -#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT) - -#define M_HP_COUNT 0x7ffU -#define S_HP_COUNT 16 -#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT) - -#define A_SGE_INT_ENABLE3 0x1040 - -#define S_DBFIFO_HP_INT 8 -#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT) -#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U) - -#define S_DBFIFO_LP_INT 7 -#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT) -#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U) - #define S_DROPPED_DB 0 #define V_DROPPED_DB(x) ((x) << S_DROPPED_DB) #define F_DROPPED_DB V_DROPPED_DB(1U) +#define A_SGE_DOORBELL_CONTROL 0x10a8 -#define S_ERR_DROPPED_DB 18 -#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB) -#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U) - -#define A_PCIE_MEM_ACCESS_OFFSET 0x306c - -#define M_HP_INT_THRESH 0xfU -#define M_LP_INT_THRESH 0xfU +#define A_SGE_CTXT_CMD 0x11fc +#define A_SGE_DBQ_CTXT_BADDR 0x1084 #define PCIE_PF_CLI 0x44 #define PCIE_INT_CAUSE 0x3004 @@ -287,6 +321,8 @@ #define WINDOW(x) ((x) << WINDOW_SHIFT) #define PCIE_MEM_ACCESS_OFFSET 0x306c +#define PCIE_FW 0x30b8 + #define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908 #define RNPP 0x80000000U #define RPCP 0x20000000U @@ -364,7 +400,7 @@ #define MEM_WRAP_CLIENT_NUM_MASK 0x0000000fU #define MEM_WRAP_CLIENT_NUM_SHIFT 0 #define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) - +#define MA_PCIE_FW 0x30b8 #define MA_PARITY_ERROR_STATUS 0x77f4 #define EDC_0_BASE_ADDR 0x7900 @@ -385,6 +421,7 @@ #define CIM_BOOT_CFG 0x7b00 #define BOOTADDR_MASK 0xffffff00U +#define UPCRST 0x1U #define CIM_PF_MAILBOX_DATA 0x240 #define CIM_PF_MAILBOX_CTRL 0x280 @@ -457,6 +494,13 @@ #define VLANEXTENABLE_MASK 0x0000f000U #define VLANEXTENABLE_SHIFT 12 +#define TP_GLOBAL_CONFIG 0x7d08 +#define FIVETUPLELOOKUP_SHIFT 17 +#define FIVETUPLELOOKUP_MASK 0x00060000U +#define FIVETUPLELOOKUP(x) ((x) << FIVETUPLELOOKUP_SHIFT) +#define FIVETUPLELOOKUP_GET(x) (((x) & FIVETUPLELOOKUP_MASK) >> \ + FIVETUPLELOOKUP_SHIFT) + #define TP_PARA_REG2 0x7d68 #define MAXRXDATA_MASK 0xffff0000U #define 
MAXRXDATA_SHIFT 16 @@ -466,8 +510,47 @@ #define TIMERRESOLUTION_MASK 0x00ff0000U #define TIMERRESOLUTION_SHIFT 16 #define TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT) +#define DELAYEDACKRESOLUTION_MASK 0x000000ffU +#define DELAYEDACKRESOLUTION_SHIFT 0 +#define DELAYEDACKRESOLUTION_GET(x) \ + (((x) & DELAYEDACKRESOLUTION_MASK) >> DELAYEDACKRESOLUTION_SHIFT) #define TP_SHIFT_CNT 0x7dc0 +#define SYNSHIFTMAX_SHIFT 24 +#define SYNSHIFTMAX_MASK 0xff000000U +#define SYNSHIFTMAX(x) ((x) << SYNSHIFTMAX_SHIFT) +#define SYNSHIFTMAX_GET(x) (((x) & SYNSHIFTMAX_MASK) >> \ + SYNSHIFTMAX_SHIFT) +#define RXTSHIFTMAXR1_SHIFT 20 +#define RXTSHIFTMAXR1_MASK 0x00f00000U +#define RXTSHIFTMAXR1(x) ((x) << RXTSHIFTMAXR1_SHIFT) +#define RXTSHIFTMAXR1_GET(x) (((x) & RXTSHIFTMAXR1_MASK) >> \ + RXTSHIFTMAXR1_SHIFT) +#define RXTSHIFTMAXR2_SHIFT 16 +#define RXTSHIFTMAXR2_MASK 0x000f0000U +#define RXTSHIFTMAXR2(x) ((x) << RXTSHIFTMAXR2_SHIFT) +#define RXTSHIFTMAXR2_GET(x) (((x) & RXTSHIFTMAXR2_MASK) >> \ + RXTSHIFTMAXR2_SHIFT) +#define PERSHIFTBACKOFFMAX_SHIFT 12 +#define PERSHIFTBACKOFFMAX_MASK 0x0000f000U +#define PERSHIFTBACKOFFMAX(x) ((x) << PERSHIFTBACKOFFMAX_SHIFT) +#define PERSHIFTBACKOFFMAX_GET(x) (((x) & PERSHIFTBACKOFFMAX_MASK) >> \ + PERSHIFTBACKOFFMAX_SHIFT) +#define PERSHIFTMAX_SHIFT 8 +#define PERSHIFTMAX_MASK 0x00000f00U +#define PERSHIFTMAX(x) ((x) << PERSHIFTMAX_SHIFT) +#define PERSHIFTMAX_GET(x) (((x) & PERSHIFTMAX_MASK) >> \ + PERSHIFTMAX_SHIFT) +#define KEEPALIVEMAXR1_SHIFT 4 +#define KEEPALIVEMAXR1_MASK 0x000000f0U +#define KEEPALIVEMAXR1(x) ((x) << KEEPALIVEMAXR1_SHIFT) +#define KEEPALIVEMAXR1_GET(x) (((x) & KEEPALIVEMAXR1_MASK) >> \ + KEEPALIVEMAXR1_SHIFT) +#define KEEPALIVEMAXR2_SHIFT 0 +#define KEEPALIVEMAXR2_MASK 0x0000000fU +#define KEEPALIVEMAXR2(x) ((x) << KEEPALIVEMAXR2_SHIFT) +#define KEEPALIVEMAXR2_GET(x) (((x) & KEEPALIVEMAXR2_MASK) >> \ + KEEPALIVEMAXR2_SHIFT) #define TP_CCTRL_TABLE 0x7ddc #define TP_MTU_TABLE 0x7de4 @@ -501,6 +584,20 @@ #define TP_INT_CAUSE 0x7e74 #define FLMTXFLSTEMPTY 0x40000000U +#define TP_VLAN_PRI_MAP 0x140 +#define FRAGMENTATION_SHIFT 9 +#define FRAGMENTATION_MASK 0x00000200U +#define MPSHITTYPE_MASK 0x00000100U +#define MACMATCH_MASK 0x00000080U +#define ETHERTYPE_MASK 0x00000040U +#define PROTOCOL_MASK 0x00000020U +#define TOS_MASK 0x00000010U +#define VLAN_MASK 0x00000008U +#define VNIC_ID_MASK 0x00000004U +#define PORT_MASK 0x00000002U +#define FCOE_SHIFT 0 +#define FCOE_MASK 0x00000001U + #define TP_INGRESS_CONFIG 0x141 #define VNIC 0x00000800U #define CSUM_HAS_PSEUDO_HDR 0x00000400U diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index ad53f796b574..a6364632b490 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -79,6 +79,8 @@ struct fw_wr_hdr { #define FW_WR_FLOWID(x) ((x) << 8) #define FW_WR_LEN16(x) ((x) << 0) +#define HW_TPL_FR_MT_PR_IV_P_FC 0X32B + struct fw_ulptx_wr { __be32 op_to_compl; __be32 flowid_len16; @@ -155,6 +157,17 @@ struct fw_eth_tx_pkt_vm_wr { #define FW_CMD_MAX_TIMEOUT 3000 +/* + * If a host driver does a HELLO and discovers that there's already a MASTER + * selected, we may have to wait for that MASTER to finish issuing RESET, + * configuration and INITIALIZE commands. Also, there's a possibility that + * our own HELLO may get lost if it happens right as the MASTER is issuing a + * RESET command, so we need to be willing to make a few retries of our HELLO. 
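The register additions above all follow the same MASK/SHIFT/constructor/GET pattern, which supports read-modify-write of a single field without disturbing its neighbours. Using the RXTSHIFTMAXR2 field from the TP_SHIFT_CNT hunk as the example:

#include <stdio.h>

#define RXTSHIFTMAXR2_SHIFT 16 /* TP_SHIFT_CNT field from the hunk above */
#define RXTSHIFTMAXR2_MASK  0x000f0000U
#define RXTSHIFTMAXR2(x)     ((x) << RXTSHIFTMAXR2_SHIFT)
#define RXTSHIFTMAXR2_GET(x) (((x) & RXTSHIFTMAXR2_MASK) >> RXTSHIFTMAXR2_SHIFT)

int main(void)
{
        unsigned int reg = 0xdeadbeefU; /* pretend register snapshot */

        /* Read-modify-write just this field, leaving neighbours intact: */
        reg = (reg & ~RXTSHIFTMAXR2_MASK) | RXTSHIFTMAXR2(9U);
        printf("field=%u reg=0x%08x\n", RXTSHIFTMAXR2_GET(reg), reg);
        return 0;
}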
+ */ +#define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT) +#define FW_CMD_HELLO_RETRIES 3 + + enum fw_cmd_opcodes { FW_LDST_CMD = 0x01, FW_RESET_CMD = 0x03, @@ -304,7 +317,17 @@ struct fw_reset_cmd { __be32 op_to_write; __be32 retval_len16; __be32 val; - __be32 r3; + __be32 halt_pkd; +}; + +#define FW_RESET_CMD_HALT_SHIFT 31 +#define FW_RESET_CMD_HALT_MASK 0x1 +#define FW_RESET_CMD_HALT(x) ((x) << FW_RESET_CMD_HALT_SHIFT) +#define FW_RESET_CMD_HALT_GET(x) \ + (((x) >> FW_RESET_CMD_HALT_SHIFT) & FW_RESET_CMD_HALT_MASK) + +enum fw_hellow_cmd { + fw_hello_cmd_stage_os = 0x0 }; struct fw_hello_cmd { @@ -315,8 +338,14 @@ struct fw_hello_cmd { #define FW_HELLO_CMD_INIT (1U << 30) #define FW_HELLO_CMD_MASTERDIS(x) ((x) << 29) #define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28) -#define FW_HELLO_CMD_MBMASTER(x) ((x) << 24) +#define FW_HELLO_CMD_MBMASTER_MASK 0xfU +#define FW_HELLO_CMD_MBMASTER_SHIFT 24 +#define FW_HELLO_CMD_MBMASTER(x) ((x) << FW_HELLO_CMD_MBMASTER_SHIFT) +#define FW_HELLO_CMD_MBMASTER_GET(x) \ + (((x) >> FW_HELLO_CMD_MBMASTER_SHIFT) & FW_HELLO_CMD_MBMASTER_MASK) #define FW_HELLO_CMD_MBASYNCNOT(x) ((x) << 20) +#define FW_HELLO_CMD_STAGE(x) ((x) << 17) +#define FW_HELLO_CMD_CLEARINIT (1U << 16) __be32 fwrev; }; @@ -401,6 +430,14 @@ enum fw_caps_config_fcoe { FW_CAPS_CONFIG_FCOE_TARGET = 0x00000002, }; +enum fw_memtype_cf { + FW_MEMTYPE_CF_EDC0 = 0x0, + FW_MEMTYPE_CF_EDC1 = 0x1, + FW_MEMTYPE_CF_EXTMEM = 0x2, + FW_MEMTYPE_CF_FLASH = 0x4, + FW_MEMTYPE_CF_INTERNAL = 0x5, +}; + struct fw_caps_config_cmd { __be32 op_to_write; __be32 retval_len16; @@ -416,10 +453,15 @@ struct fw_caps_config_cmd { __be16 r4; __be16 iscsicaps; __be16 fcoecaps; - __be32 r5; - __be64 r6; + __be32 cfcsum; + __be32 finiver; + __be32 finicsum; }; +#define FW_CAPS_CONFIG_CMD_CFVALID (1U << 27) +#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) ((x) << 24) +#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16) + /* * params command mnemonics */ @@ -451,6 +493,7 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A, FW_PARAMS_PARAM_DEV_FWREV = 0x0B, FW_PARAMS_PARAM_DEV_TPREV = 0x0C, + FW_PARAMS_PARAM_DEV_CF = 0x0D, }; /* @@ -492,6 +535,8 @@ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A, FW_PARAMS_PARAM_PFVF_EQ_START = 0x2B, FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C, + FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D, + FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E }; /* @@ -507,8 +552,16 @@ enum fw_params_param_dmaq { #define FW_PARAMS_MNEM(x) ((x) << 24) #define FW_PARAMS_PARAM_X(x) ((x) << 16) -#define FW_PARAMS_PARAM_Y(x) ((x) << 8) -#define FW_PARAMS_PARAM_Z(x) ((x) << 0) +#define FW_PARAMS_PARAM_Y_SHIFT 8 +#define FW_PARAMS_PARAM_Y_MASK 0xffU +#define FW_PARAMS_PARAM_Y(x) ((x) << FW_PARAMS_PARAM_Y_SHIFT) +#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\ + FW_PARAMS_PARAM_Y_MASK) +#define FW_PARAMS_PARAM_Z_SHIFT 0 +#define FW_PARAMS_PARAM_Z_MASK 0xffu +#define FW_PARAMS_PARAM_Z(x) ((x) << FW_PARAMS_PARAM_Z_SHIFT) +#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\ + FW_PARAMS_PARAM_Z_MASK) #define FW_PARAMS_PARAM_XYZ(x) ((x) << 0) #define FW_PARAMS_PARAM_YZ(x) ((x) << 0) @@ -1599,6 +1652,16 @@ struct fw_debug_cmd { } u; }; +#define FW_PCIE_FW_ERR (1U << 31) +#define FW_PCIE_FW_INIT (1U << 30) +#define FW_PCIE_FW_HALT (1U << 29) +#define FW_PCIE_FW_MASTER_VLD (1U << 15) +#define FW_PCIE_FW_MASTER_MASK 0x7 +#define FW_PCIE_FW_MASTER_SHIFT 12 +#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT) +#define FW_PCIE_FW_MASTER_GET(x) (((x) >> 
FW_PCIE_FW_MASTER_SHIFT) & \ + FW_PCIE_FW_MASTER_MASK) + struct fw_hdr { u8 ver; u8 reserved1; @@ -1613,7 +1676,11 @@ struct fw_hdr { u8 intfver_iscsi; u8 intfver_fcoe; u8 reserved2; - __be32 reserved3[27]; + __u32 reserved3; + __u32 reserved4; + __u32 reserved5; + __be32 flags; + __be32 reserved6[23]; }; #define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff) @@ -1621,18 +1688,8 @@ struct fw_hdr { #define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff) #define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff) -#define S_FW_CMD_OP 24 -#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP) - -#define S_FW_CMD_REQUEST 23 -#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST) -#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U) - -#define S_FW_CMD_WRITE 21 -#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE) -#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U) - -#define S_FW_LDST_CMD_ADDRSPACE 0 -#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE) +enum fw_hdr_flags { + FW_HDR_FLAGS_RESET_HALT = 0x00000001, +}; #endif /* _T4FW_INTERFACE_H_ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 8877fbfefb63..f16745f4b36b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2421,7 +2421,7 @@ int t4vf_sge_init(struct adapter *adapter) fl0, fl1); return -EINVAL; } - if ((sge_params->sge_control & RXPKTCPLMODE) == 0) { + if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) { dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); return -EINVAL; } @@ -2431,7 +2431,8 @@ int t4vf_sge_init(struct adapter *adapter) */ if (fl1) FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; - STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64); + STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) + ? 128 : 64); PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + SGE_INGPADBOUNDARY_SHIFT); diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 845b2020f291..138446957786 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); unsigned long flags; + u16 cfg; spin_lock_irqsave(&lp->lock, flags); if (dev->flags & IFF_PROMISC) @@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev) /* in promiscuous mode, we accept errored packets, * so we have to enable interrupts on them also */ - writereg(dev, PP_RxCFG, - (lp->curr_rx_cfg | - (lp->rx_mode == RX_ALL_ACCEPT) - ? 
(RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL) - : 0)); + cfg = lp->curr_rx_cfg; + if (lp->rx_mode == RX_ALL_ACCEPT) + cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL; + writereg(dev, PP_RxCFG, cfg); spin_unlock_irqrestore(&lp->lock, flags); } diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index d266c86a53f7..5b622993ff17 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -110,6 +110,7 @@ static inline char *nic_name(struct pci_dev *pdev) #define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */ #define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST) +#define MAX_VFS 30 /* Max VFs supported by BE3 FW */ #define FW_VER_LEN 32 struct be_dma_mem { diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 7fac97b4bb59..701b3e9a715b 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -120,7 +120,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter, if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { dev_warn(&adapter->pdev->dev, - "opcode %d-%d is not permitted\n", + "VF is not privileged to issue opcode %d-%d\n", opcode, subsystem); } else { extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & @@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter) int num = 0, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; - spin_lock_bh(&adapter->mcc_cq_lock); + spin_lock(&adapter->mcc_cq_lock); while ((compl = be_mcc_compl_get(adapter))) { if (compl->flags & CQE_FLAGS_ASYNC_MASK) { /* Interpret flags as an async trailer */ @@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter) if (num) be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); - spin_unlock_bh(&adapter->mcc_cq_lock); + spin_unlock(&adapter->mcc_cq_lock); return status; } @@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) if (be_error(adapter)) return -EIO; + local_bh_disable(); status = be_process_mcc(adapter); + local_bh_enable(); if (atomic_read(&mcc_obj->q.used) == 0) break; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index c60de89b6669..84379f4fe837 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -20,6 +20,7 @@ #include "be.h" #include "be_cmds.h" #include <asm/div64.h> +#include <linux/aer.h> MODULE_VERSION(DRV_VER); MODULE_DEVICE_TABLE(pci, be_dev_ids); @@ -1948,7 +1949,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter) if (adapter->num_rx_qs != MAX_RX_QS) dev_info(&adapter->pdev->dev, - "Created only %d receive queues", adapter->num_rx_qs); + "Created only %d receive queues\n", adapter->num_rx_qs); return 0; } @@ -2176,8 +2177,7 @@ static uint be_num_rss_want(struct be_adapter *adapter) { u32 num = 0; if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && - !sriov_want(adapter) && be_physfn(adapter) && - !be_is_mc(adapter)) { + !sriov_want(adapter) && be_physfn(adapter)) { num = (adapter->be3_native) ? 
BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; num = min_t(u32, num, (u32)netif_get_num_default_rss_queues()); } @@ -2646,8 +2646,8 @@ static int be_vf_setup(struct be_adapter *adapter) } for_all_vfs(adapter, vf_cfg, vf) { - status = be_cmd_link_status_query(adapter, NULL, &lnk_speed, - NULL, vf + 1); + lnk_speed = 1000; + status = be_cmd_set_qos(adapter, lnk_speed, vf + 1); if (status) goto err; vf_cfg->tx_rate = lnk_speed * 10; @@ -2724,6 +2724,8 @@ static int be_get_config(struct be_adapter *adapter) if (pos) { pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF, &dev_num_vfs); + if (!lancer_chip(adapter)) + dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS); adapter->dev_num_vfs = dev_num_vfs; } return 0; @@ -3437,6 +3439,7 @@ static void be_ctrl_cleanup(struct be_adapter *adapter) if (mem->va) dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va, mem->dma); + kfree(adapter->pmac_id); } static int be_ctrl_init(struct be_adapter *adapter) @@ -3473,6 +3476,12 @@ static int be_ctrl_init(struct be_adapter *adapter) } memset(rx_filter->va, 0, rx_filter->size); + /* primary mac needs 1 pmac entry */ + adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1, + sizeof(*adapter->pmac_id), GFP_KERNEL); + if (!adapter->pmac_id) + return -ENOMEM; + mutex_init(&adapter->mbox_lock); spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); @@ -3543,6 +3552,8 @@ static void __devexit be_remove(struct pci_dev *pdev) be_ctrl_cleanup(adapter); + pci_disable_pcie_error_reporting(pdev); + pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); pci_disable_device(pdev); @@ -3609,12 +3620,6 @@ static int be_get_initial_config(struct be_adapter *adapter) else adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT; - /* primary mac needs 1 pmac entry */ - adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1, - sizeof(u32), GFP_KERNEL); - if (!adapter->pmac_id) - return -ENOMEM; - status = be_cmd_get_cntl_attributes(adapter); if (status) return status; @@ -3763,7 +3768,9 @@ static void be_worker(struct work_struct *work) /* when interrupts are not yet enabled, just reap any pending * mcc completions */ if (!netif_running(adapter->netdev)) { + local_bh_disable(); be_process_mcc(adapter); + local_bh_enable(); goto reschedule; } @@ -3842,6 +3849,10 @@ static int __devinit be_probe(struct pci_dev *pdev, } } + status = pci_enable_pcie_error_reporting(pdev); + if (status) + dev_err(&pdev->dev, "Could not use PCIe error reporting\n"); + status = be_ctrl_init(adapter); if (status) goto free_netdev; @@ -4064,6 +4075,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev) if (status) return PCI_ERS_RESULT_DISCONNECT; + pci_cleanup_aer_uncorrect_error_status(pdev); return PCI_ERS_RESULT_RECOVERED; } diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 3574e1499dfc..feff51664dcf 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -62,6 +62,13 @@ config FSL_PQ_MDIO ---help--- This driver supports the MDIO bus used by the gianfar and UCC drivers. +config FSL_XGMAC_MDIO + tristate "Freescale XGMAC MDIO" + depends on FSL_SOC + select PHYLIB + ---help--- + This driver supports the MDIO bus on the Fman 10G Ethernet MACs. 
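[Editor's note on the benet hunks earlier in this section: be_probe() and
be_remove() now bracket the device with PCIe Advanced Error Reporting, and
the EEH reset path clears any latched uncorrectable-error status. Stripped
of the benet specifics, the pattern is sketched below; the foo_* names are
placeholders and the surrounding driver setup is elided.

	#include <linux/aer.h>
	#include <linux/pci.h>

	static int foo_probe(struct pci_dev *pdev)
	{
		int status;

		/* best effort: AER may be unsupported, so warn rather than fail */
		status = pci_enable_pcie_error_reporting(pdev);
		if (status)
			dev_err(&pdev->dev, "Could not use PCIe error reporting\n");

		return 0;
	}

	static void foo_remove(struct pci_dev *pdev)
	{
		pci_disable_pcie_error_reporting(pdev);
	}

	static pci_ers_result_t foo_eeh_reset(struct pci_dev *pdev)
	{
		/* clear latched uncorrectable-error status before resuming */
		pci_cleanup_aer_uncorrect_error_status(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}
]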
+ config UCC_GETH tristate "Freescale QE Gigabit Ethernet" depends on QUICC_ENGINE diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 1752488c9ee5..3d1839afff65 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -9,6 +9,7 @@ ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) endif obj-$(CONFIG_FS_ENET) += fs_enet/ obj-$(CONFIG_FSL_PQ_MDIO) += fsl_pq_mdio.o +obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o obj-$(CONFIG_GIANFAR) += gianfar_driver.o obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o gianfar_driver-objs := gianfar.o \ diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 0f2d1a710909..151453309401 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -174,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->phy_mask = ~0; new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) + if (!new_bus->irq) { + ret = -ENOMEM; goto out_unmap_regs; + } new_bus->parent = &ofdev->dev; dev_set_drvdata(&ofdev->dev, new_bus); diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 55bb867258e6..cdf702a59485 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -137,8 +137,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); fec->fecp = ioremap(res.start, resource_size(&res)); - if (!fec->fecp) + if (!fec->fecp) { + ret = -ENOMEM; goto out_fec; + } if (get_bus_freq) { clock = get_bus_freq(ofdev->dev.of_node); @@ -172,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->phy_mask = ~0; new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) + if (!new_bus->irq) { + ret = -ENOMEM; goto out_unmap_regs; + } new_bus->parent = &ofdev->dev; dev_set_drvdata(&ofdev->dev, new_bus); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 9527b28d70d1..c93a05654b46 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -19,54 +19,90 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> -#include <linux/unistd.h> #include <linux/slab.h> -#include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/skbuff.h> -#include <linux/spinlock.h> -#include <linux/mm.h> #include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/crc32.h> #include <linux/mii.h> -#include <linux/phy.h> -#include <linux/of.h> #include <linux/of_address.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> +#include <linux/of_device.h> #include <asm/io.h> -#include <asm/irq.h> -#include <asm/uaccess.h> -#include <asm/ucc.h> +#include <asm/ucc.h> /* for ucc_set_qe_mux_mii_mng() */ #include "gianfar.h" -#include "fsl_pq_mdio.h" + +#define MIIMIND_BUSY 0x00000001 +#define MIIMIND_NOTVALID 0x00000004 +#define MIIMCFG_INIT_VALUE 0x00000007 +#define MIIMCFG_RESET 0x80000000 + +#define MII_READ_COMMAND 0x00000001 + +struct fsl_pq_mii { + u32 miimcfg; /* MII management configuration reg */ + u32 miimcom; /* MII management 
command reg */ + u32 miimadd; /* MII management address reg */ + u32 miimcon; /* MII management control reg */ + u32 miimstat; /* MII management status reg */ + u32 miimind; /* MII management indication reg */ +}; + +struct fsl_pq_mdio { + u8 res1[16]; + u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/ + u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/ + u8 res2[4]; + u32 emapm; /* MDIO Event mapping register (for etsec2)*/ + u8 res3[1280]; + struct fsl_pq_mii mii; + u8 res4[28]; + u32 utbipar; /* TBI phy address reg (only on UCC) */ + u8 res5[2728]; +} __packed; /* Number of microseconds to wait for an MII register to respond */ #define MII_TIMEOUT 1000 struct fsl_pq_mdio_priv { void __iomem *map; - struct fsl_pq_mdio __iomem *regs; + struct fsl_pq_mii __iomem *regs; + int irqs[PHY_MAX_ADDR]; +}; + +/* + * Per-device-type data. Each type of device tree node that we support gets + * one of these. + * + * @mii_offset: the offset of the MII registers within the memory map of the + * node. Some nodes define only the MII registers, and some define the whole + * MAC (which includes the MII registers). + * + * @get_tbipa: determines the address of the TBIPA register + * + * @ucc_configure: a special function for extra QE configuration + */ +struct fsl_pq_mdio_data { + unsigned int mii_offset; /* offset of the MII registers */ + uint32_t __iomem * (*get_tbipa)(void __iomem *p); + void (*ucc_configure)(phys_addr_t start, phys_addr_t end); }; /* - * Write value to the PHY at mii_id at register regnum, - * on the bus attached to the local interface, which may be different from the - * generic mdio bus (tied to a single interface), waiting until the write is - * done before returning. This is helpful in programming interfaces like - * the TBI which control interfaces like onchip SERDES and are always tied to - * the local mdio pins, which may not be the same as system mdio bus, used for + * Write value to the PHY at mii_id at register regnum, on the bus attached + * to the local interface, which may be different from the generic mdio bus + * (tied to a single interface), waiting until the write is done before + * returning. This is helpful in programming interfaces like the TBI which + * control interfaces like onchip SERDES and are always tied to the local + * mdio pins, which may not be the same as system mdio bus, used for * controlling the external PHYs, for example. */ -int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, - int regnum, u16 value) +static int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, + u16 value) { + struct fsl_pq_mdio_priv *priv = bus->priv; + struct fsl_pq_mii __iomem *regs = priv->regs; u32 status; /* Set the PHY address and the register address we want to write */ @@ -83,20 +119,21 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, } /* - * Read the bus for PHY at addr mii_id, register regnum, and - * return the value. Clears miimcom first. All PHY operation - * done on the bus attached to the local interface, - * which may be different from the generic mdio bus - * This is helpful in programming interfaces like - * the TBI which, in turn, control interfaces like onchip SERDES - * and are always tied to the local mdio pins, which may not be the + * Read the bus for PHY at addr mii_id, register regnum, and return the value. + * Clears miimcom first. + * + * All PHY operation done on the bus attached to the local interface, which + * may be different from the generic mdio bus. 
This is helpful in programming
+ * interfaces like the TBI which, in turn, control interfaces like on-chip
+ * SERDES and are always tied to the local mdio pins, which may not be the
 * same as system mdio bus, used for controlling the external PHYs, for example. */
-static int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
- u16 value;
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+ struct fsl_pq_mii __iomem *regs = priv->regs;
 u32 status;
+ u16 value;

 /* Set the PHY address and the register address we want to read */
 out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -115,44 +152,15 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
 /* Grab the value of the register from miimstat */
 value = in_be32(&regs->miimstat);
+ dev_dbg(&bus->dev, "read %04x from address %x/%x\n", value, mii_id, regnum);
 return value;
}

-static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
-{
- struct fsl_pq_mdio_priv *priv = bus->priv;
-
- return priv->regs;
-}
-
-/*
- * Write value to the PHY at mii_id at register regnum,
- * on the bus, waiting until the write is done before returning.
- */
-int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
-{
- struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
- /* Write to the local MII regs */
- return fsl_pq_local_mdio_write(regs, mii_id, regnum, value);
-}
-
-/*
- * Read the bus for PHY at addr mii_id, register regnum, and
- * return the value. Clears miimcom first.
- */
-int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
- struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
-
- /* Read the local MII regs */
- return fsl_pq_local_mdio_read(regs, mii_id, regnum);
-}
-
 /* Reset the MIIM registers, and wait for the bus to free */
 static int fsl_pq_mdio_reset(struct mii_bus *bus)
 {
- struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+ struct fsl_pq_mii __iomem *regs = priv->regs;
 u32 status;

 mutex_lock(&bus->mdio_lock);
@@ -170,234 +178,291 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
 mutex_unlock(&bus->mdio_lock);

 if (!status) {
- printk(KERN_ERR "%s: The MII Bus is stuck!\n",
- bus->name);
+ dev_err(&bus->dev, "timeout waiting for MII bus\n");
 return -EBUSY;
 }

 return 0;
}

-void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
+/*
+ * This is mildly evil, but so is our hardware for doing this.
+ * Also, we have to cast back to struct gfar because of
+ * definition weirdness done in gianfar.h.
+ */
+static uint32_t __iomem *get_gfar_tbipa(void __iomem *p)
{
- const u32 *addr;
- u64 taddr = OF_BAD_ADDR;
-
- addr = of_get_address(np, 0, NULL, NULL);
- if (addr)
- taddr = of_translate_address(np, addr);
+ struct gfar __iomem *enet_regs = p;

- snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
- (unsigned long long)taddr);
+ return &enet_regs->tbipa;
}
-EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);

+/*
+ * Return the TBIPAR address for an eTSEC2 node
+ */
+static uint32_t __iomem *get_etsec_tbipa(void __iomem *p)
+{
+ return p;
+}
+#endif

-static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+/*
+ * Return the TBIPAR address for a QE MDIO node
+ */
+static uint32_t __iomem *get_ucc_tbipa(void __iomem *p)
{
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
- struct gfar __iomem *enet_regs;
+ struct fsl_pq_mdio __iomem *mdio = p;

- /*
- * This is mildly evil, but so is our hardware for doing this.
- * Also, we have to cast back to struct gfar because of
- * definition weirdness done in gianfar.h.
- */
- if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
- of_device_is_compatible(np, "fsl,gianfar-tbi") ||
- of_device_is_compatible(np, "gianfar")) {
- enet_regs = (struct gfar __iomem *)regs;
- return &enet_regs->tbipa;
- } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
- of_device_is_compatible(np, "fsl,etsec2-tbi")) {
- return of_iomap(np, 1);
- }
-#endif
- return NULL;
+ return &mdio->utbipar;
}
-
-static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
+/*
+ * Find the UCC node that controls the given MDIO node
+ *
+ * For some reason, the QE MDIO nodes are not children of the UCC devices
+ * that control them. Therefore, we need to scan all UCC nodes looking for
+ * the one that encompasses the given MDIO node. We do this by comparing
+ * physical addresses. The 'start' and 'end' addresses of the MDIO node are
+ * passed, and the correct UCC node will cover the entire address range.
+ *
+ * This assumes that there is only one QE MDIO node in the entire device tree.
+ */
+static void ucc_configure(phys_addr_t start, phys_addr_t end)
{
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
+ static bool found_mii_master;
 struct device_node *np = NULL;
- int err = 0;

- for_each_compatible_node(np, NULL, "ucc_geth") {
- struct resource tempres;
+ if (found_mii_master)
+ return;

- err = of_address_to_resource(np, 0, &tempres);
- if (err)
+ for_each_compatible_node(np, NULL, "ucc_geth") {
+ struct resource res;
+ const uint32_t *iprop;
+ uint32_t id;
+ int ret;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret < 0) {
+ pr_debug("fsl-pq-mdio: no address range in node %s\n",
+ np->full_name);
 continue;
+ }

 /* if our mdio regs fall within this UCC regs range */
- if ((start >= tempres.start) && (end <= tempres.end)) {
- /* Find the id of the UCC */
- const u32 *id;
-
- id = of_get_property(np, "cell-index", NULL);
- if (!id) {
- id = of_get_property(np, "device-id", NULL);
- if (!id)
- continue;
+ if ((start < res.start) || (end > res.end))
+ continue;
+
+ iprop = of_get_property(np, "cell-index", NULL);
+ if (!iprop) {
+ iprop = of_get_property(np, "device-id", NULL);
+ if (!iprop) {
+ pr_debug("fsl-pq-mdio: no UCC ID in node %s\n",
+ np->full_name);
+ continue;
 }
 }

- *ucc_id = *id;
+ id = be32_to_cpup(iprop);

- return 0;
+ /*
+ * cell-index and device-id for QE nodes are
+ * numbered from 1, not 0.
+ */ + if (ucc_set_qe_mux_mii_mng(id - 1) < 0) { + pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n", + np->full_name); + continue; } + + pr_debug("fsl-pq-mdio: setting node UCC%u to MII master\n", id); + found_mii_master = true; } +} - if (err) - return err; - else - return -EINVAL; -#else - return -ENODEV; #endif -} -static int fsl_pq_mdio_probe(struct platform_device *ofdev) +static struct of_device_id fsl_pq_mdio_match[] = { +#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) + { + .compatible = "fsl,gianfar-tbi", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .compatible = "fsl,gianfar-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .type = "mdio", + .compatible = "gianfar", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_gfar_tbipa, + }, + }, + { + .compatible = "fsl,etsec2-tbi", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_etsec_tbipa, + }, + }, + { + .compatible = "fsl,etsec2-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = offsetof(struct fsl_pq_mdio, mii), + .get_tbipa = get_etsec_tbipa, + }, + }, +#endif +#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE) + { + .compatible = "fsl,ucc-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_ucc_tbipa, + .ucc_configure = ucc_configure, + }, + }, + { + /* Legacy UCC MDIO node */ + .type = "mdio", + .compatible = "ucc_geth_phy", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + .get_tbipa = get_ucc_tbipa, + .ucc_configure = ucc_configure, + }, + }, +#endif + /* No Kconfig option for Fman support yet */ + { + .compatible = "fsl,fman-mdio", + .data = &(struct fsl_pq_mdio_data) { + .mii_offset = 0, + /* Fman TBI operations are handled elsewhere */ + }, + }, + + {}, +}; +MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); + +static int fsl_pq_mdio_probe(struct platform_device *pdev) { - struct device_node *np = ofdev->dev.of_node; + const struct of_device_id *id = + of_match_device(fsl_pq_mdio_match, &pdev->dev); + const struct fsl_pq_mdio_data *data = id->data; + struct device_node *np = pdev->dev.of_node; + struct resource res; struct device_node *tbi; struct fsl_pq_mdio_priv *priv; - struct fsl_pq_mdio __iomem *regs = NULL; - void __iomem *map; - u32 __iomem *tbipa; struct mii_bus *new_bus; - int tbiaddr = -1; - const u32 *addrp; - u64 addr = 0, size = 0; int err; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; + dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); - new_bus = mdiobus_alloc(); - if (!new_bus) { - err = -ENOMEM; - goto err_free_priv; - } + new_bus = mdiobus_alloc_size(sizeof(*priv)); + if (!new_bus) + return -ENOMEM; + priv = new_bus->priv; new_bus->name = "Freescale PowerQUICC MII Bus", - new_bus->read = &fsl_pq_mdio_read, - new_bus->write = &fsl_pq_mdio_write, - new_bus->reset = &fsl_pq_mdio_reset, - new_bus->priv = priv; - fsl_pq_mdio_bus_name(new_bus->id, np); - - addrp = of_get_address(np, 0, &size, NULL); - if (!addrp) { - err = -EINVAL; - goto err_free_bus; + new_bus->read = &fsl_pq_mdio_read; + new_bus->write = &fsl_pq_mdio_write; + new_bus->reset = &fsl_pq_mdio_reset; + new_bus->irq = priv->irqs; + + err = of_address_to_resource(np, 0, &res); + if (err < 0) { + dev_err(&pdev->dev, "could not obtain address information\n"); + goto error; } - /* Set the PHY 
base address */ - addr = of_translate_address(np, addrp); - if (addr == OF_BAD_ADDR) { - err = -EINVAL; - goto err_free_bus; - } + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name, + (unsigned long long)res.start); - map = ioremap(addr, size); - if (!map) { + priv->map = of_iomap(np, 0); + if (!priv->map) { err = -ENOMEM; - goto err_free_bus; + goto error; } - priv->map = map; - - if (of_device_is_compatible(np, "fsl,gianfar-mdio") || - of_device_is_compatible(np, "fsl,gianfar-tbi") || - of_device_is_compatible(np, "fsl,ucc-mdio") || - of_device_is_compatible(np, "ucc_geth_phy")) - map -= offsetof(struct fsl_pq_mdio, miimcfg); - regs = map; - priv->regs = regs; - - new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); - if (NULL == new_bus->irq) { - err = -ENOMEM; - goto err_unmap_regs; + /* + * Some device tree nodes represent only the MII registers, and + * others represent the MAC and MII registers. The 'mii_offset' field + * contains the offset of the MII registers inside the mapped register + * space. + */ + if (data->mii_offset > resource_size(&res)) { + dev_err(&pdev->dev, "invalid register map\n"); + err = -EINVAL; + goto error; } + priv->regs = priv->map + data->mii_offset; - new_bus->parent = &ofdev->dev; - dev_set_drvdata(&ofdev->dev, new_bus); - - if (of_device_is_compatible(np, "fsl,gianfar-mdio") || - of_device_is_compatible(np, "fsl,gianfar-tbi") || - of_device_is_compatible(np, "fsl,etsec2-mdio") || - of_device_is_compatible(np, "fsl,etsec2-tbi") || - of_device_is_compatible(np, "gianfar")) { - tbipa = get_gfar_tbipa(regs, np); - if (!tbipa) { - err = -EINVAL; - goto err_free_irqs; - } - } else if (of_device_is_compatible(np, "fsl,ucc-mdio") || - of_device_is_compatible(np, "ucc_geth_phy")) { - u32 id; - static u32 mii_mng_master; - - tbipa = ®s->utbipar; - - if ((err = get_ucc_id_for_range(addr, addr + size, &id))) - goto err_free_irqs; + new_bus->parent = &pdev->dev; + dev_set_drvdata(&pdev->dev, new_bus); - if (!mii_mng_master) { - mii_mng_master = id; - ucc_set_qe_mux_mii_mng(id - 1); + if (data->get_tbipa) { + for_each_child_of_node(np, tbi) { + if (strcmp(tbi->type, "tbi-phy") == 0) { + dev_dbg(&pdev->dev, "found TBI PHY node %s\n", + strrchr(tbi->full_name, '/') + 1); + break; + } } - } else { - err = -ENODEV; - goto err_free_irqs; - } - for_each_child_of_node(np, tbi) { - if (!strncmp(tbi->type, "tbi-phy", 8)) - break; - } + if (tbi) { + const u32 *prop = of_get_property(tbi, "reg", NULL); + uint32_t __iomem *tbipa; - if (tbi) { - const u32 *prop = of_get_property(tbi, "reg", NULL); + if (!prop) { + dev_err(&pdev->dev, + "missing 'reg' property in node %s\n", + tbi->full_name); + err = -EBUSY; + goto error; + } - if (prop) - tbiaddr = *prop; + tbipa = data->get_tbipa(priv->map); - if (tbiaddr == -1) { - err = -EBUSY; - goto err_free_irqs; - } else { - out_be32(tbipa, tbiaddr); + out_be32(tbipa, be32_to_cpup(prop)); } } + if (data->ucc_configure) + data->ucc_configure(res.start, res.end); + err = of_mdiobus_register(new_bus, np); if (err) { - printk (KERN_ERR "%s: Cannot register as MDIO bus\n", - new_bus->name); - goto err_free_irqs; + dev_err(&pdev->dev, "cannot register %s as MDIO bus\n", + new_bus->name); + goto error; } return 0; -err_free_irqs: - kfree(new_bus->irq); -err_unmap_regs: - iounmap(priv->map); -err_free_bus: +error: + if (priv->map) + iounmap(priv->map); + kfree(new_bus); -err_free_priv: - kfree(priv); + return err; } -static int fsl_pq_mdio_remove(struct platform_device *ofdev) +static int fsl_pq_mdio_remove(struct 
platform_device *pdev) { - struct device *device = &ofdev->dev; + struct device *device = &pdev->dev; struct mii_bus *bus = dev_get_drvdata(device); struct fsl_pq_mdio_priv *priv = bus->priv; @@ -406,41 +471,11 @@ static int fsl_pq_mdio_remove(struct platform_device *ofdev) dev_set_drvdata(device, NULL); iounmap(priv->map); - bus->priv = NULL; mdiobus_free(bus); - kfree(priv); return 0; } -static struct of_device_id fsl_pq_mdio_match[] = { - { - .type = "mdio", - .compatible = "ucc_geth_phy", - }, - { - .type = "mdio", - .compatible = "gianfar", - }, - { - .compatible = "fsl,ucc-mdio", - }, - { - .compatible = "fsl,gianfar-tbi", - }, - { - .compatible = "fsl,gianfar-mdio", - }, - { - .compatible = "fsl,etsec2-tbi", - }, - { - .compatible = "fsl,etsec2-mdio", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); - static struct platform_driver fsl_pq_mdio_driver = { .driver = { .name = "fsl-pq_mdio", diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.h b/drivers/net/ethernet/freescale/fsl_pq_mdio.h deleted file mode 100644 index bd17a2a0139b..000000000000 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Freescale PowerQUICC MDIO Driver -- MII Management Bus Implementation - * Driver for the MDIO bus controller on Freescale PowerQUICC processors - * - * Author: Andy Fleming - * Modifier: Sandeep Gopalpet - * - * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - */ -#ifndef __FSL_PQ_MDIO_H -#define __FSL_PQ_MDIO_H - -#define MIIMIND_BUSY 0x00000001 -#define MIIMIND_NOTVALID 0x00000004 -#define MIIMCFG_INIT_VALUE 0x00000007 -#define MIIMCFG_RESET 0x80000000 - -#define MII_READ_COMMAND 0x00000001 - -struct fsl_pq_mdio { - u8 res1[16]; - u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/ - u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/ - u8 res2[4]; - u32 emapm; /* MDIO Event mapping register (for etsec2)*/ - u8 res3[1280]; - u32 miimcfg; /* MII management configuration reg */ - u32 miimcom; /* MII management command reg */ - u32 miimadd; /* MII management address reg */ - u32 miimcon; /* MII management control reg */ - u32 miimstat; /* MII management status reg */ - u32 miimind; /* MII management indication reg */ - u8 reserved[28]; /* Space holder */ - u32 utbipar; /* TBI phy address reg (only on UCC) */ - u8 res4[2728]; -} __packed; - -int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); -int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); -int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, - int regnum, u16 value); -int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs, int mii_id, int regnum); -int __init fsl_pq_mdio_init(void); -void fsl_pq_mdio_exit(void); -void fsl_pq_mdio_bus_name(char *name, struct device_node *np); -#endif /* FSL_PQ_MDIO_H */ diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 4605f7246687..a1b52ec3b930 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -100,7 +100,6 @@ #include <linux/of_net.h> #include "gianfar.h" -#include "fsl_pq_mdio.h" #define TX_TIMEOUT (1*HZ) @@ -395,7 +394,13 @@ static void gfar_init_mac(struct net_device *ndev) if 
(ndev->features & NETIF_F_IP_CSUM)
 tctrl |= TCTRL_INIT_CSUM;

- tctrl |= TCTRL_TXSCHED_PRIO;
+ if (priv->prio_sched_en)
+ tctrl |= TCTRL_TXSCHED_PRIO;
+ else {
+ tctrl |= TCTRL_TXSCHED_WRRS;
+ gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
+ gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
+ }

 gfar_write(&regs->tctrl, tctrl);
@@ -1041,7 +1046,7 @@ static int gfar_probe(struct platform_device *ofdev)
 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
 dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->features |= NETIF_F_HW_VLAN_RX;
 }

 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
@@ -1161,6 +1166,9 @@ static int gfar_probe(struct platform_device *ofdev)
 priv->rx_filer_enable = 1;
 /* Enable most messages by default */
 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
+ /* use priority h/w tx queue scheduling for single queue devices */
+ if (priv->num_tx_queues == 1)
+ priv->prio_sched_en = 1;

 /* Carrier starts down, phylib will bring it up */
 netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index 2136c7ff5e6d..4141ef2ddafc 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -301,8 +301,16 @@ extern const char gfar_driver_version[];
 #define TCTRL_TFCPAUSE 0x00000008
 #define TCTRL_TXSCHED_MASK 0x00000006
 #define TCTRL_TXSCHED_INIT 0x00000000
+/* priority scheduling */
 #define TCTRL_TXSCHED_PRIO 0x00000002
+/* weighted round-robin scheduling (WRRS) */
 #define TCTRL_TXSCHED_WRRS 0x00000004
+/* default WRRS weight and policy setting,
+ * tailored to the tr03wt and tr47wt registers:
+ * equal weight for all Tx Qs, measured in 64-byte units
+ */
+#define DEFAULT_WRRS_WEIGHT 0x18181818
+
 #define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)

 #define IEVENT_INIT_CLEAR 0xffffffff
@@ -1098,7 +1106,8 @@ struct gfar_private {
 extended_hash:1,
 bd_stash_en:1,
 rx_filer_enable:1,
- wol_en:1; /* Wake-on-LAN enabled */
+ wol_en:1, /* Wake-on-LAN enabled */
+ prio_sched_en:1; /* Enable priority-based Tx scheduling in HW */
 unsigned short padding;

 /* PHY stuff */
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index c08e5d40fecb..18762a3ccce4 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -510,7 +510,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
 spin_unlock_irqrestore(&etsects->lock, flags);

- etsects->clock = ptp_clock_register(&etsects->caps);
+ etsects->clock = ptp_clock_register(&etsects->caps, &dev->dev);
 if (IS_ERR(etsects->clock)) {
 err = PTR_ERR(etsects->clock);
 goto no_clock;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 21c6574c5f15..164288439220 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -42,7 +42,6 @@ #include <asm/machdep.h> #include "ucc_geth.h" -#include "fsl_pq_mdio.h" #undef DEBUG
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c new file mode 100644 index 000000000000..1afb5ea2a984 --- /dev/null +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -0,0 +1,274 @@
+/*
+ * QorIQ 10G MDIO Controller
+ *
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Authors: Andy Fleming <afleming@freescale.com>
+ * Timur Tabi <timur@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/mdio.h>
+#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+
+/* Number of microseconds to wait for a register to respond */
+#define TIMEOUT 1000
+
+struct tgec_mdio_controller {
+ __be32 reserved[12];
+ __be32 mdio_stat; /* MDIO configuration and status */
+ __be32 mdio_ctl; /* MDIO control */
+ __be32 mdio_data; /* MDIO data */
+ __be32 mdio_addr; /* MDIO address */
+} __packed;
+
+#define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8)
+#define MDIO_STAT_BSY (1 << 0)
+#define MDIO_STAT_RD_ER (1 << 1)
+#define MDIO_CTL_DEV_ADDR(x) (x & 0x1f)
+#define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5)
+#define MDIO_CTL_PRE_DIS (1 << 10)
+#define MDIO_CTL_SCAN_EN (1 << 11)
+#define MDIO_CTL_POST_INC (1 << 14)
+#define MDIO_CTL_READ (1 << 15)
+
+#define MDIO_DATA(x) (x & 0xffff)
+#define MDIO_DATA_BSY (1 << 31)
+
+/*
+ * Wait until the MDIO bus is free
+ */
+static int xgmac_wait_until_free(struct device *dev,
+ struct tgec_mdio_controller __iomem *regs)
+{
+ uint32_t status;
+
+ /* Wait until the bus is free */
+ status = spin_event_timeout(
+ !((in_be32(&regs->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0);
+ if (!status) {
+ dev_err(dev, "timeout waiting for bus to be free\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Wait until the MDIO read or write operation is complete
+ */
+static int xgmac_wait_until_done(struct device *dev,
+ struct tgec_mdio_controller __iomem *regs)
+{
+ uint32_t status;
+
+ /* Wait until the MDIO write is complete */
+ status = spin_event_timeout(
+ !((in_be32(&regs->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0);
+ if (!status) {
+ dev_err(dev, "timeout waiting for operation to complete\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Write value to the PHY for this device to the register at regnum, waiting
+ * until the write is done before it returns. All PHY configuration has to be
+ * done through the TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+{
+ struct tgec_mdio_controller __iomem *regs = bus->priv;
+ uint16_t dev_addr = regnum >> 16;
+ int ret;
+
+ /* Setup the MII Mgmt clock speed */
+ out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ /* Set the port and dev addr */
+ out_be32(&regs->mdio_ctl,
+ MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr));
+
+ /* Set the register address */
+ out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ /* Write the value to the register */
+ out_be32(&regs->mdio_data, MDIO_DATA(value));
+
+ ret = xgmac_wait_until_done(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first. All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
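+ *
+ * Editor's note (not part of the patch): xgmac_mdio_write() above and this
+ * read path both assume the clause-45 convention of carrying the MMD device
+ * address in the upper bits of regnum, hence the "regnum >> 16" split:
+ *
+ *	uint32_t regnum = (1 << 16) | 7;	// assumed: MMD 1, register 7
+ *	uint16_t dev_addr = regnum >> 16;	// device (MMD) address
+ *	uint32_t ctl = MDIO_CTL_PORT_ADDR(phy_id) |
+ *		       MDIO_CTL_DEV_ADDR(dev_addr);
+ *
+ * With phy_id = 4 this packs to ctl = (4 << 5) | 1 = 0x81, while the low
+ * sixteen bits of regnum go to mdio_addr, exactly as in the code here.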
+ */
+static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct tgec_mdio_controller __iomem *regs = bus->priv;
+ uint16_t dev_addr = regnum >> 16;
+ uint32_t mdio_ctl;
+ uint16_t value;
+ int ret;
+
+ /* Setup the MII Mgmt clock speed */
+ out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ /* Set the Port and Device Addrs */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ out_be32(&regs->mdio_ctl, mdio_ctl);
+
+ /* Set the register address */
+ out_be32(&regs->mdio_addr, regnum & 0xffff);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ /* Initiate the read */
+ out_be32(&regs->mdio_ctl, mdio_ctl | MDIO_CTL_READ);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs);
+ if (ret)
+ return ret;
+
+ /* Return all Fs if nothing was there */
+ if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
+ dev_err(&bus->dev, "MDIO read error\n");
+ return 0xffff;
+ }
+
+ value = in_be32(&regs->mdio_data) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", value);
+
+ return value;
+}
+
+/* Reset the MIIM registers, and wait for the bus to free */
+static int xgmac_mdio_reset(struct mii_bus *bus)
+{
+ struct tgec_mdio_controller __iomem *regs = bus->priv;
+ int ret;
+
+ mutex_lock(&bus->mdio_lock);
+
+ /* Setup the MII Mgmt clock speed */
+ out_be32(&regs->mdio_stat, MDIO_STAT_CLKDIV(100));
+
+ ret = xgmac_wait_until_free(&bus->dev, regs);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int __devinit xgmac_mdio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mii_bus *bus;
+ struct resource res;
+ int ret;
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(&pdev->dev, "could not obtain address\n");
+ return ret;
+ }
+
+ bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "Freescale XGMAC MDIO Bus";
+ bus->read = xgmac_mdio_read;
+ bus->write = xgmac_mdio_write;
+ bus->reset = xgmac_mdio_reset;
+ bus->irq = bus->priv;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
+
+ /* Set the PHY base address */
+ bus->priv = of_iomap(np, 0);
+ if (!bus->priv) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ ret = of_mdiobus_register(bus, np);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register MDIO bus\n");
+ goto err_registration;
+ }
+
+ dev_set_drvdata(&pdev->dev, bus);
+
+ return 0;
+
+err_registration:
+ iounmap(bus->priv);
+
+err_ioremap:
+ mdiobus_free(bus);
+
+ return ret;
+}
+
+static int __devexit xgmac_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus = dev_get_drvdata(&pdev->dev);
+
+ mdiobus_unregister(bus);
+ iounmap(bus->priv);
+ mdiobus_free(bus);
+
+ return 0;
+}
+
+static struct of_device_id xgmac_mdio_match[] = {
+ {
+ .compatible = "fsl,fman-xmdio",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
+
+static struct platform_driver xgmac_mdio_driver = {
+ .driver = {
+ .name = "fsl-fman_xmdio",
+ .of_match_table = xgmac_mdio_match,
+ },
+ .probe = xgmac_mdio_probe,
+ .remove = xgmac_mdio_remove,
+};
+
+module_platform_driver(xgmac_mdio_driver);
+
+MODULE_DESCRIPTION("Freescale QorIQ 10G MDIO Controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/i825xx/Kconfig b/drivers/net/ethernet/i825xx/Kconfig index fed5080a6b62..959faf7388e2 100644 --- a/drivers/net/ethernet/i825xx/Kconfig +++ b/drivers/net/ethernet/i825xx/Kconfig @@ -150,7 +150,7 @@ config
SUN3_82586 config ZNET tristate "Zenith Z-Note support (EXPERIMENTAL)" - depends on EXPERIMENTAL && ISA_DMA_API + depends on EXPERIMENTAL && ISA_DMA_API && X86 ---help--- The Zenith Z-Note notebook computer has a built-in network (Ethernet) card, and this is the Linux driver for it. Note that the diff --git a/drivers/net/ethernet/i825xx/znet.c b/drivers/net/ethernet/i825xx/znet.c index bd1f1ef91e19..c9479e081b8a 100644 --- a/drivers/net/ethernet/i825xx/znet.c +++ b/drivers/net/ethernet/i825xx/znet.c @@ -139,8 +139,11 @@ struct znet_private { /* Only one can be built-in;-> */ static struct net_device *znet_dev; +#define NETIDBLK_MAGIC "NETIDBLK" +#define NETIDBLK_MAGIC_SIZE 8 + struct netidblk { - char magic[8]; /* The magic number (string) "NETIDBLK" */ + char magic[NETIDBLK_MAGIC_SIZE]; /* The magic number (string) "NETIDBLK" */ unsigned char netid[8]; /* The physical station address */ char nettype, globalopt; char vendor[8]; /* The machine vendor and product name. */ @@ -373,14 +376,16 @@ static int __init znet_probe (void) struct znet_private *znet; struct net_device *dev; char *p; + char *plast = phys_to_virt(0x100000 - NETIDBLK_MAGIC_SIZE); int err = -ENOMEM; /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". */ - for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++) - if (*p == 'N' && strncmp(p, "NETIDBLK", 8) == 0) + for(p = (char *)phys_to_virt(0xf0000); p <= plast; p++) + if (*p == 'N' && + strncmp(p, NETIDBLK_MAGIC, NETIDBLK_MAGIC_SIZE) == 0) break; - if (p >= (char *)phys_to_virt(0x100000)) { + if (p > plast) { if (znet_debug > 1) printk(KERN_INFO "No Z-Note ethernet adaptor found.\n"); return -ENODEV; @@ -860,14 +865,14 @@ static void hardware_init(struct net_device *dev) disable_dma(znet->rx_dma); /* reset by an interrupting task. */ clear_dma_ff(znet->rx_dma); set_dma_mode(znet->rx_dma, DMA_RX_MODE); - set_dma_addr(znet->rx_dma, (unsigned int) znet->rx_start); + set_dma_addr(znet->rx_dma, isa_virt_to_bus(znet->rx_start)); set_dma_count(znet->rx_dma, RX_BUF_SIZE); enable_dma(znet->rx_dma); /* Now set up the Tx channel. 
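 * Editor's aside (not part of the patch): the set_dma_addr() fixes above
 * matter because the ISA DMA controller latches bus (physical) addresses,
 * while znet->rx_start and znet->tx_start are kernel virtual pointers;
 * casting them to unsigned int only works where the two happen to
 * coincide. The corrected shape, with generic names standing in for the
 * driver's channel and buffer:
 *
 *	flags = claim_dma_lock();
 *	disable_dma(chan);
 *	clear_dma_ff(chan);
 *	set_dma_mode(chan, DMA_MODE_READ);
 *	set_dma_addr(chan, isa_virt_to_bus(buf));	// translate first
 *	set_dma_count(chan, len);
 *	enable_dma(chan);
 *	release_dma_lock(flags);
 *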
*/ disable_dma(znet->tx_dma); clear_dma_ff(znet->tx_dma); set_dma_mode(znet->tx_dma, DMA_TX_MODE); - set_dma_addr(znet->tx_dma, (unsigned int) znet->tx_start); + set_dma_addr(znet->tx_dma, isa_virt_to_bus(znet->tx_start)); set_dma_count(znet->tx_dma, znet->tx_buf_len<<1); enable_dma(znet->tx_dma); release_dma_lock(flags); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 9010cea68bc3..b68d28a130e6 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -472,14 +472,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) } if (adapter->rx_queue.queue_addr != NULL) { - if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { - dma_unmap_single(dev, - adapter->rx_queue.queue_dma, - adapter->rx_queue.queue_len, - DMA_BIDIRECTIONAL); - adapter->rx_queue.queue_dma = DMA_ERROR_CODE; - } - kfree(adapter->rx_queue.queue_addr); + dma_free_coherent(dev, adapter->rx_queue.queue_len, + adapter->rx_queue.queue_addr, + adapter->rx_queue.queue_dma); adapter->rx_queue.queue_addr = NULL; } @@ -556,10 +551,13 @@ static int ibmveth_open(struct net_device *netdev) goto err_out; } + dev = &adapter->vdev->dev; + adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries; - adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, - GFP_KERNEL); + adapter->rx_queue.queue_addr = + dma_alloc_coherent(dev, adapter->rx_queue.queue_len, + &adapter->rx_queue.queue_dma, GFP_KERNEL); if (!adapter->rx_queue.queue_addr) { netdev_err(netdev, "unable to allocate rx queue pages\n"); @@ -567,19 +565,13 @@ static int ibmveth_open(struct net_device *netdev) goto err_out; } - dev = &adapter->vdev->dev; - adapter->buffer_list_dma = dma_map_single(dev, adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); adapter->filter_list_dma = dma_map_single(dev, adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); - adapter->rx_queue.queue_dma = dma_map_single(dev, - adapter->rx_queue.queue_addr, - adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || - (dma_mapping_error(dev, adapter->filter_list_dma)) || - (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { + (dma_mapping_error(dev, adapter->filter_list_dma))) { netdev_err(netdev, "unable to map filter or buffer list " "pages\n"); rc = -ENOMEM; diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 736a7d987db5..9089d00f1421 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -174,6 +174,20 @@ static int e1000_get_settings(struct net_device *netdev, ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : + ETH_TP_MDI); + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->mdix; return 0; } @@ -183,6 +197,22 @@ static int e1000_set_settings(struct net_device *netdev, struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + /* + * MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) msleep(1); @@ -199,12 +229,21 @@ static int e1000_set_settings(struct net_device *netdev, ecmd->advertising = hw->autoneg_advertised; } else { u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { clear_bit(__E1000_RESETTING, &adapter->flags); return -EINVAL; } } + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = ecmd->eth_tp_mdix_ctrl; + } + /* reset the link */ if (netif_running(adapter->netdev)) { diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 3bfbb8df8989..3a8368e42ede 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -2014,6 +2014,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter, e1000_unmap_and_free_tx_resource(adapter, buffer_info); } + netdev_reset_queue(adapter->netdev); size = sizeof(struct e1000_buffer) * tx_ring->count; memset(tx_ring->buffer_info, 0, size); @@ -3262,6 +3263,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, nr_frags, mss); if (count) { + netdev_sent_queue(netdev, skb->len); skb_tx_timestamp(skb); e1000_tx_queue(adapter, tx_ring, tx_flags, count); @@ -3849,6 +3851,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, unsigned int i, eop; unsigned int count = 0; unsigned int total_tx_bytes=0, total_tx_packets=0; + unsigned int bytes_compl = 0, pkts_compl = 0; i = tx_ring->next_to_clean; eop = tx_ring->buffer_info[i].next_to_watch; @@ -3866,6 +3869,11 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, if (cleaned) { total_tx_packets += buffer_info->segs; total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + } e1000_unmap_and_free_tx_resource(adapter, buffer_info); tx_desc->upper.data = 0; @@ -3879,6 +3887,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, tx_ring->next_to_clean = i; + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + #define TX_WAKE_THRESHOLD 32 if (unlikely(count && netif_carrier_ok(netdev) && E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { @@ -4939,6 +4949,10 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) default: goto err_inval; } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + return 0; err_inval: diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 0b3bade957fd..c98586408005 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -653,7 +653,7 @@ static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) **/ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) { - u16 data = er32(POEMB); + u32 data = er32(POEMB); if (active) data |= E1000_PHY_CTRL_D0A_LPLU; @@ -677,7 +677,7 @@ static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) **/ 
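[Editor's note: the POEMB change just above, and its twin immediately below,
fix a truncation bug. er32() yields the full 32-bit register, and parking it
in a u16 drops bits 16-31, so the read-modify-write sequence silently
cleared the upper half of the register on write-back. A self-contained
demonstration, with LPLU_BIT standing in for the real flag value:

	#include <stdint.h>
	#include <stdio.h>

	#define LPLU_BIT 0x2	/* stand-in for E1000_PHY_CTRL_D0A_LPLU */

	int main(void)
	{
		uint32_t poemb = 0xabcd0000;	/* upper half holds live bits */
		uint16_t bad = poemb;		/* u16: upper 16 bits lost */
		uint32_t good = poemb;		/* u32: value preserved */

		printf("u16 write-back: 0x%08x\n", (uint32_t)bad | LPLU_BIT);
		printf("u32 write-back: 0x%08x\n", good | LPLU_BIT);
		return 0;
	}

This prints 0x00000002 versus 0xabcd0002.]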
static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) { - u16 data = er32(POEMB); + u32 data = er32(POEMB); if (!active) { data &= ~E1000_PHY_CTRL_NOND0A_LPLU; @@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) **/ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) { - u32 ctrl, ctrl_ext, eecd; + u32 ctrl, ctrl_ext, eecd, tctl; s32 ret_val; /* @@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) ew32(IMC, 0xffffffff); ew32(RCTL, 0); - ew32(TCTL, E1000_TCTL_PSP); + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); e1e_flush(); usleep_range(10000, 20000); @@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) * auto-negotiation in the TXCW register and disable * forced link in the Device Control register in an * attempt to auto-negotiate with our link partner. - * If the partner code word is null, stop forcing - * and restart auto negotiation. */ - if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { + if (rxcw & E1000_RXCW_C) { /* Enable autoneg, and unforce link up */ ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index cd153326c3cf..cb3356c9af80 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -310,6 +310,7 @@ struct e1000_adapter { */ struct e1000_ring *tx_ring /* One per active queue */ ____cacheline_aligned_in_smp; + u32 tx_fifo_limit; struct napi_struct napi; diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 0349e2478df8..c11ac2756667 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -199,6 +199,11 @@ static int e1000_get_settings(struct net_device *netdev, else ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + return 0; } @@ -241,6 +246,10 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) default: goto err_inval; } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + return 0; err_inval: @@ -264,6 +273,22 @@ static int e1000_set_settings(struct net_device *netdev, return -EINVAL; } + /* + * MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) usleep_range(1000, 2000); @@ -282,20 +307,32 @@ static int e1000_set_settings(struct net_device *netdev, hw->fc.requested_mode = e1000_fc_default; } else { u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { clear_bit(__E1000_RESETTING, &adapter->state); return -EINVAL; } } + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + /* + * fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + } + /* reset the link */ if (netif_running(adapter->netdev)) { e1000e_down(adapter); e1000e_up(adapter); - } else { + } else e1000e_reset(adapter); - } clear_bit(__E1000_RESETTING, &adapter->state); return 0; @@ -1905,7 +1942,8 @@ static int e1000_set_coalesce(struct net_device *netdev, return -EINVAL; if (ec->rx_coalesce_usecs == 4) { - adapter->itr = adapter->itr_setting = 4; + adapter->itr_setting = 4; + adapter->itr = adapter->itr_setting; } else if (ec->rx_coalesce_usecs <= 3) { adapter->itr = 20000; adapter->itr_setting = ec->rx_coalesce_usecs; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 95b245310f17..121990cab144 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -56,7 +56,7 @@ #define DRV_EXTRAVERSION "-k" -#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION +#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION char e1000e_driver_name[] = "e1000e"; const char e1000e_driver_version[] = DRV_VERSION; @@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); } +static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, + struct e1000_buffer *bi) +{ + int i; + struct e1000_ps_page *ps_page; + + for (i = 0; i < adapter->rx_ps_pages; i++) { + ps_page = &bi->ps_pages[i]; + + if (ps_page->page) { + pr_info("packet dump for ps_page %d:\n", i); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, page_address(ps_page->page), + PAGE_SIZE, true); + } + } +} + /* * e1000e_dump - Print registers, Tx-ring and Rx-ring */ @@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter) (unsigned long long)buffer_info->time_stamp, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - buffer_info->length, true); + 16, 1, buffer_info->skb->data, + buffer_info->skb->len, true); } /* Print Rx Ring Summary */ @@ -381,10 +399,8 @@ rx_ring_summary: buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter)) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt(buffer_info->dma), - adapter->rx_ps_bsize0, true); + e1000e_dump_ps_pages(adapter, + buffer_info); } } break; @@ -444,12 +460,12 @@ rx_ring_summary: (unsigned long long)buffer_info->dma, 
buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter)) + if (netif_msg_pktdata(adapter) && + buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt - (buffer_info->dma), + buffer_info->skb->data, adapter->rx_buffer_len, true); } @@ -3430,7 +3446,7 @@ void e1000e_reset(struct e1000_adapter *adapter) /* * if short on Rx space, Rx wins and must trump Tx - * adjustment or use Early Receive if available + * adjustment */ if (pba < min_rx_space) pba = min_rx_space; @@ -3501,6 +3517,15 @@ void e1000e_reset(struct e1000_adapter *adapter) } /* + * Alignment of Tx data is on an arbitrary byte boundary with the + * maximum size per Tx descriptor limited only to the transmit + * allocation of the packet buffer minus 96 bytes with an upper + * limit of 24KB due to receive synchronization limitations. + */ + adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, + 24 << 10); + + /* * Disable Adaptive Interrupt Moderation if 2 full packets cannot * fit in receive buffer. */ @@ -3730,6 +3755,10 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data) e_dbg("icr is %08X\n", icr); if (icr & E1000_ICR_RXSEQ) { adapter->flags &= ~FLAG_MSI_TEST_FAILED; + /* + * Force memory writes to complete before acknowledging the + * interrupt is handled. + */ wmb(); } @@ -3771,6 +3800,10 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) goto msi_test_failed; } + /* + * Force memory writes to complete before enabling and firing an + * interrupt. + */ wmb(); e1000_irq_enable(adapter); @@ -3782,7 +3815,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) e1000_irq_disable(adapter); - rmb(); + rmb(); /* read flags after interrupt has been fired */ if (adapter->flags & FLAG_MSI_TEST_FAILED) { adapter->int_mode = E1000E_INT_MODE_LEGACY; @@ -4645,7 +4678,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) struct e1000_buffer *buffer_info; unsigned int i; u32 cmd_length = 0; - u16 ipcse = 0, tucse, mss; + u16 ipcse = 0, mss; u8 ipcss, ipcso, tucss, tucso, hdr_len; if (!skb_is_gso(skb)) @@ -4679,7 +4712,6 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; tucss = skb_transport_offset(skb); tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; - tucse = 0; cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); @@ -4693,7 +4725,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); context_desc->upper_setup.tcp_fields.tucss = tucss; context_desc->upper_setup.tcp_fields.tucso = tucso; - context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->upper_setup.tcp_fields.tucse = 0; context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; context_desc->cmd_and_length = cpu_to_le32(cmd_length); @@ -4769,12 +4801,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) return 1; } -#define E1000_MAX_PER_TXD 8192 -#define E1000_MAX_TXD_PWR 12 - static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, unsigned int first, unsigned int max_per_txd, - unsigned int nr_frags, unsigned int mss) + unsigned int nr_frags) { struct e1000_adapter *adapter = tx_ring->adapter; struct pci_dev *pdev = adapter->pdev; @@ -5007,20 +5036,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int 
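/*
 * Worked example for the tx_fifo_limit computation above, with
 * illustrative numbers: the upper word of PBA holds the Tx
 * packet-buffer allocation in KB, so for a 20 KB Tx allocation
 *
 *	((er32(PBA) >> 16) << 10) - 96 = 20 * 1024 - 96 = 20384 bytes,
 *
 * and min_t(u32, 20384, 24 << 10) keeps 20384; only allocations above
 * 24 KB are clamped to 24576 bytes per descriptor.
 */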
size) static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) { + BUG_ON(size > tx_ring->count); + if (e1000_desc_unused(tx_ring) >= size) return 0; return __e1000_maybe_stop_tx(tx_ring, size); } -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_ring *tx_ring = adapter->tx_ring; unsigned int first; - unsigned int max_per_txd = E1000_MAX_PER_TXD; - unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; unsigned int len = skb_headlen(skb); unsigned int nr_frags; @@ -5040,18 +5068,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, } mss = skb_shinfo(skb)->gso_size; - /* - * The controller does a simple calculation to - * make sure there is enough room in the FIFO before - * initiating the DMA for each buffer. The calc is: - * 4 = ceil(buffer len/mss). To make sure we don't - * overrun the FIFO, adjust the max buffer len if mss - * drops. - */ if (mss) { u8 hdr_len; - max_per_txd = min(mss << 2, max_per_txd); - max_txd_pwr = fls(max_per_txd) - 1; /* * TSO Workaround for 82571/2/3 Controllers -- if skb->data @@ -5081,12 +5099,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, count++; count++; - count += TXD_USE_COUNT(len, max_txd_pwr); + count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) - count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), - max_txd_pwr); + count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), + adapter->tx_fifo_limit); if (adapter->hw.mac.tx_pkt_filtering) e1000_transfer_dhcp_info(adapter, skb); @@ -5128,15 +5146,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, tx_flags |= E1000_TX_FLAGS_NO_FCS; /* if count is 0 then mapping error has occurred */ - count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); + count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, + nr_frags); if (count) { skb_tx_timestamp(skb); netdev_sent_queue(netdev, skb->len); e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. 
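 *
 * To make the new threshold concrete (a sketch with typical values,
 * not from the patch): with PAGE_SIZE = 4096 and tx_fifo_limit at its
 * 24 KB clamp, DIV_ROUND_UP(PAGE_SIZE, tx_fifo_limit) = 1, so the
 * queue is stopped when fewer than MAX_SKB_FRAGS + 2 descriptors
 * remain; a hypothetical 2 KB tx_fifo_limit would double the
 * per-fragment descriptor estimate to 2.
 *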
*/ - e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); - + e1000_maybe_stop_tx(tx_ring, + (MAX_SKB_FRAGS * + DIV_ROUND_UP(PAGE_SIZE, + adapter->tx_fifo_limit) + 2)); } else { dev_kfree_skb_any(skb); tx_ring->buffer_info[first].time_stamp = 0; @@ -6311,8 +6332,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, adapter->hw.phy.autoneg_advertised = 0x2f; /* ring size defaults */ - adapter->rx_ring->count = 256; - adapter->tx_ring->count = 256; + adapter->rx_ring->count = E1000_DEFAULT_RXD; + adapter->tx_ring->count = E1000_DEFAULT_TXD; /* * Initial Wake on LAN setting - If APM wake is enabled in diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index b860d4f7ea2a..fc62a3f3a5be 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -84,8 +84,9 @@ static const u16 e1000_igp_2_cable_length_table[] = { #define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 /* I82577 PHY Control 2 */ -#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 -#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 +#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 /* I82577 PHY Diagnostics Status */ #define I82577_DSTATUS_CABLE_LENGTH 0x03FC @@ -702,6 +703,32 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) if (ret_val) return ret_val; + /* Set MDI/MDIX mode */ + ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* + * Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data); + if (ret_val) + return ret_val; + return e1000_set_master_slave_mode(hw); } diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 5e84eaac48c1..ca4641e2f748 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) */ size += NVM_WORD_SIZE_BASE_SHIFT; + /* + * Check for invalid size + */ + if ((hw->mac.type == e1000_82576) && (size > 15)) { + pr_notice("The NVM size is not valid, defaulting to 32K\n"); + size = 15; + } + nvm->word_size = 1 << size; if (hw->mac.type < e1000_i210) { nvm->opcode_bits = 8; @@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) } else nvm->type = e1000_nvm_flash_hw; - /* - * Check for invalid size - */ - if ((hw->mac.type == e1000_82576) && (size > 15)) { - pr_notice("The NVM size is not valid, defaulting to 32K\n"); - size = 15; - } - /* NVM Function Pointers */ switch (hw->mac.type) { case e1000_82580: @@ -2223,11 +2223,10 @@ out: s32 igb_set_eee_i350(struct e1000_hw *hw) { s32 ret_val = 0; - u32 ipcnfg, eeer, ctrl_ext; + u32 ipcnfg, eeer; - ctrl_ext = rd32(E1000_CTRL_EXT); - if ((hw->mac.type != e1000_i350) || - (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK)) + if ((hw->mac.type < e1000_i350) || + (hw->phy.media_type != e1000_media_type_copper)) goto out; ipcnfg = rd32(E1000_IPCNFG); eeer = rd32(E1000_EEER); @@ -2240,6 +2239,14 @@ s32 igb_set_eee_i350(struct e1000_hw *hw) E1000_EEER_RX_LPI_EN | E1000_EEER_LPI_FC); + /* keep the LPI clock running before EEE is enabled */ + if (hw->mac.type == e1000_i210 || 
hw->mac.type == e1000_i211) { + u32 eee_su; + eee_su = rd32(E1000_EEE_SU); + eee_su &= ~E1000_EEE_SU_LPI_CLK_STP; + wr32(E1000_EEE_SU, eee_su); + } + } else { ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN); @@ -2249,6 +2256,8 @@ s32 igb_set_eee_i350(struct e1000_hw *hw) } wr32(E1000_IPCNFG, ipcnfg); wr32(E1000_EEER, eeer); + rd32(E1000_IPCNFG); + rd32(E1000_EEER); out: return ret_val; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index ec7e4fe3e3ee..de4b41ec3c40 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -322,6 +322,9 @@ #define E1000_FCRTC_RTH_COAL_SHIFT 4 #define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ +/* Timestamp in Rx buffer */ +#define E1000_RXPBS_CFG_TS_EN 0x80000000 + /* SerDes Control */ #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 @@ -360,6 +363,7 @@ #define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ #define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ #define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ #define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ /* If this bit asserted, the driver should claim the interrupt */ #define E1000_ICR_INT_ASSERTED 0x80000000 @@ -399,6 +403,7 @@ #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ #define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ @@ -510,6 +515,9 @@ #define E1000_TIMINCA_16NS_SHIFT 24 +#define E1000_TSICR_TXTS 0x00000002 +#define E1000_TSIM_TXTS 0x00000002 + #define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ #define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ #define E1000_MDICNFG_PHY_MASK 0x03E00000 @@ -849,8 +857,9 @@ #define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ #define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ #define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ -#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ +#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */ #define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ +#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */ /* SerDes Control */ #define E1000_GEN_CTL_READY 0x80000000 diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c index 7be98b6f1052..3404bc79f4ca 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.c +++ b/drivers/net/ethernet/intel/igb/e1000_phy.c @@ -464,6 +464,32 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw) phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); + if (ret_val) + goto out; + + /* Set MDI/MDIX mode */ + ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data); + if (ret_val) + goto out; + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; + /* + * Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX; + 
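/*
 * Note on the rd32(E1000_IPCNFG) and rd32(E1000_EEER) reads added
 * earlier in this hunk: reading a register back after a write is the
 * usual idiom for flushing posted PCIe writes, which here plausibly
 * ensures the EEE configuration has reached the hardware before
 * igb_set_eee_i350() returns. Condensed:
 *
 *	wr32(E1000_IPCNFG, ipcnfg);
 *	wr32(E1000_EEER, eeer);
 *	rd32(E1000_IPCNFG);	read back to force the write out
 *	rd32(E1000_EEER);
 */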
break; + } + ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); out: return ret_val; @@ -2246,8 +2272,7 @@ s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw) if (ret_val) goto out; - phy_data &= ~I82580_PHY_CTRL2_AUTO_MDIX; - phy_data &= ~I82580_PHY_CTRL2_FORCE_MDI_MDIX; + phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK; ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data); if (ret_val) diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h index 34e40619f16b..6ac3299bfcb9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_phy.h +++ b/drivers/net/ethernet/intel/igb/e1000_phy.h @@ -111,8 +111,9 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw); #define I82580_PHY_STATUS2_SPEED_100MBPS 0x0100 /* I82580 PHY Control 2 */ -#define I82580_PHY_CTRL2_AUTO_MDIX 0x0400 -#define I82580_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 +#define I82580_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82580_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82580_PHY_CTRL2_MDIX_CFG_MASK 0x0600 /* I82580 PHY Diagnostics Status */ #define I82580_DSTATUS_CABLE_LENGTH 0x03FC diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 10efcd88dca0..e5db48594e8a 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -91,6 +91,8 @@ #define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ #define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ #define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ /* Filtering Registers */ #define E1000_SAQF(_n) (0x5980 + 4 * (_n)) @@ -156,8 +158,12 @@ : (0x0E018 + ((_n) * 0x40))) #define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ : (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) #define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ : (0x0E038 + ((_n) * 0x40))) #define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ @@ -343,6 +349,7 @@ /* Energy Efficient Ethernet "EEE" register */ #define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ #define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ +#define E1000_EEE_SU 0X0E34 /* EEE Setup */ /* Thermal Sensor Register */ #define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 9e572dd29ab2..8aad230c0592 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -34,9 +34,11 @@ #include "e1000_mac.h" #include "e1000_82575.h" +#ifdef CONFIG_IGB_PTP #include <linux/clocksource.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> +#endif /* CONFIG_IGB_PTP */ #include <linux/bitops.h> #include <linux/if_vlan.h> @@ -99,7 +101,6 @@ struct vf_data_storage { u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ u16 pf_qos; u16 tx_rate; - struct pci_dev *vfdev; }; #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */ @@ -131,9 +132,9 @@ struct vf_data_storage { #define MAXIMUM_ETHERNET_VLAN_SIZE 1522 /* Supported Rx Buffer Sizes */ -#define IGB_RXBUFFER_512 512 +#define IGB_RXBUFFER_256 256 #define IGB_RXBUFFER_16384 16384 -#define IGB_RX_HDR_LEN IGB_RXBUFFER_512 +#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 /* How many Tx Descriptors do we need to call netif_wake_queue ? */ #define IGB_TX_QUEUE_WAKE 16 @@ -167,8 +168,8 @@ struct igb_tx_buffer { unsigned int bytecount; u16 gso_segs; __be16 protocol; - dma_addr_t dma; - u32 length; + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); u32 tx_flags; }; @@ -212,7 +213,6 @@ struct igb_q_vector { struct igb_ring_container rx, tx; struct napi_struct napi; - int numa_node; u16 itr_val; u8 set_itr; @@ -257,7 +257,6 @@ struct igb_ring { }; /* Items past this point are only used during ring alloc / free */ dma_addr_t dma; /* phys address of the ring */ - int numa_node; /* node to alloc ring memory on */ }; enum e1000_ring_flags_t { @@ -342,7 +341,6 @@ struct igb_adapter { /* OS defined structs */ struct pci_dev *pdev; - struct hwtstamp_config hwtstamp_config; spinlock_t stats64_lock; struct rtnl_link_stats64 stats64; @@ -373,15 +371,19 @@ struct igb_adapter { int vf_rate_link_speed; u32 rss_queues; u32 wvbr; - int node; u32 *shadow_vfta; +#ifdef CONFIG_IGB_PTP struct ptp_clock *ptp_clock; - struct ptp_clock_info caps; - struct delayed_work overflow_work; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; spinlock_t tmreg_lock; struct cyclecounter cc; struct timecounter tc; +#endif /* CONFIG_IGB_PTP */ + char fw_version[32]; }; @@ -390,6 +392,7 @@ struct igb_adapter { #define IGB_FLAG_QUAD_PORT_A (1 << 2) #define IGB_FLAG_QUEUE_PAIRS (1 << 3) #define IGB_FLAG_DMAC (1 << 4) +#define IGB_FLAG_PTP (1 << 5) /* DMA Coalescing defines */ #define IGB_MIN_TXPBSIZE 20408 @@ -435,13 +438,17 @@ extern void igb_power_up_link(struct igb_adapter *); extern void igb_set_fw_version(struct igb_adapter *); #ifdef CONFIG_IGB_PTP extern void igb_ptp_init(struct igb_adapter *adapter); -extern void igb_ptp_remove(struct igb_adapter *adapter); - -extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter, - struct skb_shared_hwtstamps *hwtstamps, - u64 systim); +extern void igb_ptp_stop(struct igb_adapter *adapter); +extern void igb_ptp_reset(struct igb_adapter *adapter); +extern void igb_ptp_tx_work(struct work_struct *work); +extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter); +extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb); +extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd); +#endif /* CONFIG_IGB_PTP */ -#endif static inline s32 igb_reset_phy(struct e1000_hw *hw) { if (hw->phy.ops.reset) diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index a19c84cad0e9..2ea012849825 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -148,9 +148,9 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full| SUPPORTED_Autoneg | - SUPPORTED_TP); - ecmd->advertising = (ADVERTISED_TP | - ADVERTISED_Pause); + SUPPORTED_TP | + SUPPORTED_Pause); + ecmd->advertising = ADVERTISED_TP; 
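/*
 * The flow-control advertisement reported just below follows
 * hw->fc.requested_mode; as a quick reference table derived from the
 * added code (no new behavior implied):
 *
 *	e1000_fc_full     -> ADVERTISED_Pause
 *	e1000_fc_rx_pause -> ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *	e1000_fc_tx_pause -> ADVERTISED_Asym_Pause
 *	anything else     -> both bits cleared
 *
 * and both bits are cleared outright when autoneg is off.
 */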
if (hw->mac.autoneg == 1) { ecmd->advertising |= ADVERTISED_Autoneg; @@ -158,6 +158,21 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) ecmd->advertising |= hw->phy.autoneg_advertised; } + if (hw->mac.autoneg != 1) + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + + if (hw->fc.requested_mode == e1000_fc_full) + ecmd->advertising |= ADVERTISED_Pause; + else if (hw->fc.requested_mode == e1000_fc_rx_pause) + ecmd->advertising |= (ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + else if (hw->fc.requested_mode == e1000_fc_tx_pause) + ecmd->advertising |= ADVERTISED_Asym_Pause; + else + ecmd->advertising &= ~(ADVERTISED_Pause | + ADVERTISED_Asym_Pause); + ecmd->port = PORT_TP; ecmd->phy_address = hw->phy.addr; } else { @@ -198,6 +213,19 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) } ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if (hw->phy.media_type == e1000_media_type_copper) + ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : + ETH_TP_MDI; + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + return 0; } @@ -209,11 +237,27 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) /* When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed */ if (igb_check_reset_block(hw)) { - dev_err(&adapter->pdev->dev, "Cannot change link " - "characteristics when SoL/IDER is active.\n"); + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when SoL/IDER is active.\n"); return -EINVAL; } + /* + * MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
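 *
 * The same restriction is added to e1000e earlier in this patch; a
 * minimal shared helper could capture it (hypothetical sketch, not
 * part of either driver):
 *
 *	static int validate_mdix_ctrl(u8 mdix_ctrl, u8 autoneg, bool copper)
 *	{
 *		if (!mdix_ctrl)
 *			return 0;
 *		if (!copper)
 *			return -EOPNOTSUPP;
 *		if (mdix_ctrl != ETH_TP_MDI_AUTO &&
 *		    autoneg != AUTONEG_ENABLE)
 *			return -EINVAL;
 *		return 0;
 *	}
 *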
+ */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + dev_err(&adapter->pdev->dev, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) msleep(1); @@ -227,12 +271,25 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) hw->fc.requested_mode = e1000_fc_default; } else { u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) { clear_bit(__IGB_RESETTING, &adapter->state); return -EINVAL; } } + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + /* + * fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + } + /* reset the link */ if (netif_running(adapter->netdev)) { igb_down(adapter); @@ -1089,8 +1146,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, wr32(reg, (_test[pat] & write)); val = rd32(reg) & mask; if (val != (_test[pat] & write & mask)) { - dev_err(&adapter->pdev->dev, "pattern test reg %04X " - "failed: got 0x%08X expected 0x%08X\n", + dev_err(&adapter->pdev->dev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", reg, val, (_test[pat] & write & mask)); *data = reg; return 1; @@ -1108,8 +1165,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, wr32(reg, write & mask); val = rd32(reg); if ((write & mask) != (val & mask)) { - dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" - " got 0x%08X expected 0x%08X\n", reg, + dev_err(&adapter->pdev->dev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; return 1; @@ -1171,8 +1228,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) wr32(E1000_STATUS, toggle); after = rd32(E1000_STATUS) & toggle; if (value != after) { - dev_err(&adapter->pdev->dev, "failed STATUS register test " - "got: 0x%08X expected: 0x%08X\n", after, value); + dev_err(&adapter->pdev->dev, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); *data = 1; return 1; } @@ -1468,35 +1526,27 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 ctrl_reg = 0; - u16 phy_reg = 0; hw->mac.autoneg = false; - switch (hw->phy.type) { - case e1000_phy_m88: - /* Auto-MDI/MDIX Off */ - igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); - /* reset to update Auto-MDI/MDIX */ - igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); - /* autoneg off */ - igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); - break; - case e1000_phy_82580: - /* enable MII loopback */ - igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041); - break; - case e1000_phy_i210: - /* set loopback speed in PHY */ - igb_read_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2), - &phy_reg); - phy_reg |= GS40G_MAC_SPEED_1G; - igb_write_phy_reg(hw, (GS40G_PAGE_SELECT & GS40G_PAGE_2), - phy_reg); - ctrl_reg = rd32(E1000_CTRL_EXT); - default: - break; + if (hw->phy.type == e1000_phy_m88) { + if (hw->phy.id != I210_I_PHY_ID) { + /* Auto-MDI/MDIX Off */ + igb_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + 
igb_write_phy_reg(hw, PHY_CONTROL, 0x9140); + /* autoneg off */ + igb_write_phy_reg(hw, PHY_CONTROL, 0x8140); + } else { + /* force 1000, set loopback */ + igb_write_phy_reg(hw, I347AT4_PAGE_SELECT, 0); + igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); + } } + /* add small delay to avoid loopback test failure */ + msleep(50); + /* force 1000, set loopback */ igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); @@ -1509,7 +1559,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) E1000_CTRL_FD | /* Force Duplex to FULL */ E1000_CTRL_SLU); /* Set link up enable bit */ - if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210)) + if (hw->phy.type == e1000_phy_m88) ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ wr32(E1000_CTRL, ctrl_reg); @@ -1517,11 +1567,10 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) /* Disable the receiver on the PHY so when a cable is plugged in, the * PHY does not begin to autoneg when a cable is reconnected to the NIC. */ - if ((hw->phy.type == e1000_phy_m88) || (hw->phy.type == e1000_phy_i210)) + if (hw->phy.type == e1000_phy_m88) igb_phy_disable_receiver(adapter); - udelay(500); - + mdelay(500); return 0; } @@ -1777,16 +1826,7 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) * sessions are active */ if (igb_check_reset_block(&adapter->hw)) { dev_err(&adapter->pdev->dev, - "Cannot do PHY loopback test " - "when SoL/IDER is active.\n"); - *data = 0; - goto out; - } - if ((adapter->hw.mac.type == e1000_i210) - || (adapter->hw.mac.type == e1000_i210)) { - dev_err(&adapter->pdev->dev, - "Loopback test not supported " - "on this part at this time.\n"); + "Cannot do PHY loopback test when SoL/IDER is active.\n"); *data = 0; goto out; } @@ -2255,6 +2295,54 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } } +static int igb_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + switch (adapter->hw.mac.type) { +#ifdef CONFIG_IGB_PTP + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i210: + case e1000_i211: + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; + + /* 82576 does not support timestamping all packets. 
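 *
 * User space reads the capability set built below through the
 * standard ethtool ioctl; a minimal sketch, assuming an "eth0"
 * interface (names and error handling omitted):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&info;
 *	ioctl(fd, SIOCETHTOOL, &ifr);
 *	-- info.rx_filters then holds the bitmap assembled below
 *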
*/ + if (adapter->hw.mac.type >= e1000_82580) + info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL; + else + info->rx_filters |= + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + + return 0; +#endif /* CONFIG_IGB_PTP */ + default: + return -EOPNOTSUPP; + } +} + static int igb_ethtool_begin(struct net_device *netdev) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -2268,38 +2356,6 @@ static void igb_ethtool_complete(struct net_device *netdev) pm_runtime_put(&adapter->pdev->dev); } -#ifdef CONFIG_IGB_PTP -static int igb_ethtool_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) -{ - struct igb_adapter *adapter = netdev_priv(dev); - - info->so_timestamping = - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - - if (adapter->ptp_clock) - info->phc_index = ptp_clock_index(adapter->ptp_clock); - else - info->phc_index = -1; - - info->tx_types = - (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); - - info->rx_filters = - (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_ALL) | - (1 << HWTSTAMP_FILTER_SOME) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); - - return 0; -} - -#endif static const struct ethtool_ops igb_ethtool_ops = { .get_settings = igb_get_settings, .set_settings = igb_set_settings, @@ -2326,11 +2382,9 @@ static const struct ethtool_ops igb_ethtool_ops = { .get_ethtool_stats = igb_get_ethtool_stats, .get_coalesce = igb_get_coalesce, .set_coalesce = igb_set_coalesce, + .get_ts_info = igb_get_ts_info, .begin = igb_ethtool_begin, .complete = igb_ethtool_complete, -#ifdef CONFIG_IGB_PTP - .get_ts_info = igb_ethtool_get_ts_info, -#endif }; void igb_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b7c2d5050572..60bf46534835 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -172,8 +172,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *); #ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); -static int igb_find_enabled_vfs(struct igb_adapter *adapter); -static int igb_check_vf_assignment(struct igb_adapter *adapter); +static bool igb_vfs_are_assigned(struct igb_adapter *adapter); #endif #ifdef CONFIG_PM @@ -404,8 +403,8 @@ static void igb_dump(struct igb_adapter *adapter) buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n", n, tx_ring->next_to_use, tx_ring->next_to_clean, - (u64)buffer_info->dma, - buffer_info->length, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), buffer_info->next_to_watch, (u64)buffer_info->time_stamp); } @@ -456,17 +455,18 @@ static void igb_dump(struct igb_adapter *adapter) " %04X %p %016llX %p%s\n", i, le64_to_cpu(u0->a), le64_to_cpu(u0->b), - (u64)buffer_info->dma, - buffer_info->length, + (u64)dma_unmap_addr(buffer_info, dma), + dma_unmap_len(buffer_info, len), buffer_info->next_to_watch, (u64)buffer_info->time_stamp, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + if (netif_msg_pktdata(adapter) && buffer_info->skb) 
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - buffer_info->length, true); + 16, 1, buffer_info->skb->data, + dma_unmap_len(buffer_info, len), + true); } } @@ -547,18 +547,17 @@ rx_ring_summary: (u64)buffer_info->dma, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter)) { + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->skb) { print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, - phys_to_virt(buffer_info->dma), - IGB_RX_HDR_LEN, true); + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + IGB_RX_HDR_LEN, true); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt( - buffer_info->page_dma + - buffer_info->page_offset), + page_address(buffer_info->page) + + buffer_info->page_offset, PAGE_SIZE/2, true); } } @@ -684,52 +683,29 @@ static int igb_alloc_queues(struct igb_adapter *adapter) { struct igb_ring *ring; int i; - int orig_node = adapter->node; for (i = 0; i < adapter->num_tx_queues; i++) { - if (orig_node == -1) { - int cur_node = next_online_node(adapter->node); - if (cur_node == MAX_NUMNODES) - cur_node = first_online_node; - adapter->node = cur_node; - } - ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL, - adapter->node); - if (!ring) - ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); + ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); if (!ring) goto err; ring->count = adapter->tx_ring_count; ring->queue_index = i; ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; - ring->numa_node = adapter->node; /* For 82575, context index must be unique per ring. */ if (adapter->hw.mac.type == e1000_82575) set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags); adapter->tx_ring[i] = ring; } - /* Restore the adapter's original node */ - adapter->node = orig_node; for (i = 0; i < adapter->num_rx_queues; i++) { - if (orig_node == -1) { - int cur_node = next_online_node(adapter->node); - if (cur_node == MAX_NUMNODES) - cur_node = first_online_node; - adapter->node = cur_node; - } - ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL, - adapter->node); - if (!ring) - ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); + ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL); if (!ring) goto err; ring->count = adapter->rx_ring_count; ring->queue_index = i; ring->dev = &adapter->pdev->dev; ring->netdev = adapter->netdev; - ring->numa_node = adapter->node; /* set flag indicating ring supports SCTP checksum offload */ if (adapter->hw.mac.type >= e1000_82576) set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); @@ -743,16 +719,12 @@ static int igb_alloc_queues(struct igb_adapter *adapter) adapter->rx_ring[i] = ring; } - /* Restore the adapter's original node */ - adapter->node = orig_node; igb_cache_ring_register(adapter); return 0; err: - /* Restore the adapter's original node */ - adapter->node = orig_node; igb_free_queues(adapter); return -ENOMEM; @@ -1118,24 +1090,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter) struct igb_q_vector *q_vector; struct e1000_hw *hw = &adapter->hw; int v_idx; - int orig_node = adapter->node; for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { - if ((adapter->num_q_vectors == (adapter->num_rx_queues + - adapter->num_tx_queues)) && - (adapter->num_rx_queues == v_idx)) - adapter->node = orig_node; - if (orig_node == -1) { - int cur_node = next_online_node(adapter->node); - if (cur_node == MAX_NUMNODES) - cur_node = first_online_node; - adapter->node = cur_node; - } - q_vector = kzalloc_node(sizeof(struct 
igb_q_vector), GFP_KERNEL, - adapter->node); - if (!q_vector) - q_vector = kzalloc(sizeof(struct igb_q_vector), - GFP_KERNEL); + q_vector = kzalloc(sizeof(struct igb_q_vector), + GFP_KERNEL); if (!q_vector) goto err_out; q_vector->adapter = adapter; @@ -1144,14 +1102,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter) netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64); adapter->q_vector[v_idx] = q_vector; } - /* Restore the adapter's original node */ - adapter->node = orig_node; return 0; err_out: - /* Restore the adapter's original node */ - adapter->node = orig_node; igb_free_q_vectors(adapter); return -ENOMEM; } @@ -1752,6 +1706,11 @@ void igb_reset(struct igb_adapter *adapter) /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); +#ifdef CONFIG_IGB_PTP + /* Re-enable PTP, where applicable. */ + igb_ptp_reset(adapter); +#endif /* CONFIG_IGB_PTP */ + igb_get_phy_info(hw); } @@ -2181,11 +2140,12 @@ static int __devinit igb_probe(struct pci_dev *pdev, } #endif + #ifdef CONFIG_IGB_PTP /* do hw tstamp init after resetting */ igb_ptp_init(adapter); +#endif /* CONFIG_IGB_PTP */ -#endif dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); /* print bus type/speed/width info */ dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", @@ -2260,9 +2220,9 @@ static void __devexit igb_remove(struct pci_dev *pdev) pm_runtime_get_noresume(&pdev->dev); #ifdef CONFIG_IGB_PTP - igb_ptp_remove(adapter); + igb_ptp_stop(adapter); +#endif /* CONFIG_IGB_PTP */ -#endif /* * The watchdog timer may be rescheduled, so explicitly * disable watchdog from being rescheduled. @@ -2295,11 +2255,11 @@ static void __devexit igb_remove(struct pci_dev *pdev) /* reclaim resources allocated to VFs */ if (adapter->vf_data) { /* disable iov and allow time for transactions to clear */ - if (!igb_check_vf_assignment(adapter)) { + if (igb_vfs_are_assigned(adapter)) { + dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n"); + } else { pci_disable_sriov(pdev); msleep(500); - } else { - dev_info(&pdev->dev, "VF(s) assigned to guests!\n"); } kfree(adapter->vf_data); @@ -2339,7 +2299,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter) #ifdef CONFIG_PCI_IOV struct pci_dev *pdev = adapter->pdev; struct e1000_hw *hw = &adapter->hw; - int old_vfs = igb_find_enabled_vfs(adapter); + int old_vfs = pci_num_vf(adapter->pdev); int i; /* Virtualization features not supported on i210 family. 
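 *
 * (On the igb_probe_vfs() change above: the driver-local
 * igb_find_enabled_vfs() walk over device/function numbers is
 * replaced by the PCI core's own accounting,
 *
 *	int old_vfs = pci_num_vf(adapter->pdev);
 *
 * which returns how many VFs are currently enabled on this physical
 * function.)
 *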
*/ @@ -2419,8 +2379,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) VLAN_HLEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; - adapter->node = -1; - spin_lock_init(&adapter->stats64_lock); #ifdef CONFIG_PCI_IOV switch (hw->mac.type) { @@ -2667,13 +2625,11 @@ static int igb_close(struct net_device *netdev) int igb_setup_tx_resources(struct igb_ring *tx_ring) { struct device *dev = tx_ring->dev; - int orig_node = dev_to_node(dev); int size; size = sizeof(struct igb_tx_buffer) * tx_ring->count; - tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node); - if (!tx_ring->tx_buffer_info) - tx_ring->tx_buffer_info = vzalloc(size); + + tx_ring->tx_buffer_info = vzalloc(size); if (!tx_ring->tx_buffer_info) goto err; @@ -2681,18 +2637,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring) tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); tx_ring->size = ALIGN(tx_ring->size, 4096); - set_dev_node(dev, tx_ring->numa_node); tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, &tx_ring->dma, GFP_KERNEL); - set_dev_node(dev, orig_node); - if (!tx_ring->desc) - tx_ring->desc = dma_alloc_coherent(dev, - tx_ring->size, - &tx_ring->dma, - GFP_KERNEL); - if (!tx_ring->desc) goto err; @@ -2703,8 +2651,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring) err: vfree(tx_ring->tx_buffer_info); - dev_err(dev, - "Unable to allocate memory for the transmit descriptor ring\n"); + tx_ring->tx_buffer_info = NULL; + dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n"); return -ENOMEM; } @@ -2821,34 +2769,23 @@ static void igb_configure_tx(struct igb_adapter *adapter) int igb_setup_rx_resources(struct igb_ring *rx_ring) { struct device *dev = rx_ring->dev; - int orig_node = dev_to_node(dev); - int size, desc_len; + int size; size = sizeof(struct igb_rx_buffer) * rx_ring->count; - rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node); - if (!rx_ring->rx_buffer_info) - rx_ring->rx_buffer_info = vzalloc(size); + + rx_ring->rx_buffer_info = vzalloc(size); if (!rx_ring->rx_buffer_info) goto err; - desc_len = sizeof(union e1000_adv_rx_desc); /* Round up to nearest 4K */ - rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - set_dev_node(dev, rx_ring->numa_node); rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); - set_dev_node(dev, orig_node); - if (!rx_ring->desc) - rx_ring->desc = dma_alloc_coherent(dev, - rx_ring->size, - &rx_ring->dma, - GFP_KERNEL); - if (!rx_ring->desc) goto err; @@ -2860,8 +2797,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring) err: vfree(rx_ring->rx_buffer_info); rx_ring->rx_buffer_info = NULL; - dev_err(dev, "Unable to allocate memory for the receive descriptor" - " ring\n"); + dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n"); return -ENOMEM; } @@ -2899,57 +2835,48 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; u32 mrqc, rxcsum; - u32 j, num_rx_queues, shift = 0, shift2 = 0; - union e1000_reta { - u32 dword; - u8 bytes[4]; - } reta; - static const u8 rsshash[40] = { - 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67, - 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb, - 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, - 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa }; + u32 j, num_rx_queues, shift = 0; + static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 
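/*
 * These words are the old rsshash[40] byte table folded into
 * little-endian u32s; e.g. the first four bytes 0x6d, 0x5a, 0x56,
 * 0xda reassemble as
 *
 *	0x6d | (0x5a << 8) | (0x56 << 16) | (0xda << 24) = 0xDA565A6D,
 *
 * matching the first entry above, so the RSS key written to RSSRK is
 * unchanged by this conversion.
 */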
0x3D256741, + 0xB08FA343, 0xCB2BCAD0, 0xB4307BAE, + 0xA32DCB77, 0x0CF23080, 0x3BB7426A, + 0xFA01ACBE }; /* Fill out hash function seeds */ - for (j = 0; j < 10; j++) { - u32 rsskey = rsshash[(j * 4)]; - rsskey |= rsshash[(j * 4) + 1] << 8; - rsskey |= rsshash[(j * 4) + 2] << 16; - rsskey |= rsshash[(j * 4) + 3] << 24; - array_wr32(E1000_RSSRK(0), j, rsskey); - } + for (j = 0; j < 10; j++) + wr32(E1000_RSSRK(j), rsskey[j]); num_rx_queues = adapter->rss_queues; - if (adapter->vfs_allocated_count) { - /* 82575 and 82576 supports 2 RSS queues for VMDq */ - switch (hw->mac.type) { - case e1000_i350: - case e1000_82580: - num_rx_queues = 1; - shift = 0; - break; - case e1000_82576: + switch (hw->mac.type) { + case e1000_82575: + shift = 6; + break; + case e1000_82576: + /* 82576 supports 2 RSS queues for SR-IOV */ + if (adapter->vfs_allocated_count) { shift = 3; num_rx_queues = 2; - break; - case e1000_82575: - shift = 2; - shift2 = 6; - default: - break; } - } else { - if (hw->mac.type == e1000_82575) - shift = 6; + break; + default: + break; } - for (j = 0; j < (32 * 4); j++) { - reta.bytes[j & 3] = (j % num_rx_queues) << shift; - if (shift2) - reta.bytes[j & 3] |= num_rx_queues << shift2; - if ((j & 3) == 3) - wr32(E1000_RETA(j >> 2), reta.dword); + /* + * Populate the indirection table 4 entries at a time. To do this + * we are generating the results for n and n+2 and then interleaving + * those with the results with n+1 and n+3. + */ + for (j = 0; j < 32; j++) { + /* first pass generates n and n+2 */ + u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues; + u32 reta = (base & 0x07800780) >> (7 - shift); + + /* second pass generates n+1 and n+3 */ + base += 0x00010001 * num_rx_queues; + reta |= (base & 0x07800780) << (1 + shift); + + wr32(E1000_RETA(j), reta); } /* @@ -3185,8 +3112,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter, srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT; #endif srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; +#ifdef CONFIG_IGB_PTP if (hw->mac.type >= e1000_82580) srrctl |= E1000_SRRCTL_TIMESTAMP; +#endif /* CONFIG_IGB_PTP */ /* Only set Drop Enable if we are supporting multiple queues */ if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) srrctl |= E1000_SRRCTL_DROP_EN; @@ -3270,20 +3199,20 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring, { if (tx_buffer->skb) { dev_kfree_skb_any(tx_buffer->skb); - if (tx_buffer->dma) + if (dma_unmap_len(tx_buffer, len)) dma_unmap_single(ring->dev, - tx_buffer->dma, - tx_buffer->length, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); - } else if (tx_buffer->dma) { + } else if (dma_unmap_len(tx_buffer, len)) { dma_unmap_page(ring->dev, - tx_buffer->dma, - tx_buffer->length, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); } tx_buffer->next_to_watch = NULL; tx_buffer->skb = NULL; - tx_buffer->dma = 0; + dma_unmap_len_set(tx_buffer, len, 0); /* buffer_info must be completely set up in the transmit path */ } @@ -4230,9 +4159,11 @@ static __le32 igb_tx_cmd_type(u32 tx_flags) if (tx_flags & IGB_TX_FLAGS_VLAN) cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE); +#ifdef CONFIG_IGB_PTP /* set timestamp bit if present */ - if (tx_flags & IGB_TX_FLAGS_TSTAMP) + if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP); +#endif /* CONFIG_IGB_PTP */ /* set segmentation bits for TSO */ if (tx_flags & IGB_TX_FLAGS_TSO) @@ -4276,7 +4207,7 @@ static void igb_tx_map(struct igb_ring *tx_ring, const 
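/*
 * Worked example for the RETA packing loop above (illustrative
 * values: num_rx_queues = 4, shift = 0): for register j = 8 the first
 * pass computes base = ((8 * 0x00040004) + 0x00020000) * 4, whose
 * masked halves yield entries 32 and 34 -> queue 1; the second pass
 * adds 0x00010001 * 4 and fills entries 33 and 35, also queue 1, so
 * E1000_RETA(8) is written as 0x01010101. The 128 four-bit entries
 * thus map hash buckets to queues in contiguous blocks of
 * 128 / num_rx_queues, rather than the old per-entry round-robin.
 */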
u8 hdr_len) { struct sk_buff *skb = first->skb; - struct igb_tx_buffer *tx_buffer_info; + struct igb_tx_buffer *tx_buffer; union e1000_adv_tx_desc *tx_desc; dma_addr_t dma; struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; @@ -4297,8 +4228,8 @@ static void igb_tx_map(struct igb_ring *tx_ring, goto dma_error; /* record length, and DMA address */ - first->length = size; - first->dma = dma; + dma_unmap_len_set(first, len, size); + dma_unmap_addr_set(first, dma, dma); tx_desc->read.buffer_addr = cpu_to_le64(dma); for (;;) { @@ -4340,9 +4271,9 @@ static void igb_tx_map(struct igb_ring *tx_ring, if (dma_mapping_error(tx_ring->dev, dma)) goto dma_error; - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - tx_buffer_info->length = size; - tx_buffer_info->dma = dma; + tx_buffer = &tx_ring->tx_buffer_info[i]; + dma_unmap_len_set(tx_buffer, len, size); + dma_unmap_addr_set(tx_buffer, dma, dma); tx_desc->read.olinfo_status = 0; tx_desc->read.buffer_addr = cpu_to_le64(dma); @@ -4393,9 +4324,9 @@ dma_error: /* clear dma mappings for failed tx_buffer_info map */ for (;;) { - tx_buffer_info = &tx_ring->tx_buffer_info[i]; - igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); - if (tx_buffer_info == first) + tx_buffer = &tx_ring->tx_buffer_info[i]; + igb_unmap_and_free_tx_resource(tx_ring, tx_buffer); + if (tx_buffer == first) break; if (i == 0) i = tx_ring->count; @@ -4441,6 +4372,9 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size) netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, struct igb_ring *tx_ring) { +#ifdef CONFIG_IGB_PTP + struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); +#endif /* CONFIG_IGB_PTP */ struct igb_tx_buffer *first; int tso; u32 tx_flags = 0; @@ -4463,10 +4397,17 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { +#ifdef CONFIG_IGB_PTP + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + !(adapter->ptp_tx_skb))) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGB_TX_FLAGS_TSTAMP; + + adapter->ptp_tx_skb = skb_get(skb); + if (adapter->hw.mac.type == e1000_82576) + schedule_work(&adapter->ptp_tx_work); } +#endif /* CONFIG_IGB_PTP */ if (vlan_tx_tag_present(skb)) { tx_flags |= IGB_TX_FLAGS_VLAN; @@ -4662,11 +4603,13 @@ void igb_update_stats(struct igb_adapter *adapter, bytes = 0; packets = 0; for (i = 0; i < adapter->num_rx_queues; i++) { - u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF; + u32 rqdpc = rd32(E1000_RQDPC(i)); struct igb_ring *ring = adapter->rx_ring[i]; - ring->rx_stats.drops += rqdpc_tmp; - net_stats->rx_fifo_errors += rqdpc_tmp; + if (rqdpc) { + ring->rx_stats.drops += rqdpc; + net_stats->rx_fifo_errors += rqdpc; + } do { start = u64_stats_fetch_begin_bh(&ring->rx_syncp); @@ -4756,7 +4699,11 @@ void igb_update_stats(struct igb_adapter *adapter, reg = rd32(E1000_CTRL_EXT); if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) { adapter->stats.rxerrc += rd32(E1000_RXERRC); - adapter->stats.tncrs += rd32(E1000_TNCRS); + + /* this stat has invalid values on i210/i211 */ + if ((hw->mac.type != e1000_i210) && + (hw->mac.type != e1000_i211)) + adapter->stats.tncrs += rd32(E1000_TNCRS); } adapter->stats.tsctc += rd32(E1000_TSCTC); @@ -4853,6 +4800,19 @@ static irqreturn_t igb_msix_other(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } +#ifdef CONFIG_IGB_PTP + if (icr & E1000_ICR_TS) { + u32 tsicr = rd32(E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge 
the interrupt */ + wr32(E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } +#endif /* CONFIG_IGB_PTP */ + wr32(E1000_EIMS, adapter->eims_other); return IRQ_HANDLED; @@ -5003,102 +4963,43 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event, static int igb_vf_configure(struct igb_adapter *adapter, int vf) { unsigned char mac_addr[ETH_ALEN]; - struct pci_dev *pdev = adapter->pdev; - struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pvfdev; - unsigned int device_id; - u16 thisvf_devfn; eth_random_addr(mac_addr); igb_set_vf_mac(adapter, vf, mac_addr); - switch (adapter->hw.mac.type) { - case e1000_82576: - device_id = IGB_82576_VF_DEV_ID; - /* VF Stride for 82576 is 2 */ - thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) | - (pdev->devfn & 1); - break; - case e1000_i350: - device_id = IGB_I350_VF_DEV_ID; - /* VF Stride for I350 is 4 */ - thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) | - (pdev->devfn & 3); - break; - default: - device_id = 0; - thisvf_devfn = 0; - break; - } - - pvfdev = pci_get_device(hw->vendor_id, device_id, NULL); - while (pvfdev) { - if (pvfdev->devfn == thisvf_devfn) - break; - pvfdev = pci_get_device(hw->vendor_id, - device_id, pvfdev); - } - - if (pvfdev) - adapter->vf_data[vf].vfdev = pvfdev; - else - dev_err(&pdev->dev, - "Couldn't find pci dev ptr for VF %4.4x\n", - thisvf_devfn); - return pvfdev != NULL; + return 0; } -static int igb_find_enabled_vfs(struct igb_adapter *adapter) +static bool igb_vfs_are_assigned(struct igb_adapter *adapter) { - struct e1000_hw *hw = &adapter->hw; struct pci_dev *pdev = adapter->pdev; - struct pci_dev *pvfdev; - u16 vf_devfn = 0; - u16 vf_stride; - unsigned int device_id; - int vfs_found = 0; + struct pci_dev *vfdev; + int dev_id; switch (adapter->hw.mac.type) { case e1000_82576: - device_id = IGB_82576_VF_DEV_ID; - /* VF Stride for 82576 is 2 */ - vf_stride = 2; + dev_id = IGB_82576_VF_DEV_ID; break; case e1000_i350: - device_id = IGB_I350_VF_DEV_ID; - /* VF Stride for I350 is 4 */ - vf_stride = 4; + dev_id = IGB_I350_VF_DEV_ID; break; default: - device_id = 0; - vf_stride = 0; - break; - } - - vf_devfn = pdev->devfn + 0x80; - pvfdev = pci_get_device(hw->vendor_id, device_id, NULL); - while (pvfdev) { - if (pvfdev->devfn == vf_devfn && - (pvfdev->bus->number >= pdev->bus->number)) - vfs_found++; - vf_devfn += vf_stride; - pvfdev = pci_get_device(hw->vendor_id, - device_id, pvfdev); + return false; } - return vfs_found; -} - -static int igb_check_vf_assignment(struct igb_adapter *adapter) -{ - int i; - for (i = 0; i < adapter->vfs_allocated_count; i++) { - if (adapter->vf_data[i].vfdev) { - if (adapter->vf_data[i].vfdev->dev_flags & - PCI_DEV_FLAGS_ASSIGNED) + /* loop through all the VFs to see if we own any that are assigned */ + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL); + while (vfdev) { + /* if we don't own it we don't care */ + if (vfdev->is_virtfn && vfdev->physfn == pdev) { + /* if it is assigned we cannot release it */ + if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) return true; } + + vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev); } + return false; } @@ -5644,6 +5545,19 @@ static irqreturn_t igb_intr_msi(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } +#ifdef CONFIG_IGB_PTP + if (icr & E1000_ICR_TS) { + u32 tsicr = rd32(E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ + wr32(E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + 
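/*
 * This acknowledge-and-schedule sequence is added verbatim to the
 * MSI-X, MSI and legacy interrupt paths; a factored-out sketch of the
 * shared logic (hypothetical helper, not in this patch):
 *
 *	static void igb_tsync_intr(struct igb_adapter *adapter)
 *	{
 *		struct e1000_hw *hw = &adapter->hw;
 *		u32 tsicr = rd32(E1000_TSICR);
 *
 *		if (tsicr & E1000_TSICR_TXTS) {
 *			wr32(E1000_TSICR, E1000_TSICR_TXTS);
 *			schedule_work(&adapter->ptp_tx_work);
 *		}
 *	}
 */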
schedule_work(&adapter->ptp_tx_work); + } + } +#endif /* CONFIG_IGB_PTP */ + napi_schedule(&q_vector->napi); return IRQ_HANDLED; @@ -5685,6 +5599,19 @@ static irqreturn_t igb_intr(int irq, void *data) mod_timer(&adapter->watchdog_timer, jiffies + 1); } +#ifdef CONFIG_IGB_PTP + if (icr & E1000_ICR_TS) { + u32 tsicr = rd32(E1000_TSICR); + + if (tsicr & E1000_TSICR_TXTS) { + /* acknowledge the interrupt */ + wr32(E1000_TSICR, E1000_TSICR_TXTS); + /* retrieve hardware timestamp */ + schedule_work(&adapter->ptp_tx_work); + } + } +#endif /* CONFIG_IGB_PTP */ + napi_schedule(&q_vector->napi); return IRQ_HANDLED; @@ -5744,37 +5671,6 @@ static int igb_poll(struct napi_struct *napi, int budget) return 0; } -#ifdef CONFIG_IGB_PTP -/** - * igb_tx_hwtstamp - utility function which checks for TX time stamp - * @q_vector: pointer to q_vector containing needed info - * @buffer: pointer to igb_tx_buffer structure - * - * If we were asked to do hardware stamping and such a time stamp is - * available, then it must have been for this skb here because we only - * allow only one such packet into the queue. - */ -static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, - struct igb_tx_buffer *buffer_info) -{ - struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; - struct skb_shared_hwtstamps shhwtstamps; - u64 regval; - - /* if skb does not support hw timestamp or TX stamp not valid exit */ - if (likely(!(buffer_info->tx_flags & IGB_TX_FLAGS_TSTAMP)) || - !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) - return; - - regval = rd32(E1000_TXSTMPL); - regval |= (u64)rd32(E1000_TXSTMPH) << 32; - - igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval); - skb_tstamp_tx(buffer_info->skb, &shhwtstamps); -} - -#endif /** * igb_clean_tx_irq - Reclaim resources after transmit completes * @q_vector: pointer to q_vector containing needed info @@ -5786,7 +5682,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) struct igb_adapter *adapter = q_vector->adapter; struct igb_ring *tx_ring = q_vector->tx.ring; struct igb_tx_buffer *tx_buffer; - union e1000_adv_tx_desc *tx_desc, *eop_desc; + union e1000_adv_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; unsigned int budget = q_vector->tx.work_limit; unsigned int i = tx_ring->next_to_clean; @@ -5798,16 +5694,16 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) tx_desc = IGB_TX_DESC(tx_ring, i); i -= tx_ring->count; - for (; budget; budget--) { - eop_desc = tx_buffer->next_to_watch; - - /* prevent any other reads prior to eop_desc */ - rmb(); + do { + union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; /* if next_to_watch is not set then there is no work pending */ if (!eop_desc) break; + /* prevent any other reads prior to eop_desc */ + rmb(); + /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) break; @@ -5819,25 +5715,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; -#ifdef CONFIG_IGB_PTP - /* retrieve hardware timestamp */ - igb_tx_hwtstamp(q_vector, tx_buffer); - -#endif /* free the skb */ dev_kfree_skb_any(tx_buffer->skb); - tx_buffer->skb = NULL; /* unmap skb header data */ dma_unmap_single(tx_ring->dev, - tx_buffer->dma, - tx_buffer->length, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + /* clear tx_buffer data */ + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); + /* 
clear last DMA location and unmap remaining buffers */ while (tx_desc != eop_desc) { - tx_buffer->dma = 0; - tx_buffer++; tx_desc++; i++; @@ -5848,17 +5740,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) } /* unmap any remaining paged data */ - if (tx_buffer->dma) { + if (dma_unmap_len(tx_buffer, len)) { dma_unmap_page(tx_ring->dev, - tx_buffer->dma, - tx_buffer->length, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); } } - /* clear last DMA location */ - tx_buffer->dma = 0; - /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; tx_desc++; @@ -5868,7 +5758,13 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) tx_buffer = tx_ring->tx_buffer_info; tx_desc = IGB_TX_DESC(tx_ring, 0); } - } + + /* issue prefetch for next Tx descriptor */ + prefetch(tx_desc); + + /* update budget accounting */ + budget--; + } while (likely(budget)); netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); @@ -5884,12 +5780,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { struct e1000_hw *hw = &adapter->hw; - eop_desc = tx_buffer->next_to_watch; - /* Detect a transmit hang in hardware, this serializes the * check with the clearing of time_stamp and movement of i */ clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); - if (eop_desc && + if (tx_buffer->next_to_watch && time_after(jiffies, tx_buffer->time_stamp + (adapter->tx_timeout_factor * HZ)) && !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { @@ -5913,9 +5807,9 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) tx_ring->next_to_use, tx_ring->next_to_clean, tx_buffer->time_stamp, - eop_desc, + tx_buffer->next_to_watch, jiffies, - eop_desc->wb.status); + tx_buffer->next_to_watch->wb.status); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); @@ -5995,47 +5889,6 @@ static inline void igb_rx_hash(struct igb_ring *ring, skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); } -#ifdef CONFIG_IGB_PTP -static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, - union e1000_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - struct igb_adapter *adapter = q_vector->adapter; - struct e1000_hw *hw = &adapter->hw; - u64 regval; - - if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP | - E1000_RXDADV_STAT_TS)) - return; - - /* - * If this bit is set, then the RX registers contain the time stamp. No - * other packet will be time stamped until we read these registers, so - * read the registers to make them available again. Because only one - * packet can be time stamped at a time, we know that the register - * values must belong to this one here and therefore we don't need to - * compare any of the additional attributes stored for it. - * - * If nothing went wrong, then it should have a shared tx_flags that we - * can turn into a skb_shared_hwtstamps. 
- */ - if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { - u32 *stamp = (u32 *)skb->data; - regval = le32_to_cpu(*(stamp + 2)); - regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32; - skb_pull(skb, IGB_TS_HDR_LEN); - } else { - if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) - return; - - regval = rd32(E1000_RXSTMPL); - regval |= (u64)rd32(E1000_RXSTMPH) << 32; - } - - igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); -} - -#endif static void igb_rx_vlan(struct igb_ring *ring, union e1000_adv_rx_desc *rx_desc, struct sk_buff *skb) @@ -6147,8 +6000,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget) } #ifdef CONFIG_IGB_PTP - igb_rx_hwtstamp(q_vector, rx_desc, skb); -#endif + igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb); +#endif /* CONFIG_IGB_PTP */ igb_rx_hash(rx_ring, rx_desc, skb); igb_rx_checksum(rx_ring, rx_desc, skb); igb_rx_vlan(rx_ring, rx_desc, skb); @@ -6342,181 +6195,6 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) } /** - * igb_hwtstamp_ioctl - control hardware time stamping - * @netdev: - * @ifreq: - * @cmd: - * - * Outgoing time stamping can be enabled and disabled. Play nice and - * disable it when requested, although it shouldn't case any overhead - * when no packet needs it. At most one packet in the queue may be - * marked for time stamping, otherwise it would be impossible to tell - * for sure to which packet the hardware time stamp belongs. - * - * Incoming time stamping has to be configured via the hardware - * filters. Not all combinations are supported, in particular event - * type has to be specified. Matching the kind of event packet is - * not supported, with the exception of "all V2 events regardless of - * level 2 or 4". - * - **/ -static int igb_hwtstamp_ioctl(struct net_device *netdev, - struct ifreq *ifr, int cmd) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct hwtstamp_config config; - u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; - u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; - u32 tsync_rx_cfg = 0; - bool is_l4 = false; - bool is_l2 = false; - u32 regval; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - /* reserved for future extensions */ - if (config.flags) - return -EINVAL; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - tsync_tx_ctl = 0; - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - tsync_rx_ctl = 0; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_ALL: - /* - * register TSYNCRXCFG must be set, therefore it is not - * possible to time stamp both Sync and Delay_Req messages - * => fall back to time stamping all packets - */ - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; - is_l4 = true; - break; - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; - is_l4 = true; - break; - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; - is_l2 = true; - is_l4 = true; - config.rx_filter = 
HWTSTAMP_FILTER_SOME; - break; - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; - tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; - is_l2 = true; - is_l4 = true; - config.rx_filter = HWTSTAMP_FILTER_SOME; - break; - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; - config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; - is_l2 = true; - is_l4 = true; - break; - default: - return -ERANGE; - } - - if (hw->mac.type == e1000_82575) { - if (tsync_rx_ctl | tsync_tx_ctl) - return -EINVAL; - return 0; - } - - /* - * Per-packet timestamping only works if all packets are - * timestamped, so enable timestamping in all packets as - * long as one rx filter was configured. - */ - if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { - tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; - tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; - } - - /* enable/disable TX */ - regval = rd32(E1000_TSYNCTXCTL); - regval &= ~E1000_TSYNCTXCTL_ENABLED; - regval |= tsync_tx_ctl; - wr32(E1000_TSYNCTXCTL, regval); - - /* enable/disable RX */ - regval = rd32(E1000_TSYNCRXCTL); - regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); - regval |= tsync_rx_ctl; - wr32(E1000_TSYNCRXCTL, regval); - - /* define which PTP packets are time stamped */ - wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); - - /* define ethertype filter for timestamped packets */ - if (is_l2) - wr32(E1000_ETQF(3), - (E1000_ETQF_FILTER_ENABLE | /* enable filter */ - E1000_ETQF_1588 | /* enable timestamping */ - ETH_P_1588)); /* 1588 eth protocol type */ - else - wr32(E1000_ETQF(3), 0); - -#define PTP_PORT 319 - /* L4 Queue Filter[3]: filter by destination port and protocol */ - if (is_l4) { - u32 ftqf = (IPPROTO_UDP /* UDP */ - | E1000_FTQF_VF_BP /* VF not compared */ - | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ - | E1000_FTQF_MASK); /* mask all inputs */ - ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ - - wr32(E1000_IMIR(3), htons(PTP_PORT)); - wr32(E1000_IMIREXT(3), - (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); - if (hw->mac.type == e1000_82576) { - /* enable source port check */ - wr32(E1000_SPQF(3), htons(PTP_PORT)); - ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; - } - wr32(E1000_FTQF(3), ftqf); - } else { - wr32(E1000_FTQF(3), E1000_FTQF_MASK); - } - wrfl(); - - adapter->hwtstamp_config = config; - - /* clear TX/RX time stamp registers, just to be sure */ - regval = rd32(E1000_TXSTMPH); - regval = rd32(E1000_RXSTMPH); - - return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 
- -EFAULT : 0; -} - -/** * igb_ioctl - * @netdev: * @ifreq: @@ -6529,8 +6207,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) case SIOCGMIIREG: case SIOCSMIIREG: return igb_mii_ioctl(netdev, ifr, cmd); +#ifdef CONFIG_IGB_PTP case SIOCSHWTSTAMP: - return igb_hwtstamp_ioctl(netdev, ifr, cmd); + return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd); +#endif /* CONFIG_IGB_PTP */ default: return -EOPNOTSUPP; } @@ -6676,6 +6356,10 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx) default: goto err_inval; } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + return 0; err_inval: diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index c846ea9131a3..ee21445157a3 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -69,22 +69,22 @@ * 2^40 * 10^-9 / 60 = 18.3 minutes. */ -#define IGB_OVERFLOW_PERIOD (HZ * 60 * 9) -#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) -#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1) -#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) -#define IGB_NBITS_82580 40 +#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) +#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT) +#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1) +#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) +#define IGB_NBITS_82580 40 /* * SYSTIM read access for the 82576 */ -static cycle_t igb_82576_systim_read(const struct cyclecounter *cc) +static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) { - u64 val; - u32 lo, hi; struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); struct e1000_hw *hw = &igb->hw; + u64 val; + u32 lo, hi; lo = rd32(E1000_SYSTIML); hi = rd32(E1000_SYSTIMH); @@ -99,12 +99,12 @@ static cycle_t igb_82576_systim_read(const struct cyclecounter *cc) * SYSTIM read access for the 82580 */ -static cycle_t igb_82580_systim_read(const struct cyclecounter *cc) +static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc) { - u64 val; - u32 lo, hi, jk; struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc); struct e1000_hw *hw = &igb->hw; + u64 val; + u32 lo, hi, jk; /* * The timestamp latches on lowest register read. For the 82580 @@ -122,16 +122,101 @@ static cycle_t igb_82580_systim_read(const struct cyclecounter *cc) } /* + * SYSTIM read access for I210/I211 + */ + +static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts) +{ + struct e1000_hw *hw = &adapter->hw; + u32 sec, nsec, jk; + + /* + * The timestamp latches on lowest register read. For I210/I211, the + * lowest register is SYSTIMR. Since we only need to provide nanosecond + * resolution, we can ignore it. + */ + jk = rd32(E1000_SYSTIMR); + nsec = rd32(E1000_SYSTIML); + sec = rd32(E1000_SYSTIMH); + + ts->tv_sec = sec; + ts->tv_nsec = nsec; +} + +static void igb_ptp_write_i210(struct igb_adapter *adapter, + const struct timespec *ts) +{ + struct e1000_hw *hw = &adapter->hw; + + /* + * Writing the SYSTIMR register is not necessary as it only provides + * sub-nanosecond resolution. + */ + wr32(E1000_SYSTIML, ts->tv_nsec); + wr32(E1000_SYSTIMH, ts->tv_sec); +} + +/** + * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp + * @adapter: board private structure + * @hwtstamps: timestamp structure to update + * @systim: unsigned 64bit system time value. 
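On i210/i211 the SYSTIM value handed to this helper already holds seconds in the upper 32 bits and nanoseconds in the lower 32, as the i210 case in the switch below notes, so no timecounter math is needed. A standalone sketch of that split, with a fabricated sample value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* fabricated i210-style SYSTIM sample: 32-bit sec | 32-bit ns */
    uint64_t systim = ((uint64_t)1345678901 << 32) | 123456789u;
    uint32_t sec  = systim >> 32;
    uint32_t nsec = systim & 0xFFFFFFFFu;

    printf("hwtstamp = %u.%09u\n", sec, nsec);
    return 0;
}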
+ * + * We need to convert the system time value stored in the RX/TXSTMP registers + * into a hwtstamp which can be used by the upper level timestamping functions. + * + * The 'tmreg_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two (or three) 32 bit registers. The first + * read latches the value. Ditto for writing. + * + * In addition, here have extended the system time with an overflow + * counter in software. + **/ +static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + unsigned long flags; + u64 ns; + + switch (adapter->hw.mac.type) { + case e1000_82576: + case e1000_82580: + case e1000_i350: + spin_lock_irqsave(&adapter->tmreg_lock, flags); + + ns = timecounter_cyc2time(&adapter->tc, systim); + + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); + break; + case e1000_i210: + case e1000_i211: + memset(hwtstamps, 0, sizeof(*hwtstamps)); + /* Upper 32 bits contain s, lower 32 bits contain ns. */ + hwtstamps->hwtstamp = ktime_set(systim >> 32, + systim & 0xFFFFFFFF); + break; + default: + break; + } +} + +/* * PTP clock operations */ -static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int igb_ptp_adjfreq_82576(struct ptp_clock_info *ptp, s32 ppb) { + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; u64 rate; u32 incvalue; - int neg_adj = 0; - struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); - struct e1000_hw *hw = &igb->hw; if (ppb < 0) { neg_adj = 1; @@ -153,13 +238,14 @@ static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb) return 0; } -static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int igb_ptp_adjfreq_82580(struct ptp_clock_info *ptp, s32 ppb) { + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + int neg_adj = 0; u64 rate; u32 inca; - int neg_adj = 0; - struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); - struct e1000_hw *hw = &igb->hw; if (ppb < 0) { neg_adj = 1; @@ -178,11 +264,12 @@ static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb) return 0; } -static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta) +static int igb_ptp_adjtime_82576(struct ptp_clock_info *ptp, s64 delta) { - s64 now; + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); unsigned long flags; - struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); + s64 now; spin_lock_irqsave(&igb->tmreg_lock, flags); @@ -195,12 +282,32 @@ static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } -static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) { + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + struct timespec now, then = ns_to_timespec(delta); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_read_i210(igb, &now); + now = timespec_add(now, then); + igb_ptp_write_i210(igb, (const struct timespec *)&now); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, + struct timespec *ts) +{ + struct igb_adapter *igb 
= container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; u64 ns; u32 remainder; - unsigned long flags; - struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); spin_lock_irqsave(&igb->tmreg_lock, flags); @@ -214,11 +321,29 @@ static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts) return 0; } -static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts) +static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, + struct timespec *ts) { - u64 ns; + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); unsigned long flags; - struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps); + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_read_i210(igb, ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; +} + +static int igb_ptp_settime_82576(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + u64 ns; ns = ts->tv_sec * 1000000000ULL; ns += ts->tv_nsec; @@ -232,77 +357,369 @@ static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts) return 0; } -static int ptp_82576_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) +static int igb_ptp_settime_i210(struct ptp_clock_info *ptp, + const struct timespec *ts) { - return -EOPNOTSUPP; + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + unsigned long flags; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + igb_ptp_write_i210(igb, ts); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + return 0; } -static int ptp_82580_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) +static int igb_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) { return -EOPNOTSUPP; } -static void igb_overflow_check(struct work_struct *work) +/** + * igb_ptp_tx_work + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. + */ +void igb_ptp_tx_work(struct work_struct *work) +{ + struct igb_adapter *adapter = container_of(work, struct igb_adapter, + ptp_tx_work); + struct e1000_hw *hw = &adapter->hw; + u32 tsynctxctl; + + if (!adapter->ptp_tx_skb) + return; + + tsynctxctl = rd32(E1000_TSYNCTXCTL); + if (tsynctxctl & E1000_TSYNCTXCTL_VALID) + igb_ptp_tx_hwtstamp(adapter); + else + /* reschedule to check later */ + schedule_work(&adapter->ptp_tx_work); +} + +static void igb_ptp_overflow_check(struct work_struct *work) { - struct timespec ts; struct igb_adapter *igb = - container_of(work, struct igb_adapter, overflow_work.work); + container_of(work, struct igb_adapter, ptp_overflow_work.work); + struct timespec ts; - igb_gettime(&igb->caps, &ts); + igb->ptp_caps.gettime(&igb->ptp_caps, &ts); pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); - schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD); + schedule_delayed_work(&igb->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); +} + +/** + * igb_ptp_tx_hwtstamp - utility function which checks for TX time stamp + * @adapter: Board private structure. + * + * If we were asked to do hardware stamping and such a time stamp is + * available, then it must have been for this skb here because we only + * allow only one such packet into the queue. 
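The single-outstanding-skb contract described here is what userspace triggers by enabling hardware TX timestamping on a socket; the stamp comes back via the socket error queue. A minimal sketch, with error handling trimmed and the flag set chosen for illustration:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/net_tstamp.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    int flags = SOF_TIMESTAMPING_TX_HARDWARE |
                SOF_TIMESTAMPING_RAW_HARDWARE;

    /* ask the NIC (via the driver paths above) to stamp transmitted
     * packets; stamps are read back with recvmsg(..., MSG_ERRQUEUE) */
    if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)))
        perror("SO_TIMESTAMPING");
    return 0;
}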
+ */ +void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; + u64 regval; + + regval = rd32(E1000_TXSTMPL); + regval |= (u64)rd32(E1000_TXSTMPH) << 32; + + igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); + dev_kfree_skb_any(adapter->ptp_tx_skb); + adapter->ptp_tx_skb = NULL; +} + +void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector, + union e1000_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; + u64 regval; + + if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP | + E1000_RXDADV_STAT_TS)) + return; + + /* + * If this bit is set, then the RX registers contain the time stamp. No + * other packet will be time stamped until we read these registers, so + * read the registers to make them available again. Because only one + * packet can be time stamped at a time, we know that the register + * values must belong to this one here and therefore we don't need to + * compare any of the additional attributes stored for it. + * + * If nothing went wrong, then it should have a shared tx_flags that we + * can turn into a skb_shared_hwtstamps. + */ + if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { + u32 *stamp = (u32 *)skb->data; + regval = le32_to_cpu(*(stamp + 2)); + regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32; + skb_pull(skb, IGB_TS_HDR_LEN); + } else { + if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + regval = rd32(E1000_RXSTMPL); + regval |= (u64)rd32(E1000_RXSTMPH) << 32; + } + + igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); +} + +/** + * igb_ptp_hwtstamp_ioctl - control hardware time stamping + * @netdev: + * @ifreq: + * @cmd: + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't case any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware + * filters. Not all combinations are supported, in particular event + * type has to be specified. Matching the kind of event packet is + * not supported, with the exception of "all V2 events regardless of + * level 2 or 4". 
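For reference, the userspace side of the handler documented above and implemented below: fill a struct hwtstamp_config and issue SIOCSHWTSTAMP on the interface; on return the driver may have widened rx_filter, as the fallback cases in this function do. The interface name "eth0" is a placeholder:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

int main(void)
{
    struct hwtstamp_config cfg = {
        .tx_type   = HWTSTAMP_TX_ON,
        .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
    };
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* placeholder name */
    ifr.ifr_data = (void *)&cfg;

    if (ioctl(fd, SIOCSHWTSTAMP, &ifr))
        perror("SIOCSHWTSTAMP");
    else
        printf("granted rx_filter = %d\n", cfg.rx_filter);
    return 0;
}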
+ * + **/ +int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct hwtstamp_config config; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 tsync_rx_cfg = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* reserved for future extensions */ + if (config.flags) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_ALL: + /* + * register TSYNCRXCFG must be set, therefore it is not + * possible to time stamp both Sync and Delay_Req messages + * => fall back to time stamping all packets + */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + is_l4 = true; + config.rx_filter = HWTSTAMP_FILTER_SOME; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + is_l4 = true; + config.rx_filter = HWTSTAMP_FILTER_SOME; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + default: + return -ERANGE; + } + + if (hw->mac.type == e1000_82575) { + if (tsync_rx_ctl | tsync_tx_ctl) + return -EINVAL; + return 0; + } + + /* + * Per-packet timestamping only works if all packets are + * timestamped, so enable timestamping in all packets as + * long as one rx filter was configured. 
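Further down, this handler programs an L4 queue filter that matches UDP destination port 319, the PTP event port; a userspace PTP stack exercises that filter simply by binding the same port. Minimal sketch:

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    struct sockaddr_in addr = {
        .sin_family      = AF_INET,
        .sin_port        = htons(319),   /* PTP event messages */
        .sin_addr.s_addr = htonl(INADDR_ANY),
    };

    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)))
        perror("bind");
    return 0;
}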
+ */ + if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { + tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + + if ((hw->mac.type == e1000_i210) || + (hw->mac.type == e1000_i211)) { + regval = rd32(E1000_RXPBS); + regval |= E1000_RXPBS_CFG_TS_EN; + wr32(E1000_RXPBS, regval); + } + } + + /* enable/disable TX */ + regval = rd32(E1000_TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + wr32(E1000_TSYNCTXCTL, regval); + + /* enable/disable RX */ + regval = rd32(E1000_TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + wr32(E1000_TSYNCRXCTL, regval); + + /* define which PTP packets are time stamped */ + wr32(E1000_TSYNCRXCFG, tsync_rx_cfg); + + /* define ethertype filter for timestamped packets */ + if (is_l2) + wr32(E1000_ETQF(3), + (E1000_ETQF_FILTER_ENABLE | /* enable filter */ + E1000_ETQF_1588 | /* enable timestamping */ + ETH_P_1588)); /* 1588 eth protocol type */ + else + wr32(E1000_ETQF(3), 0); + +#define PTP_PORT 319 + /* L4 Queue Filter[3]: filter by destination port and protocol */ + if (is_l4) { + u32 ftqf = (IPPROTO_UDP /* UDP */ + | E1000_FTQF_VF_BP /* VF not compared */ + | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */ + | E1000_FTQF_MASK); /* mask all inputs */ + ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */ + + wr32(E1000_IMIR(3), htons(PTP_PORT)); + wr32(E1000_IMIREXT(3), + (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP)); + if (hw->mac.type == e1000_82576) { + /* enable source port check */ + wr32(E1000_SPQF(3), htons(PTP_PORT)); + ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP; + } + wr32(E1000_FTQF(3), ftqf); + } else { + wr32(E1000_FTQF(3), E1000_FTQF_MASK); + } + wrfl(); + + /* clear TX/RX time stamp registers, just to be sure */ + regval = rd32(E1000_TXSTMPL); + regval = rd32(E1000_TXSTMPH); + regval = rd32(E1000_RXSTMPL); + regval = rd32(E1000_RXSTMPH); + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? + -EFAULT : 0; } void igb_ptp_init(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; switch (hw->mac.type) { - case e1000_i210: - case e1000_i211: - case e1000_i350: + case e1000_82576: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 1000000000; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettime = igb_ptp_gettime_82576; + adapter->ptp_caps.settime = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_enable; + adapter->cc.read = igb_ptp_read_82576; + adapter->cc.mask = CLOCKSOURCE_MASK(64); + adapter->cc.mult = 1; + adapter->cc.shift = IGB_82576_TSYNC_SHIFT; + /* Dial the nominal frequency. 
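The nominal TIMINCA increment written just below is also the quantity igb_ptp_adjfreq_82576 rescales: the applied correction is, in effect, incvalue scaled by ppb/1e9. A runnable check of that arithmetic; the shift of 19 for IGB_82576_TSYNC_SHIFT is an assumption taken from the driver headers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* INCVALUE_82576 is 16 << IGB_82576_TSYNC_SHIFT; 19 assumed here */
    const uint64_t incvalue = (uint64_t)16 << 19;
    int32_t ppb = -122;                          /* requested correction */
    uint64_t mag = ppb < 0 ? -(int64_t)ppb : ppb;
    uint64_t rate = incvalue * mag / 1000000000ULL;

    printf("TIMINCA increment: %llu -> %llu\n",
           (unsigned long long)incvalue,
           (unsigned long long)(ppb < 0 ? incvalue - rate
                                        : incvalue + rate));
    return 0;
}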
*/ + wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + break; case e1000_82580: - adapter->caps.owner = THIS_MODULE; - strcpy(adapter->caps.name, "igb-82580"); - adapter->caps.max_adj = 62499999; - adapter->caps.n_ext_ts = 0; - adapter->caps.pps = 0; - adapter->caps.adjfreq = ptp_82580_adjfreq; - adapter->caps.adjtime = igb_adjtime; - adapter->caps.gettime = igb_gettime; - adapter->caps.settime = igb_settime; - adapter->caps.enable = ptp_82580_enable; - adapter->cc.read = igb_82580_systim_read; - adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); - adapter->cc.mult = 1; - adapter->cc.shift = 0; + case e1000_i350: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; + adapter->ptp_caps.gettime = igb_ptp_gettime_82576; + adapter->ptp_caps.settime = igb_ptp_settime_82576; + adapter->ptp_caps.enable = igb_ptp_enable; + adapter->cc.read = igb_ptp_read_82580; + adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580); + adapter->cc.mult = 1; + adapter->cc.shift = 0; /* Enable the timer functions by clearing bit 31. */ wr32(E1000_TSAUXC, 0x0); break; - - case e1000_82576: - adapter->caps.owner = THIS_MODULE; - strcpy(adapter->caps.name, "igb-82576"); - adapter->caps.max_adj = 1000000000; - adapter->caps.n_ext_ts = 0; - adapter->caps.pps = 0; - adapter->caps.adjfreq = ptp_82576_adjfreq; - adapter->caps.adjtime = igb_adjtime; - adapter->caps.gettime = igb_gettime; - adapter->caps.settime = igb_settime; - adapter->caps.enable = ptp_82576_enable; - adapter->cc.read = igb_82576_systim_read; - adapter->cc.mask = CLOCKSOURCE_MASK(64); - adapter->cc.mult = 1; - adapter->cc.shift = IGB_82576_TSYNC_SHIFT; - /* Dial the nominal frequency. */ - wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + case e1000_i210: + case e1000_i211: + snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr); + adapter->ptp_caps.owner = THIS_MODULE; + adapter->ptp_caps.max_adj = 62499999; + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.pps = 0; + adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580; + adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; + adapter->ptp_caps.gettime = igb_ptp_gettime_i210; + adapter->ptp_caps.settime = igb_ptp_settime_i210; + adapter->ptp_caps.enable = igb_ptp_enable; + /* Enable the timer functions by clearing bit 31. */ + wr32(E1000_TSAUXC, 0x0); break; - default: adapter->ptp_clock = NULL; return; @@ -310,86 +727,114 @@ void igb_ptp_init(struct igb_adapter *adapter) wrfl(); - timecounter_init(&adapter->tc, &adapter->cc, - ktime_to_ns(ktime_get_real())); + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + /* Initialize the clock and overflow work for devices that need it. 
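For the 82576/82580/i350 branches, the timecounter initialized just below extends the wrapping hardware counter in software, which is why the overflow work must run well inside the wrap period. A minimal model of that extension; masking makes the delta wrap-safe, and mult/shift are omitted since the branches above program cc.mult = 1:

#include <stdio.h>
#include <stdint.h>

struct toy_tc {
    uint64_t cycle_last;  /* counter value at the previous read */
    uint64_t nsec;        /* software-extended nanosecond count */
    uint64_t mask;        /* counter width, 40 bits on 82580 */
};

static uint64_t toy_tc_read(struct toy_tc *tc, uint64_t now)
{
    uint64_t delta = (now - tc->cycle_last) & tc->mask;  /* wrap-safe */

    tc->nsec += delta;    /* mult = 1: one cycle counts as one ns */
    tc->cycle_last = now;
    return tc->nsec;
}

int main(void)
{
    struct toy_tc tc = {
        .cycle_last = (1ULL << 40) - 100,
        .mask       = (1ULL << 40) - 1,
    };

    /* counter wrapped from 2^40 - 100 to 50: delta is still 150 */
    printf("extended time: %llu ns\n",
           (unsigned long long)toy_tc_read(&tc, 50));
    return 0;
}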
*/ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { + struct timespec ts = ktime_to_timespec(ktime_get_real()); - INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check); + igb_ptp_settime_i210(&adapter->ptp_caps, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); - spin_lock_init(&adapter->tmreg_lock); + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); - schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD); + schedule_delayed_work(&adapter->ptp_overflow_work, + IGB_SYSTIM_OVERFLOW_PERIOD); + } + + /* Initialize the time sync interrupts for devices that support it. */ + if (hw->mac.type >= e1000_82580) { + wr32(E1000_TSIM, E1000_TSIM_TXTS); + wr32(E1000_IMS, E1000_IMS_TS); + } - adapter->ptp_clock = ptp_clock_register(&adapter->caps); + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { adapter->ptp_clock = NULL; dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n"); - } else + } else { dev_info(&adapter->pdev->dev, "added PHC on %s\n", adapter->netdev->name); + adapter->flags |= IGB_FLAG_PTP; + } } -void igb_ptp_remove(struct igb_adapter *adapter) +/** + * igb_ptp_stop - Disable PTP device and stop the overflow check. + * @adapter: Board private structure. + * + * This function stops the PTP support and cancels the delayed work. + **/ +void igb_ptp_stop(struct igb_adapter *adapter) { switch (adapter->hw.mac.type) { - case e1000_i211: - case e1000_i210: - case e1000_i350: - case e1000_82580: case e1000_82576: - cancel_delayed_work_sync(&adapter->overflow_work); + case e1000_82580: + case e1000_i350: + cancel_delayed_work_sync(&adapter->ptp_overflow_work); + break; + case e1000_i210: + case e1000_i211: + /* No delayed work to cancel. */ break; default: return; } + cancel_work_sync(&adapter->ptp_tx_work); + if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); dev_info(&adapter->pdev->dev, "removed PHC on %s\n", adapter->netdev->name); + adapter->flags &= ~IGB_FLAG_PTP; } } /** - * igb_systim_to_hwtstamp - convert system time value to hw timestamp - * @adapter: board private structure - * @hwtstamps: timestamp structure to update - * @systim: unsigned 64bit system time value. - * - * We need to convert the system time value stored in the RX/TXSTMP registers - * into a hwtstamp which can be used by the upper level timestamping functions. + * igb_ptp_reset - Re-enable the adapter for PTP following a reset. + * @adapter: Board private structure. * - * The 'tmreg_lock' spinlock is used to protect the consistency of the - * system time value. This is needed because reading the 64 bit time - * value involves reading two (or three) 32 bit registers. The first - * read latches the value. Ditto for writing. - * - * In addition, here have extended the system time with an overflow - * counter in software. + * This function handles the reset work required to re-enable the PTP device. **/ -void igb_systim_to_hwtstamp(struct igb_adapter *adapter, - struct skb_shared_hwtstamps *hwtstamps, - u64 systim) +void igb_ptp_reset(struct igb_adapter *adapter) { - u64 ns; - unsigned long flags; + struct e1000_hw *hw = &adapter->hw; + + if (!(adapter->flags & IGB_FLAG_PTP)) + return; switch (adapter->hw.mac.type) { + case e1000_82576: + /* Dial the nominal frequency. 
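Once ptp_clock_register() succeeds here, the clock is visible to userspace as a posix dynamic clock; /dev/ptp0 below is a typical but not guaranteed node name, and the fd-to-clockid encoding follows the kernel's testptp example:

#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
    int fd = open("/dev/ptp0", O_RDONLY);   /* typical PHC node */
    struct timespec ts;

    if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts)) {
        perror("phc");
        return 1;
    }
    printf("PHC time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
    close(fd);
    return 0;
}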
*/ + wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576); + break; + case e1000_82580: + case e1000_i350: case e1000_i210: case e1000_i211: - case e1000_i350: - case e1000_82580: - case e1000_82576: + /* Enable the timer functions and interrupts. */ + wr32(E1000_TSAUXC, 0x0); + wr32(E1000_TSIM, E1000_TSIM_TXTS); + wr32(E1000_IMS, E1000_IMS_TS); break; default: + /* No work to do. */ return; } - spin_lock_irqsave(&adapter->tmreg_lock, flags); + /* Re-initialize the timer. */ + if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) { + struct timespec ts = ktime_to_timespec(ktime_get_real()); - ns = timecounter_cyc2time(&adapter->tc, systim); - - spin_unlock_irqrestore(&adapter->tmreg_lock, flags); - - memset(hwtstamps, 0, sizeof(*hwtstamps)); - hwtstamps->hwtstamp = ns_to_ktime(ns); + igb_ptp_settime_i210(&adapter->ptp_caps, &ts); + } else { + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + } } diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 5fd5d04c26c9..89f40e51fc13 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -32,7 +32,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o -ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ +ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index b9623e9ea895..5bd26763554c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -78,6 +78,9 @@ /* Supported Rx Buffer Sizes */ #define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +#define IXGBE_RXBUFFER_2K 2048 +#define IXGBE_RXBUFFER_3K 3072 +#define IXGBE_RXBUFFER_4K 4096 #define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ /* @@ -104,6 +107,7 @@ #define IXGBE_TX_FLAGS_FSO (u32)(1 << 6) #define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7) #define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8) +#define IXGBE_TX_FLAGS_NO_IFCS (u32)(1 << 9) #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 @@ -293,16 +297,25 @@ struct ixgbe_ring_feature { * this is twice the size of a half page we need to double the page order * for FCoE enabled Rx queues. */ -#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192) -static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) +static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) { - return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0; +#ifdef IXGBE_FCOE + if (test_bit(__IXGBE_RX_FCOE, &ring->state)) + return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K : + IXGBE_RXBUFFER_3K; +#endif + return IXGBE_RXBUFFER_2K; } -#else -#define ixgbe_rx_pg_order(_ring) 0 + +static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) +{ +#ifdef IXGBE_FCOE + if (test_bit(__IXGBE_RX_FCOE, &ring->state)) + return (PAGE_SIZE < 8192) ? 
1 : 0; #endif + return 0; +} #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) -#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring)) struct ixgbe_ring_container { struct ixgbe_ring *ring; /* pointer to linked list of rings */ @@ -584,6 +597,9 @@ struct ixgbe_adapter { #ifdef CONFIG_IXGBE_HWMON struct hwmon_buff ixgbe_hwmon_buff; #endif /* CONFIG_IXGBE_HWMON */ +#ifdef CONFIG_DEBUG_FS + struct dentry *ixgbe_dbg_adapter; +#endif /*CONFIG_DEBUG_FS*/ }; struct ixgbe_fdir_filter { @@ -712,7 +728,12 @@ extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, struct netdev_fcoe_hbainfo *info); extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); #endif /* IXGBE_FCOE */ - +#ifdef CONFIG_DEBUG_FS +extern void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter); +extern void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); +extern void ixgbe_dbg_init(void); +extern void ixgbe_dbg_exit(void); +#endif /* CONFIG_DEBUG_FS */ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) { return netdev_get_tx_queue(ring->netdev, ring->queue_index); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 50fc137501da..18bf08c9d7a4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { /* Set KX4/KX/KR support according to speed requested */ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); - if (speed & IXGBE_LINK_SPEED_10GB_FULL) + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) autoc |= IXGBE_AUTOC_KX4_SUPP; if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && (hw->phy.smart_speed_active == false)) autoc |= IXGBE_AUTOC_KR_SUPP; + } if (speed & IXGBE_LINK_SPEED_1GB_FULL) autoc |= IXGBE_AUTOC_KX_SUPP; } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c new file mode 100644 index 000000000000..8d3a21889099 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c @@ -0,0 +1,300 @@ +/******************************************************************************* + + Intel 10 Gigabit PCI Express Linux driver + Copyright(c) 1999 - 2012 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifdef CONFIG_DEBUG_FS + +#include <linux/debugfs.h> +#include <linux/module.h> + +#include "ixgbe.h" + +static struct dentry *ixgbe_dbg_root; + +static char ixgbe_dbg_reg_ops_buf[256] = ""; + +/** + * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened + * @inode: inode that was opened + * @filp: file info + * + * Stash the adapter pointer hiding in the inode into the file pointer where + * we can find it later in the read and write calls + **/ +static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp) +{ + filp->private_data = inode->i_private; + return 0; +} + +/** + * ixgbe_dbg_reg_ops_read - read for reg_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + char buf[256]; + int bytes_not_copied; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + len = snprintf(buf, sizeof(buf), "%s: %s\n", + adapter->netdev->name, ixgbe_dbg_reg_ops_buf); + if (count < len) + return -ENOSPC; + bytes_not_copied = copy_to_user(buffer, buf, len); + if (bytes_not_copied < 0) + return bytes_not_copied; + + *ppos = len; + return len; +} + +/** + * ixgbe_dbg_reg_ops_write - write into reg_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + int bytes_not_copied; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ixgbe_dbg_reg_ops_buf)) + return -ENOSPC; + + bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count); + if (bytes_not_copied < 0) + return bytes_not_copied; + else if (bytes_not_copied < count) + count -= bytes_not_copied; + else + return -ENOSPC; + ixgbe_dbg_reg_ops_buf[count] = '\0'; + + if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ixgbe_dbg_reg_ops_buf[5], "%x %x", ®, &value); + if (cnt == 2) { + IXGBE_WRITE_REG(&adapter->hw, reg, value); + value = IXGBE_READ_REG(&adapter->hw, reg); + e_dev_info("write: 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("write <reg> <value>\n"); + } + } else if (strncmp(ixgbe_dbg_reg_ops_buf, "read", 4) == 0) { + u32 reg, value; + int cnt; + cnt = sscanf(&ixgbe_dbg_reg_ops_buf[4], "%x", ®); + if (cnt == 1) { + value = IXGBE_READ_REG(&adapter->hw, reg); + e_dev_info("read 0x%08x = 0x%08x\n", reg, value); + } else { + e_dev_info("read <reg>\n"); + } + } else { + e_dev_info("Unknown command %s\n", ixgbe_dbg_reg_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" read <reg>\n"); + e_dev_info(" write <reg> <value>\n"); + } + return count; +} + +static const struct file_operations ixgbe_dbg_reg_ops_fops = { + .owner = THIS_MODULE, + .open = ixgbe_dbg_reg_ops_open, + .read = ixgbe_dbg_reg_ops_read, + .write = ixgbe_dbg_reg_ops_write, +}; + +static char ixgbe_dbg_netdev_ops_buf[256] = ""; + +/** + * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item + * @inode: inode that was 
opened + * @filp: file info + * + * Stash the adapter pointer hiding in the inode into the file pointer + * where we can find it later in the read and write calls + **/ +static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp) +{ + filp->private_data = inode->i_private; + return 0; +} + +/** + * ixgbe_dbg_netdev_ops_read - read for netdev_ops datum + * @filp: the opened file + * @buffer: where to write the data for the user to read + * @count: the size of the user's buffer + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp, + char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + char buf[256]; + int bytes_not_copied; + int len; + + /* don't allow partial reads */ + if (*ppos != 0) + return 0; + + len = snprintf(buf, sizeof(buf), "%s: %s\n", + adapter->netdev->name, ixgbe_dbg_netdev_ops_buf); + if (count < len) + return -ENOSPC; + bytes_not_copied = copy_to_user(buffer, buf, len); + if (bytes_not_copied < 0) + return bytes_not_copied; + + *ppos = len; + return len; +} + +/** + * ixgbe_dbg_netdev_ops_write - write into netdev_ops datum + * @filp: the opened file + * @buffer: where to find the user's data + * @count: the length of the user's data + * @ppos: file position offset + **/ +static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct ixgbe_adapter *adapter = filp->private_data; + int bytes_not_copied; + + /* don't allow partial writes */ + if (*ppos != 0) + return 0; + if (count >= sizeof(ixgbe_dbg_netdev_ops_buf)) + return -ENOSPC; + + bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf, + buffer, count); + if (bytes_not_copied < 0) + return bytes_not_copied; + else if (bytes_not_copied < count) + count -= bytes_not_copied; + else + return -ENOSPC; + ixgbe_dbg_netdev_ops_buf[count] = '\0'; + + if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf); + e_dev_info("Available commands:\n"); + e_dev_info(" tx_timeout\n"); + } + return count; +} + +static const struct file_operations ixgbe_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = ixgbe_dbg_netdev_ops_open, + .read = ixgbe_dbg_netdev_ops_read, + .write = ixgbe_dbg_netdev_ops_write, +}; + +/** + * ixgbe_dbg_adapter_init - setup the debugfs directory for the adapter + * @adapter: the adapter that is starting up + **/ +void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) +{ + const char *name = pci_name(adapter->pdev); + struct dentry *pfile; + adapter->ixgbe_dbg_adapter = debugfs_create_dir(name, ixgbe_dbg_root); + if (adapter->ixgbe_dbg_adapter) { + pfile = debugfs_create_file("reg_ops", 0600, + adapter->ixgbe_dbg_adapter, adapter, + &ixgbe_dbg_reg_ops_fops); + if (!pfile) + e_dev_err("debugfs reg_ops for %s failed\n", name); + pfile = debugfs_create_file("netdev_ops", 0600, + adapter->ixgbe_dbg_adapter, adapter, + &ixgbe_dbg_netdev_ops_fops); + if (!pfile) + e_dev_err("debugfs netdev_ops for %s failed\n", name); + } else { + e_dev_err("debugfs entry for %s failed\n", name); + } +} + +/** + * ixgbe_dbg_adapter_exit - clear out the adapter's debugfs entries + * @pf: the pf that is stopping + **/ +void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) +{ + if (adapter->ixgbe_dbg_adapter) + 
debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); + adapter->ixgbe_dbg_adapter = NULL; +} + +/** + * ixgbe_dbg_init - start up debugfs for the driver + **/ +void ixgbe_dbg_init(void) +{ + ixgbe_dbg_root = debugfs_create_dir(ixgbe_driver_name, NULL); + if (ixgbe_dbg_root == NULL) + pr_err("init of debugfs failed\n"); +} + +/** + * ixgbe_dbg_exit - clean out the driver's debugfs entries + **/ +void ixgbe_dbg_exit(void) +{ + debugfs_remove_recursive(ixgbe_dbg_root); +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 4326f74f7137..29465be2a14a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1167,7 +1167,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, } bi->dma = dma; - bi->page_offset ^= ixgbe_rx_bufsz(rx_ring); + bi->page_offset = 0; return true; } @@ -1320,29 +1320,6 @@ static unsigned int ixgbe_get_headlen(unsigned char *data, return max_len; } -static void ixgbe_get_rsc_cnt(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - __le32 rsc_enabled; - u32 rsc_cnt; - - if (!ring_is_rsc_enabled(rx_ring)) - return; - - rsc_enabled = rx_desc->wb.lower.lo_dword.data & - cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); - - /* If this is an RSC frame rsc_cnt should be non-zero */ - if (!rsc_enabled) - return; - - rsc_cnt = le32_to_cpu(rsc_enabled); - rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; - - IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; -} - static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, struct sk_buff *skb) { @@ -1440,16 +1417,28 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, prefetch(IXGBE_RX_DESC(rx_ring, ntc)); - if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) - return false; + /* update RSC append count if present */ + if (ring_is_rsc_enabled(rx_ring)) { + __le32 rsc_enabled = rx_desc->wb.lower.lo_dword.data & + cpu_to_le32(IXGBE_RXDADV_RSCCNT_MASK); + + if (unlikely(rsc_enabled)) { + u32 rsc_cnt = le32_to_cpu(rsc_enabled); + + rsc_cnt >>= IXGBE_RXDADV_RSCCNT_SHIFT; + IXGBE_CB(skb)->append_cnt += rsc_cnt - 1; - /* append_cnt indicates packet is RSC, if so fetch nextp */ - if (IXGBE_CB(skb)->append_cnt) { - ntc = le32_to_cpu(rx_desc->wb.upper.status_error); - ntc &= IXGBE_RXDADV_NEXTP_MASK; - ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; + /* update ntc based on RSC value */ + ntc = le32_to_cpu(rx_desc->wb.upper.status_error); + ntc &= IXGBE_RXDADV_NEXTP_MASK; + ntc >>= IXGBE_RXDADV_NEXTP_SHIFT; + } } + /* if we are the last buffer then there is nothing else to do */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + return false; + /* place skb in next buffer to be received */ rx_ring->rx_buffer_info[ntc].skb = skb; rx_ring->rx_stats.non_eop_descs++; @@ -1458,6 +1447,78 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, } /** + * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbe specific version of __pskb_pull_tail. The + * main difference between this version and the original function is that + * this function can make several assumptions about the state of things + * that allow for significant optimizations versus the standard function. + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. 
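The helper documented above (body below) copies ALIGN(pull_len, sizeof(long)) bytes even when fewer were requested: over-copying within the frag is harmless, since the frag still owns those bytes, and it keeps the memcpy long-aligned. A standalone illustration with made-up lengths:

#include <stdio.h>
#include <string.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    char frag[64]   = "ethernet header plus payload bytes...";
    char linear[64] = { 0 };
    size_t pull_len = 14;   /* e.g. an Ethernet header (made up) */
    size_t copied   = ALIGN_UP(pull_len, sizeof(long));

    memcpy(linear, frag, copied);  /* aligned copy, tail stays in frag */
    printf("requested %zu, copied %zu\n", pull_len, copied);
    return 0;
}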
+ */ +static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; + unsigned int pull_len; + + /* + * it is valid to use page_address instead of kmap since we are + * working with pages allocated out of the lomem pool per + * alloc_page(GFP_ATOMIC) + */ + va = skb_frag_address(frag); + + /* + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. + */ + pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); + + /* update all of the pointers */ + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; +} + +/** + * ixgbe_dma_sync_frag - perform DMA sync for first frag of SKB + * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being updated + * + * This function provides a basic DMA sync up for the first fragment of an + * skb. The reason for doing this is that the first fragment cannot be + * unmapped until we have reached the end of packet descriptor for a buffer + * chain. + */ +static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) +{ + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(IXGBE_CB(skb)->page_released)) { + dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, + ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); + IXGBE_CB(skb)->page_released = false; + } else { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + frag->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + } + IXGBE_CB(skb)->dma = 0; +} + +/** * ixgbe_cleanup_headers - Correct corrupted or empty headers * @rx_ring: rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor @@ -1479,24 +1540,7 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) { - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; struct net_device *netdev = rx_ring->netdev; - unsigned char *va; - unsigned int pull_len; - - /* if the page was released unmap it, else just sync our portion */ - if (unlikely(IXGBE_CB(skb)->page_released)) { - dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, - ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); - IXGBE_CB(skb)->page_released = false; - } else { - dma_sync_single_range_for_cpu(rx_ring->dev, - IXGBE_CB(skb)->dma, - frag->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - } - IXGBE_CB(skb)->dma = 0; /* verify that the packet does not have any known errors */ if (unlikely(ixgbe_test_staterr(rx_desc, @@ -1506,40 +1550,9 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, return true; } - /* - * it is valid to use page_address instead of kmap since we are - * working with pages allocated out of the lomem pool per - * alloc_page(GFP_ATOMIC) - */ - va = skb_frag_address(frag); - - /* - * we need the header to contain the greater of either ETH_HLEN or - * 60 bytes if the skb->len is less than 60 for skb_pad. 
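The 60 bytes mentioned above is the minimum Ethernet frame length without FCS (ETH_ZLEN); runt frames must be padded before transmit, which skb_pad handles in the kernel. A toy version of the rule:

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60     /* minimum frame length without FCS */

int main(void)
{
    unsigned char frame[ETH_ZLEN];
    size_t len = 20;    /* made-up runt frame length */

    memset(frame, 0xab, len);
    if (len < ETH_ZLEN) {
        memset(frame + len, 0, ETH_ZLEN - len);  /* zero padding */
        len = ETH_ZLEN;
    }
    printf("frame padded to %zu bytes\n", len);
    return 0;
}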
- */ - pull_len = skb_frag_size(frag); - if (pull_len > IXGBE_RX_HDR_SIZE) - pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); - - /* update all of the pointers */ - skb_frag_size_sub(frag, pull_len); - frag->page_offset += pull_len; - skb->data_len -= pull_len; - skb->tail += pull_len; - - /* - * if we sucked the frag empty then we should free it, - * if there are other frags here something is screwed up in hardware - */ - if (skb_frag_size(frag) == 0) { - BUG_ON(skb_shinfo(skb)->nr_frags != 1); - skb_shinfo(skb)->nr_frags = 0; - __skb_frag_unref(frag); - skb->truesize -= ixgbe_rx_bufsz(rx_ring); - } + /* place header in linear portion of buffer */ + if (skb_is_nonlinear(skb)) + ixgbe_pull_tail(rx_ring, skb); #ifdef IXGBE_FCOE /* do not attempt to pad FCoE Frames as this will disrupt DDP */ @@ -1560,33 +1573,17 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, } /** - * ixgbe_can_reuse_page - determine if we can reuse a page - * @rx_buffer: pointer to rx_buffer containing the page we want to reuse - * - * Returns true if page can be reused in another Rx buffer - **/ -static inline bool ixgbe_can_reuse_page(struct ixgbe_rx_buffer *rx_buffer) -{ - struct page *page = rx_buffer->page; - - /* if we are only owner of page and it is local we can reuse it */ - return likely(page_count(page) == 1) && - likely(page_to_nid(page) == numa_node_id()); -} - -/** * ixgbe_reuse_rx_page - page flip buffer and store it back on the ring * @rx_ring: rx descriptor ring to store buffers on * @old_buff: donor buffer to have page reused * - * Syncronizes page for reuse by the adapter + * Synchronizes page for reuse by the adapter **/ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *old_buff) { struct ixgbe_rx_buffer *new_buff; u16 nta = rx_ring->next_to_alloc; - u16 bufsz = ixgbe_rx_bufsz(rx_ring); new_buff = &rx_ring->rx_buffer_info[nta]; @@ -1597,17 +1594,13 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, /* transfer page from old buffer to new buffer */ new_buff->page = old_buff->page; new_buff->dma = old_buff->dma; - - /* flip page offset to other buffer and store to new_buff */ - new_buff->page_offset = old_buff->page_offset ^ bufsz; + new_buff->page_offset = old_buff->page_offset; /* sync the buffer for use by the device */ dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, - new_buff->page_offset, bufsz, + new_buff->page_offset, + ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); - - /* bump ref count on page before it is given to the stack */ - get_page(new_buff->page); } /** @@ -1617,20 +1610,159 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, * @rx_desc: descriptor containing length of buffer written by hardware * @skb: sk_buff to place the data into * - * This function is based on skb_add_rx_frag. I would have used that - * function however it doesn't handle the truesize case correctly since we - * are allocating more memory than might be used for a single receive. + * This function will add the data contained in rx_buffer->page to the skb. + * This is done either through a direct copy if the data in the buffer is + * less than the skb header size, otherwise it will just attach the page as + * a frag to the skb. + * + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. 
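When PAGE_SIZE < 8192, the reuse policy described above splits each page into two halves and alternates between them by XOR-ing the offset with truesize, as the function body below shows, so each page serves two receives before it is recycled. A minimal demonstration of the flip:

#include <stdio.h>

int main(void)
{
    const unsigned int truesize = 2048;   /* IXGBE_RXBUFFER_2K */
    unsigned int page_offset = 0;

    for (int rx = 0; rx < 4; rx++) {
        printf("receive %d fills page offset %u\n", rx, page_offset);
        page_offset ^= truesize;          /* flip to the other half */
    }
    return 0;
}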
**/ -static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, +static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, struct ixgbe_rx_buffer *rx_buffer, - struct sk_buff *skb, int size) + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_buffer->page, rx_buffer->page_offset, - size); - skb->len += size; - skb->data_len += size; - skb->truesize += ixgbe_rx_bufsz(rx_ring); + struct page *page = rx_buffer->page; + unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); +#if (PAGE_SIZE < 8192) + unsigned int truesize = ixgbe_rx_bufsz(rx_ring); +#else + unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); + unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - + ixgbe_rx_bufsz(rx_ring); +#endif + + if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { + unsigned char *va = page_address(page) + rx_buffer->page_offset; + + memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); + + /* we can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(page) == numa_node_id())) + return true; + + /* this page cannot be reused so discard it */ + put_page(page); + return false; + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + rx_buffer->page_offset, size, truesize); + + /* avoid re-using remote pages */ + if (unlikely(page_to_nid(page) != numa_node_id())) + return false; + +#if (PAGE_SIZE < 8192) + /* if we are only owner of page we can reuse it */ + if (unlikely(page_count(page) != 1)) + return false; + + /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; + + /* + * since we are the only owner of the page and we need to + * increment it, just set the value to 2 in order to avoid + * an unecessary locked operation + */ + atomic_set(&page->_count, 2); +#else + /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; + + if (rx_buffer->page_offset > last_offset) + return false; + + /* bump ref count on page before it is given to the stack */ + get_page(page); +#endif + + return true; +} + +static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc) +{ + struct ixgbe_rx_buffer *rx_buffer; + struct sk_buff *skb; + struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; + page = rx_buffer->page; + prefetchw(page); + + skb = rx_buffer->skb; + + if (likely(!skb)) { + void *page_addr = page_address(page) + + rx_buffer->page_offset; + + /* prefetch first cache line of first page */ + prefetch(page_addr); +#if L1_CACHE_BYTES < 128 + prefetch(page_addr + L1_CACHE_BYTES); +#endif + + /* allocate a skb to store the frags */ + skb = netdev_alloc_skb_ip_align(rx_ring->netdev, + IXGBE_RX_HDR_SIZE); + if (unlikely(!skb)) { + rx_ring->rx_stats.alloc_rx_buff_failed++; + return NULL; + } + + /* + * we will be copying header into skb->data in + * pskb_may_pull so it is in our interest to prefetch + * it now to avoid a possible cache miss + */ + prefetchw(skb->data); + + /* + * Delay unmapping of the first packet. It carries the + * header information, HW may still access the header + * after the writeback. 
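The atomic_set(&page->_count, 2) above is safe because the preceding page_count check proved this CPU is the sole owner of the page, so a plain store replaces a locked increment on the hot path. A C11 rendering of the same idea:

#include <stdio.h>
#include <stdatomic.h>

int main(void)
{
    atomic_int refcount = 1;   /* sole owner, as verified above */

    /* no other thread can touch the count right now, so a relaxed
     * store suffices; this is the locked RMW the driver avoids */
    atomic_store_explicit(&refcount, 2, memory_order_relaxed);
    printf("refcount = %d\n", atomic_load(&refcount));
    return 0;
}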
Only unmap it when EOP is + * reached + */ + if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) + goto dma_sync; + + IXGBE_CB(skb)->dma = rx_buffer->dma; + } else { + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) + ixgbe_dma_sync_frag(rx_ring, skb); + +dma_sync: + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_buffer->dma, + rx_buffer->page_offset, + ixgbe_rx_bufsz(rx_ring), + DMA_FROM_DEVICE); + } + + /* pull page into skb */ + if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { + /* hand second half of page back to the ring */ + ixgbe_reuse_rx_page(rx_ring, rx_buffer); + } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { + /* the page has been released from the ring */ + IXGBE_CB(skb)->page_released = true; + } else { + /* we are not reusing the buffer so unmap it */ + dma_unmap_page(rx_ring->dev, rx_buffer->dma, + ixgbe_rx_pg_size(rx_ring), + DMA_FROM_DEVICE); + } + + /* clear contents of buffer_info */ + rx_buffer->skb = NULL; + rx_buffer->dma = 0; + rx_buffer->page = NULL; + + return skb; } /** @@ -1653,16 +1785,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, unsigned int total_rx_bytes = 0, total_rx_packets = 0; #ifdef IXGBE_FCOE struct ixgbe_adapter *adapter = q_vector->adapter; - int ddp_bytes = 0; + int ddp_bytes; + unsigned int mss = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); do { - struct ixgbe_rx_buffer *rx_buffer; union ixgbe_adv_rx_desc *rx_desc; struct sk_buff *skb; - struct page *page; - u16 ntc; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { @@ -1670,9 +1800,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, cleaned_count = 0; } - ntc = rx_ring->next_to_clean; - rx_desc = IXGBE_RX_DESC(rx_ring, ntc); - rx_buffer = &rx_ring->rx_buffer_info[ntc]; + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) break; @@ -1684,75 +1812,12 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ rmb(); - page = rx_buffer->page; - prefetchw(page); - - skb = rx_buffer->skb; - - if (likely(!skb)) { - void *page_addr = page_address(page) + - rx_buffer->page_offset; - - /* prefetch first cache line of first page */ - prefetch(page_addr); -#if L1_CACHE_BYTES < 128 - prefetch(page_addr + L1_CACHE_BYTES); -#endif + /* retrieve a buffer from the ring */ + skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); - /* allocate a skb to store the frags */ - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - IXGBE_RX_HDR_SIZE); - if (unlikely(!skb)) { - rx_ring->rx_stats.alloc_rx_buff_failed++; - break; - } - - /* - * we will be copying header into skb->data in - * pskb_may_pull so it is in our interest to prefetch - * it now to avoid a possible cache miss - */ - prefetchw(skb->data); - - /* - * Delay unmapping of the first packet. It carries the - * header information, HW may still access the header - * after the writeback. 
Only unmap it when EOP is - * reached - */ - IXGBE_CB(skb)->dma = rx_buffer->dma; - } else { - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - ixgbe_rx_bufsz(rx_ring), - DMA_FROM_DEVICE); - } - - /* pull page into skb */ - ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, - le16_to_cpu(rx_desc->wb.upper.length)); - - if (ixgbe_can_reuse_page(rx_buffer)) { - /* hand second half of page back to the ring */ - ixgbe_reuse_rx_page(rx_ring, rx_buffer); - } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { - /* the page has been released from the ring */ - IXGBE_CB(skb)->page_released = true; - } else { - /* we are not reusing the buffer so unmap it */ - dma_unmap_page(rx_ring->dev, rx_buffer->dma, - ixgbe_rx_pg_size(rx_ring), - DMA_FROM_DEVICE); - } - - /* clear contents of buffer_info */ - rx_buffer->skb = NULL; - rx_buffer->dma = 0; - rx_buffer->page = NULL; - - ixgbe_get_rsc_cnt(rx_ring, rx_desc, skb); + /* exit if we failed to retrieve a buffer */ + if (!skb) + break; cleaned_count++; @@ -1775,6 +1840,20 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, /* if ddp, not passing to ULD unless for FCP_RSP or error */ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + if (!mss) { + mss = rx_ring->netdev->mtu - + sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); + if (mss > 512) + mss &= ~511; + } + total_rx_bytes += ddp_bytes; + total_rx_packets += DIV_ROUND_UP(ddp_bytes, + mss); + } if (!ddp_bytes) { dev_kfree_skb_any(skb); continue; @@ -1788,21 +1867,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, budget--; } while (likely(budget)); -#ifdef IXGBE_FCOE - /* include DDPed FCoE data */ - if (ddp_bytes > 0) { - unsigned int mss; - - mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) - - sizeof(struct fc_frame_header) - - sizeof(struct fcoe_crc_eof); - if (mss > 512) - mss &= ~511; - total_rx_bytes += ddp_bytes; - total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss); - } - -#endif /* IXGBE_FCOE */ u64_stats_update_begin(&rx_ring->syncp); rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; @@ -2868,11 +2932,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; /* configure the packet buffer length */ -#if PAGE_SIZE > IXGBE_MAX_RXBUFFER - srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; -#else srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; -#endif /* configure descriptor type */ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; @@ -2980,13 +3040,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, * total size of max desc * buf_len is not greater * than 65536 */ -#if (PAGE_SIZE <= 8192) rscctrl |= IXGBE_RSCCTL_MAXDESC_16; -#elif (PAGE_SIZE <= 16384) - rscctrl |= IXGBE_RSCCTL_MAXDESC_8; -#else - rscctrl |= IXGBE_RSCCTL_MAXDESC_4; -#endif IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); } @@ -3606,8 +3660,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) if (hw->mac.type == ixgbe_mac_82598EB) netif_set_gso_max_size(adapter->netdev, 32768); - hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); - #ifdef IXGBE_FCOE if (adapter->netdev->features & NETIF_F_FCOE_MTU) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); @@ -3807,6 +3859,11 @@ static void 
ixgbe_configure(struct ixgbe_adapter *adapter) #ifdef CONFIG_IXGBE_DCB ixgbe_configure_dcb(adapter); #endif + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated + */ + ixgbe_configure_virtualization(adapter); ixgbe_set_rx_mode(adapter->netdev); ixgbe_restore_vlan(adapter); @@ -3838,8 +3895,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) break; } - ixgbe_configure_virtualization(adapter); - #ifdef IXGBE_FCOE /* configure FCoE L2 filters, redirection table, and Rx control */ ixgbe_configure_fcoe(adapter); @@ -4130,27 +4185,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) } /** - * ixgbe_init_rx_page_offset - initialize page offset values for Rx buffers - * @rx_ring: ring to setup - * - * On many IA platforms the L1 cache has a critical stride of 4K, this - * results in each receive buffer starting in the same cache set. To help - * reduce the pressure on this cache set we can interleave the offsets so - * that only every other buffer will be in the same cache set. - **/ -static void ixgbe_init_rx_page_offset(struct ixgbe_ring *rx_ring) -{ - struct ixgbe_rx_buffer *rx_buffer = rx_ring->rx_buffer_info; - u16 i; - - for (i = 0; i < rx_ring->count; i += 2) { - rx_buffer[0].page_offset = 0; - rx_buffer[1].page_offset = ixgbe_rx_bufsz(rx_ring); - rx_buffer = &rx_buffer[2]; - } -} - -/** * ixgbe_clean_rx_ring - Free Rx Buffers per Queue * @rx_ring: ring to free buffers from **/ @@ -4195,8 +4229,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; memset(rx_ring->rx_buffer_info, 0, size); - ixgbe_init_rx_page_offset(rx_ring); - /* Zero out the descriptor ring */ memset(rx_ring->desc, 0, rx_ring->size); @@ -4646,8 +4678,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; - ixgbe_init_rx_page_offset(rx_ring); - return 0; err: vfree(rx_ring->rx_buffer_info); @@ -5530,8 +5560,9 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) { u32 ssvpc; - /* Do not perform spoof check for 82598 */ - if (adapter->hw.mac.type == ixgbe_mac_82598EB) + /* Do not perform spoof check for 82598 or if not in IOV mode */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB || + adapter->num_vfs == 0) return; ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC); @@ -5543,7 +5574,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) if (!ssvpc) return; - e_warn(drv, "%d Spoofed packets detected\n", ssvpc); + e_warn(drv, "%u Spoofed packets detected\n", ssvpc); } /** @@ -5874,9 +5905,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { - if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && - !(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) - return; + if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN)) { + if (unlikely(skb->no_fcs)) + first->tx_flags |= IXGBE_TX_FLAGS_NO_IFCS; + if (!(first->tx_flags & IXGBE_TX_FLAGS_TXSW)) + return; + } } else { u8 l4_hdr = 0; switch (first->protocol) { @@ -5938,7 +5972,6 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) { /* set type for advanced descriptor with frame checksum insertion */ __le32 cmd_type = cpu_to_le32(IXGBE_ADVTXD_DTYP_DATA | - IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); /* set HW vlan bit if vlan is present */ @@ -5958,6 +5991,10 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags) #endif cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_TSE); + /* insert frame checksum */ + if (!(tx_flags & 
IXGBE_TX_FLAGS_NO_IFCS)) + cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS); + return cmd_type; } @@ -6063,8 +6100,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, if (likely(!data_len)) break; - if (unlikely(skb->no_fcs)) - cmd_type &= ~(cpu_to_le32(IXGBE_ADVTXD_DCMD_IFCS)); tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); i++; @@ -6856,7 +6891,7 @@ static int ixgbe_set_features(struct net_device *netdev, static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct net_device *dev, - unsigned char *addr, + const unsigned char *addr, u16 flags) { struct ixgbe_adapter *adapter = netdev_priv(dev); @@ -6893,7 +6928,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct net_device *dev, - unsigned char *addr) + const unsigned char *addr) { struct ixgbe_adapter *adapter = netdev_priv(dev); int err = -EOPNOTSUPP; @@ -7136,11 +7171,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, goto err_ioremap; } - for (i = 1; i <= 5; i++) { - if (pci_resource_len(pdev, i) == 0) - continue; - } - netdev->netdev_ops = &ixgbe_netdev_ops; ixgbe_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; @@ -7419,6 +7449,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, e_err(probe, "failed to allocate sysfs resources\n"); #endif /* CONFIG_IXGBE_HWMON */ +#ifdef CONFIG_DEBUG_FS + ixgbe_dbg_adapter_init(adapter); +#endif /* CONFIG_DEBUG_FS */ + return 0; err_register: @@ -7453,6 +7487,10 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); struct net_device *netdev = adapter->netdev; +#ifdef CONFIG_DEBUG_FS + ixgbe_dbg_adapter_exit(adapter); +#endif /*CONFIG_DEBUG_FS */ + set_bit(__IXGBE_DOWN, &adapter->state); cancel_work_sync(&adapter->service_task); @@ -7708,6 +7746,10 @@ static int __init ixgbe_init_module(void) pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); pr_info("%s\n", ixgbe_copyright); +#ifdef CONFIG_DEBUG_FS + ixgbe_dbg_init(); +#endif /* CONFIG_DEBUG_FS */ + #ifdef CONFIG_IXGBE_DCA dca_register_notify(&dca_notifier); #endif @@ -7730,6 +7772,11 @@ static void __exit ixgbe_exit_module(void) dca_unregister_notify(&dca_notifier); #endif pci_unregister_driver(&ixgbe_driver); + +#ifdef CONFIG_DEBUG_FS + ixgbe_dbg_exit(); +#endif /* CONFIG_DEBUG_FS */ + rcu_barrier(); /* Wait for completion of call_rcu()'s */ } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 3456d5617143..39881cb17a4b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -960,7 +960,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) /* (Re)start the overflow check */ adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED; - adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps); + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, + &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { adapter->ptp_clock = NULL; e_dev_err("ptp_clock_register failed\n"); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 4fea8716ab64..dce48bf64d96 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -346,6 +346,10 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) { + /* VLAN 0 is a special case, don't allow it to be removed */ 
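+	/* (The VLAN 0 filter entry is what allows a VF with no VLAN
+	 * assigned to keep receiving untagged traffic, so honoring a
+	 * delete of VID 0 would black-hole that VF; the reset path
+	 * below re-adds VLAN 0 for the same reason.)
+	 */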
+ if (!vid && !add) + return 0; + return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); } @@ -414,6 +418,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) VLAN_PRIO_SHIFT)), vf); ixgbe_set_vmolr(hw, vf, false); } else { + ixgbe_set_vf_vlan(adapter, true, 0, vf); ixgbe_set_vmvir(adapter, 0, vf); ixgbe_set_vmolr(hw, vf, true); } @@ -810,9 +815,9 @@ out: return err; } -static int ixgbe_link_mbps(int internal_link_speed) +static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) { - switch (internal_link_speed) { + switch (adapter->link_speed) { case IXGBE_LINK_SPEED_100_FULL: return 100; case IXGBE_LINK_SPEED_1GB_FULL: @@ -824,27 +829,30 @@ static int ixgbe_link_mbps(int internal_link_speed) } } -static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate, - int link_speed) +static void ixgbe_set_vf_rate_limit(struct ixgbe_adapter *adapter, int vf) { - int rf_dec, rf_int; - u32 bcnrc_val; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_hw *hw = &adapter->hw; + u32 bcnrc_val = 0; + u16 queue, queues_per_pool; + u16 tx_rate = adapter->vfinfo[vf].tx_rate; + + if (tx_rate) { + /* start with base link speed value */ + bcnrc_val = adapter->vf_rate_link_speed; - if (tx_rate != 0) { /* Calculate the rate factor values to set */ - rf_int = link_speed / tx_rate; - rf_dec = (link_speed - (rf_int * tx_rate)); - rf_dec = (rf_dec * (1<<IXGBE_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; - - bcnrc_val = IXGBE_RTTBCNRC_RS_ENA; - bcnrc_val |= ((rf_int<<IXGBE_RTTBCNRC_RF_INT_SHIFT) & - IXGBE_RTTBCNRC_RF_INT_MASK); - bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK); - } else { - bcnrc_val = 0; + bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT; + bcnrc_val /= tx_rate; + + /* clear everything but the rate factor */ + bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK | + IXGBE_RTTBCNRC_RF_DEC_MASK; + + /* enable the rate scheduler */ + bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA; } - IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */ /* * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported @@ -861,53 +869,68 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate, break; } - IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + /* determine how many queues per pool based on VMDq mask */ + queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); + + /* write value for all Tx queues belonging to VF */ + for (queue = 0; queue < queues_per_pool; queue++) { + unsigned int reg_idx = (vf * queues_per_pool) + queue; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, reg_idx); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val); + } } void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) { - int actual_link_speed, i; - bool reset_rate = false; + int i; /* VF Tx rate limit was not set */ - if (adapter->vf_rate_link_speed == 0) + if (!adapter->vf_rate_link_speed) return; - actual_link_speed = ixgbe_link_mbps(adapter->link_speed); - if (actual_link_speed != adapter->vf_rate_link_speed) { - reset_rate = true; + if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { adapter->vf_rate_link_speed = 0; dev_info(&adapter->pdev->dev, - "Link speed has been changed. VF Transmit rate " - "is disabled\n"); + "Link speed has been changed. 
VF Transmit rate is disabled\n"); } for (i = 0; i < adapter->num_vfs; i++) { - if (reset_rate) + if (!adapter->vf_rate_link_speed) adapter->vfinfo[i].tx_rate = 0; - ixgbe_set_vf_rate_limit(&adapter->hw, i, - adapter->vfinfo[i].tx_rate, - actual_link_speed); + ixgbe_set_vf_rate_limit(adapter, i); } } int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; - int actual_link_speed; + int link_speed; + + /* verify VF is active */ + if (vf >= adapter->num_vfs) + return -EINVAL; - actual_link_speed = ixgbe_link_mbps(adapter->link_speed); - if ((vf >= adapter->num_vfs) || (!adapter->link_up) || - (tx_rate > actual_link_speed) || (actual_link_speed != 10000) || - ((tx_rate != 0) && (tx_rate <= 10))) - /* rate limit cannot be set to 10Mb or less in 10Gb adapters */ + /* verify link is up */ + if (!adapter->link_up) return -EINVAL; - adapter->vf_rate_link_speed = actual_link_speed; - adapter->vfinfo[vf].tx_rate = (u16)tx_rate; - ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); + /* verify we are linked at 10Gbps */ + link_speed = ixgbe_link_mbps(adapter); + if (link_speed != 10000) + return -EINVAL; + + /* rate limit cannot be less than 10Mbs or greater than link speed */ + if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed))) + return -EINVAL; + + /* store values */ + adapter->vf_rate_link_speed = link_speed; + adapter->vfinfo[vf].tx_rate = tx_rate; + + /* update hardware configuration */ + ixgbe_set_vf_rate_limit(adapter, vf); return 0; } diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 418af827b230..da17ccf5c09d 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -272,5 +272,6 @@ struct ixgbe_adv_tx_context_desc { /* Error Codes */ #define IXGBE_ERR_INVALID_MAC_ADDR -1 #define IXGBE_ERR_RESET_FAILED -2 +#define IXGBE_ERR_INVALID_ARGUMENT -3 #endif /* _IXGBEVF_DEFINES_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 98cadb0c4dab..383b4e1cd175 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -101,7 +101,9 @@ struct ixgbevf_ring { /* Supported Rx Buffer Sizes */ #define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */ -#define IXGBEVF_RXBUFFER_2048 2048 +#define IXGBEVF_RXBUFFER_3K 3072 +#define IXGBEVF_RXBUFFER_7K 7168 +#define IXGBEVF_RXBUFFER_15K 15360 #define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */ #define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256 @@ -259,6 +261,11 @@ enum ixbgevf_state_t { __IXGBEVF_DOWN }; +struct ixgbevf_cb { + struct sk_buff *prev; +}; +#define IXGBE_CB(skb) ((struct ixgbevf_cb *)(skb)->cb) + enum ixgbevf_boards { board_82599_vf, board_X540_vf, diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 60ef64587412..cf372ee49d0c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -263,6 +263,8 @@ cont_loop: tx_ring->total_bytes += total_bytes; tx_ring->total_packets += total_packets; u64_stats_update_end(&tx_ring->syncp); + q_vector->tx.total_bytes += total_bytes; + q_vector->tx.total_packets += total_packets; return count < tx_ring->count; } @@ -272,12 +274,10 @@ cont_loop: * @q_vector: structure containing interrupt and ring information * 
@skb: packet to send up * @status: hardware indication of status of receive - * @rx_ring: rx descriptor ring (for a specific queue) to setup * @rx_desc: rx descriptor **/ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, struct sk_buff *skb, u8 status, - struct ixgbevf_ring *ring, union ixgbe_adv_rx_desc *rx_desc) { struct ixgbevf_adapter *adapter = q_vector->adapter; @@ -433,11 +433,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, if (!(staterr & IXGBE_RXD_STAT_EOP)) { skb->next = next_buffer->skb; - skb->next->prev = skb; + IXGBE_CB(skb->next)->prev = skb; adapter->non_eop_descs++; goto next_desc; } + /* we should not be chaining buffers, if we did drop the skb */ + if (IXGBE_CB(skb)->prev) { + do { + struct sk_buff *this = skb; + skb = IXGBE_CB(skb)->prev; + dev_kfree_skb(this); + } while (skb); + goto next_desc; + } + /* ERR_MASK will only have valid bits if EOP set */ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { dev_kfree_skb_irq(skb); @@ -461,7 +471,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, } skb->protocol = eth_type_trans(skb, rx_ring->netdev); - ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); + ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc); next_desc: rx_desc->wb.upper.status_error = 0; @@ -490,6 +500,8 @@ next_desc: rx_ring->total_packets += total_rx_packets; rx_ring->total_bytes += total_rx_bytes; u64_stats_update_end(&rx_ring->syncp); + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; return !!budget; } @@ -716,40 +728,15 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector) } } -static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) +static irqreturn_t ixgbevf_msix_other(int irq, void *data) { struct ixgbevf_adapter *adapter = data; struct ixgbe_hw *hw = &adapter->hw; - u32 msg; - bool got_ack = false; - - if (!hw->mbx.ops.check_for_ack(hw)) - got_ack = true; - if (!hw->mbx.ops.check_for_msg(hw)) { - hw->mbx.ops.read(hw, &msg, 1); + hw->mac.get_link_status = 1; - if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + 1)); - - if (msg & IXGBE_VT_MSGTYPE_NACK) - pr_warn("Last Request of type %2.2x to PF Nacked\n", - msg & 0xFF); - /* - * Restore the PFSTS bit in case someone is polling for a - * return message from the PF - */ - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS; - } - - /* - * checking for the ack clears the PFACK bit. 
Place - * it back in the v2p_mailbox cache so that anyone - * polling for an ack will not miss it - */ - if (got_ack) - hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; + if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies); IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); @@ -899,10 +886,10 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) } err = request_irq(adapter->msix_entries[vector].vector, - &ixgbevf_msix_mbx, 0, netdev->name, adapter); + &ixgbevf_msix_other, 0, netdev->name, adapter); if (err) { hw_dbg(&adapter->hw, - "request_irq for msix_mbx failed: %d\n", err); + "request_irq for msix_other failed: %d\n", err); goto free_queue_irqs; } @@ -1057,15 +1044,46 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; - if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) - srrctl |= IXGBEVF_RXBUFFER_2048 >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; - else - srrctl |= rx_ring->rx_buf_len >> - IXGBE_SRRCTL_BSIZEPKT_SHIFT; + srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> + IXGBE_SRRCTL_BSIZEPKT_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); } +static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + int i; + u16 rx_buf_len; + + /* notify the PF of our intent to use this size of frame */ + ixgbevf_rlpml_set_vf(hw, max_frame); + + /* PF will allow an extra 4 bytes past for vlan tagged frames */ + max_frame += VLAN_HLEN; + + /* + * Make best use of allocation by using all but 1K of a + * power of 2 allocation that will be used for skb->head. + */ + if ((hw->mac.type == ixgbe_mac_X540_vf) && + (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) + rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; + else if (max_frame <= IXGBEVF_RXBUFFER_3K) + rx_buf_len = IXGBEVF_RXBUFFER_3K; + else if (max_frame <= IXGBEVF_RXBUFFER_7K) + rx_buf_len = IXGBEVF_RXBUFFER_7K; + else if (max_frame <= IXGBEVF_RXBUFFER_15K) + rx_buf_len = IXGBEVF_RXBUFFER_15K; + else + rx_buf_len = IXGBEVF_MAX_RXBUFFER; + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i].rx_buf_len = rx_buf_len; +} + /** * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset * @adapter: board private structure @@ -1076,18 +1094,14 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) { u64 rdba; struct ixgbe_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; - int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, j; u32 rdlen; - int rx_buf_len; /* PSRTYPE must be initialized in 82599 */ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); - if (netdev->mtu <= ETH_DATA_LEN) - rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; - else - rx_buf_len = ALIGN(max_frame, 1024); + + /* set_rx_buffer_len must be called before ring initialization */ + ixgbevf_set_rx_buffer_len(adapter); rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); /* Setup the HW Rx Head and Tail Descriptor Pointers and @@ -1103,7 +1117,6 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); adapter->rx_ring[i].head = IXGBE_VFRDH(j); adapter->rx_ring[i].tail = IXGBE_VFRDT(j); - adapter->rx_ring[i].rx_buf_len = rx_buf_len; ixgbevf_configure_srrctl(adapter, j); } @@ -1113,36 +1126,47 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) { struct ixgbevf_adapter 
*adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + int err; + + if (!hw->mac.ops.set_vfta) + return -EOPNOTSUPP; spin_lock(&adapter->mbx_lock); /* add VID to filter table */ - if (hw->mac.ops.set_vfta) - hw->mac.ops.set_vfta(hw, vid, 0, true); + err = hw->mac.ops.set_vfta(hw, vid, 0, true); spin_unlock(&adapter->mbx_lock); + /* translate error return types so error makes sense */ + if (err == IXGBE_ERR_MBX) + return -EIO; + + if (err == IXGBE_ERR_INVALID_ARGUMENT) + return -EACCES; + set_bit(vid, adapter->active_vlans); - return 0; + return err; } static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + int err = -EOPNOTSUPP; spin_lock(&adapter->mbx_lock); /* remove VID from filter table */ if (hw->mac.ops.set_vfta) - hw->mac.ops.set_vfta(hw, vid, 0, false); + err = hw->mac.ops.set_vfta(hw, vid, 0, false); spin_unlock(&adapter->mbx_lock); clear_bit(vid, adapter->active_vlans); - return 0; + return err; } static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) @@ -1308,6 +1332,25 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; } +static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int api[] = { ixgbe_mbox_api_10, + ixgbe_mbox_api_unknown }; + int err = 0, idx = 0; + + spin_lock(&adapter->mbx_lock); + + while (api[idx] != ixgbe_mbox_api_unknown) { + err = ixgbevf_negotiate_api_version(hw, api[idx]); + if (!err) + break; + idx++; + } + + spin_unlock(&adapter->mbx_lock); +} + static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -1315,7 +1358,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) int i, j = 0; int num_rx_rings = adapter->num_rx_queues; u32 txdctl, rxdctl; - u32 msg[2]; for (i = 0; i < adapter->num_tx_queues; i++) { j = adapter->tx_ring[i].reg_idx; @@ -1356,10 +1398,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); } - msg[0] = IXGBE_VF_SET_LPE; - msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; - hw->mbx.ops.write_posted(hw, msg, 2); - spin_unlock(&adapter->mbx_lock); clear_bit(__IXGBEVF_DOWN, &adapter->state); @@ -1371,6 +1409,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter) ixgbevf_save_reset_stats(adapter); ixgbevf_init_last_counter_stats(adapter); + hw->mac.get_link_status = 1; mod_timer(&adapter->watchdog_timer, jiffies); } @@ -1378,6 +1417,8 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + ixgbevf_negotiate_api(adapter); + ixgbevf_configure(adapter); ixgbevf_up_complete(adapter); @@ -1419,7 +1460,7 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, rx_buffer_info->skb = NULL; do { struct sk_buff *this = skb; - skb = skb->prev; + skb = IXGBE_CB(skb)->prev; dev_kfree_skb(this); } while (skb); } @@ -1547,8 +1588,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) { - struct ixgbe_hw *hw = &adapter->hw; - WARN_ON(in_interrupt()); while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) @@ -1561,10 +1600,8 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) * watchdog task will continue to schedule reset tasks until * the PF is up and running. 
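 * (Hence the unconditional down/up sequence below: even if the PF is
 * still resetting, the attempt is harmless and the watchdog simply
 * retries, so the old reset_hw() success check is no longer needed.)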
*/ - if (!hw->mac.ops.reset_hw(hw)) { - ixgbevf_down(adapter); - ixgbevf_up(adapter); - } + ixgbevf_down(adapter); + ixgbevf_up(adapter); clear_bit(__IXGBEVF_RESETTING, &adapter->state); } @@ -1867,6 +1904,22 @@ err_set_interrupt: } /** + * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings + * @adapter: board private structure to clear interrupt scheme on + * + * We go through and clear interrupt specific resources and reset the structure + * to pre-load conditions + **/ +static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) +{ + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + + ixgbevf_free_q_vectors(adapter); + ixgbevf_reset_interrupt_capability(adapter); +} + +/** * ixgbevf_sw_init - Initialize general software structures * (struct ixgbevf_adapter) * @adapter: board private structure to initialize @@ -2351,6 +2404,8 @@ static int ixgbevf_open(struct net_device *netdev) } } + ixgbevf_negotiate_api(adapter); + /* allocate transmit descriptors */ err = ixgbevf_setup_all_tx_resources(adapter); if (err) @@ -2860,10 +2915,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); - struct ixgbe_hw *hw = &adapter->hw; int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; - u32 msg[2]; if (adapter->hw.mac.type == ixgbe_mac_X540_vf) max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; @@ -2877,35 +2930,91 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; - if (!netif_running(netdev)) { - msg[0] = IXGBE_VF_SET_LPE; - msg[1] = max_frame; - hw->mbx.ops.write_posted(hw, msg, 2); - } - if (netif_running(netdev)) ixgbevf_reinit_locked(adapter); return 0; } -static void ixgbevf_shutdown(struct pci_dev *pdev) +static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM + int retval = 0; +#endif netif_device_detach(netdev); if (netif_running(netdev)) { + rtnl_lock(); ixgbevf_down(adapter); ixgbevf_free_irq(adapter); ixgbevf_free_all_tx_resources(adapter); ixgbevf_free_all_rx_resources(adapter); + rtnl_unlock(); } - pci_save_state(pdev); + ixgbevf_clear_interrupt_scheme(adapter); +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; + +#endif pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int ixgbevf_resume(struct pci_dev *pdev) +{ + struct ixgbevf_adapter *adapter = pci_get_drvdata(pdev); + struct net_device *netdev = adapter->netdev; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + /* + * pci_restore_state clears dev->state_saved so call + * pci_save_state to restore it. 
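+	 * (pci_restore_state() is a no-op when state_saved is false, so
+	 * re-saving here keeps a valid copy for any later restore.)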
+ */ + pci_save_state(pdev); + + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + rtnl_lock(); + err = ixgbevf_init_interrupt_scheme(adapter); + rtnl_unlock(); + if (err) { + dev_err(&pdev->dev, "Cannot initialize interrupts\n"); + return err; + } + + ixgbevf_reset(adapter); + + if (netif_running(netdev)) { + err = ixgbevf_open(netdev); + if (err) + return err; + } + + netif_device_attach(netdev); + + return err; +} + +#endif /* CONFIG_PM */ +static void ixgbevf_shutdown(struct pci_dev *pdev) +{ + ixgbevf_suspend(pdev, PMSG_SUSPEND); } static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, @@ -2946,7 +3055,7 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, return stats; } -static const struct net_device_ops ixgbe_netdev_ops = { +static const struct net_device_ops ixgbevf_netdev_ops = { .ndo_open = ixgbevf_open, .ndo_stop = ixgbevf_close, .ndo_start_xmit = ixgbevf_xmit_frame, @@ -2962,7 +3071,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { static void ixgbevf_assign_netdev_ops(struct net_device *dev) { - dev->netdev_ops = &ixgbe_netdev_ops; + dev->netdev_ops = &ixgbevf_netdev_ops; ixgbevf_set_ethtool_ops(dev); dev->watchdog_timeo = 5 * HZ; } @@ -3131,6 +3240,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev, return 0; err_register: + ixgbevf_clear_interrupt_scheme(adapter); err_sw_init: ixgbevf_reset_interrupt_capability(adapter); iounmap(hw->hw_addr); @@ -3168,6 +3278,7 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); + ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_reset_interrupt_capability(adapter); iounmap(adapter->hw.hw_addr); @@ -3267,6 +3378,11 @@ static struct pci_driver ixgbevf_driver = { .id_table = ixgbevf_pci_tbl, .probe = ixgbevf_probe, .remove = __devexit_p(ixgbevf_remove), +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = ixgbevf_suspend, + .resume = ixgbevf_resume, +#endif .shutdown = ixgbevf_shutdown, .err_handler = &ixgbevf_err_handler }; diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c index 9c955900fe64..d5028ddf4b31 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.c +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c @@ -86,14 +86,17 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) { struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val = IXGBE_ERR_MBX; + s32 ret_val = -IXGBE_ERR_MBX; + + if (!mbx->ops.read) + goto out; ret_val = ixgbevf_poll_for_msg(hw); /* if ack received read message, otherwise we timed out */ if (!ret_val) ret_val = mbx->ops.read(hw, msg, size); - +out: return ret_val; } @@ -109,7 +112,11 @@ static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) { struct ixgbe_mbx_info *mbx = &hw->mbx; - s32 ret_val; + s32 ret_val = -IXGBE_ERR_MBX; + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; /* send msg */ ret_val = mbx->ops.write(hw, msg, size); @@ -117,7 +124,7 @@ static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size) /* if msg sent wait until we receive an ack */ if (!ret_val) ret_val = ixgbevf_poll_for_ack(hw); - +out: return ret_val; } diff --git 
a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index cf9131c5c115..946ce86f337f 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -76,12 +76,29 @@ /* bits 23:16 are used for exra info for certain messages */ #define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) +/* definitions to support mailbox API version negotiation */ + +/* + * each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ +enum ixgbe_pfvf_api_rev { + ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + /* This value should always be last */ + ixgbe_mbox_api_unknown, /* indicates that API version is not known */ +}; + +/* mailbox API, legacy requests */ #define IXGBE_VF_RESET 0x01 /* VF requests reset */ #define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ #define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ #define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ -#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ -#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ + +/* mailbox API, version 1.0 VF requests */ +#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ +#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index ec89b86f7ca4..0c7447e6fcc8 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -79,6 +79,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) /* Call adapter stop to disable tx/rx and clear interrupts */ hw->mac.ops.stop_adapter(hw); + /* reset the api version */ + hw->api_version = ixgbe_mbox_api_10; + IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST); IXGBE_WRITE_FLUSH(hw); @@ -97,7 +100,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) msgbuf[0] = IXGBE_VF_RESET; mbx->ops.write_posted(hw, msgbuf, 1); - msleep(10); + mdelay(10); /* set our "perm_addr" based on info provided by PF */ /* also set up the mc_filter_type which is piggy backed @@ -346,16 +349,32 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) { + struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; + s32 err; msgbuf[0] = IXGBE_VF_SET_VLAN; msgbuf[1] = vlan; /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; - ixgbevf_write_msg_read_ack(hw, msgbuf, 2); + err = mbx->ops.write_posted(hw, msgbuf, 2); + if (err) + goto mbx_err; - return 0; + err = mbx->ops.read_posted(hw, msgbuf, 2); + if (err) + goto mbx_err; + + /* remove extra bits from the message */ + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT); + + if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_ACK)) + err = IXGBE_ERR_INVALID_ARGUMENT; + +mbx_err: + return err; } /** @@ -389,20 +408,23 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, bool *link_up, bool autoneg_wait_to_complete) { + struct ixgbe_mbx_info *mbx = &hw->mbx; + struct ixgbe_mac_info *mac = &hw->mac; + s32 ret_val = 0; u32 links_reg; + u32 
in_msg = 0; - if (!(hw->mbx.ops.check_for_rst(hw))) { - *link_up = false; - *speed = 0; - return -1; - } + /* If we were hit with a reset, drop the link */ + if (!mbx->ops.check_for_rst(hw) || !mbx->timeout) + mac->get_link_status = true; - links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!mac->get_link_status) + goto out; - if (links_reg & IXGBE_LINKS_UP) - *link_up = true; - else - *link_up = false; + /* if link status is down, no point in checking to see if the PF is up */ + links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); + if (!(links_reg & IXGBE_LINKS_UP)) + goto out; switch (links_reg & IXGBE_LINKS_SPEED_82599) { case IXGBE_LINKS_SPEED_10G_82599: @@ -416,7 +438,79 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, break; } - return 0; + /* if the read failed it could just be a mailbox collision; best to wait + * until we are called again rather than report an error */ + if (mbx->ops.read(hw, &in_msg, 1)) + goto out; + + if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) { + /* msg is not CTS; if it is also a NACK we must have lost CTS status */ + if (in_msg & IXGBE_VT_MSGTYPE_NACK) + ret_val = -1; + goto out; + } + + /* the PF is talking; if we timed out in the past, reinit */ + if (!mbx->timeout) { + ret_val = -1; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link */ + mac->get_link_status = false; + +out: + *link_up = !mac->get_link_status; + return ret_val; +} + +/** + * ixgbevf_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size) +{ + u32 msgbuf[2]; + + msgbuf[0] = IXGBE_VF_SET_LPE; + msgbuf[1] = max_size; + ixgbevf_write_msg_read_ack(hw, msgbuf, 2); +} + +/** + * ixgbevf_negotiate_api_version - Negotiate supported API version + * @hw: pointer to the HW structure + * @api: integer containing requested API version + **/ +int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api) +{ + int err; + u32 msg[3]; + + /* Negotiate the mailbox API version */ + msg[0] = IXGBE_VF_API_NEGOTIATE; + msg[1] = api; + msg[2] = 0; + err = hw->mbx.ops.write_posted(hw, msg, 3); + + if (!err) + err = hw->mbx.ops.read_posted(hw, msg, 3); + + if (!err) { + msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* Store value and return 0 on success */ + if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) { + hw->api_version = api; + return 0; + } + + err = IXGBE_ERR_INVALID_ARGUMENT; + } + + return err; } static const struct ixgbe_mac_operations ixgbevf_mac_ops = { diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 25c951daee5d..47f11a584d8c 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -137,6 +137,8 @@ struct ixgbe_hw { u8 revision_id; bool adapter_stopped; + + int api_version; }; struct ixgbevf_hw_stats { @@ -170,5 +172,7 @@ struct ixgbevf_info { const struct ixgbe_mac_operations *mac_ops; }; +void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); +int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); #endif /* __IXGBE_VF_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index f32e70300770..5aba5ecdf1e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* If source MAC is
equal to our own MAC and not performing * the selftest or flb disabled - drop the packet */ if (s_mac == priv->mac && - (!(dev->features & NETIF_F_LOOPBACK) || - !priv->validate_loopback)) + !((dev->features & NETIF_F_LOOPBACK) || + priv->validate_loopback)) goto next; /* diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 019d856b1334..10bba09c44ea 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -164,7 +164,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ring->cons = 0xffffffff; ring->last_nr_txbb = 1; ring->poll_cnt = 0; - ring->blocked = 0; memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); memset(ring->buf, 0, ring->buf_size); @@ -365,14 +364,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) ring->cons += txbbs_skipped; netdev_tx_completed_queue(ring->tx_queue, packets, bytes); - /* Wakeup Tx queue if this ring stopped it */ - if (unlikely(ring->blocked)) { - if ((u32) (ring->prod - ring->cons) <= - ring->size - HEADROOM - MAX_DESC_TXBBS) { - ring->blocked = 0; - netif_tx_wake_queue(ring->tx_queue); - priv->port_stats.wake_queue++; - } + /* + * Wakeup Tx queue if this stopped, and at least 1 packet + * was completed + */ + if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { + netif_tx_wake_queue(ring->tx_queue); + priv->port_stats.wake_queue++; } } @@ -592,7 +590,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->size - HEADROOM - MAX_DESC_TXBBS)) { /* every full Tx ring stops queue */ netif_tx_stop_queue(ring->tx_queue); - ring->blocked = 1; priv->port_stats.queue_stopped++; return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 88b7b3e75ab1..daf417923661 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -358,13 +358,14 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, } int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, - u64 virt, int obj_size, int nobj, int reserved, + u64 virt, int obj_size, u32 nobj, int reserved, int use_lowmem, int use_coherent) { int obj_per_chunk; int num_icm; unsigned chunk_size; int i; + u64 size; obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; @@ -380,10 +381,12 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, table->coherent = use_coherent; mutex_init(&table->mutex); + size = (u64) nobj * obj_size; for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { chunk_size = MLX4_TABLE_CHUNK_SIZE; - if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size) - chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE); + if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size) + chunk_size = PAGE_ALIGN(size - + i * MLX4_TABLE_CHUNK_SIZE); table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT, (use_lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h index 19e4efc0b342..a67744f53506 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -78,7 +78,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, int start, int end); int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, - u64 virt, int obj_size, int nobj, int reserved, + u64 virt, int obj_size, u32 nobj, int reserved, int use_lowmem, int use_coherent); void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 48d0e90194cb..2f816c6aed72 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -157,9 +157,6 @@ int mlx4_check_port_params(struct mlx4_dev *dev, "on this HCA, aborting.\n"); return -EINVAL; } - if (port_type[i] == MLX4_PORT_TYPE_ETH && - port_type[i + 1] == MLX4_PORT_TYPE_IB) - return -EINVAL; } } @@ -1237,13 +1234,13 @@ static int mlx4_init_hca(struct mlx4_dev *dev) mlx4_info(dev, "non-primary physical function, skipping.\n"); else mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); - goto unmap_bf; + return err; } err = mlx4_load_fw(dev); if (err) { mlx4_err(dev, "Failed to start FW, aborting.\n"); - goto unmap_bf; + return err; } mlx4_cfg.log_pg_sz_m = 1; @@ -1307,7 +1304,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_init_slave(dev); if (err) { mlx4_err(dev, "Failed to initialize slave\n"); - goto unmap_bf; + return err; } err = mlx4_slave_cap(dev); @@ -1327,7 +1324,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_QUERY_ADAPTER(dev, &adapter); if (err) { mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); - goto err_close; + goto unmap_bf; } priv->eq_table.inta_pin = adapter.inta_pin; @@ -1335,6 +1332,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev) return 0; +unmap_bf: + unmap_bf_area(dev); + err_close: mlx4_close_hca(dev); @@ -1347,8 +1347,6 @@ err_stop_fw: mlx4_UNMAP_FA(dev); mlx4_free_icm(dev, priv->fw.fw_icm, 0); } -unmap_bf: - unmap_bf_area(dev); return err; } @@ -1999,7 +1997,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) } slave_start: - if (mlx4_cmd_init(dev)) { + err = mlx4_cmd_init(dev); + if (err) { mlx4_err(dev, "Failed to init command interface, aborting.\n"); goto err_sriov; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 4ec3835e1bc2..e151c21baf2b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -137,11 +137,11 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, return err; } -static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num, +static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn) { - struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num]; + struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1]; struct mlx4_promisc_qp *pqp; list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { @@ -182,7 +182,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port, /* If the 
given qpn is also a promisc qp, * it should be inserted to duplicates list */ - pqp = get_promisc_qp(dev, 0, steer, qpn); + pqp = get_promisc_qp(dev, port, steer, qpn); if (pqp) { dqp = kmalloc(sizeof *dqp, GFP_KERNEL); if (!dqp) { @@ -256,7 +256,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port, s_steer = &mlx4_priv(dev)->steer[port - 1]; - pqp = get_promisc_qp(dev, 0, steer, qpn); + pqp = get_promisc_qp(dev, port, steer, qpn); if (!pqp) return 0; /* nothing to do */ @@ -302,7 +302,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, s_steer = &mlx4_priv(dev)->steer[port - 1]; /* if qp is not promisc, it cannot be duplicated */ - if (!get_promisc_qp(dev, 0, steer, qpn)) + if (!get_promisc_qp(dev, port, steer, qpn)) return false; /* The qp is promisc qp so it is a duplicate on this index @@ -352,7 +352,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, members_count = be32_to_cpu(mgm->members_count) & 0xffffff; for (i = 0; i < members_count; i++) { qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; - if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) { + if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) { /* the qp is not promisc, the entry can't be removed */ goto out; } @@ -398,7 +398,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, mutex_lock(&priv->mcg_table.mutex); - if (get_promisc_qp(dev, 0, steer, qpn)) { + if (get_promisc_qp(dev, port, steer, qpn)) { err = 0; /* Noting to do, already exists */ goto out_mutex; } @@ -432,8 +432,10 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { /* Entry already exists, add to duplicates */ dqp = kmalloc(sizeof *dqp, GFP_KERNEL); - if (!dqp) + if (!dqp) { + err = -ENOMEM; goto out_mailbox; + } dqp->qpn = qpn; list_add_tail(&dqp->list, &entry->duplicates); found = true; @@ -501,7 +503,7 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, s_steer = &mlx4_priv(dev)->steer[port - 1]; mutex_lock(&priv->mcg_table.mutex); - pqp = get_promisc_qp(dev, 0, steer, qpn); + pqp = get_promisc_qp(dev, port, steer, qpn); if (unlikely(!pqp)) { mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); /* nothing to do */ @@ -648,13 +650,6 @@ static int find_entry(struct mlx4_dev *dev, u8 port, return err; } -struct mlx4_net_trans_rule_hw_ctrl { - __be32 ctrl; - __be32 vf_vep_port; - __be32 qpn; - __be32 reserved; -}; - static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, struct mlx4_net_trans_rule_hw_ctrl *hw) { @@ -678,87 +673,18 @@ static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, hw->qpn = cpu_to_be32(ctrl->qpn); } -struct mlx4_net_trans_rule_hw_ib { - u8 size; - u8 rsvd1; - __be16 id; - u32 rsvd2; - __be32 qpn; - __be32 qpn_mask; - u8 dst_gid[16]; - u8 dst_gid_msk[16]; -} __packed; - -struct mlx4_net_trans_rule_hw_eth { - u8 size; - u8 rsvd; - __be16 id; - u8 rsvd1[6]; - u8 dst_mac[6]; - u16 rsvd2; - u8 dst_mac_msk[6]; - u16 rsvd3; - u8 src_mac[6]; - u16 rsvd4; - u8 src_mac_msk[6]; - u8 rsvd5; - u8 ether_type_enable; - __be16 ether_type; - __be16 vlan_id_msk; - __be16 vlan_id; -} __packed; - -struct mlx4_net_trans_rule_hw_tcp_udp { - u8 size; - u8 rsvd; - __be16 id; - __be16 rsvd1[3]; - __be16 dst_port; - __be16 rsvd2; - __be16 dst_port_msk; - __be16 rsvd3; - __be16 src_port; - __be16 rsvd4; - __be16 src_port_msk; -} __packed; - -struct mlx4_net_trans_rule_hw_ipv4 { - u8 size; - u8 rsvd; - __be16 id; - __be32 rsvd1; - __be32 dst_ip; - __be32 dst_ip_msk; - __be32 src_ip; - __be32 
src_ip_msk; -} __packed; - -struct _rule_hw { - union { - struct { - u8 size; - u8 rsvd; - __be16 id; - }; - struct mlx4_net_trans_rule_hw_eth eth; - struct mlx4_net_trans_rule_hw_ib ib; - struct mlx4_net_trans_rule_hw_ipv4 ipv4; - struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; - }; +const u16 __sw_id_hw[] = { + [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, + [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, + [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, + [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, + [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, + [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 }; static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, struct _rule_hw *rule_hw) { - static const u16 __sw_id_hw[] = { - [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, - [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, - [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, - [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, - [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, - [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 - }; - static const size_t __rule_hw_sz[] = { [MLX4_NET_TRANS_RULE_ID_ETH] = sizeof(struct mlx4_net_trans_rule_hw_eth), diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 59ebc0339638..dba69d98734a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -249,7 +249,7 @@ struct mlx4_bitmap { struct mlx4_buddy { unsigned long **bits; unsigned int *num_free; - int max_order; + u32 max_order; spinlock_t lock; }; @@ -258,7 +258,7 @@ struct mlx4_icm; struct mlx4_icm_table { u64 virt; int num_icm; - int num_obj; + u32 num_obj; int obj_size; int lowmem; int coherent; @@ -690,6 +690,82 @@ struct mlx4_steer { struct list_head steer_entries[MLX4_NUM_STEERS]; }; +struct mlx4_net_trans_rule_hw_ctrl { + __be32 ctrl; + __be32 vf_vep_port; + __be32 qpn; + __be32 reserved; +}; + +struct mlx4_net_trans_rule_hw_ib { + u8 size; + u8 rsvd1; + __be16 id; + u32 rsvd2; + __be32 qpn; + __be32 qpn_mask; + u8 dst_gid[16]; + u8 dst_gid_msk[16]; +} __packed; + +struct mlx4_net_trans_rule_hw_eth { + u8 size; + u8 rsvd; + __be16 id; + u8 rsvd1[6]; + u8 dst_mac[6]; + u16 rsvd2; + u8 dst_mac_msk[6]; + u16 rsvd3; + u8 src_mac[6]; + u16 rsvd4; + u8 src_mac_msk[6]; + u8 rsvd5; + u8 ether_type_enable; + __be16 ether_type; + __be16 vlan_id_msk; + __be16 vlan_id; +} __packed; + +struct mlx4_net_trans_rule_hw_tcp_udp { + u8 size; + u8 rsvd; + __be16 id; + __be16 rsvd1[3]; + __be16 dst_port; + __be16 rsvd2; + __be16 dst_port_msk; + __be16 rsvd3; + __be16 src_port; + __be16 rsvd4; + __be16 src_port_msk; +} __packed; + +struct mlx4_net_trans_rule_hw_ipv4 { + u8 size; + u8 rsvd; + __be16 id; + __be32 rsvd1; + __be32 dst_ip; + __be32 dst_ip_msk; + __be32 src_ip; + __be32 src_ip_msk; +} __packed; + +struct _rule_hw { + union { + struct { + u8 size; + u8 rsvd; + __be16 id; + }; + struct mlx4_net_trans_rule_hw_eth eth; + struct mlx4_net_trans_rule_hw_ib ib; + struct mlx4_net_trans_rule_hw_ipv4 ipv4; + struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; + }; +}; + struct mlx4_priv { struct mlx4_dev dev; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 5f1ab105debc..9d27e42264e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -248,7 +248,6 @@ struct mlx4_en_tx_ring { u32 doorbell_qpn; void *buf; u16 poll_cnt; - int blocked; struct mlx4_en_tx_info *tx_info; u8 *bounce_buf; u32 last_nr_txbb; diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c 
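/*
 * The mr.c hunks below switch the buddy bitmap allocation from plain
 * kmalloc to kcalloc with a vzalloc fallback, so large-order bitmaps
 * survive physical memory fragmentation.  A minimal sketch of that
 * pattern (hypothetical helper name, not part of the patch):
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>

static unsigned long *buddy_bitmap_alloc(size_t nlongs)
{
	unsigned long *bits;

	/* try physically contiguous memory quietly first */
	bits = kcalloc(nlongs, sizeof(*bits), GFP_KERNEL | __GFP_NOWARN);
	if (!bits)
		bits = vzalloc(nlongs * sizeof(*bits)); /* virtually contiguous fallback */
	return bits;
}
/* free with the matching test: is_vmalloc_addr(bits) ? vfree(bits) : kfree(bits) */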
index af55b7ce5341..c202d3ad2a0e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -37,6 +37,7 @@ #include <linux/export.h> #include <linux/slab.h> #include <linux/kernel.h> +#include <linux/vmalloc.h> #include <linux/mlx4/cmd.h> @@ -120,7 +121,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) buddy->max_order = max_order; spin_lock_init(&buddy->lock); - buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), + buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *), GFP_KERNEL); buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, GFP_KERNEL); @@ -129,10 +130,12 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) for (i = 0; i <= buddy->max_order; ++i) { s = BITS_TO_LONGS(1 << (buddy->max_order - i)); - buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL); - if (!buddy->bits[i]) - goto err_out_free; - bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i)); + buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); + if (!buddy->bits[i]) { + buddy->bits[i] = vzalloc(s * sizeof(long)); + if (!buddy->bits[i]) + goto err_out_free; + } } set_bit(0, buddy->bits[buddy->max_order]); @@ -142,7 +145,10 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) err_out_free: for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i])) + vfree(buddy->bits[i]); + else + kfree(buddy->bits[i]); err_out: kfree(buddy->bits); @@ -156,7 +162,10 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) int i; for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + if (is_vmalloc_addr(buddy->bits[i])) + vfree(buddy->bits[i]); + else + kfree(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); @@ -668,7 +677,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) return err; err = mlx4_buddy_init(&mr_table->mtt_buddy, - ilog2(dev->caps.num_mtts / + ilog2((u32)dev->caps.num_mtts / (1 << log_mtts_per_seg))); if (err) goto err_buddy; @@ -678,7 +687,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)); if (priv->reserved_mtts < 0) { - mlx4_warn(dev, "MTT table of order %d is too small.\n", + mlx4_warn(dev, "MTT table of order %u is too small.\n", mr_table->mtt_buddy.max_order); err = -ENOMEM; goto err_reserve_mtts; diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index 9ee4725363d5..8e0c3cc2a1ec 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -76,7 +76,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, u64 size; u64 start; int type; - int num; + u32 num; int log_num; }; @@ -105,7 +105,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, si_meminfo(&si); request->num_mtt = roundup_pow_of_two(max_t(unsigned, request->num_mtt, - min(1UL << 31, + min(1UL << (31 - log_mtts_per_seg), si.totalram >> (log_mtts_per_seg - 1)))); profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 94ceddd17ab2..293c9e820c49 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -42,6 +42,7 @@ #include <linux/mlx4/cmd.h> #include <linux/mlx4/qp.h> #include <linux/if_ether.h> +#include <linux/etherdevice.h> #include "mlx4.h" #include "fw.h" @@ 
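The mlx4_buddy_init()/mlx4_buddy_cleanup() changes above adopt a try-kmalloc-then-vmalloc strategy for the per-order bitmaps, which can be very large at high max_order. A standalone sketch of the pattern with the primitives the patch uses (later kernels wrap this same idea in kvzalloc()/kvfree()):

#include <linux/slab.h>
#include <linux/vmalloc.h>

static unsigned long *bitmap_alloc_fallback(size_t nlongs)
{
        unsigned long *p;

        /* __GFP_NOWARN: a failed large kcalloc() is expected and
         * handled here, so don't spam the log. */
        p = kcalloc(nlongs, sizeof(*p), GFP_KERNEL | __GFP_NOWARN);
        if (!p)
                p = vzalloc(nlongs * sizeof(*p)); /* zeroed, like kcalloc */
        return p;
}

static void bitmap_free_fallback(unsigned long *p)
{
        /* The free path must match whichever allocator succeeded. */
        if (is_vmalloc_addr(p))
                vfree(p);
        else
                kfree(p);
}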
-2776,18 +2777,133 @@ ex_put: return err; } +/* + * MAC validation for Flow Steering rules. + * A VF can attach rules only with a MAC address that is assigned to it. + */ +static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, + struct list_head *rlist) +{ + struct mac_res *res, *tmp; + __be64 be_mac; + + /* make sure it isn't a multicast or broadcast mac */ + if (!is_multicast_ether_addr(eth_header->eth.dst_mac) && + !is_broadcast_ether_addr(eth_header->eth.dst_mac)) { + list_for_each_entry_safe(res, tmp, rlist, list) { + be_mac = cpu_to_be64(res->mac << 16); + if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN)) + return 0; + } + pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n", + eth_header->eth.dst_mac, slave); + return -EINVAL; + } + return 0; +} + +/* + * In case of a missing eth header, append an eth header with a MAC address + * assigned to the VF. + */ +static int add_eth_header(struct mlx4_dev *dev, int slave, + struct mlx4_cmd_mailbox *inbox, + struct list_head *rlist, int header_id) +{ + struct mac_res *res, *tmp; + u8 port; + struct mlx4_net_trans_rule_hw_ctrl *ctrl; + struct mlx4_net_trans_rule_hw_eth *eth_header; + struct mlx4_net_trans_rule_hw_ipv4 *ip_header; + struct mlx4_net_trans_rule_hw_tcp_udp *l4_header; + __be64 be_mac = 0; + __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); + + ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; + port = be32_to_cpu(ctrl->vf_vep_port) & 0xff; + eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1); + + /* Clear a space in the inbox for the eth header */ + switch (header_id) { + case MLX4_NET_TRANS_RULE_ID_IPV4: + ip_header = + (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1); + memmove(ip_header, eth_header, + sizeof(*ip_header) + sizeof(*l4_header)); + break; + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *) + (eth_header + 1); + memmove(l4_header, eth_header, sizeof(*l4_header)); + break; + default: + return -EINVAL; + } + list_for_each_entry_safe(res, tmp, rlist, list) { + if (port == res->port) { + be_mac = cpu_to_be64(res->mac << 16); + break; + } + } + if (!be_mac) { + pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n", + port); + return -EINVAL; + } + + memset(eth_header, 0, sizeof(*eth_header)); + eth_header->size = sizeof(*eth_header) >> 2; + eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]); + memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN); + memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN); + + return 0; + +} + int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { + + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; int err; + struct mlx4_net_trans_rule_hw_ctrl *ctrl; + struct _rule_hw *rule_header; + int header_id; if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) return -EOPNOTSUPP; + ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; + rule_header = (struct _rule_hw *)(ctrl + 1); + header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); + + switch (header_id) { + case MLX4_NET_TRANS_RULE_ID_ETH: + if (validate_eth_header_mac(slave, rule_header, rlist)) + return -EINVAL; + break; + case MLX4_NET_TRANS_RULE_ID_IPV4: + case
MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); + if (add_eth_header(dev, slave, inbox, rlist, header_id)) + return -EINVAL; + vhcr->in_modifier += + sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; + break; + default: + pr_err("Corrupted mailbox.\n"); + return -EINVAL; + } + err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, vhcr->in_modifier, 0, MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c index 802498293528..34ee09bae36e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/sense.c +++ b/drivers/net/ethernet/mellanox/mlx4/sense.c @@ -81,20 +81,6 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev, } /* - * Adjust port configuration: - * If port 1 sensed nothing and port 2 is IB, set both as IB - * If port 2 sensed nothing and port 1 is Eth, set both as Eth - */ - if (stype[0] == MLX4_PORT_TYPE_ETH) { - for (i = 1; i < dev->caps.num_ports; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH; - } - if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) { - for (i = 0; i < dev->caps.num_ports - 1; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB; - } - - /* * If sensed nothing, remain in current configuration. */ for (i = 0; i < dev->caps.num_ports; i++) diff --git a/drivers/net/ethernet/mipsnet.c b/drivers/net/ethernet/mipsnet.c deleted file mode 100644 index db5285befe2a..000000000000 --- a/drivers/net/ethernet/mipsnet.c +++ /dev/null @@ -1,345 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - */ - -#include <linux/init.h> -#include <linux/interrupt.h> -#include <linux/io.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/netdevice.h> -#include <linux/etherdevice.h> -#include <linux/platform_device.h> -#include <asm/mips-boards/simint.h> - -#define MIPSNET_VERSION "2007-11-17" - -/* - * Net status/control block as seen by sw in the core. - */ -struct mipsnet_regs { - /* - * Device info for probing, reads as MIPSNET%d where %d is some - * form of version. - */ - u64 devId; /*0x00 */ - - /* - * read only busy flag. - * Set and cleared by the Net Device to indicate that an rx or a tx - * is in progress. - */ - u32 busy; /*0x08 */ - - /* - * Set by the Net Device. - * The device will set it once data has been received. - * The value is the number of bytes that should be read from - * rxDataBuffer. The value will decrease till 0 until all the data - * from rxDataBuffer has been read. - */ - u32 rxDataCount; /*0x0c */ -#define MIPSNET_MAX_RXTX_DATACOUNT (1 << 16) - - /* - * Settable from the MIPS core, cleared by the Net Device. - * The core should set the number of bytes it wants to send, - * then it should write those bytes of data to txDataBuffer. - * The device will clear txDataCount has been processed (not - * necessarily sent). - */ - u32 txDataCount; /*0x10 */ - - /* - * Interrupt control - * - * Used to clear the interrupted generated by this dev. - * Write a 1 to clear the interrupt. (except bit31). - * - * Bit0 is set if it was a tx-done interrupt. - * Bit1 is set when new rx-data is available. - * Until this bit is cleared there will be no other RXs. - * - * Bit31 is used for testing, it clears after a read. - * Writing 1 to this bit will cause an interrupt to be generated. 
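The validate_eth_header_mac() logic above reduces to: reject a unicast destination MAC unless it appears in the VF's MAC resource list. A condensed sketch, assuming the mac_res layout used in the hunk (a 48-bit MAC in the low bits of a host-order u64, rules carrying big-endian MAC bytes); the helper name is illustrative only:

static bool vf_owns_dst_mac(const u8 *dst_mac, struct list_head *rlist)
{
        struct mac_res *res;
        __be64 be_mac;

        list_for_each_entry(res, rlist, list) {
                /* Shift the MAC into the top six bytes and convert,
                 * so a byte-wise compare against the rule header's
                 * big-endian dst_mac is valid. */
                be_mac = cpu_to_be64(res->mac << 16);
                if (!memcmp(&be_mac, dst_mac, ETH_ALEN))
                        return true;
        }
        return false;
}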
- * To clear the test interrupt, write 0 to this register. - */ - u32 interruptControl; /*0x14 */ -#define MIPSNET_INTCTL_TXDONE (1u << 0) -#define MIPSNET_INTCTL_RXDONE (1u << 1) -#define MIPSNET_INTCTL_TESTBIT (1u << 31) - - /* - * Readonly core-specific interrupt info for the device to signal - * the core. The meaning of the contents of this field might change. - */ - /* XXX: the whole memIntf interrupt scheme is messy: the device - * should have no control what so ever of what VPE/register set is - * being used. - * The MemIntf should only expose interrupt lines, and something in - * the config should be responsible for the line<->core/vpe bindings. - */ - u32 interruptInfo; /*0x18 */ - - /* - * This is where the received data is read out. - * There is more data to read until rxDataReady is 0. - * Only 1 byte at this regs offset is used. - */ - u32 rxDataBuffer; /*0x1c */ - - /* - * This is where the data to transmit is written. - * Data should be written for the amount specified in the - * txDataCount register. - * Only 1 byte at this regs offset is used. - */ - u32 txDataBuffer; /*0x20 */ -}; - -#define regaddr(dev, field) \ - (dev->base_addr + offsetof(struct mipsnet_regs, field)) - -static char mipsnet_string[] = "mipsnet"; - -/* - * Copy data from the MIPSNET rx data port - */ -static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata, - int len) -{ - for (; len > 0; len--, kdata++) - *kdata = inb(regaddr(dev, rxDataBuffer)); - - return inl(regaddr(dev, rxDataCount)); -} - -static inline void mipsnet_put_todevice(struct net_device *dev, - struct sk_buff *skb) -{ - int count_to_go = skb->len; - char *buf_ptr = skb->data; - - outl(skb->len, regaddr(dev, txDataCount)); - - for (; count_to_go; buf_ptr++, count_to_go--) - outb(*buf_ptr, regaddr(dev, txDataBuffer)); - - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; - - dev_kfree_skb(skb); -} - -static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) -{ - /* - * Only one packet at a time. Once TXDONE interrupt is serviced, the - * queue will be restarted. - */ - netif_stop_queue(dev); - mipsnet_put_todevice(dev, skb); - - return NETDEV_TX_OK; -} - -static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t len) -{ - struct sk_buff *skb; - - if (!len) - return len; - - skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN); - if (!skb) { - dev->stats.rx_dropped++; - return -ENOMEM; - } - - skb_reserve(skb, NET_IP_ALIGN); - if (ioiocpy_frommipsnet(dev, skb_put(skb, len), len)) - return -EFAULT; - - skb->protocol = eth_type_trans(skb, dev); - skb->ip_summed = CHECKSUM_UNNECESSARY; - - netif_rx(skb); - - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; - - return len; -} - -static irqreturn_t mipsnet_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - u32 int_flags; - irqreturn_t ret = IRQ_NONE; - - if (irq != dev->irq) - goto out_badirq; - - /* TESTBIT is cleared on read. */ - int_flags = inl(regaddr(dev, interruptControl)); - if (int_flags & MIPSNET_INTCTL_TESTBIT) { - /* TESTBIT takes effect after a write with 0. */ - outl(0, regaddr(dev, interruptControl)); - ret = IRQ_HANDLED; - } else if (int_flags & MIPSNET_INTCTL_TXDONE) { - /* Only one packet at a time, we are done. 
*/ - dev->stats.tx_packets++; - netif_wake_queue(dev); - outl(MIPSNET_INTCTL_TXDONE, - regaddr(dev, interruptControl)); - ret = IRQ_HANDLED; - } else if (int_flags & MIPSNET_INTCTL_RXDONE) { - mipsnet_get_fromdev(dev, inl(regaddr(dev, rxDataCount))); - outl(MIPSNET_INTCTL_RXDONE, regaddr(dev, interruptControl)); - ret = IRQ_HANDLED; - } - return ret; - -out_badirq: - printk(KERN_INFO "%s: %s(): irq %d for unknown device\n", - dev->name, __func__, irq); - return ret; -} - -static int mipsnet_open(struct net_device *dev) -{ - int err; - - err = request_irq(dev->irq, mipsnet_interrupt, - IRQF_SHARED, dev->name, (void *) dev); - if (err) { - release_region(dev->base_addr, sizeof(struct mipsnet_regs)); - return err; - } - - netif_start_queue(dev); - - /* test interrupt handler */ - outl(MIPSNET_INTCTL_TESTBIT, regaddr(dev, interruptControl)); - - return 0; -} - -static int mipsnet_close(struct net_device *dev) -{ - netif_stop_queue(dev); - free_irq(dev->irq, dev); - return 0; -} - -static void mipsnet_set_mclist(struct net_device *dev) -{ -} - -static const struct net_device_ops mipsnet_netdev_ops = { - .ndo_open = mipsnet_open, - .ndo_stop = mipsnet_close, - .ndo_start_xmit = mipsnet_xmit, - .ndo_set_rx_mode = mipsnet_set_mclist, - .ndo_change_mtu = eth_change_mtu, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, -}; - -static int __devinit mipsnet_probe(struct platform_device *dev) -{ - struct net_device *netdev; - int err; - - netdev = alloc_etherdev(0); - if (!netdev) { - err = -ENOMEM; - goto out; - } - - platform_set_drvdata(dev, netdev); - - netdev->netdev_ops = &mipsnet_netdev_ops; - - /* - * TODO: probe for these or load them from PARAM - */ - netdev->base_addr = 0x4200; - netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 + - inl(regaddr(netdev, interruptInfo)); - - /* Get the io region now, get irq on open() */ - if (!request_region(netdev->base_addr, sizeof(struct mipsnet_regs), - "mipsnet")) { - err = -EBUSY; - goto out_free_netdev; - } - - /* - * Lacking any better mechanism to allocate a MAC address we use a - * random one ... - */ - eth_hw_addr_random(netdev); - - err = register_netdev(netdev); - if (err) { - printk(KERN_ERR "MIPSNet: failed to register netdev.\n"); - goto out_free_region; - } - - return 0; - -out_free_region: - release_region(netdev->base_addr, sizeof(struct mipsnet_regs)); - -out_free_netdev: - free_netdev(netdev); - -out: - return err; -} - -static int __devexit mipsnet_device_remove(struct platform_device *device) -{ - struct net_device *dev = platform_get_drvdata(device); - - unregister_netdev(dev); - release_region(dev->base_addr, sizeof(struct mipsnet_regs)); - free_netdev(dev); - platform_set_drvdata(device, NULL); - - return 0; -} - -static struct platform_driver mipsnet_driver = { - .driver = { - .name = mipsnet_string, - .owner = THIS_MODULE, - }, - .probe = mipsnet_probe, - .remove = __devexit_p(mipsnet_device_remove), -}; - -static int __init mipsnet_init_module(void) -{ - int err; - - printk(KERN_INFO "MIPSNet Ethernet driver. Version: %s. 
" - "(c)2005 MIPS Technologies, Inc.\n", MIPSNET_VERSION); - - err = platform_driver_register(&mipsnet_driver); - if (err) - printk(KERN_ERR "Driver registration failed\n"); - - return err; -} - -static void __exit mipsnet_exit_module(void) -{ - platform_driver_unregister(&mipsnet_driver); -} - -module_init(mipsnet_init_module); -module_exit(mipsnet_exit_module); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index f45def01a98e..876beceaf2d7 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -3409,7 +3409,7 @@ set_speed: pause_flags = 0; /* setup pause frame */ - if (np->duplex != 0) { + if (netif_running(dev) && (np->duplex != 0)) { if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM); @@ -4435,7 +4435,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void regs->version = FORCEDETH_REGS_VER; spin_lock_irq(&np->lock); - for (i = 0; i <= np->register_size/sizeof(u32); i++) + for (i = 0; i < np->register_size/sizeof(u32); i++) rbuf[i] = readl(base + i*sizeof(u32)); spin_unlock_irq(&np->lock); } @@ -5455,6 +5455,7 @@ static int nv_close(struct net_device *dev) netif_stop_queue(dev); spin_lock_irq(&np->lock); + nv_update_pause(dev, 0); /* otherwise stop_tx bricks NIC */ nv_stop_rxtx(dev); nv_txrx_reset(dev); @@ -5904,11 +5905,19 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i goto out_error; } + netif_carrier_off(dev); + + /* Some NICs freeze when TX pause is enabled while NIC is + * down, and this stays across warm reboots. The sequence + * below should be enough to recover from that state. + */ + nv_update_pause(dev, 0); + nv_start_tx(dev); + nv_stop_tx(dev); + if (id->driver_data & DEV_HAS_VLAN) nv_vlan_mode(dev, dev->features); - netif_carrier_off(dev); - dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 4069edab229e..53743f7a2ca9 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -346,28 +346,15 @@ static phy_interface_t lpc_phy_interface_mode(struct device *dev) "phy-mode", NULL); if (mode && !strcmp(mode, "mii")) return PHY_INTERFACE_MODE_MII; - return PHY_INTERFACE_MODE_RMII; } - - /* non-DT */ -#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT - return PHY_INTERFACE_MODE_MII; -#else return PHY_INTERFACE_MODE_RMII; -#endif } static bool use_iram_for_net(struct device *dev) { if (dev && dev->of_node) return of_property_read_bool(dev->of_node, "use-iram"); - - /* non-DT */ -#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET - return true; -#else return false; -#endif } /* Receive Status information word */ diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b47d5b35024e..8b4e0a93f45e 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -77,7 +77,7 @@ static const int multicast_filter_limit = 32; #define MAX_READ_REQUEST_SHIFT 12 -#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ #define SafeMtu 0x1c20 /* ... 
actually life sucks beyond ~7k */ #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ @@ -287,6 +287,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 46df3a04030c..24c2305d7948 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -8,7 +8,7 @@ config SH_ETH (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ - CPU_SUBTYPE_SH7757 || ARCH_R8A7740) + CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || ARCH_R8A7779) select CRC32 select NET_CORE select MII @@ -18,4 +18,4 @@ config SH_ETH Renesas SuperH Ethernet device driver. The CPUs supported by this driver are: - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, - and R8A7740. + R8A7740 and R8A7779. diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index af0b867a6cf6..bad8f2eec9b4 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -78,7 +78,7 @@ static void sh_eth_select_mii(struct net_device *ndev) #endif /* There is CPU dependent code */ -#if defined(CONFIG_CPU_SUBTYPE_SH7724) +#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779) #define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_duplex(struct net_device *ndev) { @@ -93,13 +93,18 @@ static void sh_eth_set_duplex(struct net_device *ndev) static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned int bits = ECMR_RTM; + +#if defined(CONFIG_ARCH_R8A7779) + bits |= ECMR_ELB; +#endif switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR); break; default: break; } diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c index bb8c8222122b..4d15bf413bdc 100644 --- a/drivers/net/ethernet/seeq/sgiseeq.c +++ b/drivers/net/ethernet/seeq/sgiseeq.c @@ -751,6 +751,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev) sp->srings = sr; sp->rx_desc = sp->srings->rxvector; sp->tx_desc = sp->srings->txvector; + spin_lock_init(&sp->tx_lock); /* A couple calculations now, saves many cycles later. */ setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS); diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index fb3cbc27063c..25906c1d1b15 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -34,3 +34,10 @@ config SFC_SRIOV This enables support for the SFC9000 I/O Virtualization features, allowing accelerated network performance in virtualized environments.
+config SFC_PTP + bool "Solarflare SFC9000-family PTP support" + depends on SFC && PTP_1588_CLOCK && !(SFC=y && PTP_1588_CLOCK=m) + default y + ---help--- + This enables support for the Precision Time Protocol (PTP) + on SFC9000-family NICs diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile index ea1f8db57318..e11f2ecf69d9 100644 --- a/drivers/net/ethernet/sfc/Makefile +++ b/drivers/net/ethernet/sfc/Makefile @@ -5,5 +5,6 @@ sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \ mcdi.o mcdi_phy.o mcdi_mon.o sfc-$(CONFIG_SFC_MTD) += mtd.o sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o +sfc-$(CONFIG_SFC_PTP) += ptp.o obj-$(CONFIG_SFC) += sfc.o diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 65a8d49106a4..96bd980e828d 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx); #define EFX_ASSERT_RESET_SERIALISED(efx) \ do { \ - if ((efx->state == STATE_RUNNING) || \ + if ((efx->state == STATE_READY) || \ (efx->state == STATE_DISABLED)) \ ASSERT_RTNL(); \ } while (0) +static int efx_check_disabled(struct efx_nic *efx) +{ + if (efx->state == STATE_DISABLED) { + netif_err(efx, drv, efx->net_dev, + "device is disabled due to earlier errors\n"); + return -EIO; + } + return 0; +} + /************************************************************************** * * Event queue processing @@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx) efx->rx_buffer_order = get_order(efx->rx_buffer_len + sizeof(struct efx_rx_page_state)); + /* We must keep at least one descriptor in a TX ring empty. + * We could avoid this when the queue size does not exactly + * match the hardware ring size, but it's not that important. + * Therefore we stop the queue when one more skb might fill + * the ring completely. We wake it when half way back to + * empty. + */ + efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx); + efx->txq_wake_thresh = efx->txq_stop_thresh / 2; + /* Initialise the channels */ efx_for_each_channel(channel, efx) { efx_for_each_channel_tx_queue(tx_queue, channel) @@ -714,6 +734,7 @@ static void efx_remove_channel(struct efx_channel *channel) efx_for_each_possible_channel_tx_queue(tx_queue, channel) efx_remove_tx_queue(tx_queue); efx_remove_eventq(channel); + channel->type->post_remove(channel); } static void efx_remove_channels(struct efx_nic *efx) @@ -730,7 +751,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; u32 old_rxq_entries, old_txq_entries; unsigned i, next_buffer_table = 0; - int rc = 0; + int rc; + + rc = efx_check_disabled(efx); + if (rc) + return rc; /* Not all channels should be reallocated. We must avoid * reallocating their buffer table entries. 
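The txq_stop_thresh/txq_wake_thresh values computed in efx_start_datapath() above are consumed in the TX path roughly as follows. This is a simplified sketch that combines the stop and wake checks in one hypothetical helper; the real sfc code splits them between the xmit and completion paths and also handles queue pairs and memory ordering:

static void check_tx_fill_level(struct efx_nic *efx,
                                struct efx_tx_queue *tx_queue)
{
        unsigned int fill_level =
                tx_queue->insert_count - tx_queue->old_read_count;

        /* Stop while one more maximally-fragmented skb could overfill
         * the ring; wake with hysteresis (half way back to empty) to
         * avoid stop/wake ping-pong on the queue. */
        if (fill_level >= efx->txq_stop_thresh)
                netif_tx_stop_queue(tx_queue->core_txq);
        else if (fill_level <= efx->txq_wake_thresh)
                netif_tx_wake_queue(tx_queue->core_txq);
}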
@@ -828,6 +853,7 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) static const struct efx_channel_type efx_default_channel_type = { .pre_probe = efx_channel_dummy_op_int, + .post_remove = efx_channel_dummy_op_void, .get_name = efx_get_channel_name, .copy = efx_copy_channel, .keep_eventq = false, @@ -838,6 +864,10 @@ int efx_channel_dummy_op_int(struct efx_channel *channel) return 0; } +void efx_channel_dummy_op_void(struct efx_channel *channel) +{ +} + /************************************************************************** * * Port handling @@ -1365,6 +1395,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq) { struct efx_channel *channel; + BUG_ON(efx->state == STATE_DISABLED); + if (efx->legacy_irq) efx->legacy_irq_enabled = true; efx_nic_enable_interrupts(efx); @@ -1382,6 +1414,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq) { struct efx_channel *channel; + if (efx->state == STATE_DISABLED) + return; + efx_mcdi_mode_poll(efx); efx_nic_disable_interrupts(efx); @@ -1422,10 +1457,16 @@ static void efx_set_channels(struct efx_nic *efx) efx->tx_channel_offset = separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; - /* We need to adjust the TX queue numbers if we have separate + /* We need to mark which channels really have RX and TX + * queues, and adjust the TX queue numbers if we have separate * RX-only and TX-only channels. */ efx_for_each_channel(channel, efx) { + if (channel->channel < efx->n_rx_channels) + channel->rx_queue.core_index = channel->channel; + else + channel->rx_queue.core_index = -1; + efx_for_each_channel_tx_queue(tx_queue, channel) tx_queue->queue -= (efx->tx_channel_offset * EFX_TXQ_TYPES); @@ -1533,22 +1574,21 @@ static int efx_probe_all(struct efx_nic *efx) return rc; } -/* Called after previous invocation(s) of efx_stop_all, restarts the port, - * kernel transmit queues and NAPI processing, and ensures that the port is - * scheduled to be reconfigured. This function is safe to call multiple - * times when the NIC is in any state. +/* If the interface is supposed to be running but is not, start + * the hardware and software data path, regular activity for the port + * (MAC statistics, link polling, etc.) and schedule the port to be + * reconfigured. Interrupts must already be enabled. This function + * is safe to call multiple times, so long as the NIC is not disabled. + * Requires the RTNL lock. */ static void efx_start_all(struct efx_nic *efx) { EFX_ASSERT_RESET_SERIALISED(efx); + BUG_ON(efx->state == STATE_DISABLED); /* Check that it is appropriate to restart the interface. All * of these flags are safe to read under just the rtnl lock */ - if (efx->port_enabled) - return; - if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) - return; - if (!netif_running(efx->net_dev)) + if (efx->port_enabled || !netif_running(efx->net_dev)) return; efx_start_port(efx); @@ -1582,11 +1622,11 @@ static void efx_flush_all(struct efx_nic *efx) cancel_work_sync(&efx->mac_work); } -/* Quiesce hardware and software without bringing the link down. - * Safe to call multiple times, when the nic and interface is in any - * state. The caller is guaranteed to subsequently be in a position - * to modify any hardware and software state they see fit without - * taking locks. */ +/* Quiesce the hardware and software data path, and regular activity + * for the port without bringing the link down. 
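efx_set_channels() above records, per channel, which network-core RX queue (if any) the channel backs: core_index >= 0 for real RX channels, -1 otherwise. The payoff is the efx_channel_has_rx_queue() change near the end of this section, where the test becomes a simple sign check instead of a comparison against n_rx_channels; restated here for reference:

static inline bool channel_has_rx_queue(const struct efx_channel *channel)
{
        /* -1 marks a channel with no network-core RX queue */
        return channel->rx_queue.core_index >= 0;
}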
Safe to call multiple + * times with the NIC in almost any state, but interrupts should be + * enabled. Requires the RTNL lock. + */ static void efx_stop_all(struct efx_nic *efx) { EFX_ASSERT_RESET_SERIALISED(efx); @@ -1739,7 +1779,8 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) struct efx_nic *efx = netdev_priv(net_dev); struct mii_ioctl_data *data = if_mii(ifr); - EFX_ASSERT_RESET_SERIALISED(efx); + if (cmd == SIOCSHWTSTAMP) + return efx_ptp_ioctl(efx, ifr, cmd); /* Convert phy_id from older PRTAD/DEVAD format */ if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && @@ -1820,13 +1861,14 @@ static void efx_netpoll(struct net_device *net_dev) static int efx_net_open(struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); - EFX_ASSERT_RESET_SERIALISED(efx); + int rc; netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", raw_smp_processor_id()); - if (efx->state == STATE_DISABLED) - return -EIO; + rc = efx_check_disabled(efx); + if (rc) + return rc; if (efx->phy_mode & PHY_MODE_SPECIAL) return -EBUSY; if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) @@ -1852,10 +1894,8 @@ static int efx_net_stop(struct net_device *net_dev) netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", raw_smp_processor_id()); - if (efx->state != STATE_DISABLED) { - /* Stop the device and flush all the channels */ - efx_stop_all(efx); - } + /* Stop the device and flush all the channels */ + efx_stop_all(efx); return 0; } @@ -1915,9 +1955,11 @@ static void efx_watchdog(struct net_device *net_dev) static int efx_change_mtu(struct net_device *net_dev, int new_mtu) { struct efx_nic *efx = netdev_priv(net_dev); + int rc; - EFX_ASSERT_RESET_SERIALISED(efx); - + rc = efx_check_disabled(efx); + if (rc) + return rc; if (new_mtu > EFX_MAX_MTU) return -EINVAL; @@ -1926,8 +1968,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu) netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); mutex_lock(&efx->mac_lock); - /* Reconfigure the MAC before enabling the dma queues so that - * the RX buffers don't overflow */ net_dev->mtu = new_mtu; efx->type->reconfigure_mac(efx); mutex_unlock(&efx->mac_lock); @@ -1942,8 +1982,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data) struct sockaddr *addr = data; char *new_addr = addr->sa_data; - EFX_ASSERT_RESET_SERIALISED(efx); - if (!is_valid_ether_addr(new_addr)) { netif_err(efx, drv, efx->net_dev, "invalid ethernet MAC address requested: %pM\n", @@ -2079,11 +2117,27 @@ static int efx_register_netdev(struct efx_nic *efx) rtnl_lock(); + /* Enable resets to be scheduled and check whether any were + * already requested. If so, the NIC is probably hosed so we + * abort. 
+ */ + efx->state = STATE_READY; + smp_mb(); /* ensure we change state before checking reset_pending */ + if (efx->reset_pending) { + netif_err(efx, probe, efx->net_dev, + "aborting probe due to scheduled reset\n"); + rc = -EIO; + goto fail_locked; + } + rc = dev_alloc_name(net_dev, net_dev->name); if (rc < 0) goto fail_locked; efx_update_name(efx); + /* Always start with carrier off; PHY events will detect the link */ + netif_carrier_off(net_dev); + rc = register_netdevice(net_dev); if (rc) goto fail_locked; @@ -2094,9 +2148,6 @@ static int efx_register_netdev(struct efx_nic *efx) efx_init_tx_queue_core_txq(tx_queue); } - /* Always start with carrier off; PHY events will detect the link */ - netif_carrier_off(net_dev); - rtnl_unlock(); rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); @@ -2108,14 +2159,14 @@ static int efx_register_netdev(struct efx_nic *efx) return 0; +fail_registered: + rtnl_lock(); + unregister_netdevice(net_dev); fail_locked: + efx->state = STATE_UNINIT; rtnl_unlock(); netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); return rc; - -fail_registered: - unregister_netdev(net_dev); - return rc; } static void efx_unregister_netdev(struct efx_nic *efx) @@ -2138,7 +2189,11 @@ static void efx_unregister_netdev(struct efx_nic *efx) strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); - unregister_netdev(efx->net_dev); + + rtnl_lock(); + unregister_netdevice(efx->net_dev); + efx->state = STATE_UNINIT; + rtnl_unlock(); } /************************************************************************** @@ -2154,9 +2209,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) EFX_ASSERT_RESET_SERIALISED(efx); efx_stop_all(efx); - mutex_lock(&efx->mac_lock); - efx_stop_interrupts(efx, false); + + mutex_lock(&efx->mac_lock); if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) efx->phy_op->fini(efx); efx->type->fini(efx); @@ -2276,16 +2331,15 @@ static void efx_reset_work(struct work_struct *data) if (!pending) return; - /* If we're not RUNNING then don't reset. Leave the reset_pending - * flags set so that efx_pci_probe_main will be retried */ - if (efx->state != STATE_RUNNING) { - netif_info(efx, drv, efx->net_dev, - "scheduled reset quenched. NIC not RUNNING\n"); - return; - } - rtnl_lock(); - (void)efx_reset(efx, fls(pending) - 1); + + /* We checked the state in efx_schedule_reset() but it may + * have changed by now. Now that we have the RTNL lock, + * it cannot change again. + */ + if (efx->state == STATE_READY) + (void)efx_reset(efx, fls(pending) - 1); + rtnl_unlock(); } @@ -2311,6 +2365,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) } set_bit(method, &efx->reset_pending); + smp_mb(); /* ensure we change reset_pending before checking state */ + + /* If we're not READY then just leave the flags set as the cue + * to abort probing or reschedule the reset later. + */ + if (ACCESS_ONCE(efx->state) != STATE_READY) + return; /* efx_process_channel() will no longer read events once a * reset is scheduled. So switch back to poll'd MCDI completions. */ @@ -2376,13 +2437,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = { /* This zeroes out and then fills in the invariants in a struct * efx_nic (including all sub-structures). 
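The two smp_mb() calls introduced above, one in efx_register_netdev() and one in efx_schedule_reset(), form a classic store/load pairing: each side writes its flag, issues a full barrier, then reads the other side's flag, so at least one side must observe the other's write and a reset requested during probe can never be lost. Schematically, in comment form (not code from the patch):

/*
 * CPU A: efx_register_netdev()      CPU B: efx_schedule_reset()
 *   efx->state = STATE_READY;         set_bit(method, &efx->reset_pending);
 *   smp_mb();                         smp_mb();
 *   if (efx->reset_pending)           if (ACCESS_ONCE(efx->state) != STATE_READY)
 *           abort the probe;                  return;  (A will see the bit)
 *
 * If A misses the bit, B's barrier guarantees B sees STATE_READY and
 * queues the reset work; if B bails out early, A's barrier guarantees
 * A sees reset_pending and aborts. Either way the request is handled.
 */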
*/ -static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, +static int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev, struct net_device *net_dev) { int i; /* Initialise common structures */ - memset(efx, 0, sizeof(*efx)); spin_lock_init(&efx->biu_lock); #ifdef CONFIG_SFC_MTD INIT_LIST_HEAD(&efx->mtd_list); @@ -2392,7 +2452,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); efx->pci_dev = pci_dev; efx->msg_enable = debug; - efx->state = STATE_INIT; + efx->state = STATE_UNINIT; strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); efx->net_dev = net_dev; @@ -2409,8 +2469,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, goto fail; } - efx->type = type; - EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); /* Higher numbered interrupt modes are less capable! */ @@ -2455,6 +2513,12 @@ static void efx_fini_struct(struct efx_nic *efx) */ static void efx_pci_remove_main(struct efx_nic *efx) { + /* Flush reset_work. It can no longer be scheduled since we + * are not READY. + */ + BUG_ON(efx->state == STATE_READY); + cancel_work_sync(&efx->reset_work); + #ifdef CONFIG_RFS_ACCEL free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); efx->net_dev->rx_cpu_rmap = NULL; @@ -2480,24 +2544,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev) /* Mark the NIC as fini, then stop the interface */ rtnl_lock(); - efx->state = STATE_FINI; dev_close(efx->net_dev); - - /* Allow any queued efx_resets() to complete */ + efx_stop_interrupts(efx, false); rtnl_unlock(); - efx_stop_interrupts(efx, false); efx_sriov_fini(efx); efx_unregister_netdev(efx); efx_mtd_remove(efx); - /* Wait for any scheduled resets to complete. No more will be - * scheduled from this point because efx_stop_all() has been - * called, we are no longer registered with driverlink, and - * the net_device's have been removed. 
*/ - cancel_work_sync(&efx->reset_work); - efx_pci_remove_main(efx); efx_fini_io(efx); @@ -2617,7 +2672,6 @@ static int efx_pci_probe_main(struct efx_nic *efx) static int __devinit efx_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *entry) { - const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data; struct net_device *net_dev; struct efx_nic *efx; int rc; @@ -2627,10 +2681,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, EFX_MAX_RX_QUEUES); if (!net_dev) return -ENOMEM; - net_dev->features |= (type->offload_features | NETIF_F_SG | + efx = netdev_priv(net_dev); + efx->type = (const struct efx_nic_type *) entry->driver_data; + net_dev->features |= (efx->type->offload_features | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_RXCSUM); - if (type->offload_features & NETIF_F_V6_CSUM) + if (efx->type->offload_features & NETIF_F_V6_CSUM) net_dev->features |= NETIF_F_TSO6; /* Mask for features that also apply to VLAN devices */ net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | @@ -2638,10 +2694,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, NETIF_F_RXCSUM); /* All offloads can be toggled */ net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; - efx = netdev_priv(net_dev); pci_set_drvdata(pci_dev, efx); SET_NETDEV_DEV(net_dev, &pci_dev->dev); - rc = efx_init_struct(efx, type, pci_dev, net_dev); + rc = efx_init_struct(efx, pci_dev, net_dev); if (rc) goto fail1; @@ -2656,28 +2711,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev, goto fail2; rc = efx_pci_probe_main(efx); - - /* Serialise against efx_reset(). No more resets will be - * scheduled since efx_stop_all() has been called, and we have - * not and never have been registered. - */ - cancel_work_sync(&efx->reset_work); - if (rc) goto fail3; - /* If there was a scheduled reset during probe, the NIC is - * probably hosed anyway. 
- */ - if (efx->reset_pending) { - rc = -EIO; - goto fail4; - } - - /* Switch to the running state before we expose the device to the OS, - * so that dev_open()|efx_start_all() will actually start the device */ - efx->state = STATE_RUNNING; - rc = efx_register_netdev(efx); if (rc) goto fail4; @@ -2717,12 +2753,18 @@ static int efx_pm_freeze(struct device *dev) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); - efx->state = STATE_FINI; + rtnl_lock(); - netif_device_detach(efx->net_dev); + if (efx->state != STATE_DISABLED) { + efx->state = STATE_UNINIT; - efx_stop_all(efx); - efx_stop_interrupts(efx, false); + netif_device_detach(efx->net_dev); + + efx_stop_all(efx); + efx_stop_interrupts(efx, false); + } + + rtnl_unlock(); return 0; } @@ -2731,21 +2773,25 @@ static int efx_pm_thaw(struct device *dev) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); - efx->state = STATE_INIT; + rtnl_lock(); - efx_start_interrupts(efx, false); + if (efx->state != STATE_DISABLED) { + efx_start_interrupts(efx, false); - mutex_lock(&efx->mac_lock); - efx->phy_op->reconfigure(efx); - mutex_unlock(&efx->mac_lock); + mutex_lock(&efx->mac_lock); + efx->phy_op->reconfigure(efx); + mutex_unlock(&efx->mac_lock); - efx_start_all(efx); + efx_start_all(efx); - netif_device_attach(efx->net_dev); + netif_device_attach(efx->net_dev); - efx->state = STATE_RUNNING; + efx->state = STATE_READY; - efx->type->resume_wol(efx); + efx->type->resume_wol(efx); + } + + rtnl_unlock(); /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ queue_work(reset_workqueue, &efx->reset_work); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 70755c97251a..f11170bc48bf 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -102,6 +102,7 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {} /* Channels */ extern int efx_channel_dummy_op_int(struct efx_channel *channel); +extern void efx_channel_dummy_op_void(struct efx_channel *channel); extern void efx_process_channel_now(struct efx_channel *channel); extern int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 8cba2df82b18..9df556c01b8e 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -529,9 +529,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev, if (!efx_tests) goto fail; - - ASSERT_RTNL(); - if (efx->state != STATE_RUNNING) { + if (efx->state != STATE_READY) { rc = -EIO; goto fail1; } @@ -863,8 +861,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, &ip_entry->ip4dst, &ip_entry->pdst); if (rc != 0) { rc = efx_filter_get_ipv4_full( - &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, - &ip_entry->ip4dst, &ip_entry->pdst); + &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst, + &ip_entry->ip4src, &ip_entry->psrc); EFX_WARN_ON_PARANOID(rc); ip_mask->ip4src = ~0; ip_mask->psrc = ~0; @@ -1176,6 +1174,7 @@ const struct ethtool_ops efx_ethtool_ops = { .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_indir = efx_ethtool_get_rxfh_indir, .set_rxfh_indir = efx_ethtool_set_rxfh_indir, + .get_ts_info = efx_ptp_get_ts_info, .get_module_info = efx_ethtool_get_module_info, .get_module_eeprom = efx_ethtool_get_module_eeprom, }; diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c index 8687a6c3db0d..ec1e99d0dcad 100644 --- 
a/drivers/net/ethernet/sfc/falcon_boards.c +++ b/drivers/net/ethernet/sfc/falcon_boards.c @@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev, new_mode = PHY_MODE_SPECIAL; if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) { err = 0; - } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { + } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) { err = -EBUSY; } else { /* Reset the PHY, reconfigure the MAC and enable/disable diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index fc5e7bbcbc9e..aea43cbd0520 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -320,14 +320,20 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, efx_mcdi_complete(mcdi); } -/* Issue the given command by writing the data into the shared memory PDU, - * ring the doorbell and wait for completion. Copyout the result. */ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, size_t *outlen_actual) { + efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); + return efx_mcdi_rpc_finish(efx, cmd, inlen, + outbuf, outlen, outlen_actual); +} + +void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, + size_t inlen) +{ struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - int rc; + BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); efx_mcdi_acquire(mcdi); @@ -338,6 +344,15 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, spin_unlock_bh(&mcdi->iface_lock); efx_mcdi_copyin(efx, cmd, inbuf, inlen); +} + +int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, + u8 *outbuf, size_t outlen, size_t *outlen_actual) +{ + struct efx_mcdi_iface *mcdi = efx_mcdi(efx); + int rc; + + BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); if (mcdi->mode == MCDI_MODE_POLL) rc = efx_mcdi_poll(efx); @@ -563,6 +578,11 @@ void efx_mcdi_process_event(struct efx_channel *channel, case MCDI_EVENT_CODE_FLR: efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF)); break; + case MCDI_EVENT_CODE_PTP_RX: + case MCDI_EVENT_CODE_PTP_FAULT: + case MCDI_EVENT_CODE_PTP_PPS: + efx_ptp_event(efx, event); + break; default: netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", @@ -641,9 +661,8 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, u16 *fw_subtype_list, u32 *capabilities) { uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN]; - size_t outlen; + size_t outlen, offset, i; int port_num = efx_port_num(efx); - int offset; int rc; BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); @@ -663,11 +682,18 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; if (mac_address) memcpy(mac_address, outbuf + offset, ETH_ALEN); - if (fw_subtype_list) - memcpy(fw_subtype_list, - outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, - MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM * - sizeof(fw_subtype_list[0])); + if (fw_subtype_list) { + /* Byte-swap and truncate or zero-pad as necessary */ + offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST; + for (i = 0; + i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; + i++) { + fw_subtype_list[i] = + (offset + 2 <= outlen) ? 
+ le16_to_cpup((__le16 *)(outbuf + offset)) : 0; + offset += 2; + } + } if (capabilities) { if (port_num) *capabilities = MCDI_DWORD(outbuf, @@ -1169,6 +1195,9 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx) __le32 *qid; int rc, count; + BUILD_BUG_ON(EFX_MAX_CHANNELS > + MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); + qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL); if (qid == NULL) return -ENOMEM; diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h index 0bdf3e331832..dc25caaa3983 100644 --- a/drivers/net/ethernet/sfc/mcdi.h +++ b/drivers/net/ethernet/sfc/mcdi.h @@ -71,6 +71,12 @@ extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, size_t *outlen_actual); +extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, + const u8 *inbuf, size_t inlen); +extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, + u8 *outbuf, size_t outlen, + size_t *outlen_actual); + extern int efx_mcdi_poll_reboot(struct efx_nic *efx); extern void efx_mcdi_mode_poll(struct efx_nic *efx); extern void efx_mcdi_mode_event(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h index db4beed97669..9d426d0457bd 100644 --- a/drivers/net/ethernet/sfc/mcdi_pcol.h +++ b/drivers/net/ethernet/sfc/mcdi_pcol.h @@ -289,6 +289,7 @@ #define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */ #define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */ #define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */ +#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum */ #define MCDI_EVENT_CMDDONE_DATA_OFST 0 #define MCDI_EVENT_CMDDONE_DATA_LBN 0 #define MCDI_EVENT_CMDDONE_DATA_WIDTH 32 @@ -491,12 +492,12 @@ /* MC_CMD_GET_FPGAREG_OUT msgresponse */ #define MC_CMD_GET_FPGAREG_OUT_LENMIN 1 -#define MC_CMD_GET_FPGAREG_OUT_LENMAX 255 +#define MC_CMD_GET_FPGAREG_OUT_LENMAX 252 #define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num)) #define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0 #define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1 #define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1 -#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 255 +#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252 /***********************************/ @@ -507,13 +508,13 @@ /* MC_CMD_PUT_FPGAREG_IN msgrequest */ #define MC_CMD_PUT_FPGAREG_IN_LENMIN 5 -#define MC_CMD_PUT_FPGAREG_IN_LENMAX 255 +#define MC_CMD_PUT_FPGAREG_IN_LENMAX 252 #define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num)) #define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0 #define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4 #define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1 #define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1 -#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 251 +#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248 /* MC_CMD_PUT_FPGAREG_OUT msgresponse */ #define MC_CMD_PUT_FPGAREG_OUT_LEN 0 @@ -560,7 +561,7 @@ /* MC_CMD_PTP_IN_TRANSMIT msgrequest */ #define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13 -#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 255 +#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252 #define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num)) /* MC_CMD_PTP_IN_CMD_OFST 0 */ /* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */ @@ -568,7 +569,7 @@ #define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12 #define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1 #define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1 -#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 243 +#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240 /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */ #define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8 @@ -1145,7 +1146,7 @@ /* MC_CMD_PUTS_IN msgrequest */ #define MC_CMD_PUTS_IN_LENMIN 13 
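The efx_mcdi_get_board_cfg() rework above replaces a raw memcpy of the firmware's subtype list with an element-wise copy that byte-swaps each __le16 and zero-pads when the response is shorter than the caller's MAXNUM-sized array. As a standalone sketch with illustrative names:

static void copy_le16_list(u16 *dst, size_t dst_count,
                           const u8 *resp, size_t resp_len, size_t offset)
{
        size_t i;

        for (i = 0; i < dst_count; i++) {
                /* Take the entry if the response still covers it,
                 * otherwise zero-pad the caller's array. */
                dst[i] = (offset + 2 <= resp_len) ?
                        le16_to_cpup((const __le16 *)(resp + offset)) : 0;
                offset += 2;
        }
}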
-#define MC_CMD_PUTS_IN_LENMAX 255 +#define MC_CMD_PUTS_IN_LENMAX 252 #define MC_CMD_PUTS_IN_LEN(num) (12+1*(num)) #define MC_CMD_PUTS_IN_DEST_OFST 0 #define MC_CMD_PUTS_IN_UART_LBN 0 @@ -1157,7 +1158,7 @@ #define MC_CMD_PUTS_IN_STRING_OFST 12 #define MC_CMD_PUTS_IN_STRING_LEN 1 #define MC_CMD_PUTS_IN_STRING_MINNUM 1 -#define MC_CMD_PUTS_IN_STRING_MAXNUM 243 +#define MC_CMD_PUTS_IN_STRING_MAXNUM 240 /* MC_CMD_PUTS_OUT msgresponse */ #define MC_CMD_PUTS_OUT_LEN 0 @@ -1947,12 +1948,12 @@ /* MC_CMD_NVRAM_READ_OUT msgresponse */ #define MC_CMD_NVRAM_READ_OUT_LENMIN 1 -#define MC_CMD_NVRAM_READ_OUT_LENMAX 255 +#define MC_CMD_NVRAM_READ_OUT_LENMAX 252 #define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num)) #define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0 #define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1 #define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1 -#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 255 +#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252 /***********************************/ @@ -1963,7 +1964,7 @@ /* MC_CMD_NVRAM_WRITE_IN msgrequest */ #define MC_CMD_NVRAM_WRITE_IN_LENMIN 13 -#define MC_CMD_NVRAM_WRITE_IN_LENMAX 255 +#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252 #define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num)) #define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0 /* Enum values, see field(s): */ @@ -1973,7 +1974,7 @@ #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1 #define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1 -#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 243 +#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240 /* MC_CMD_NVRAM_WRITE_OUT msgresponse */ #define MC_CMD_NVRAM_WRITE_OUT_LEN 0 @@ -2305,13 +2306,13 @@ /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */ #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5 -#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 255 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num)) #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1 #define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1 -#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 251 +#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248 /***********************************/ diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c index 758148379b0e..08f825b71ac8 100644 --- a/drivers/net/ethernet/sfc/mtd.c +++ b/drivers/net/ethernet/sfc/mtd.c @@ -585,6 +585,7 @@ static const struct siena_nvram_type_info siena_nvram_types[] = { [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" }, [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" }, [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" }, + [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" }, }; static int siena_mtd_probe_partition(struct efx_nic *efx, @@ -598,7 +599,8 @@ static int siena_mtd_probe_partition(struct efx_nic *efx, bool protected; int rc; - if (type >= ARRAY_SIZE(siena_nvram_types)) + if (type >= ARRAY_SIZE(siena_nvram_types) || + siena_nvram_types[type].name == NULL) return -ENODEV; info = &siena_nvram_types[type]; @@ -627,7 +629,8 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx, struct efx_mtd *efx_mtd) { struct efx_mtd_partition *part; - uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM]; + uint16_t fw_subtype_list[ + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM]; int rc; rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL); diff --git 
a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index cd9c0a989692..c1a010cda89b 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -37,7 +37,7 @@ * **************************************************************************/ -#define EFX_DRIVER_VERSION "3.1" +#define EFX_DRIVER_VERSION "3.2" #ifdef DEBUG #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) @@ -56,7 +56,8 @@ #define EFX_MAX_CHANNELS 32U #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS #define EFX_EXTRA_CHANNEL_IOV 0 -#define EFX_MAX_EXTRA_CHANNELS 1U +#define EFX_EXTRA_CHANNEL_PTP 1 +#define EFX_MAX_EXTRA_CHANNELS 2U /* Checksum generation is a per-queue option in hardware, so each * queue visible to the networking core is backed by two hardware TX @@ -68,6 +69,9 @@ #define EFX_TXQ_TYPES 4 #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) +/* Forward declare Precision Time Protocol (PTP) support structure. */ +struct efx_ptp_data; + struct efx_self_tests; /** @@ -91,29 +95,31 @@ struct efx_special_buffer { }; /** - * struct efx_tx_buffer - An Efx TX buffer - * @skb: The associated socket buffer. - * Set only on the final fragment of a packet; %NULL for all other - * fragments. When this fragment completes, then we can free this - * skb. - * @tsoh: The associated TSO header structure, or %NULL if this - * buffer is not a TSO header. + * struct efx_tx_buffer - buffer state for a TX descriptor + * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be + * freed when descriptor completes + * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be + * freed when descriptor completes. * @dma_addr: DMA address of the fragment. + * @flags: Flags for allocation and DMA mapping type * @len: Length of this fragment. * This field is zero when the queue slot is empty. - * @continuation: True if this fragment is not the end of a packet. - * @unmap_single: True if dma_unmap_single should be used. * @unmap_len: Length of this fragment to unmap */ struct efx_tx_buffer { - const struct sk_buff *skb; - struct efx_tso_header *tsoh; + union { + const struct sk_buff *skb; + void *heap_buf; + }; dma_addr_t dma_addr; + unsigned short flags; unsigned short len; - bool continuation; - bool unmap_single; unsigned short unmap_len; }; +#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */ +#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */ +#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */ +#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */ /** * struct efx_tx_queue - An Efx TX queue @@ -133,6 +139,7 @@ struct efx_tx_buffer { * @channel: The associated channel * @core_txq: The networking core TX queue structure * @buffer: The software buffer ring + * @tsoh_page: Array of pages of TSO header buffers * @txd: The hardware descriptor ring * @ptr_mask: The size of the ring minus 1. * @initialised: Has hardware queue been initialised? @@ -156,9 +163,6 @@ struct efx_tx_buffer { * variable indicates that the queue is full. This is to * avoid cache-line ping-pong between the xmit path and the * completion path. - * @tso_headers_free: A list of TSO headers allocated for this TX queue - * that are not in use, and so available for new TSO sends. The list - * is protected by the TX queue lock. 
* @tso_bursts: Number of times TSO xmit invoked by kernel * @tso_long_headers: Number of packets with headers too long for standard * blocks @@ -175,6 +179,7 @@ struct efx_tx_queue { struct efx_channel *channel; struct netdev_queue *core_txq; struct efx_tx_buffer *buffer; + struct efx_buffer *tsoh_page; struct efx_special_buffer txd; unsigned int ptr_mask; bool initialised; @@ -187,7 +192,6 @@ struct efx_tx_queue { unsigned int insert_count ____cacheline_aligned_in_smp; unsigned int write_count; unsigned int old_read_count; - struct efx_tso_header *tso_headers_free; unsigned int tso_bursts; unsigned int tso_long_headers; unsigned int tso_packets; @@ -242,6 +246,8 @@ struct efx_rx_page_state { /** * struct efx_rx_queue - An Efx RX queue * @efx: The associated Efx NIC + * @core_index: Index of network core RX queue. Will be >= 0 iff this + * is associated with a real RX queue. * @buffer: The software buffer ring * @rxd: The hardware descriptor ring * @ptr_mask: The size of the ring minus 1. @@ -263,6 +269,7 @@ struct efx_rx_page_state { */ struct efx_rx_queue { struct efx_nic *efx; + int core_index; struct efx_rx_buffer *buffer; struct efx_special_buffer rxd; unsigned int ptr_mask; @@ -390,14 +397,17 @@ struct efx_channel { * @get_name: Generate the channel's name (used for its IRQ handler) * @copy: Copy the channel state prior to reallocation. May be %NULL if * reallocation is not supported. + * @receive_skb: Handle an skb ready to be passed to netif_receive_skb() * @keep_eventq: Flag for whether event queue should be kept initialised * while the device is stopped */ struct efx_channel_type { void (*handle_no_channel)(struct efx_nic *); int (*pre_probe)(struct efx_channel *); + void (*post_remove)(struct efx_channel *); void (*get_name)(struct efx_channel *, char *buf, size_t len); struct efx_channel *(*copy)(const struct efx_channel *); + void (*receive_skb)(struct efx_channel *, struct sk_buff *); bool keep_eventq; }; @@ -430,11 +440,9 @@ enum efx_int_mode { #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) enum nic_state { - STATE_INIT = 0, - STATE_RUNNING = 1, - STATE_FINI = 2, - STATE_DISABLED = 3, - STATE_MAX, + STATE_UNINIT = 0, /* device being probed/removed or is frozen */ + STATE_READY = 1, /* hardware ready and netdev registered */ + STATE_DISABLED = 2, /* device disabled due to hardware errors */ }; /* @@ -654,7 +662,7 @@ struct vfdi_status; * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues * @irq_rx_moderation: IRQ moderation time for RX event queues * @msg_enable: Log message enable flags - * @state: Device state flag. Serialised by the rtnl_lock. + * @state: Device state number (%STATE_*). Serialised by the rtnl_lock. * @reset_pending: Bitmask for pending resets * @tx_queue: TX DMA queues * @rx_queue: RX DMA queues @@ -664,6 +672,8 @@ struct vfdi_status; * should be allocated for this NIC * @rxq_entries: Size of receive queues requested by user. * @txq_entries: Size of transmit queues requested by user. + * @txq_stop_thresh: TX queue fill level at or above which we stop it. + * @txq_wake_thresh: TX queue fill level at or below which we wake it. * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches * @sram_lim_qw: Qword address limit of SRAM @@ -730,6 +740,7 @@ struct vfdi_status; * %local_addr_list. Protected by %local_lock. * @local_lock: Mutex protecting %local_addr_list and %local_page_list. 
* @peer_work: Work item to broadcast peer addresses to VMs. + * @ptp_data: PTP state data * @monitor_work: Hardware monitor workitem * @biu_lock: BIU (bus interface unit) lock * @last_irq_cpu: Last CPU to handle a possible test interrupt. This @@ -774,6 +785,9 @@ struct efx_nic { unsigned rxq_entries; unsigned txq_entries; + unsigned int txq_stop_thresh; + unsigned int txq_wake_thresh; + unsigned tx_dc_base; unsigned rx_dc_base; unsigned sram_lim_qw; @@ -854,6 +868,10 @@ struct efx_nic { struct work_struct peer_work; #endif +#ifdef CONFIG_SFC_PTP + struct efx_ptp_data *ptp_data; +#endif + /* The following fields may be written more often */ struct delayed_work monitor_work ____cacheline_aligned_in_smp; @@ -1044,7 +1062,7 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue) static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) { - return channel->channel < channel->efx->n_rx_channels; + return channel->rx_queue.core_index >= 0; } static inline struct efx_rx_queue * @@ -1116,5 +1134,13 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr) #define EFX_MAX_FRAME_LEN(mtu) \ ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16) +static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb) +{ + return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP; +} +static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb) +{ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +} #endif /* EFX_NET_DRIVER_H */ diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 326d799762d6..cdff40b65729 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) /************************************************************************** * * Generic buffer handling - * These buffers are used for interrupt status and MAC stats + * These buffers are used for interrupt status, MAC stats, etc. 
* **************************************************************************/ @@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) ++tx_queue->write_count; /* Create TX descriptor ring entry */ + BUILD_BUG_ON(EFX_TX_BUF_CONT != 1); EFX_POPULATE_QWORD_4(*txd, - FSF_AZ_TX_KER_CONT, buffer->continuation, + FSF_AZ_TX_KER_CONT, + buffer->flags & EFX_TX_BUF_CONT, FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, FSF_AZ_TX_KER_BUF_REGION, 0, FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index bab5cd9f5740..438cef11f727 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -11,6 +11,7 @@ #ifndef EFX_NIC_H #define EFX_NIC_H +#include <linux/net_tstamp.h> #include <linux/i2c-algo-bit.h> #include "net_driver.h" #include "efx.h" @@ -250,6 +251,41 @@ extern int efx_sriov_get_vf_config(struct net_device *dev, int vf, extern int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf, bool spoofchk); +struct ethtool_ts_info; +#ifdef CONFIG_SFC_PTP +extern void efx_ptp_probe(struct efx_nic *efx); +extern int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd); +extern int efx_ptp_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *ts_info); +extern bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +extern int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); +extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev); +#else +static inline void efx_ptp_probe(struct efx_nic *efx) {} +static inline int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd) +{ + return -EOPNOTSUPP; +} +static inline int efx_ptp_get_ts_info(struct net_device *net_dev, + struct ethtool_ts_info *ts_info) +{ + ts_info->so_timestamping = (SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE); + ts_info->phc_index = -1; + + return 0; +} +static inline bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +{ + return false; +} +static inline int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +{ + return NETDEV_TX_OK; +} +static inline void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) {} +#endif + extern const struct efx_nic_type falcon_a1_nic_type; extern const struct efx_nic_type falcon_b0_nic_type; extern const struct efx_nic_type siena_a0_nic_type; diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c new file mode 100644 index 000000000000..5b3dd028ce85 --- /dev/null +++ b/drivers/net/ethernet/sfc/ptp.c @@ -0,0 +1,1484 @@ +/**************************************************************************** + * Driver for Solarflare Solarstorm network controllers and boards + * Copyright 2011 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +/* Theory of operation: + * + * PTP support is assisted by firmware running on the MC, which provides + * the hardware timestamping capabilities. Both transmitted and received + * PTP event packets are queued onto internal queues for subsequent processing; + * this is because the MC operations are relatively long and would block + * NAPI/interrupt operation. + * + * Receive event processing: + * The event contains the packet's UUID and sequence number, together + * with the hardware timestamp. 
The PTP receive packet queue is searched + * for this UUID/sequence number and, if found, put on a pending queue. + * Packets not matching are delivered without timestamps (MCDI events will + * always arrive after the actual packet). + * It is important for the operation of the PTP protocol that the ordering + * of packets between the event and general port is maintained. + * + * Work queue processing: + * If work is waiting, synchronise host/hardware time + * + * Transmit: send packet through MC, which returns the transmission time + * that is converted to an appropriate timestamp. + * + * Receive: the packet's reception time is converted to an appropriate + * timestamp. + */ +#include <linux/ip.h> +#include <linux/udp.h> +#include <linux/time.h> +#include <linux/ktime.h> +#include <linux/module.h> +#include <linux/net_tstamp.h> +#include <linux/pps_kernel.h> +#include <linux/ptp_clock_kernel.h> +#include "net_driver.h" +#include "efx.h" +#include "mcdi.h" +#include "mcdi_pcol.h" +#include "io.h" +#include "regs.h" +#include "nic.h" + +/* Maximum number of events expected to make up a PTP event */ +#define MAX_EVENT_FRAGS 3 + +/* Maximum delay, ms, to begin synchronisation */ +#define MAX_SYNCHRONISE_WAIT_MS 2 + +/* How long, at most, to spend synchronising */ +#define SYNCHRONISE_PERIOD_NS 250000 + +/* How often to update the shared memory time */ +#define SYNCHRONISATION_GRANULARITY_NS 200 + +/* Minimum permitted length of a (corrected) synchronisation time */ +#define MIN_SYNCHRONISATION_NS 120 + +/* Maximum permitted length of a (corrected) synchronisation time */ +#define MAX_SYNCHRONISATION_NS 1000 + +/* How many (MC) receive events can be queued */ +#define MAX_RECEIVE_EVENTS 8 + +/* Length of (modified) moving average. */ +#define AVERAGE_LENGTH 16 + +/* How long an unmatched event or packet can be held */ +#define PKT_EVENT_LIFETIME_MS 10 + +/* Offsets into PTP packet for identification. These offsets are from the + * start of the IP header, not the MAC header. Note that neither PTP V1 nor + * PTP V2 permits the use of IPV4 options. + */ +#define PTP_DPORT_OFFSET 22 + +#define PTP_V1_VERSION_LENGTH 2 +#define PTP_V1_VERSION_OFFSET 28 + +#define PTP_V1_UUID_LENGTH 6 +#define PTP_V1_UUID_OFFSET 50 + +#define PTP_V1_SEQUENCE_LENGTH 2 +#define PTP_V1_SEQUENCE_OFFSET 58 + +/* The minimum length of a PTP V1 packet for offsets, etc. to be valid: + * includes IP header. + */ +#define PTP_V1_MIN_LENGTH 64 + +#define PTP_V2_VERSION_LENGTH 1 +#define PTP_V2_VERSION_OFFSET 29 + +/* Although PTP V2 UUIDs comprise a ClockIdentity (8) and a PortNumber (2), + * the MC only captures the last six bytes of the clock identity. These values + * reflect those, not the ones used in the standard. The standard permits + * mapping of V1 UUIDs to V2 UUIDs with these same values. + */ +#define PTP_V2_MC_UUID_LENGTH 6 +#define PTP_V2_MC_UUID_OFFSET 50 + +#define PTP_V2_SEQUENCE_LENGTH 2 +#define PTP_V2_SEQUENCE_OFFSET 58 + +/* The minimum length of a PTP V2 packet for offsets, etc. to be valid: + * includes IP header. + */ +#define PTP_V2_MIN_LENGTH 63 + +#define PTP_MIN_LENGTH 63 + +#define PTP_ADDRESS 0xe0000181 /* 224.0.1.129 */ +#define PTP_EVENT_PORT 319 +#define PTP_GENERAL_PORT 320 + +/* Annoyingly the format of the version numbers is different between + * versions 1 and 2 so it isn't possible to simply look for 1 or 2. 
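Concretely, PTP v1 carries a 16-bit versionPTP field while v2 carries its version in the low nibble of a single byte at a different offset, so the two checks read quite differently. A minimal sketch of both, using the offsets and masks defined here (illustrative only, not part of the patch; it mirrors the checks in efx_ptp_rx() further down):

        /* PTP v1: whole 16-bit field at PTP_V1_VERSION_OFFSET */
        u16 v1 = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
        bool is_v1 = (v1 == PTP_VERSION_V1);

        /* PTP v2: only the low nibble of one byte is the version */
        u8 v2 = skb->data[PTP_V2_VERSION_OFFSET];
        bool is_v2 = ((v2 & PTP_VERSION_V2_MASK) == PTP_VERSION_V2);
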
+ */ +#define PTP_VERSION_V1 1 + +#define PTP_VERSION_V2 2 +#define PTP_VERSION_V2_MASK 0x0f + +enum ptp_packet_state { + PTP_PACKET_STATE_UNMATCHED = 0, + PTP_PACKET_STATE_MATCHED, + PTP_PACKET_STATE_TIMED_OUT, + PTP_PACKET_STATE_MATCH_UNWANTED +}; + +/* NIC synchronised with single word of time only comprising + * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds. + */ +#define MC_NANOSECOND_BITS 30 +#define MC_NANOSECOND_MASK ((1 << MC_NANOSECOND_BITS) - 1) +#define MC_SECOND_MASK ((1 << (32 - MC_NANOSECOND_BITS)) - 1) + +/* Maximum parts-per-billion adjustment that is acceptable */ +#define MAX_PPB 1000000 + +/* Number of bits required to hold the above */ +#define MAX_PPB_BITS 20 + +/* Number of extra bits allowed when calculating fractional ns. + * PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS should + * be less than 63. + */ +#define PPB_EXTRA_BITS 2 + +/* Precalculate scale word to avoid long long division at runtime */ +#define PPB_SCALE_WORD ((1LL << (PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS +\ + MAX_PPB_BITS)) / 1000000000LL) + +#define PTP_SYNC_ATTEMPTS 4 + +/** + * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area. + * @words: UUID and (partial) sequence number + * @expiry: Time after which the packet should be delivered irrespective of + * event arrival. + * @state: The state of the packet - whether it is ready for processing or + * whether that is of no interest. + */ +struct efx_ptp_match { + u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)]; + unsigned long expiry; + enum ptp_packet_state state; +}; + +/** + * struct efx_ptp_event_rx - A PTP receive event (from MC) + * @link: Link into the list of events (evt_list or evt_free_list) + * @seq0: First part of (PTP) UUID + * @seq1: Second part of (PTP) UUID and sequence number + * @hwtimestamp: Event timestamp + * @expiry: Time after which the event should be dropped if still unmatched + */ +struct efx_ptp_event_rx { + struct list_head link; + u32 seq0; + u32 seq1; + ktime_t hwtimestamp; + unsigned long expiry; +}; + +/** + * struct efx_ptp_timeset - Synchronisation between host and MC + * @host_start: Host time immediately before hardware timestamp taken + * @seconds: Hardware timestamp, seconds + * @nanoseconds: Hardware timestamp, nanoseconds + * @host_end: Host time immediately after hardware timestamp taken + * @waitns: Number of nanoseconds between hardware timestamp being read and + * host end time being seen + * @window: Difference of host_end and host_start + */ +struct efx_ptp_timeset { + u32 host_start; + u32 seconds; + u32 nanoseconds; + u32 host_end; + u32 waitns; + u32 window; /* Derived: end - start, allowing for wrap */ +}; + +/** + * struct efx_ptp_data - Precision Time Protocol (PTP) state + * @channel: The PTP channel + * @rxq: Receive queue (awaiting timestamps) + * @txq: Transmit queue + * @evt_list: List of MC receive events awaiting packets + * @evt_free_list: List of free events + * @evt_lock: Lock for manipulating evt_list and evt_free_list + * @rx_evts: Instantiated events (on evt_list and evt_free_list) + * @workwq: Work queue for processing pending PTP operations + * @work: Work task + * @reset_required: A serious error has occurred and the PTP task needs to be + * reset (disable, enable). 
+ * @rxfilter_event: Receive filter for the event port when operating + * @rxfilter_general: Receive filter for the general port when operating + * @rxfilter_installed: Whether the above receive filters are installed + * @config: Current timestamp configuration + * @enabled: PTP operation enabled + * @mode: Mode in which PTP is operating (PTP version) + * @evt_frags: Partly assembled PTP events + * @evt_frag_idx: Current fragment number + * @evt_code: Last event code + * @start: Address at which MC indicates ready for synchronisation + * @host_time_pps: Host time at last PPS + * @last_sync_ns: Last number of nanoseconds between readings when synchronising + * @base_sync_ns: Number of nanoseconds for last synchronisation. + * @base_sync_valid: Whether base_sync_ns is valid. + * @current_adjfreq: Current ppb adjustment. + * @phc_clock: Pointer to registered phc device + * @phc_clock_info: Registration structure for phc device + * @pps_work: pps work task for handling pps events + * @pps_workwq: pps work queue + * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled + * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids + * allocations in main data path). + * @timeset: Last set of synchronisation statistics. + */ +struct efx_ptp_data { + struct efx_channel *channel; + struct sk_buff_head rxq; + struct sk_buff_head txq; + struct list_head evt_list; + struct list_head evt_free_list; + spinlock_t evt_lock; + struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; + struct workqueue_struct *workwq; + struct work_struct work; + bool reset_required; + u32 rxfilter_event; + u32 rxfilter_general; + bool rxfilter_installed; + struct hwtstamp_config config; + bool enabled; + unsigned int mode; + efx_qword_t evt_frags[MAX_EVENT_FRAGS]; + int evt_frag_idx; + int evt_code; + struct efx_buffer start; + struct pps_event_time host_time_pps; + unsigned last_sync_ns; + unsigned base_sync_ns; + bool base_sync_valid; + s64 current_adjfreq; + struct ptp_clock *phc_clock; + struct ptp_clock_info phc_clock_info; + struct work_struct pps_work; + struct workqueue_struct *pps_workwq; + bool nic_ts_enabled; + u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN( + MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)]; + struct efx_ptp_timeset + timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM]; +}; + +static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta); +static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta); +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts); +static int efx_phc_settime(struct ptp_clock_info *ptp, + const struct timespec *e_ts); +static int efx_phc_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, int on); + +/* Enable MCDI PTP support. 
 */ +static int efx_ptp_enable(struct efx_nic *efx) +{ + u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN]; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE); + MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE, + efx->ptp_data->channel->channel); + MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode); + + return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +/* Disable MCDI PTP support. + * + * Note that this function should never rely on the presence of ptp_data - + * it may be called before that exists. + */ +static int efx_ptp_disable(struct efx_nic *efx) +{ + u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN]; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE); + return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(q))) { + local_bh_disable(); + netif_receive_skb(skb); + local_bh_enable(); + } +} + +static void efx_ptp_handle_no_channel(struct efx_nic *efx) +{ + netif_err(efx, drv, efx->net_dev, + "ERROR: PTP requires MSI-X and 1 additional interrupt " "vector. PTP disabled\n"); +} + +/* Repeatedly send the host time to the MC which will capture the hardware + * time. + */ +static void efx_ptp_send_times(struct efx_nic *efx, + struct pps_event_time *last_time) +{ + struct pps_event_time now; + struct timespec limit; + struct efx_ptp_data *ptp = efx->ptp_data; + struct timespec start; + int *mc_running = ptp->start.addr; + + pps_get_ts(&now); + start = now.ts_real; + limit = now.ts_real; + timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS); + + /* Write host time for specified period or until MC is done */ + while ((timespec_compare(&now.ts_real, &limit) < 0) && + ACCESS_ONCE(*mc_running)) { + struct timespec update_time; + unsigned int host_time; + + /* Don't update continuously to avoid saturating the PCIe bus */ + update_time = now.ts_real; + timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS); + do { + pps_get_ts(&now); + } while ((timespec_compare(&now.ts_real, &update_time) < 0) && + ACCESS_ONCE(*mc_running)); + + /* Synchronise NIC with single word of time only */ + host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | + now.ts_real.tv_nsec); + /* Update host time in NIC memory */ + _efx_writed(efx, cpu_to_le32(host_time), + FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST); + } + *last_time = now; +} + +/* Read a timeset from the MC's results and partially process it. */ +static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset) +{ + unsigned start_ns, end_ns; + + timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART); + timeset->seconds = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_SECONDS); + timeset->nanoseconds = MCDI_DWORD(data, + PTP_OUT_SYNCHRONIZE_NANOSECONDS); + timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND); + timeset->waitns = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS); + + /* Ignore seconds */ + start_ns = timeset->host_start & MC_NANOSECOND_MASK; + end_ns = timeset->host_end & MC_NANOSECOND_MASK; + /* Allow for rollover */ + if (end_ns < start_ns) + end_ns += NSEC_PER_SEC; + /* Determine duration of operation */ + timeset->window = end_ns - start_ns; +} + +/* Process times received from MC. + * + * Extract times from returned results, and establish the minimum value + * seen. The minimum value represents the "best" possible time and events + * too much greater than this are rejected - the machine is, perhaps, too + * busy. 
A number of readings are taken so that, hopefully, at least one good + * synchronisation will be seen in the results. + */ +static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, + size_t response_length, + const struct pps_event_time *last_time) +{ + unsigned number_readings = (response_length / + MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN); + unsigned i; + unsigned min; + unsigned min_set = 0; + unsigned total; + unsigned ngood = 0; + unsigned last_good = 0; + struct efx_ptp_data *ptp = efx->ptp_data; + bool min_valid = false; + u32 last_sec; + u32 start_sec; + struct timespec delta; + + if (number_readings == 0) + return -EAGAIN; + + /* Find minimum value in this set of results, discarding clearly + * erroneous results. + */ + for (i = 0; i < number_readings; i++) { + efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]); + synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN; + if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) { + if (min_valid) { + if (ptp->timeset[i].window < min_set) + min_set = ptp->timeset[i].window; + } else { + min_valid = true; + min_set = ptp->timeset[i].window; + } + } + } + + if (min_valid) { + if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns)) + min = ptp->base_sync_ns; + else + min = min_set; + } else { + min = SYNCHRONISATION_GRANULARITY_NS; + } + + /* Discard excessively long synchronise durations. The MC times + * when it finishes reading the host time so the corrected window + * time should be fairly constant for a given platform. + */ + total = 0; + for (i = 0; i < number_readings; i++) + if (ptp->timeset[i].window > ptp->timeset[i].waitns) { + unsigned win; + + win = ptp->timeset[i].window - ptp->timeset[i].waitns; + if (win >= MIN_SYNCHRONISATION_NS && + win < MAX_SYNCHRONISATION_NS) { + total += ptp->timeset[i].window; + ngood++; + last_good = i; + } + } + + if (ngood == 0) { + netif_warn(efx, drv, efx->net_dev, + "PTP no suitable synchronisations %dns %dns\n", + ptp->base_sync_ns, min_set); + return -EAGAIN; + } + + /* Average minimum this synchronisation */ + ptp->last_sync_ns = DIV_ROUND_UP(total, ngood); + if (!ptp->base_sync_valid || (ptp->last_sync_ns < ptp->base_sync_ns)) { + ptp->base_sync_valid = true; + ptp->base_sync_ns = ptp->last_sync_ns; + } + + /* Calculate delay from actual PPS to last_time */ + delta.tv_nsec = + ptp->timeset[last_good].nanoseconds + + last_time->ts_real.tv_nsec - + (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK); + + /* It is possible that the seconds rolled over between taking + * the start reading and the last value written by the host. The + * timescales are such that a gap of more than one second is never + * expected. 
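With only two bits of seconds in the shared word, the tolerable outcomes are therefore "same second" or "exactly one wrap"; anything else aborts the attempt, as the code just below implements. As a standalone sketch of that check (illustrative, not part of the patch):

        /* Returns the seconds correction to apply, or -1 to force a
         * resynchronisation because more than one second elapsed. */
        static int ptp_seconds_delta(u32 start_sec, u32 last_sec)
        {
                if (start_sec == last_sec)
                        return 0;
                if (((start_sec + 1) & MC_SECOND_MASK) == last_sec)
                        return 1;       /* wrapped exactly once */
                return -1;
        }
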
+ */ + start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS; + last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK; + if (start_sec != last_sec) { + if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) { + netif_warn(efx, hw, efx->net_dev, + "PTP bad synchronisation seconds\n"); + return -EAGAIN; + } else { + delta.tv_sec = 1; + } + } else { + delta.tv_sec = 0; + } + + ptp->host_time_pps = *last_time; + pps_sub_ts(&ptp->host_time_pps, delta); + + return 0; +} + +/* Synchronize times between the host and the MC */ +static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX]; + size_t response_length; + int rc; + unsigned long timeout; + struct pps_event_time last_time = {}; + unsigned int loops = 0; + int *start = ptp->start.addr; + + MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE); + MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS, + num_readings); + MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO, + (u32)ptp->start.dma_addr); + MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI, + (u32)((u64)ptp->start.dma_addr >> 32)); + + /* Clear flag that signals MC ready */ + ACCESS_ONCE(*start) = 0; + efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN); + + /* Wait for start from MCDI (or timeout) */ + timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); + while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) { + udelay(20); /* Usually start MCDI execution quickly */ + loops++; + } + + if (ACCESS_ONCE(*start)) + efx_ptp_send_times(efx, &last_time); + + /* Collect results */ + rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP, + MC_CMD_PTP_IN_SYNCHRONIZE_LEN, + synch_buf, sizeof(synch_buf), + &response_length); + if (rc == 0) + rc = efx_ptp_process_times(efx, synch_buf, response_length, + &last_time); + + return rc; +} + +/* Transmit a PTP packet, via the MCDI interface, to the wire. 
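The timestamp produced by this transmit path reaches applications through the standard SO_TIMESTAMPING socket interface: the sender requests hardware timestamps, then collects them from the socket error queue. A userspace sketch (illustrative, not part of the patch; error handling omitted):

        #include <sys/socket.h>
        #include <linux/net_tstamp.h>

        static int request_hw_timestamps(int sock)
        {
                int flags = SOF_TIMESTAMPING_TX_HARDWARE |
                            SOF_TIMESTAMPING_RX_HARDWARE |
                            SOF_TIMESTAMPING_RAW_HARDWARE;

                /* Completed TX timestamps are then read back from the
                 * error queue (recvmsg with MSG_ERRQUEUE) as
                 * SCM_TIMESTAMPING control messages. */
                return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
                                  &flags, sizeof(flags));
        }
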
 */ +static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb) +{ + u8 *txbuf = efx->ptp_data->txbuf; + struct skb_shared_hwtstamps timestamps; + int rc = -EIO; + /* MCDI driver requires word aligned lengths */ + size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4); + u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN]; + + MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT); + MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len); + if (skb_shinfo(skb)->nr_frags != 0) { + rc = skb_linearize(skb); + if (rc != 0) + goto fail; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + rc = skb_checksum_help(skb); + if (rc != 0) + goto fail; + } + skb_copy_from_linear_data(skb, + &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST], + len); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime, + sizeof(txtime), &len); + if (rc != 0) + goto fail; + + memset(&timestamps, 0, sizeof(timestamps)); + timestamps.hwtstamp = ktime_set( + MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_SECONDS), + MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_NANOSECONDS)); + + skb_tstamp_tx(skb, &timestamps); + + rc = 0; + +fail: + dev_kfree_skb(skb); + + return rc; +} + +static void efx_ptp_drop_time_expired_events(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct list_head *cursor; + struct list_head *next; + + /* Drop time-expired events */ + spin_lock_bh(&ptp->evt_lock); + if (!list_empty(&ptp->evt_list)) { + list_for_each_safe(cursor, next, &ptp->evt_list) { + struct efx_ptp_event_rx *evt; + + evt = list_entry(cursor, struct efx_ptp_event_rx, + link); + if (time_after(jiffies, evt->expiry)) { + list_del(&evt->link); + list_add(&evt->link, &ptp->evt_free_list); + netif_warn(efx, hw, efx->net_dev, + "PTP rx event dropped\n"); + } + } + } + spin_unlock_bh(&ptp->evt_lock); +} + +static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx, + struct sk_buff *skb) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + bool evts_waiting; + struct list_head *cursor; + struct list_head *next; + struct efx_ptp_match *match; + enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED; + + spin_lock_bh(&ptp->evt_lock); + evts_waiting = !list_empty(&ptp->evt_list); + spin_unlock_bh(&ptp->evt_lock); + + if (!evts_waiting) + return PTP_PACKET_STATE_UNMATCHED; + + match = (struct efx_ptp_match *)skb->cb; + /* Look for a matching timestamp in the event queue */ + spin_lock_bh(&ptp->evt_lock); + list_for_each_safe(cursor, next, &ptp->evt_list) { + struct efx_ptp_event_rx *evt; + + evt = list_entry(cursor, struct efx_ptp_event_rx, link); + if ((evt->seq0 == match->words[0]) && + (evt->seq1 == match->words[1])) { + struct skb_shared_hwtstamps *timestamps; + + /* Match - add in hardware timestamp */ + timestamps = skb_hwtstamps(skb); + timestamps->hwtstamp = evt->hwtimestamp; + + match->state = PTP_PACKET_STATE_MATCHED; + rc = PTP_PACKET_STATE_MATCHED; + list_del(&evt->link); + list_add(&evt->link, &ptp->evt_free_list); + break; + } + } + spin_unlock_bh(&ptp->evt_lock); + + return rc; +} + +/* Process any queued receive events and corresponding packets + * + * q is returned with all the packets that are ready for delivery. + * true is returned if at least one of those packets requires + * synchronisation. 
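The matching above works because packet and event both reduce their identity to the same pair of 32-bit words: the first four UUID bytes in one word, the last two UUID bytes plus the low byte of the sequence number in the other (compare efx_ptp_rx() and ptp_event_rx() later in this file). As a sketch of the packing, with uuid[] the 6-byte MC-truncated UUID and seq_lsb the last sequence byte, both hypothetical local names (illustrative, not part of the patch):

        u32 words[2];

        words[0] = uuid[0] | uuid[1] << 8 | uuid[2] << 16 | uuid[3] << 24;
        words[1] = uuid[4] | uuid[5] << 8 | seq_lsb << 16;
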
+ */ +static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + bool rc = false; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&ptp->rxq))) { + struct efx_ptp_match *match; + + match = (struct efx_ptp_match *)skb->cb; + if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) { + __skb_queue_tail(q, skb); + } else if (efx_ptp_match_rx(efx, skb) == + PTP_PACKET_STATE_MATCHED) { + rc = true; + __skb_queue_tail(q, skb); + } else if (time_after(jiffies, match->expiry)) { + match->state = PTP_PACKET_STATE_TIMED_OUT; + netif_warn(efx, rx_err, efx->net_dev, + "PTP packet - no timestamp seen\n"); + __skb_queue_tail(q, skb); + } else { + /* Replace unprocessed entry and stop */ + skb_queue_head(&ptp->rxq, skb); + break; + } + } + + return rc; +} + +/* Complete processing of a received packet */ +static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb) +{ + local_bh_disable(); + netif_receive_skb(skb); + local_bh_enable(); +} + +static int efx_ptp_start(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_filter_spec rxfilter; + int rc; + + ptp->reset_required = false; + + /* Must filter on both event and general ports to ensure + * that there is no packet re-ordering. + */ + efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0, + efx_rx_queue_index( + efx_channel_get_rx_queue(ptp->channel))); + rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP, + htonl(PTP_ADDRESS), + htons(PTP_EVENT_PORT)); + if (rc != 0) + return rc; + + rc = efx_filter_insert_filter(efx, &rxfilter, true); + if (rc < 0) + return rc; + ptp->rxfilter_event = rc; + + efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0, + efx_rx_queue_index( + efx_channel_get_rx_queue(ptp->channel))); + rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP, + htonl(PTP_ADDRESS), + htons(PTP_GENERAL_PORT)); + if (rc != 0) + goto fail; + + rc = efx_filter_insert_filter(efx, &rxfilter, true); + if (rc < 0) + goto fail; + ptp->rxfilter_general = rc; + + rc = efx_ptp_enable(efx); + if (rc != 0) + goto fail2; + + ptp->evt_frag_idx = 0; + ptp->current_adjfreq = 0; + ptp->rxfilter_installed = true; + + return 0; + +fail2: + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_general); +fail: + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_event); + + return rc; +} + +static int efx_ptp_stop(struct efx_nic *efx) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int rc = efx_ptp_disable(efx); + struct list_head *cursor; + struct list_head *next; + + if (ptp->rxfilter_installed) { + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_general); + efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED, + ptp->rxfilter_event); + ptp->rxfilter_installed = false; + } + + /* Make sure RX packets are really delivered */ + efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq); + skb_queue_purge(&efx->ptp_data->txq); + + /* Drop any pending receive events */ + spin_lock_bh(&efx->ptp_data->evt_lock); + list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { + list_del(cursor); + list_add(cursor, &efx->ptp_data->evt_free_list); + } + spin_unlock_bh(&efx->ptp_data->evt_lock); + + return rc; +} + +static void efx_ptp_pps_worker(struct work_struct *work) +{ + struct efx_ptp_data *ptp = + container_of(work, struct efx_ptp_data, pps_work); + struct efx_nic *efx = ptp->channel->efx; + struct ptp_clock_event ptp_evt; + + if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS)) + return; + + 
ptp_evt.type = PTP_CLOCK_PPSUSR; + ptp_evt.pps_times = ptp->host_time_pps; + ptp_clock_event(ptp->phc_clock, &ptp_evt); +} + +/* Process any pending transmissions and timestamp any received packets. + */ +static void efx_ptp_worker(struct work_struct *work) +{ + struct efx_ptp_data *ptp_data = + container_of(work, struct efx_ptp_data, work); + struct efx_nic *efx = ptp_data->channel->efx; + struct sk_buff *skb; + struct sk_buff_head tempq; + + if (ptp_data->reset_required) { + efx_ptp_stop(efx); + efx_ptp_start(efx); + return; + } + + efx_ptp_drop_time_expired_events(efx); + + __skb_queue_head_init(&tempq); + if (efx_ptp_process_events(efx, &tempq) || + !skb_queue_empty(&ptp_data->txq)) { + + while ((skb = skb_dequeue(&ptp_data->txq))) + efx_ptp_xmit_skb(efx, skb); + } + + while ((skb = __skb_dequeue(&tempq))) + efx_ptp_process_rx(efx, skb); +} + +/* Initialise PTP channel and state. + * + * Setting core_index to zero causes the queue to be initialised and doesn't + * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue. + */ +static int efx_ptp_probe_channel(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp; + int rc = 0; + unsigned int pos; + + channel->irq_moderation = 0; + channel->rx_queue.core_index = 0; + + ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL); + efx->ptp_data = ptp; + if (!efx->ptp_data) + return -ENOMEM; + + rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int)); + if (rc != 0) + goto fail1; + + ptp->channel = channel; + skb_queue_head_init(&ptp->rxq); + skb_queue_head_init(&ptp->txq); + ptp->workwq = create_singlethread_workqueue("sfc_ptp"); + if (!ptp->workwq) { + rc = -ENOMEM; + goto fail2; + } + + INIT_WORK(&ptp->work, efx_ptp_worker); + ptp->config.flags = 0; + ptp->config.tx_type = HWTSTAMP_TX_OFF; + ptp->config.rx_filter = HWTSTAMP_FILTER_NONE; + INIT_LIST_HEAD(&ptp->evt_list); + INIT_LIST_HEAD(&ptp->evt_free_list); + spin_lock_init(&ptp->evt_lock); + for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) + list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); + + ptp->phc_clock_info.owner = THIS_MODULE; + snprintf(ptp->phc_clock_info.name, + sizeof(ptp->phc_clock_info.name), + "%pm", efx->net_dev->perm_addr); + ptp->phc_clock_info.max_adj = MAX_PPB; + ptp->phc_clock_info.n_alarm = 0; + ptp->phc_clock_info.n_ext_ts = 0; + ptp->phc_clock_info.n_per_out = 0; + ptp->phc_clock_info.pps = 1; + ptp->phc_clock_info.adjfreq = efx_phc_adjfreq; + ptp->phc_clock_info.adjtime = efx_phc_adjtime; + ptp->phc_clock_info.gettime = efx_phc_gettime; + ptp->phc_clock_info.settime = efx_phc_settime; + ptp->phc_clock_info.enable = efx_phc_enable; + + ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info, + &efx->pci_dev->dev); + if (!ptp->phc_clock) + goto fail3; + + INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker); + ptp->pps_workwq = create_singlethread_workqueue("sfc_pps"); + if (!ptp->pps_workwq) { + rc = -ENOMEM; + goto fail4; + } + ptp->nic_ts_enabled = false; + + return 0; +fail4: + ptp_clock_unregister(efx->ptp_data->phc_clock); + +fail3: + destroy_workqueue(efx->ptp_data->workwq); + +fail2: + efx_nic_free_buffer(efx, &ptp->start); + +fail1: + kfree(efx->ptp_data); + efx->ptp_data = NULL; + + return rc; +} + +static void efx_ptp_remove_channel(struct efx_channel *channel) +{ + struct efx_nic *efx = channel->efx; + + if (!efx->ptp_data) + return; + + (void)efx_ptp_disable(channel->efx); + + cancel_work_sync(&efx->ptp_data->work); + cancel_work_sync(&efx->ptp_data->pps_work); + + skb_queue_purge(&efx->ptp_data->rxq); 
+ skb_queue_purge(&efx->ptp_data->txq); + + ptp_clock_unregister(efx->ptp_data->phc_clock); + + destroy_workqueue(efx->ptp_data->workwq); + destroy_workqueue(efx->ptp_data->pps_workwq); + + efx_nic_free_buffer(efx, &efx->ptp_data->start); + kfree(efx->ptp_data); +} + +static void efx_ptp_get_channel_name(struct efx_channel *channel, + char *buf, size_t len) +{ + snprintf(buf, len, "%s-ptp", channel->efx->name); +} + +/* Determine whether this packet should be processed by the PTP module + * or transmitted conventionally. + */ +bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +{ + return efx->ptp_data && + efx->ptp_data->enabled && + skb->len >= PTP_MIN_LENGTH && + skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM && + likely(skb->protocol == htons(ETH_P_IP)) && + ip_hdr(skb)->protocol == IPPROTO_UDP && + udp_hdr(skb)->dest == htons(PTP_EVENT_PORT); +} + +/* Receive a PTP packet. Packets are queued until the arrival of + * the receive timestamp from the MC - this will probably occur after the + * packet arrival because of the processing in the MC. + */ +static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) +{ + struct efx_nic *efx = channel->efx; + struct efx_ptp_data *ptp = efx->ptp_data; + struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; + u8 *data; + unsigned int version; + + match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); + + /* Correct version? */ + if (ptp->mode == MC_CMD_PTP_MODE_V1) { + if (skb->len < PTP_V1_MIN_LENGTH) { + netif_receive_skb(skb); + return; + } + version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]); + if (version != PTP_VERSION_V1) { + netif_receive_skb(skb); + return; + } + } else { + if (skb->len < PTP_V2_MIN_LENGTH) { + netif_receive_skb(skb); + return; + } + version = skb->data[PTP_V2_VERSION_OFFSET]; + + BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2); + BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET); + BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH); + BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET); + BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH); + + if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { + netif_receive_skb(skb); + return; + } + } + + /* Does this packet require timestamping? */ + if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) { + struct skb_shared_hwtstamps *timestamps; + + match->state = PTP_PACKET_STATE_UNMATCHED; + + /* Clear all timestamps held: filled in later */ + timestamps = skb_hwtstamps(skb); + memset(timestamps, 0, sizeof(*timestamps)); + + /* Extract UUID/Sequence information */ + data = skb->data + PTP_V1_UUID_OFFSET; + match->words[0] = (data[0] | + (data[1] << 8) | + (data[2] << 16) | + (data[3] << 24)); + match->words[1] = (data[4] | + (data[5] << 8) | + (skb->data[PTP_V1_SEQUENCE_OFFSET + + PTP_V1_SEQUENCE_LENGTH - 1] << + 16)); + } else { + match->state = PTP_PACKET_STATE_MATCH_UNWANTED; + } + + skb_queue_tail(&ptp->rxq, skb); + queue_work(ptp->workwq, &ptp->work); +} + +/* Transmit a PTP packet. This has to be transmitted by the MC + * itself, through an MCDI call. MCDI calls aren't permitted + * in the transmit path so defer the actual transmission to a suitable worker. 
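This deferred-transmit path only sees packets once timestamping has been switched on through the standard SIOCSHWTSTAMP ioctl, which efx_ptp_ioctl() below services. A userspace sketch of that configuration step (illustrative, not part of the patch; error handling omitted):

        #include <string.h>
        #include <sys/ioctl.h>
        #include <net/if.h>
        #include <linux/net_tstamp.h>
        #include <linux/sockios.h>

        static int hwtstamp_enable(int sock, const char *ifname)
        {
                struct hwtstamp_config cfg = {
                        .tx_type   = HWTSTAMP_TX_ON,
                        .rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT,
                };
                struct ifreq ifr;

                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
                ifr.ifr_data = (void *)&cfg;
                return ioctl(sock, SIOCSHWTSTAMP, &ifr);
        }
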
+ */ +int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + skb_queue_tail(&ptp->txq, skb); + + if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) && + (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM)) + efx_xmit_hwtstamp_pending(skb); + queue_work(ptp->workwq, &ptp->work); + + return NETDEV_TX_OK; +} + +static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, + unsigned int new_mode) +{ + if ((enable_wanted != efx->ptp_data->enabled) || + (enable_wanted && (efx->ptp_data->mode != new_mode))) { + int rc; + + if (enable_wanted) { + /* Change of mode requires disable */ + if (efx->ptp_data->enabled && + (efx->ptp_data->mode != new_mode)) { + efx->ptp_data->enabled = false; + rc = efx_ptp_stop(efx); + if (rc != 0) + return rc; + } + + /* Set new operating mode and establish + * baseline synchronisation, which must + * succeed. + */ + efx->ptp_data->mode = new_mode; + rc = efx_ptp_start(efx); + if (rc == 0) { + rc = efx_ptp_synchronize(efx, + PTP_SYNC_ATTEMPTS * 2); + if (rc != 0) + efx_ptp_stop(efx); + } + } else { + rc = efx_ptp_stop(efx); + } + + if (rc != 0) + return rc; + + efx->ptp_data->enabled = enable_wanted; + } + + return 0; +} + +static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) +{ + bool enable_wanted = false; + unsigned int new_mode; + int rc; + + if (init->flags) + return -EINVAL; + + if ((init->tx_type != HWTSTAMP_TX_OFF) && + (init->tx_type != HWTSTAMP_TX_ON)) + return -ERANGE; + + new_mode = efx->ptp_data->mode; + /* Determine whether any PTP HW operations are required */ + switch (init->rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; + new_mode = MC_CMD_PTP_MODE_V1; + enable_wanted = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* Although these three are accepted, only IPv4 packets will be + * timestamped + */ + init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; + new_mode = MC_CMD_PTP_MODE_V2; + enable_wanted = true; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + /* Non-IP + IPv6 timestamping not supported */ + return -ERANGE; + default: + return -ERANGE; + } + + if (init->tx_type != HWTSTAMP_TX_OFF) + enable_wanted = true; + + rc = efx_ptp_change_mode(efx, enable_wanted, new_mode); + if (rc != 0) + return rc; + + efx->ptp_data->config = *init; + + return 0; +} + +int +efx_ptp_get_ts_info(struct net_device *net_dev, struct ethtool_ts_info *ts_info) +{ + struct efx_nic *efx = netdev_priv(net_dev); + struct efx_ptp_data *ptp = efx->ptp_data; + + if (!ptp) + return -EOPNOTSUPP; + + ts_info->so_timestamping = (SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE); + ts_info->phc_index = ptp_clock_index(ptp->phc_clock); + ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON; + ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC | + 1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT | + 1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC | + 1 << 
HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ); + return 0; +} + +int efx_ptp_ioctl(struct efx_nic *efx, struct ifreq *ifr, int cmd) +{ + struct hwtstamp_config config; + int rc; + + /* Not a PTP enabled port */ + if (!efx->ptp_data) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + rc = efx_ptp_ts_init(efx, &config); + if (rc != 0) + return rc; + + return copy_to_user(ifr->ifr_data, &config, sizeof(config)) + ? -EFAULT : 0; +} + +static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + + netif_err(efx, hw, efx->net_dev, + "PTP unexpected event length: got %d expected %d\n", + ptp->evt_frag_idx, expected_frag_len); + ptp->reset_required = true; + queue_work(ptp->workwq, &ptp->work); +} + +/* Process a completed receive event. Put it on the event queue and + * start the worker thread. This is required because events and their + * corresponding packets may come in either order. + */ +static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + struct efx_ptp_event_rx *evt = NULL; + + if (ptp->evt_frag_idx != 3) { + ptp_event_failure(efx, 3); + return; + } + + spin_lock_bh(&ptp->evt_lock); + if (!list_empty(&ptp->evt_free_list)) { + evt = list_first_entry(&ptp->evt_free_list, + struct efx_ptp_event_rx, link); + list_del(&evt->link); + + evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA); + evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2], + MCDI_EVENT_SRC) | + (EFX_QWORD_FIELD(ptp->evt_frags[1], + MCDI_EVENT_SRC) << 8) | + (EFX_QWORD_FIELD(ptp->evt_frags[0], + MCDI_EVENT_SRC) << 16)); + evt->hwtimestamp = ktime_set( + EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA), + EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA)); + evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); + list_add_tail(&evt->link, &ptp->evt_list); + + queue_work(ptp->workwq, &ptp->work); + } else { + netif_err(efx, rx_err, efx->net_dev, "No free PTP event\n"); + } + spin_unlock_bh(&ptp->evt_lock); +} + +static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA); + if (ptp->evt_frag_idx != 1) { + ptp_event_failure(efx, 1); + return; + } + + netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code); +} + +static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp) +{ + if (ptp->nic_ts_enabled) + queue_work(ptp->pps_workwq, &ptp->pps_work); +} + +void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) +{ + struct efx_ptp_data *ptp = efx->ptp_data; + int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); + + if (!ptp->enabled) + return; + + if (ptp->evt_frag_idx == 0) { + ptp->evt_code = code; + } else if (ptp->evt_code != code) { + netif_err(efx, hw, efx->net_dev, + "PTP out of sequence event %d\n", code); + ptp->evt_frag_idx = 0; + } + + ptp->evt_frags[ptp->evt_frag_idx++] = *ev; + if (!MCDI_EVENT_FIELD(*ev, CONT)) { + /* Process resulting event */ + switch (code) { + case MCDI_EVENT_CODE_PTP_RX: + ptp_event_rx(efx, ptp); + break; + case MCDI_EVENT_CODE_PTP_FAULT: + ptp_event_fault(efx, ptp); + break; + case MCDI_EVENT_CODE_PTP_PPS: + ptp_event_pps(efx, ptp); + break; + default: + netif_err(efx, hw, efx->net_dev, + "PTP unknown event %d\n", code); + break; + } + ptp->evt_frag_idx = 0; + } else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) { + netif_err(efx, hw, efx->net_dev, + "PTP too many event fragments\n"); + ptp->evt_frag_idx = 0; + } +} + 
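A note on the fixed-point arithmetic in the frequency adjustment that follows: PPB_SCALE_WORD precomputes 2^(PPB_EXTRA_BITS + MC_CMD_PTP_IN_ADJUST_BITS + MAX_PPB_BITS) / 10^9, so multiplying a parts-per-billion delta by it and shifting right by (PPB_EXTRA_BITS + MAX_PPB_BITS) leaves delta * 2^MC_CMD_PTP_IN_ADJUST_BITS / 10^9, i.e. the ppb value converted to the MC's binary fraction, with the only division done at compile time. In sketch form (illustrative, not part of the patch):

        /* ppb -> fractional-ns adjustment without a runtime division */
        s64 adjustment_ns = ((s64)delta * PPB_SCALE_WORD) >>
                            (PPB_EXTRA_BITS + MAX_PPB_BITS);

+static int efx_phc_adjfreq(struct 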
ptp_clock_info *ptp, s32 delta) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->channel->efx; + u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN]; + s64 adjustment_ns; + int rc; + + if (delta > MAX_PPB) + delta = MAX_PPB; + else if (delta < -MAX_PPB) + delta = -MAX_PPB; + + /* Convert ppb to fixed point ns. */ + adjustment_ns = (((s64)delta * PPB_SCALE_WORD) >> + (PPB_EXTRA_BITS + MAX_PPB_BITS)); + + MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI, + (u32)(adjustment_ns >> 32)); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0); + MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0); + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj), + NULL, 0, NULL); + if (rc != 0) + return rc; + + ptp_data->current_adjfreq = delta; + return 0; +} + +static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->channel->efx; + struct timespec delta_ts = ns_to_timespec(delta); + u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN]; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec); + MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec); + return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + NULL, 0, NULL); +} + +static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + struct efx_nic *efx = ptp_data->channel->efx; + u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN]; + u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN]; + int rc; + + MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME); + + rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), NULL); + if (rc != 0) + return rc; + + ts->tv_sec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_SECONDS); + ts->tv_nsec = MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_NANOSECONDS); + return 0; +} + +static int efx_phc_settime(struct ptp_clock_info *ptp, + const struct timespec *e_ts) +{ + /* Get the current NIC time, efx_phc_gettime. 
+ * Subtract this from the desired time to get the offset, then + * call efx_phc_adjtime with that offset. + */ + int rc; + struct timespec time_now; + struct timespec delta; + + rc = efx_phc_gettime(ptp, &time_now); + if (rc != 0) + return rc; + + delta = timespec_sub(*e_ts, time_now); + + rc = efx_phc_adjtime(ptp, timespec_to_ns(&delta)); + if (rc != 0) + return rc; + + return 0; +} + +static int efx_phc_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *request, + int enable) +{ + struct efx_ptp_data *ptp_data = container_of(ptp, + struct efx_ptp_data, + phc_clock_info); + if (request->type != PTP_CLK_REQ_PPS) + return -EOPNOTSUPP; + + ptp_data->nic_ts_enabled = !!enable; + return 0; +} + +static const struct efx_channel_type efx_ptp_channel_type = { + .handle_no_channel = efx_ptp_handle_no_channel, + .pre_probe = efx_ptp_probe_channel, + .post_remove = efx_ptp_remove_channel, + .get_name = efx_ptp_get_channel_name, + /* no copy operation; there is no need to reallocate this channel */ + .receive_skb = efx_ptp_rx, + .keep_eventq = false, +}; + +void efx_ptp_probe(struct efx_nic *efx) +{ + /* Check whether PTP is implemented on this NIC. The DISABLE + * operation will succeed if and only if it is implemented. + */ + if (efx_ptp_disable(efx) == 0) + efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] = + &efx_ptp_channel_type; +} diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 719319b89d7a..9e0ad1b75c33 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -479,7 +479,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel, skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE); - skb_record_rx_queue(skb, channel->channel); + skb_record_rx_queue(skb, channel->rx_queue.core_index); gro_result = napi_gro_frags(napi); } else { @@ -571,8 +571,14 @@ static void efx_rx_deliver(struct efx_channel *channel, /* Set the SKB flags */ skb_checksum_none_assert(skb); + /* Record the rx_queue */ + skb_record_rx_queue(skb, channel->rx_queue.core_index); + /* Pass the packet up */ - netif_receive_skb(skb); + if (channel->type->receive_skb) + channel->type->receive_skb(channel, skb); + else + netif_receive_skb(skb); /* Update allocation strategy method */ channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; @@ -608,13 +614,14 @@ void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf) * at the ethernet header */ skb->protocol = eth_type_trans(skb, efx->net_dev); - skb_record_rx_queue(skb, channel->channel); + skb_record_rx_queue(skb, channel->rx_queue.core_index); } if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; - if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED))) + if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) && + !channel->type->receive_skb) efx_rx_packet_gro(channel, rx_buf, eh); else efx_rx_deliver(channel, rx_buf); @@ -624,6 +631,11 @@ void efx_rx_strategy(struct efx_channel *channel) { enum efx_rx_alloc_method method = rx_alloc_method; + if (channel->type->receive_skb) { + channel->rx_alloc_push_pages = false; + return; + } + /* Only makes sense to use page based allocation if GRO is enabled */ if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { method = RX_ALLOC_METHOD_SKB; diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c index 6bafd216e55e..84b41bf08a38 100644 --- a/drivers/net/ethernet/sfc/siena.c +++ b/drivers/net/ethernet/sfc/siena.c @@ -335,6 +335,7 @@ static 
int siena_probe_nic(struct efx_nic *efx) goto fail5; efx_sriov_probe(efx); + efx_ptp_probe(efx); return 0; diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c index 9cb3b84ecae9..a8f48a455849 100644 --- a/drivers/net/ethernet/sfc/siena_sriov.c +++ b/drivers/net/ethernet/sfc/siena_sriov.c @@ -21,6 +21,9 @@ /* Number of longs required to track all the VIs in a VF */ #define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX) +/* Maximum number of RX queues supported */ +#define VF_MAX_RX_QUEUES 63 + /** * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour * @VF_TX_FILTER_OFF: Disabled @@ -578,6 +581,7 @@ static int efx_vfdi_init_rxq(struct efx_vf *vf) efx_oword_t reg; if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) || + vf_rxq >= VF_MAX_RX_QUEUES || bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) { if (net_ratelimit()) netif_err(efx, hw, efx->net_dev, @@ -683,6 +687,9 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf) __le32 *rxqs; int rc; + BUILD_BUG_ON(VF_MAX_RX_QUEUES > + MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM); + rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL); if (rxqs == NULL) return VFDI_RC_ENOMEM; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 18713436b443..5e090e54298e 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -22,14 +22,6 @@ #include "nic.h" #include "workarounds.h" -/* - * TX descriptor ring full threshold - * - * The tx_queue descriptor ring fill-level must fall below this value - * before we restart the netif queue - */ -#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u) - static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer, unsigned int *pkts_compl, @@ -39,67 +31,32 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct device *dma_dev = &tx_queue->efx->pci_dev->dev; dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - buffer->unmap_len); - if (buffer->unmap_single) + if (buffer->flags & EFX_TX_BUF_MAP_SINGLE) dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, DMA_TO_DEVICE); else dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, DMA_TO_DEVICE); buffer->unmap_len = 0; - buffer->unmap_single = false; } - if (buffer->skb) { + if (buffer->flags & EFX_TX_BUF_SKB) { (*pkts_compl)++; (*bytes_compl) += buffer->skb->len; dev_kfree_skb_any((struct sk_buff *) buffer->skb); - buffer->skb = NULL; netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, "TX queue %d transmission id %x complete\n", tx_queue->queue, tx_queue->read_count); + } else if (buffer->flags & EFX_TX_BUF_HEAP) { + kfree(buffer->heap_buf); } -} -/** - * struct efx_tso_header - a DMA mapped buffer for packet headers - * @next: Linked list of free ones. - * The list is protected by the TX queue lock. - * @dma_unmap_len: Length to unmap for an oversize buffer, or 0. - * @dma_addr: The DMA address of the header below. - * - * This controls the memory used for a TSO header. Use TSOH_DATA() - * to find the packet header data. Use TSOH_SIZE() to calculate the - * total size required for a given packet header length. TSO headers - * in the free list are exactly %TSOH_STD_SIZE bytes in size. 
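The free-list scheme described above is what this part of the patch retires: TSO headers now appear to come from per-queue pages (the @tsoh_page array added to struct efx_tx_queue earlier), apparently with a kmalloc() fallback for oversized headers, and ownership is tracked by the new flags word rather than a dedicated tsoh pointer. The cleanup side then reduces to a dispatch on those flags, as in efx_dequeue_buffer() above (sketch, illustrative):

        if (buffer->flags & EFX_TX_BUF_SKB)
                dev_kfree_skb_any((struct sk_buff *)buffer->skb);
        else if (buffer->flags & EFX_TX_BUF_HEAP)
                kfree(buffer->heap_buf);        /* oversized TSO header */
        buffer->len = 0;
        buffer->flags = 0;
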
- */ -struct efx_tso_header { - union { - struct efx_tso_header *next; - size_t unmap_len; - }; - dma_addr_t dma_addr; -}; + buffer->len = 0; + buffer->flags = 0; +} static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb); -static void efx_fini_tso(struct efx_tx_queue *tx_queue); -static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, - struct efx_tso_header *tsoh); - -static void efx_tsoh_free(struct efx_tx_queue *tx_queue, - struct efx_tx_buffer *buffer) -{ - if (buffer->tsoh) { - if (likely(!buffer->tsoh->unmap_len)) { - buffer->tsoh->next = tx_queue->tso_headers_free; - tx_queue->tso_headers_free = buffer->tsoh; - } else { - efx_tsoh_heap_free(tx_queue, buffer->tsoh); - } - buffer->tsoh = NULL; - } -} - static inline unsigned efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) @@ -138,6 +95,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) return max_descs; } +/* Get partner of a TX queue, seen as part of the same net core queue */ +static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) +{ + if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) + return tx_queue - EFX_TXQ_TYPE_OFFLOAD; + else + return tx_queue + EFX_TXQ_TYPE_OFFLOAD; +} + +static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) +{ + /* We need to consider both queues that the net core sees as one */ + struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1); + struct efx_nic *efx = txq1->efx; + unsigned int fill_level; + + fill_level = max(txq1->insert_count - txq1->old_read_count, + txq2->insert_count - txq2->old_read_count); + if (likely(fill_level < efx->txq_stop_thresh)) + return; + + /* We used the stale old_read_count above, which gives us a + * pessimistic estimate of the fill level (which may even + * validly be >= efx->txq_entries). Now try again using + * read_count (more likely to be a cache miss). + * + * If we read read_count and then conditionally stop the + * queue, it is possible for the completion path to race with + * us and complete all outstanding descriptors in the middle, + * after which there will be no more completions to wake it. + * Therefore we stop the queue first, then read read_count + * (with a memory barrier to ensure the ordering), then + * restart the queue if the fill level turns out to be low + * enough. + */ + netif_tx_stop_queue(txq1->core_txq); + smp_mb(); + txq1->old_read_count = ACCESS_ONCE(txq1->read_count); + txq2->old_read_count = ACCESS_ONCE(txq2->read_count); + + fill_level = max(txq1->insert_count - txq1->old_read_count, + txq2->insert_count - txq2->old_read_count); + EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries); + if (likely(fill_level < efx->txq_stop_thresh)) { + smp_mb(); + if (likely(!efx->loopback_selftest)) + netif_tx_start_queue(txq1->core_txq); + } +} + /* * Add a socket buffer to a TX queue * @@ -151,7 +158,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) * This function is split out from efx_hard_start_xmit to allow the * loopback test to direct packets via specific TX queues. * - * Returns NETDEV_TX_OK or NETDEV_TX_BUSY + * Returns NETDEV_TX_OK. * You must hold netif_tx_lock() to call this function. 
*/ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) @@ -160,12 +167,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) struct device *dma_dev = &efx->pci_dev->dev; struct efx_tx_buffer *buffer; skb_frag_t *fragment; - unsigned int len, unmap_len = 0, fill_level, insert_ptr; + unsigned int len, unmap_len = 0, insert_ptr; dma_addr_t dma_addr, unmap_addr = 0; unsigned int dma_len; - bool unmap_single; - int q_space, i = 0; - netdev_tx_t rc = NETDEV_TX_OK; + unsigned short dma_flags; + int i = 0; EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); @@ -183,14 +189,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) return NETDEV_TX_OK; } - fill_level = tx_queue->insert_count - tx_queue->old_read_count; - q_space = efx->txq_entries - 1 - fill_level; - /* Map for DMA. Use dma_map_single rather than dma_map_page * since this is more efficient on machines with sparse * memory. */ - unmap_single = true; + dma_flags = EFX_TX_BUF_MAP_SINGLE; dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE); /* Process all fragments */ @@ -205,39 +208,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) /* Add to TX queue, splitting across DMA boundaries */ do { - if (unlikely(q_space-- <= 0)) { - /* It might be that completions have - * happened since the xmit path last - * checked. Update the xmit path's - * copy of read_count. - */ - netif_tx_stop_queue(tx_queue->core_txq); - /* This memory barrier protects the - * change of queue state from the access - * of read_count. */ - smp_mb(); - tx_queue->old_read_count = - ACCESS_ONCE(tx_queue->read_count); - fill_level = (tx_queue->insert_count - - tx_queue->old_read_count); - q_space = efx->txq_entries - 1 - fill_level; - if (unlikely(q_space-- <= 0)) { - rc = NETDEV_TX_BUSY; - goto unwind; - } - smp_mb(); - if (likely(!efx->loopback_selftest)) - netif_tx_start_queue( - tx_queue->core_txq); - } - insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; - efx_tsoh_free(tx_queue, buffer); - EFX_BUG_ON_PARANOID(buffer->tsoh); - EFX_BUG_ON_PARANOID(buffer->skb); + EFX_BUG_ON_PARANOID(buffer->flags); EFX_BUG_ON_PARANOID(buffer->len); - EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->unmap_len); dma_len = efx_max_tx_len(efx, dma_addr); @@ -247,13 +221,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) /* Fill out per descriptor fields */ buffer->len = dma_len; buffer->dma_addr = dma_addr; + buffer->flags = EFX_TX_BUF_CONT; len -= dma_len; dma_addr += dma_len; ++tx_queue->insert_count; } while (len); /* Transfer ownership of the unmapping to the final buffer */ - buffer->unmap_single = unmap_single; + buffer->flags = EFX_TX_BUF_CONT | dma_flags; buffer->unmap_len = unmap_len; unmap_len = 0; @@ -264,20 +239,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) len = skb_frag_size(fragment); i++; /* Map for DMA */ - unmap_single = false; + dma_flags = 0; dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, DMA_TO_DEVICE); } /* Transfer ownership of the skb to the final buffer */ buffer->skb = skb; - buffer->continuation = false; + buffer->flags = EFX_TX_BUF_SKB | dma_flags; netdev_tx_sent_queue(tx_queue->core_txq, skb->len); /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); + efx_tx_maybe_stop_queue(tx_queue); + return NETDEV_TX_OK; dma_err: @@ -289,7 +266,6 @@
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) /* Mark the packet as transmitted, and free the SKB ourselves */ dev_kfree_skb_any(skb); - unwind: /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { unsigned int pkts_compl = 0, bytes_compl = 0; @@ -297,12 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); - buffer->len = 0; } /* Free the fragment we were mid-way through pushing */ if (unmap_len) { - if (unmap_single) + if (dma_flags & EFX_TX_BUF_MAP_SINGLE) dma_unmap_single(dma_dev, unmap_addr, unmap_len, DMA_TO_DEVICE); else @@ -310,7 +285,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) DMA_TO_DEVICE); } - return rc; + return NETDEV_TX_OK; } /* Remove packets from the TX queue @@ -340,8 +315,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, } efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); - buffer->continuation = true; - buffer->len = 0; ++tx_queue->read_count; read_ptr = tx_queue->read_count & tx_queue->ptr_mask; @@ -366,6 +339,12 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); + /* PTP "event" packet */ + if (unlikely(efx_xmit_with_hwtstamp(skb)) && + unlikely(efx_ptp_is_ptp_tx(efx, skb))) { + return efx_ptp_tx(efx, skb); + } + index = skb_get_queue_mapping(skb); type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; if (index >= efx->n_tx_channels) { @@ -450,6 +429,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned fill_level; struct efx_nic *efx = tx_queue->efx; + struct efx_tx_queue *txq2; unsigned int pkts_compl = 0, bytes_compl = 0; EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); @@ -457,15 +437,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); - /* See if we need to restart the netif queue. This barrier - * separates the update of read_count from the test of the - * queue state. */ + /* See if we need to restart the netif queue. This memory + * barrier ensures that we write read_count (inside + * efx_dequeue_buffers()) before reading the queue status. + */ smp_mb(); if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && likely(efx->port_enabled) && likely(netif_device_present(efx->net_dev))) { - fill_level = tx_queue->insert_count - tx_queue->read_count; - if (fill_level < EFX_TXQ_THRESHOLD(efx)) + txq2 = efx_tx_queue_partner(tx_queue); + fill_level = max(tx_queue->insert_count - tx_queue->read_count, + txq2->insert_count - txq2->read_count); + if (fill_level <= efx->txq_wake_thresh) netif_tx_wake_queue(tx_queue->core_txq); } @@ -480,11 +463,26 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) } } +/* Size of page-based TSO header buffers. Larger blocks must be + * allocated from the heap. + */ +#define TSOH_STD_SIZE 128 +#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE) + +/* At most half the descriptors in the queue at any time will refer to + * a TSO header buffer, since they must always be followed by a + * payload descriptor referring to an skb. 
+ */ +static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue) +{ + return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE); +} + int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; unsigned int entries; - int i, rc; + int rc; /* Create the smallest power-of-two aligned ring */ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); @@ -500,17 +498,28 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) GFP_KERNEL); if (!tx_queue->buffer) return -ENOMEM; - for (i = 0; i <= tx_queue->ptr_mask; ++i) - tx_queue->buffer[i].continuation = true; + + if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) { + tx_queue->tsoh_page = + kcalloc(efx_tsoh_page_count(tx_queue), + sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL); + if (!tx_queue->tsoh_page) { + rc = -ENOMEM; + goto fail1; + } + } /* Allocate hardware ring */ rc = efx_nic_probe_tx(tx_queue); if (rc) - goto fail; + goto fail2; return 0; - fail: +fail2: + kfree(tx_queue->tsoh_page); + tx_queue->tsoh_page = NULL; +fail1: kfree(tx_queue->buffer); tx_queue->buffer = NULL; return rc; @@ -546,8 +555,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) unsigned int pkts_compl = 0, bytes_compl = 0; buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); - buffer->continuation = true; - buffer->len = 0; ++tx_queue->read_count; } @@ -568,13 +575,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) efx_nic_fini_tx(tx_queue); efx_release_tx_buffers(tx_queue); - - /* Free up TSO header cache */ - efx_fini_tso(tx_queue); } void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) { + int i; + if (!tx_queue->buffer) return; @@ -582,6 +588,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) "destroying TX queue %d\n", tx_queue->queue); efx_nic_remove_tx(tx_queue); + if (tx_queue->tsoh_page) { + for (i = 0; i < efx_tsoh_page_count(tx_queue); i++) + efx_nic_free_buffer(tx_queue->efx, + &tx_queue->tsoh_page[i]); + kfree(tx_queue->tsoh_page); + tx_queue->tsoh_page = NULL; + } + kfree(tx_queue->buffer); tx_queue->buffer = NULL; } @@ -604,22 +618,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) #define TSOH_OFFSET NET_IP_ALIGN #endif -#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET) - -/* Total size of struct efx_tso_header, buffer and padding */ -#define TSOH_SIZE(hdr_len) \ - (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len) - -/* Size of blocks on free list. Larger blocks must be allocated from - * the heap. 
- */ -#define TSOH_STD_SIZE 128 - #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) -#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) -#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) -#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) -#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data) /** * struct tso_state - TSO state for an SKB @@ -631,10 +630,12 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) * @in_len: Remaining length in current SKB fragment * @unmap_len: Length of SKB fragment * @unmap_addr: DMA address of SKB fragment - * @unmap_single: DMA single vs page mapping flag + * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0 * @protocol: Network protocol (after any VLAN header) + * @ip_off: Offset of IP header + * @tcp_off: Offset of TCP header * @header_len: Number of bytes of header - * @full_packet_size: Number of bytes to put in each outgoing segment + * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload * * The state used during segmentation. It is put into this data structure * just to make it easy to pass into inline functions. @@ -651,11 +652,13 @@ struct tso_state { unsigned in_len; unsigned unmap_len; dma_addr_t unmap_addr; - bool unmap_single; + unsigned short dma_flags; __be16 protocol; + unsigned int ip_off; + unsigned int tcp_off; unsigned header_len; - int full_packet_size; + unsigned int ip_base_len; }; @@ -687,91 +690,43 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb) return protocol; } - -/* - * Allocate a page worth of efx_tso_header structures, and string them - * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. - */ -static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) +static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, unsigned int len) { - struct device *dma_dev = &tx_queue->efx->pci_dev->dev; - struct efx_tso_header *tsoh; - dma_addr_t dma_addr; - u8 *base_kva, *kva; + u8 *result; - base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC); - if (base_kva == NULL) { - netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, - "Unable to allocate page for TSO headers\n"); - return -ENOMEM; - } - - /* dma_alloc_coherent() allocates pages. */ - EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); - - for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { - tsoh = (struct efx_tso_header *)kva; - tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); - tsoh->next = tx_queue->tso_headers_free; - tx_queue->tso_headers_free = tsoh; - } - - return 0; -} - - -/* Free up a TSO header, and all others in the same page. 
*/ -static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, - struct efx_tso_header *tsoh, - struct device *dma_dev) -{ - struct efx_tso_header **p; - unsigned long base_kva; - dma_addr_t base_dma; - - base_kva = (unsigned long)tsoh & PAGE_MASK; - base_dma = tsoh->dma_addr & PAGE_MASK; - - p = &tx_queue->tso_headers_free; - while (*p != NULL) { - if (((unsigned long)*p & PAGE_MASK) == base_kva) - *p = (*p)->next; - else - p = &(*p)->next; - } + EFX_BUG_ON_PARANOID(buffer->len); + EFX_BUG_ON_PARANOID(buffer->flags); + EFX_BUG_ON_PARANOID(buffer->unmap_len); - dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma); -} + if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) { + unsigned index = + (tx_queue->insert_count & tx_queue->ptr_mask) / 2; + struct efx_buffer *page_buf = + &tx_queue->tsoh_page[index / TSOH_PER_PAGE]; + unsigned offset = + TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET; + + if (unlikely(!page_buf->addr) && + efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE)) + return NULL; + + result = (u8 *)page_buf->addr + offset; + buffer->dma_addr = page_buf->dma_addr + offset; + buffer->flags = EFX_TX_BUF_CONT; + } else { + tx_queue->tso_long_headers++; -static struct efx_tso_header * -efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) -{ - struct efx_tso_header *tsoh; - - tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA); - if (unlikely(!tsoh)) - return NULL; - - tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, - TSOH_BUFFER(tsoh), header_len, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, - tsoh->dma_addr))) { - kfree(tsoh); - return NULL; + buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC); + if (unlikely(!buffer->heap_buf)) + return NULL; + result = (u8 *)buffer->heap_buf + TSOH_OFFSET; + buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP; } - tsoh->unmap_len = header_len; - return tsoh; -} + buffer->len = len; -static void -efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) -{ - dma_unmap_single(&tx_queue->efx->pci_dev->dev, - tsoh->dma_addr, tsoh->unmap_len, - DMA_TO_DEVICE); - kfree(tsoh); + return result; } /** @@ -781,47 +736,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) * @len: Length of fragment * @final_buffer: The final buffer inserted into the queue * - * Push descriptors onto the TX queue. Return 0 on success or 1 if - * @tx_queue full. + * Push descriptors onto the TX queue. */ -static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, - dma_addr_t dma_addr, unsigned len, - struct efx_tx_buffer **final_buffer) +static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue, + dma_addr_t dma_addr, unsigned len, + struct efx_tx_buffer **final_buffer) { struct efx_tx_buffer *buffer; struct efx_nic *efx = tx_queue->efx; - unsigned dma_len, fill_level, insert_ptr; - int q_space; + unsigned dma_len, insert_ptr; EFX_BUG_ON_PARANOID(len <= 0); - fill_level = tx_queue->insert_count - tx_queue->old_read_count; - /* -1 as there is no way to represent all descriptors used */ - q_space = efx->txq_entries - 1 - fill_level; - while (1) { - if (unlikely(q_space-- <= 0)) { - /* It might be that completions have happened - * since the xmit path last checked. Update - * the xmit path's copy of read_count. - */ - netif_tx_stop_queue(tx_queue->core_txq); - /* This memory barrier protects the change of - * queue state from the access of read_count. 
*/ - smp_mb(); - tx_queue->old_read_count = - ACCESS_ONCE(tx_queue->read_count); - fill_level = (tx_queue->insert_count - - tx_queue->old_read_count); - q_space = efx->txq_entries - 1 - fill_level; - if (unlikely(q_space-- <= 0)) { - *final_buffer = NULL; - return 1; - } - smp_mb(); - netif_tx_start_queue(tx_queue->core_txq); - } - insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; ++tx_queue->insert_count; @@ -830,12 +757,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, tx_queue->read_count >= efx->txq_entries); - efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->unmap_len); - EFX_BUG_ON_PARANOID(buffer->skb); - EFX_BUG_ON_PARANOID(!buffer->continuation); - EFX_BUG_ON_PARANOID(buffer->tsoh); + EFX_BUG_ON_PARANOID(buffer->flags); buffer->dma_addr = dma_addr; @@ -845,7 +769,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, if (dma_len >= len) break; - buffer->len = dma_len; /* Don't set the other members */ + buffer->len = dma_len; + buffer->flags = EFX_TX_BUF_CONT; dma_addr += dma_len; len -= dma_len; } @@ -853,7 +778,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, EFX_BUG_ON_PARANOID(!len); buffer->len = len; *final_buffer = buffer; - return 0; } @@ -864,54 +788,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, * a single fragment, and we know it doesn't cross a page boundary. It * also allows us to not worry about end-of-packet etc. */ -static void efx_tso_put_header(struct efx_tx_queue *tx_queue, - struct efx_tso_header *tsoh, unsigned len) +static int efx_tso_put_header(struct efx_tx_queue *tx_queue, + struct efx_tx_buffer *buffer, u8 *header) { - struct efx_tx_buffer *buffer; - - buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; - efx_tsoh_free(tx_queue, buffer); - EFX_BUG_ON_PARANOID(buffer->len); - EFX_BUG_ON_PARANOID(buffer->unmap_len); - EFX_BUG_ON_PARANOID(buffer->skb); - EFX_BUG_ON_PARANOID(!buffer->continuation); - EFX_BUG_ON_PARANOID(buffer->tsoh); - buffer->len = len; - buffer->dma_addr = tsoh->dma_addr; - buffer->tsoh = tsoh; + if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) { + buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev, + header, buffer->len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev, + buffer->dma_addr))) { + kfree(buffer->heap_buf); + buffer->len = 0; + buffer->flags = 0; + return -ENOMEM; + } + buffer->unmap_len = buffer->len; + buffer->flags |= EFX_TX_BUF_MAP_SINGLE; + } ++tx_queue->insert_count; + return 0; } -/* Remove descriptors put into a tx_queue. */ +/* Remove buffers put into a tx_queue. None of the buffers must have + * an skb attached. 
+ */ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; - dma_addr_t unmap_addr; /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; - efx_tsoh_free(tx_queue, buffer); - EFX_BUG_ON_PARANOID(buffer->skb); - if (buffer->unmap_len) { - unmap_addr = (buffer->dma_addr + buffer->len - - buffer->unmap_len); - if (buffer->unmap_single) - dma_unmap_single(&tx_queue->efx->pci_dev->dev, - unmap_addr, buffer->unmap_len, - DMA_TO_DEVICE); - else - dma_unmap_page(&tx_queue->efx->pci_dev->dev, - unmap_addr, buffer->unmap_len, - DMA_TO_DEVICE); - buffer->unmap_len = 0; - } - buffer->len = 0; - buffer->continuation = true; + efx_dequeue_buffer(tx_queue, buffer, NULL, NULL); } } @@ -919,17 +831,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) /* Parse the SKB header and initialise state. */ static void tso_start(struct tso_state *st, const struct sk_buff *skb) { - /* All ethernet/IP/TCP headers combined size is TCP header size - * plus offset of TCP header relative to start of packet. - */ - st->header_len = ((tcp_hdr(skb)->doff << 2u) - + PTR_DIFF(tcp_hdr(skb), skb->data)); - st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size; - - if (st->protocol == htons(ETH_P_IP)) + st->ip_off = skb_network_header(skb) - skb->data; + st->tcp_off = skb_transport_header(skb) - skb->data; + st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u); + if (st->protocol == htons(ETH_P_IP)) { + st->ip_base_len = st->header_len - st->ip_off; st->ipv4_id = ntohs(ip_hdr(skb)->id); - else + } else { + st->ip_base_len = st->header_len - st->tcp_off; st->ipv4_id = 0; + } st->seqnum = ntohl(tcp_hdr(skb)->seq); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); @@ -938,7 +849,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb) st->out_len = skb->len - st->header_len; st->unmap_len = 0; - st->unmap_single = false; + st->dma_flags = 0; } static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, @@ -947,7 +858,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { - st->unmap_single = false; + st->dma_flags = 0; st->unmap_len = skb_frag_size(frag); st->in_len = skb_frag_size(frag); st->dma_addr = st->unmap_addr; @@ -965,7 +876,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl, len, DMA_TO_DEVICE); if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { - st->unmap_single = true; + st->dma_flags = EFX_TX_BUF_MAP_SINGLE; st->unmap_len = len; st->in_len = len; st->dma_addr = st->unmap_addr; @@ -982,20 +893,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, * @st: TSO state * * Form descriptors for the current fragment, until we reach the end - * of fragment or end-of-packet. Return 0 on success, 1 if not enough - * space in @tx_queue. + * of fragment or end-of-packet. 
*/ -static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, - const struct sk_buff *skb, - struct tso_state *st) +static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, + const struct sk_buff *skb, + struct tso_state *st) { struct efx_tx_buffer *buffer; - int n, end_of_packet, rc; + int n; if (st->in_len == 0) - return 0; + return; if (st->packet_space == 0) - return 0; + return; EFX_BUG_ON_PARANOID(st->in_len <= 0); EFX_BUG_ON_PARANOID(st->packet_space <= 0); @@ -1006,25 +916,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, st->out_len -= n; st->in_len -= n; - rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); - if (likely(rc == 0)) { - if (st->out_len == 0) - /* Transfer ownership of the skb */ - buffer->skb = skb; + efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); - end_of_packet = st->out_len == 0 || st->packet_space == 0; - buffer->continuation = !end_of_packet; + if (st->out_len == 0) { + /* Transfer ownership of the skb */ + buffer->skb = skb; + buffer->flags = EFX_TX_BUF_SKB; + } else if (st->packet_space != 0) { + buffer->flags = EFX_TX_BUF_CONT; + } - if (st->in_len == 0) { - /* Transfer ownership of the DMA mapping */ - buffer->unmap_len = st->unmap_len; - buffer->unmap_single = st->unmap_single; - st->unmap_len = 0; - } + if (st->in_len == 0) { + /* Transfer ownership of the DMA mapping */ + buffer->unmap_len = st->unmap_len; + buffer->flags |= st->dma_flags; + st->unmap_len = 0; } st->dma_addr += n; - return rc; } @@ -1035,36 +944,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, * @st: TSO state * * Generate a new header and prepare for the new packet. Return 0 on - * success, or -1 if failed to alloc header. + * success, or -%ENOMEM if failed to alloc header. */ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, struct tso_state *st) { - struct efx_tso_header *tsoh; + struct efx_tx_buffer *buffer = + &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; struct tcphdr *tsoh_th; unsigned ip_length; u8 *header; + int rc; - /* Allocate a DMA-mapped header buffer. */ - if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) { - if (tx_queue->tso_headers_free == NULL) { - if (efx_tsoh_block_alloc(tx_queue)) - return -1; - } - EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); - tsoh = tx_queue->tso_headers_free; - tx_queue->tso_headers_free = tsoh->next; - tsoh->unmap_len = 0; - } else { - tx_queue->tso_long_headers++; - tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len); - if (unlikely(!tsoh)) - return -1; - } + /* Allocate and insert a DMA-mapped header buffer. */ + header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len); + if (!header) + return -ENOMEM; - header = TSOH_BUFFER(tsoh); - tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); + tsoh_th = (struct tcphdr *)(header + st->tcp_off); /* Copy and update the headers. */ memcpy(header, skb->data, st->header_len); @@ -1073,19 +971,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, st->seqnum += skb_shinfo(skb)->gso_size; if (st->out_len > skb_shinfo(skb)->gso_size) { /* This packet will not finish the TSO burst. */ - ip_length = st->full_packet_size - ETH_HDR_LEN(skb); + st->packet_space = skb_shinfo(skb)->gso_size; tsoh_th->fin = 0; tsoh_th->psh = 0; } else { /* This packet will be the last in the TSO burst. 
*/ - ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len; + st->packet_space = st->out_len; tsoh_th->fin = tcp_hdr(skb)->fin; tsoh_th->psh = tcp_hdr(skb)->psh; } + ip_length = st->ip_base_len + st->packet_space; if (st->protocol == htons(ETH_P_IP)) { - struct iphdr *tsoh_iph = - (struct iphdr *)(header + SKB_IPV4_OFF(skb)); + struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off); tsoh_iph->tot_len = htons(ip_length); @@ -1094,16 +992,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, st->ipv4_id++; } else { struct ipv6hdr *tsoh_iph = - (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb)); + (struct ipv6hdr *)(header + st->ip_off); - tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph)); + tsoh_iph->payload_len = htons(ip_length); } - st->packet_space = skb_shinfo(skb)->gso_size; - ++tx_queue->tso_packets; + rc = efx_tso_put_header(tx_queue, buffer, header); + if (unlikely(rc)) + return rc; - /* Form a descriptor for this header. */ - efx_tso_put_header(tx_queue, tsoh, st->header_len); + ++tx_queue->tso_packets; return 0; } @@ -1118,13 +1016,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, * * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if * @skb was not enqueued. In all cases @skb is consumed. Return - * %NETDEV_TX_OK or %NETDEV_TX_BUSY. + * %NETDEV_TX_OK. */ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; - int frag_i, rc, rc2 = NETDEV_TX_OK; + int frag_i, rc; struct tso_state state; /* Find the packet protocol and sanity-check it */ @@ -1156,11 +1054,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, goto mem_err; while (1) { - rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); - if (unlikely(rc)) { - rc2 = NETDEV_TX_BUSY; - goto unwind; - } + tso_fill_packet_with_fragment(tx_queue, skb, &state); /* Move onto the next fragment? */ if (state.in_len == 0) { @@ -1184,6 +1078,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); + efx_tx_maybe_stop_queue(tx_queue); + tx_queue->tso_bursts++; return NETDEV_TX_OK; @@ -1192,10 +1088,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, "Out of memory for TSO headers, or DMA mapping error\n"); dev_kfree_skb_any(skb); - unwind: /* Free the DMA mapping we were in the process of writing out */ if (state.unmap_len) { - if (state.unmap_single) + if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE) dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, state.unmap_len, DMA_TO_DEVICE); else @@ -1204,25 +1099,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, } efx_enqueue_unwind(tx_queue); - return rc2; -} - - -/* - * Free up all TSO datastructures associated with tx_queue. This - * routine should be called only once the tx_queue is both empty and - * will no longer be used. 
- */ -static void efx_fini_tso(struct efx_tx_queue *tx_queue) -{ - unsigned i; - - if (tx_queue->buffer) { - for (i = 0; i <= tx_queue->ptr_mask; ++i) - efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); - } - - while (tx_queue->tso_headers_free != NULL) - efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, - &tx_queue->efx->pci_dev->dev); + return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index e2d083228f3a..719be3912aa9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __COMMON_H__ +#define __COMMON_H__ + #include <linux/etherdevice.h> #include <linux/netdevice.h> #include <linux/phy.h> @@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable); extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_ring_mode_ops ring_mode_ops; + +#endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index 9820ec842cc0..223adf95fd03 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -20,6 +20,10 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ + +#ifndef __DESCS_H__ +#define __DESCS_H__ + struct dma_desc { /* Receive descriptor */ union { @@ -166,3 +170,5 @@ enum tdes_csum_insertion { * is not calculated */ cic_full = 3, /* IP header and pseudoheader */ }; + +#endif /* __DESCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index dd8d6e19dff6..7ee9499a6e38 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -27,6 +27,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __DESC_COM_H__ +#define __DESC_COM_H__ + #if defined(CONFIG_STMMAC_RING) static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) { @@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) p->des01.tx.buffer1_size = len; } #endif + +#endif /* __DESC_COM_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h index 7c6d857a9cc7..2ec6aeae349e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __DWMAC100_H__ +#define __DWMAC100_H__ + #include <linux/phy.h> #include "common.h" @@ -119,3 +122,5 @@ enum ttc_control { #define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */ extern const struct stmmac_dma_ops dwmac100_dma_ops; + +#endif /* __DWMAC100_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index f90fcb5f9573..0e4cacedc1f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -19,6 +19,8 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/ +#ifndef __DWMAC1000_H__ +#define __DWMAC1000_H__ #include <linux/phy.h> #include "common.h" @@ -229,6 +231,7 @@ enum rtc_control { #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 /* Synopsys Core versions */ -#define DWMAC_CORE_3_40 34 +#define DWMAC_CORE_3_40 0x34 extern const struct stmmac_dma_ops dwmac1000_dma_ops; +#endif /* __DWMAC1000_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index e678ce39d014..e49c9a0fd6ff 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __DWMAC_DMA_H__ +#define __DWMAC_DMA_H__ + /* DMA CRS Control and Status Register Mapping */ #define DMA_BUS_MODE 0x00001000 /* Bus Mode */ #define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ @@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr); extern void dwmac_dma_stop_rx(void __iomem *ioaddr); extern int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); + +#endif /* __DWMAC_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index a38352024cb8..67995ef25251 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __MMC_H__ +#define __MMC_H__ + /* MMC control register */ /* When set, all counters are reset */ #define MMC_CNTRL_COUNTER_RESET 0x1 @@ -129,3 +132,5 @@ struct stmmac_counters { extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); + +#endif /* __MMC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index c07cfe989f6e..0c74a702d461 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -33,7 +33,7 @@ #define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ #define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ #define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ -#define MMC_DEFAUL_MASK 0xffffffff +#define MMC_DEFAULT_MASK 0xffffffff /* MMC TX counter registers */ @@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) /* To mask all interrupts. */ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) { - writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK); - writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); } /* This reads the MAC core counters (if actually supported).
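Two of the stmmac fixes above are worth unpacking. The new #ifndef/#define guards make the headers safe to include more than once, and DWMAC_CORE_3_40 changes from 34 to 0x34 because the Synopsys core reports its release number as a BCD-style hex byte: core 3.40 reads back as 0x34, so a >= test against decimal 34 (0x22) would wrongly treat cores as old as 2.2 as being 3.40 or newer. A minimal sketch of the kind of comparison affected, assuming the release byte sits in the low byte of the hardware ID register; the helper name is illustrative, not the driver's API:

#include <stdint.h>
#include <stdbool.h>

#define DWMAC_CORE_3_40	0x34	/* core 3.40, BCD-style version byte */

static bool dwmac_core_is_3_40_or_newer(uint32_t hwid)
{
	uint8_t synid = hwid & 0xff;	/* release byte, e.g. 0x37 for core 3.70 */

	/* With the old decimal define (34 == 0x22), core 2.2 would pass. */
	return synid >= DWMAC_CORE_3_40;
}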
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index f2d3665430ad..e872e1da3137 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -20,6 +20,9 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __STMMAC_H__ +#define __STMMAC_H__ + #define STMMAC_RESOURCE_NAME "stmmaceth" #define DRV_MODULE_VERSION "March_2012" @@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void) { } #endif /* CONFIG_STMMAC_PCI */ + +#endif /* __STMMAC_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fd8882f9602a..c136162e6473 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2077,7 +2077,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, goto error_netdev_register; } - priv->stmmac_clk = clk_get(priv->device, NULL); + priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME); if (IS_ERR(priv->stmmac_clk)) { pr_warning("%s: warning: cannot get CSR clock\n", __func__); goto error_clk_get; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index ade108232048..0376a5e6b2bf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -177,7 +177,7 @@ int stmmac_mdio_register(struct net_device *ndev) new_bus->write = &stmmac_mdio_write; new_bus->reset = &stmmac_mdio_reset; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", - new_bus->name, mdio_bus_data->bus_id); + new_bus->name, priv->plat->bus_id); new_bus->priv = ndev; new_bus->irq = irqlist; new_bus->phy_mask = mdio_bus_data->phy_mask; @@ -213,12 +213,10 @@ int stmmac_mdio_register(struct net_device *ndev) * and no PHY number was provided to the MAC, * use the one probed here. 
*/ - if ((priv->plat->bus_id == mdio_bus_data->bus_id) && - (priv->plat->phy_addr == -1)) + if (priv->plat->phy_addr == -1) priv->plat->phy_addr = addr; - act = (priv->plat->bus_id == mdio_bus_data->bus_id) && - (priv->plat->phy_addr == addr); + act = (priv->plat->phy_addr == addr); switch (phydev->irq) { case PHY_POLL: irq_str = "POLL"; @@ -258,6 +256,9 @@ int stmmac_mdio_unregister(struct net_device *ndev) { struct stmmac_priv *priv = netdev_priv(ndev); + if (!priv->mii) + return 0; + mdiobus_unregister(priv->mii); priv->mii->priv = NULL; mdiobus_free(priv->mii); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 13afb8edfadc..1f069b0f6af5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -40,7 +40,6 @@ static void stmmac_default_data(void) plat_dat.has_gmac = 1; plat_dat.force_sf_dma_mode = 1; - mdio_data.bus_id = 1; mdio_data.phy_reset = NULL; mdio_data.phy_mask = 0; plat_dat.mdio_bus_data = &mdio_data; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index cd01ee7ecef1..ed112b55ae7f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -74,10 +74,11 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, * the necessary resources and invokes the main to init * the net device, register the mdio bus etc. */ -static int stmmac_pltfr_probe(struct platform_device *pdev) +static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; + struct device *dev = &pdev->dev; void __iomem *addr = NULL; struct stmmac_priv *priv = NULL; struct plat_stmmacenet_data *plat_dat = NULL; @@ -87,18 +88,10 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) if (!res) return -ENODEV; - if (!request_mem_region(res->start, resource_size(res), pdev->name)) { - pr_err("%s: ERROR: memory allocation failed" - "cannot get the I/O addr 0x%x\n", - __func__, (unsigned int)res->start); - return -EBUSY; - } - - addr = ioremap(res->start, resource_size(res)); + addr = devm_request_and_ioremap(dev, res); if (!addr) { pr_err("%s: ERROR: memory mapping failed", __func__); - ret = -ENOMEM; - goto out_release_region; + return -ENOMEM; } if (pdev->dev.of_node) { @@ -107,14 +100,13 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) GFP_KERNEL); if (!plat_dat) { pr_err("%s: ERROR: no memory", __func__); - ret = -ENOMEM; - goto out_unmap; + return -ENOMEM; } ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); if (ret) { pr_err("%s: main dt probe failed", __func__); - goto out_unmap; + return ret; } } else { plat_dat = pdev->dev.platform_data; @@ -124,13 +116,13 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) if (plat_dat->init) { ret = plat_dat->init(pdev); if (unlikely(ret)) - goto out_unmap; + return ret; } priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr); if (!priv) { pr_err("%s: main driver probe failed", __func__); - goto out_unmap; + return -ENODEV; } /* Get MAC address if available (DT) */ @@ -142,8 +134,7 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) if (priv->dev->irq == -ENXIO) { pr_err("%s: ERROR: MAC IRQ configuration " "information not found\n", __func__); - ret = -ENXIO; - goto out_unmap; + return -ENXIO; } /* @@ -165,15 +156,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) pr_debug("STMMAC 
platform driver registration completed"); return 0; - -out_unmap: - iounmap(addr); - platform_set_drvdata(pdev, NULL); - -out_release_region: - release_mem_region(res->start, resource_size(res)); - - return ret; } /** @@ -186,7 +168,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); - struct resource *res; int ret = stmmac_dvr_remove(ndev); if (priv->plat->exit) @@ -194,10 +175,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); - iounmap((void __force __iomem *)priv->ioaddr); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h index 6863590d184b..aea9b14cdfbe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h @@ -21,6 +21,8 @@ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ +#ifndef __STMMAC_TIMER_H__ +#define __STMMAC_TIMER_H__ struct stmmac_timer { void (*timer_start) (unsigned int new_freq); @@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev); extern int tmu2_register_user(void *fnt, void *data); extern void tmu2_unregister_user(void); #endif + +#endif /* __STMMAC_TIMER_H__ */ diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 967fe8cb476e..c9c977bf02ac 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -212,7 +212,6 @@ static void bigmac_clean_rings(struct bigmac *bp) static void bigmac_init_rings(struct bigmac *bp, int from_irq) { struct bmac_init_block *bb = bp->bmac_block; - struct net_device *dev = bp->dev; int i; gfp_t gfp_flags = GFP_KERNEL; diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 1b173a6145d6..b26cbda5efa9 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC config TI_DAVINCI_MDIO tristate "TI DaVinci MDIO Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) + depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX ) select PHYLIB ---help--- This driver supports TI's DaVinci MDIO module. @@ -42,7 +42,7 @@ config TI_DAVINCI_MDIO config TI_DAVINCI_CPDMA tristate "TI DaVinci CPDMA Support" - depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) + depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX ) ---help--- This driver supports TI's DaVinci CPDMA dma engine. 
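The stmmac_pltfr_probe() conversion above shows why the devm helpers shrink error handling so much: once the I/O region is requested and mapped with devm_request_and_ioremap(), the driver core releases and unmaps it automatically when probe fails or the device is unbound, which is what lets the patch delete the out_unmap and out_release_region labels and turn every failure path into a plain return. A condensed sketch of the pattern with placeholder names, not the stmmac code itself:

#include <linux/device.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Request + ioremap in one call; both are undone automatically
	 * on probe failure or device removal, so no cleanup labels. */
	addr = devm_request_and_ioremap(&pdev->dev, res);
	if (!addr)
		return -ENOMEM;

	/* ... remaining setup can simply return on error ... */
	return 0;
}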
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 1e5d85b06e71..0cbc0e59252c 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -28,6 +28,9 @@ #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/of_net.h> +#include <linux/of_device.h> #include <linux/platform_data/cpsw.h> @@ -709,6 +712,158 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv) slave->sliver = regs + data->sliver_reg_ofs; } +static int cpsw_probe_dt(struct cpsw_platform_data *data, + struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct device_node *slave_node; + int i = 0, ret; + u32 prop; + + if (!node) + return -EINVAL; + + if (of_property_read_u32(node, "slaves", &prop)) { + pr_err("Missing slaves property in the DT.\n"); + return -EINVAL; + } + data->slaves = prop; + + data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) * + data->slaves, GFP_KERNEL); + if (!data->slave_data) { + pr_err("Could not allocate slave memory.\n"); + return -EINVAL; + } + + data->no_bd_ram = of_property_read_bool(node, "no_bd_ram"); + + if (of_property_read_u32(node, "cpdma_channels", &prop)) { + pr_err("Missing cpdma_channels property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->channels = prop; + + if (of_property_read_u32(node, "host_port_no", &prop)) { + pr_err("Missing host_port_no property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->host_port_num = prop; + + if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) { + pr_err("Missing cpdma_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->cpdma_reg_ofs = prop; + + if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) { + pr_err("Missing cpdma_sram_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->cpdma_sram_ofs = prop; + + if (of_property_read_u32(node, "ale_reg_ofs", &prop)) { + pr_err("Missing ale_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->ale_reg_ofs = prop; + + if (of_property_read_u32(node, "ale_entries", &prop)) { + pr_err("Missing ale_entries property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->ale_entries = prop; + + if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) { + pr_err("Missing host_port_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->host_port_reg_ofs = prop; + + if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) { + pr_err("Missing hw_stats_reg_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->hw_stats_reg_ofs = prop; + + if (of_property_read_u32(node, "bd_ram_ofs", &prop)) { + pr_err("Missing bd_ram_ofs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->bd_ram_ofs = prop; + + if (of_property_read_u32(node, "bd_ram_size", &prop)) { + pr_err("Missing bd_ram_size property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->bd_ram_size = prop; + + if (of_property_read_u32(node, "rx_descs", &prop)) { + pr_err("Missing rx_descs property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->rx_descs = prop; + + if (of_property_read_u32(node, "mac_control", &prop)) { + pr_err("Missing mac_control property in the DT.\n"); + ret = -EINVAL; + goto error_ret; + } + data->mac_control = prop; + + for_each_child_of_node(node, slave_node) { + struct cpsw_slave_data *slave_data = data->slave_data + i; + const char 
*phy_id = NULL; + const void *mac_addr = NULL; + + if (of_property_read_string(slave_node, "phy_id", &phy_id)) { + pr_err("Missing slave[%d] phy_id property\n", i); + ret = -EINVAL; + goto error_ret; + } + slave_data->phy_id = phy_id; + + if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) { + pr_err("Missing slave[%d] slave_reg_ofs property\n", i); + ret = -EINVAL; + goto error_ret; + } + slave_data->slave_reg_ofs = prop; + + if (of_property_read_u32(slave_node, "sliver_reg_ofs", + &prop)) { + pr_err("Missing slave[%d] sliver_reg_ofs property\n", + i); + ret = -EINVAL; + goto error_ret; + } + slave_data->sliver_reg_ofs = prop; + + mac_addr = of_get_mac_address(slave_node); + if (mac_addr) + memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); + + i++; + } + + return 0; + +error_ret: + kfree(data->slave_data); + return ret; +} + static int __devinit cpsw_probe(struct platform_device *pdev) { struct cpsw_platform_data *data = pdev->dev.platform_data; @@ -720,11 +875,6 @@ static int __devinit cpsw_probe(struct platform_device *pdev) struct resource *res; int ret = 0, i, k = 0; - if (!data) { - pr_err("platform data missing\n"); - return -ENODEV; - } - ndev = alloc_etherdev(sizeof(struct cpsw_priv)); if (!ndev) { pr_err("error allocating net_device\n"); @@ -734,13 +884,19 @@ static int __devinit cpsw_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); priv = netdev_priv(ndev); spin_lock_init(&priv->lock); - priv->data = *data; priv->pdev = pdev; priv->ndev = ndev; priv->dev = &ndev->dev; priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); priv->rx_packet_max = max(rx_packet_max, 128); + if (cpsw_probe_dt(&priv->data, pdev)) { + pr_err("cpsw: platform data missing\n"); + ret = -ENODEV; + goto clean_ndev_ret; + } + data = &priv->data; + if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); pr_info("Detected MACID = %pM", priv->mac_addr); @@ -996,11 +1152,17 @@ static const struct dev_pm_ops cpsw_pm_ops = { .resume = cpsw_resume, }; +static const struct of_device_id cpsw_of_mtable[] = { + { .compatible = "ti,cpsw", }, + { /* sentinel */ }, +}; + static struct platform_driver cpsw_driver = { .driver = { .name = "cpsw", .owner = THIS_MODULE, .pm = &cpsw_pm_ops, + .of_match_table = of_match_ptr(cpsw_of_mtable), }, .probe = cpsw_probe, .remove = __devexit_p(cpsw_remove), diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 3b5c4571b55e..d15c888e9df8 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -538,11 +538,12 @@ EXPORT_SYMBOL_GPL(cpdma_chan_create); int cpdma_chan_destroy(struct cpdma_chan *chan) { - struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_ctlr *ctlr; unsigned long flags; if (!chan) return -EINVAL; + ctlr = chan->ctlr; spin_lock_irqsave(&ctlr->lock, flags); if (chan->state != CPDMA_STATE_IDLE) diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index cd7ee204e94a..51a96dbee9ac 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -36,6 +36,8 @@ #include <linux/io.h> #include <linux/pm_runtime.h> #include <linux/davinci_emac.h> +#include <linux/of.h> +#include <linux/of_device.h> /* * This timeout definition is a worst-case ultra defensive measure against @@ -289,6 +291,25 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id, return 0; } +static int davinci_mdio_probe_dt(struct 
mdio_platform_data *data, + struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + u32 prop; + + if (!node) + return -EINVAL; + + if (of_property_read_u32(node, "bus_freq", &prop)) { + pr_err("Missing bus_freq property in the DT.\n"); + return -EINVAL; + } + data->bus_freq = prop; + + return 0; +} + + static int __devinit davinci_mdio_probe(struct platform_device *pdev) { struct mdio_platform_data *pdata = pdev->dev.platform_data; @@ -304,8 +325,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev) return -ENOMEM; } - data->pdata = pdata ? (*pdata) : default_pdata; - data->bus = mdiobus_alloc(); if (!data->bus) { dev_err(dev, "failed to alloc mii bus\n"); @@ -313,14 +332,22 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev) goto bail_out; } + if (dev->of_node) { + if (davinci_mdio_probe_dt(&data->pdata, pdev)) + data->pdata = default_pdata; + snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + } else { + data->pdata = pdata ? (*pdata) : default_pdata; + snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + } + data->bus->name = dev_name(dev); data->bus->read = davinci_mdio_read, data->bus->write = davinci_mdio_write, data->bus->reset = davinci_mdio_reset, data->bus->parent = dev; data->bus->priv = data; - snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x", - pdev->name, pdev->id); pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); @@ -394,8 +421,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; struct davinci_mdio_data *data = dev_get_drvdata(dev); - if (data->bus) + if (data->bus) { + mdiobus_unregister(data->bus); mdiobus_free(data->bus); + } if (data->clk) clk_put(data->clk); @@ -454,11 +483,17 @@ static const struct dev_pm_ops davinci_mdio_pm_ops = { .resume = davinci_mdio_resume, }; +static const struct of_device_id davinci_mdio_of_mtable[] = { + { .compatible = "ti,davinci_mdio", }, + { /* sentinel */ }, +}; + static struct platform_driver davinci_mdio_driver = { .driver = { .name = "davinci_mdio", .owner = THIS_MODULE, .pm = &davinci_mdio_pm_ops, + .of_match_table = of_match_ptr(davinci_mdio_of_mtable), }, .probe = davinci_mdio_probe, .remove = __devexit_p(davinci_mdio_remove), diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 277c93e9ff4d..8fa947a2d929 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1359,7 +1359,6 @@ static int tsi108_open(struct net_device *dev) } data->rxskbs[i] = skb; - data->rxskbs[i] = skb; data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data); data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT; } diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index a5826a3111a6..2c08bf6e7bf3 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -637,8 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev) if (data && is_valid_ether_addr(data->mac_addr)) { memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); } else { - eth_random_addr(ndev->dev_addr); - ndev->addr_assign_type |= NET_ADDR_RANDOM; + eth_hw_addr_random(ndev); } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c index bdd8891c215a..88943d90c765 100644 --- a/drivers/net/ethernet/wiznet/w5300.c +++ b/drivers/net/ethernet/wiznet/w5300.c @@ -557,8 
+557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev) if (data && is_valid_ether_addr(data->mac_addr)) { memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN); } else { - eth_random_addr(ndev->dev_addr); - ndev->addr_assign_type |= NET_ADDR_RANDOM; + eth_hw_addr_random(ndev); } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 482648fcf0b6..98934bdf6acf 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1003,6 +1003,7 @@ static int ixp4xx_nway_reset(struct net_device *dev) } int ixp46x_phc_index = -1; +EXPORT_SYMBOL_GPL(ixp46x_phc_index); static int ixp4xx_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c index 24d8566cfd8b..441b4dc79450 100644 --- a/drivers/net/fddi/skfp/pmf.c +++ b/drivers/net/fddi/skfp/pmf.c @@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, sm_pm_get_ls(smc,port_to_mib(smc,port))) ; break ; case SMT_P_REASON : - * (u_long *) to = 0 ; + *(u32 *)to = 0 ; sp_len = 4 ; goto sp_done ; case SMT_P1033 : /* time stamp */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 6cee2917eb02..4a1a5f58fa73 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -383,13 +383,6 @@ int netvsc_device_remove(struct hv_device *device) unsigned long flags; net_device = hv_get_drvdata(device); - spin_lock_irqsave(&device->channel->inbound_lock, flags); - net_device->destroy = true; - spin_unlock_irqrestore(&device->channel->inbound_lock, flags); - - /* Wait for all send completions */ - wait_event(net_device->wait_drain, - atomic_read(&net_device->num_outstanding_sends) == 0); netvsc_disconnect_vsp(net_device); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 8c5a1c43c81d..e91111a656f7 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -400,7 +400,7 @@ static void netvsc_send_garp(struct work_struct *w) ndev_ctx = container_of(w, struct net_device_context, dwork.work); net_device = hv_get_drvdata(ndev_ctx->device_ctx); net = net_device->ndev; - netif_notify_peers(net); + netdev_notify_peers(net); } diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index e5d6146937fa..06f8601f32fc 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -46,8 +46,14 @@ struct rndis_request { /* Simplify allocation by having a netvsc packet inline */ struct hv_netvsc_packet pkt; struct hv_page_buffer buf; - /* FIXME: We assumed a fixed size request here. */ + struct rndis_message request_msg; + /* + * The buffer for the extended info after the RNDIS message. It's + * referenced based on the data offset in the RNDIS message. Its size + * is enough for current needs, and should be sufficient for the near + * future. 
+ */ u8 ext[100]; }; @@ -718,6 +724,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; struct rndis_halt_request *halt; + struct netvsc_device *nvdev = dev->net_dev; + struct hv_device *hdev = nvdev->dev; + ulong flags; /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, @@ -735,6 +744,14 @@ static void rndis_filter_halt_device(struct rndis_device *dev) dev->state = RNDIS_DEV_UNINITIALIZED; cleanup: + spin_lock_irqsave(&hdev->channel->inbound_lock, flags); + nvdev->destroy = true; + spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags); + + /* Wait for all send completions */ + wait_event(nvdev->wait_drain, + atomic_read(&nvdev->num_outstanding_sends) == 0); + if (request) put_rndis_request(dev, request); return; diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig new file mode 100644 index 000000000000..08ae4655423a --- /dev/null +++ b/drivers/net/ieee802154/Kconfig @@ -0,0 +1,47 @@ +menuconfig IEEE802154_DRIVERS + tristate "IEEE 802.15.4 drivers" + depends on NETDEVICES && IEEE802154 + default y + ---help--- + Say Y here to get to see options for IEEE 802.15.4 Low-Rate + Wireless Personal Area Network device drivers. This option alone + does not add any kernel code. + + If you say N, all options in this submenu will be skipped and + disabled. + +config IEEE802154_FAKEHARD + tristate "Fake LR-WPAN driver (HardMAC example)" + depends on IEEE802154_DRIVERS + ---help--- + Say Y here to enable the fake driver that serves as an example + of a HardMAC device driver. + + This driver can also be built as a module. To do so say M here. + The module will be called 'fakehard'. + +config IEEE802154_FAKELB + depends on IEEE802154_DRIVERS && MAC802154 + tristate "IEEE 802.15.4 loopback driver" + ---help--- + Say Y here to enable the fake driver that can emulate a network + of several interconnected radio devices. + + This driver can also be built as a module. To do so say M here. + The module will be called 'fakelb'. + +config IEEE802154_AT86RF230 + depends on IEEE802154_DRIVERS && MAC802154 + tristate "AT86RF230/231 transceiver driver" + depends on SPI + +config IEEE802154_MRF24J40 + tristate "Microchip MRF24J40 transceiver driver" + depends on IEEE802154_DRIVERS && MAC802154 + depends on SPI + ---help--- + Say Y here to enable the MRF24J40 SPI 802.15.4 wireless + controller. + + This driver can also be built as a module. To do so, say M here. + The module will be called 'mrf24j40'. diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile new file mode 100644 index 000000000000..abb0c08decb0 --- /dev/null +++ b/drivers/net/ieee802154/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o +obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o +obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o +obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c new file mode 100644 index 000000000000..ba753d87a32f --- /dev/null +++ b/drivers/net/ieee802154/at86rf230.c @@ -0,0 +1,958 @@ +/* + * AT86RF230/RF231 driver + * + * Copyright (C) 2009-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> +#include <linux/spinlock.h> +#include <linux/spi/spi.h> +#include <linux/spi/at86rf230.h> +#include <linux/skbuff.h> + +#include <net/mac802154.h> +#include <net/wpan-phy.h> + +struct at86rf230_local { + struct spi_device *spi; + int rstn, slp_tr, dig2; + + u8 part; + u8 vers; + + u8 buf[2]; + struct mutex bmux; + + struct work_struct irqwork; + struct completion tx_complete; + + struct ieee802154_dev *dev; + + spinlock_t lock; + bool irq_disabled; + bool is_tx; +}; + +#define RG_TRX_STATUS (0x01) +#define SR_TRX_STATUS 0x01, 0x1f, 0 +#define SR_RESERVED_01_3 0x01, 0x20, 5 +#define SR_CCA_STATUS 0x01, 0x40, 6 +#define SR_CCA_DONE 0x01, 0x80, 7 +#define RG_TRX_STATE (0x02) +#define SR_TRX_CMD 0x02, 0x1f, 0 +#define SR_TRAC_STATUS 0x02, 0xe0, 5 +#define RG_TRX_CTRL_0 (0x03) +#define SR_CLKM_CTRL 0x03, 0x07, 0 +#define SR_CLKM_SHA_SEL 0x03, 0x08, 3 +#define SR_PAD_IO_CLKM 0x03, 0x30, 4 +#define SR_PAD_IO 0x03, 0xc0, 6 +#define RG_TRX_CTRL_1 (0x04) +#define SR_IRQ_POLARITY 0x04, 0x01, 0 +#define SR_IRQ_MASK_MODE 0x04, 0x02, 1 +#define SR_SPI_CMD_MODE 0x04, 0x0c, 2 +#define SR_RX_BL_CTRL 0x04, 0x10, 4 +#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5 +#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6 +#define SR_PA_EXT_EN 0x04, 0x80, 7 +#define RG_PHY_TX_PWR (0x05) +#define SR_TX_PWR 0x05, 0x0f, 0 +#define SR_PA_LT 0x05, 0x30, 4 +#define SR_PA_BUF_LT 0x05, 0xc0, 6 +#define RG_PHY_RSSI (0x06) +#define SR_RSSI 0x06, 0x1f, 0 +#define SR_RND_VALUE 0x06, 0x60, 5 +#define SR_RX_CRC_VALID 0x06, 0x80, 7 +#define RG_PHY_ED_LEVEL (0x07) +#define SR_ED_LEVEL 0x07, 0xff, 0 +#define RG_PHY_CC_CCA (0x08) +#define SR_CHANNEL 0x08, 0x1f, 0 +#define SR_CCA_MODE 0x08, 0x60, 5 +#define SR_CCA_REQUEST 0x08, 0x80, 7 +#define RG_CCA_THRES (0x09) +#define SR_CCA_ED_THRES 0x09, 0x0f, 0 +#define SR_RESERVED_09_1 0x09, 0xf0, 4 +#define RG_RX_CTRL (0x0a) +#define SR_PDT_THRES 0x0a, 0x0f, 0 +#define SR_RESERVED_0a_1 0x0a, 0xf0, 4 +#define RG_SFD_VALUE (0x0b) +#define SR_SFD_VALUE 0x0b, 0xff, 0 +#define RG_TRX_CTRL_2 (0x0c) +#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0 +#define SR_RESERVED_0c_2 0x0c, 0x7c, 2 +#define SR_RX_SAFE_MODE 0x0c, 0x80, 7 +#define RG_ANT_DIV (0x0d) +#define SR_ANT_CTRL 0x0d, 0x03, 0 +#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2 +#define SR_ANT_DIV_EN 0x0d, 0x08, 3 +#define SR_RESERVED_0d_2 0x0d, 0x70, 4 +#define SR_ANT_SEL 0x0d, 0x80, 7 +#define RG_IRQ_MASK (0x0e) +#define SR_IRQ_MASK 0x0e, 0xff, 0 +#define RG_IRQ_STATUS (0x0f) +#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0 +#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1 +#define SR_IRQ_2_RX_START 0x0f, 0x04, 2 +#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3 +#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4 +#define SR_IRQ_5_AMI 0x0f, 0x20, 5 +#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6 +#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7 
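+/*
+ * Each SR_* macro expands to an (address, mask, shift) triple that the
+ * subregister accessors below take as three separate parameters. For
+ * example, SR_CHANNEL is 0x08, 0x1f, 0, so
+ * at86rf230_write_subreg(lp, SR_CHANNEL, 11) reads RG_PHY_CC_CCA (0x08),
+ * clears the low five bits and ORs in (11 << 0) before writing the
+ * value back.
+ */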
+#define RG_VREG_CTRL (0x10) +#define SR_RESERVED_10_6 0x10, 0x03, 0 +#define SR_DVDD_OK 0x10, 0x04, 2 +#define SR_DVREG_EXT 0x10, 0x08, 3 +#define SR_RESERVED_10_3 0x10, 0x30, 4 +#define SR_AVDD_OK 0x10, 0x40, 6 +#define SR_AVREG_EXT 0x10, 0x80, 7 +#define RG_BATMON (0x11) +#define SR_BATMON_VTH 0x11, 0x0f, 0 +#define SR_BATMON_HR 0x11, 0x10, 4 +#define SR_BATMON_OK 0x11, 0x20, 5 +#define SR_RESERVED_11_1 0x11, 0xc0, 6 +#define RG_XOSC_CTRL (0x12) +#define SR_XTAL_TRIM 0x12, 0x0f, 0 +#define SR_XTAL_MODE 0x12, 0xf0, 4 +#define RG_RX_SYN (0x15) +#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0 +#define SR_RESERVED_15_2 0x15, 0x70, 4 +#define SR_RX_PDT_DIS 0x15, 0x80, 7 +#define RG_XAH_CTRL_1 (0x17) +#define SR_RESERVED_17_8 0x17, 0x01, 0 +#define SR_AACK_PROM_MODE 0x17, 0x02, 1 +#define SR_AACK_ACK_TIME 0x17, 0x04, 2 +#define SR_RESERVED_17_5 0x17, 0x08, 3 +#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4 +#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5 +#define SR_RESERVED_17_2 0x17, 0x40, 6 +#define SR_RESERVED_17_1 0x17, 0x80, 7 +#define RG_FTN_CTRL (0x18) +#define SR_RESERVED_18_2 0x18, 0x7f, 0 +#define SR_FTN_START 0x18, 0x80, 7 +#define RG_PLL_CF (0x1a) +#define SR_RESERVED_1a_2 0x1a, 0x7f, 0 +#define SR_PLL_CF_START 0x1a, 0x80, 7 +#define RG_PLL_DCU (0x1b) +#define SR_RESERVED_1b_3 0x1b, 0x3f, 0 +#define SR_RESERVED_1b_2 0x1b, 0x40, 6 +#define SR_PLL_DCU_START 0x1b, 0x80, 7 +#define RG_PART_NUM (0x1c) +#define SR_PART_NUM 0x1c, 0xff, 0 +#define RG_VERSION_NUM (0x1d) +#define SR_VERSION_NUM 0x1d, 0xff, 0 +#define RG_MAN_ID_0 (0x1e) +#define SR_MAN_ID_0 0x1e, 0xff, 0 +#define RG_MAN_ID_1 (0x1f) +#define SR_MAN_ID_1 0x1f, 0xff, 0 +#define RG_SHORT_ADDR_0 (0x20) +#define SR_SHORT_ADDR_0 0x20, 0xff, 0 +#define RG_SHORT_ADDR_1 (0x21) +#define SR_SHORT_ADDR_1 0x21, 0xff, 0 +#define RG_PAN_ID_0 (0x22) +#define SR_PAN_ID_0 0x22, 0xff, 0 +#define RG_PAN_ID_1 (0x23) +#define SR_PAN_ID_1 0x23, 0xff, 0 +#define RG_IEEE_ADDR_0 (0x24) +#define SR_IEEE_ADDR_0 0x24, 0xff, 0 +#define RG_IEEE_ADDR_1 (0x25) +#define SR_IEEE_ADDR_1 0x25, 0xff, 0 +#define RG_IEEE_ADDR_2 (0x26) +#define SR_IEEE_ADDR_2 0x26, 0xff, 0 +#define RG_IEEE_ADDR_3 (0x27) +#define SR_IEEE_ADDR_3 0x27, 0xff, 0 +#define RG_IEEE_ADDR_4 (0x28) +#define SR_IEEE_ADDR_4 0x28, 0xff, 0 +#define RG_IEEE_ADDR_5 (0x29) +#define SR_IEEE_ADDR_5 0x29, 0xff, 0 +#define RG_IEEE_ADDR_6 (0x2a) +#define SR_IEEE_ADDR_6 0x2a, 0xff, 0 +#define RG_IEEE_ADDR_7 (0x2b) +#define SR_IEEE_ADDR_7 0x2b, 0xff, 0 +#define RG_XAH_CTRL_0 (0x2c) +#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0 +#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1 +#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4 +#define RG_CSMA_SEED_0 (0x2d) +#define SR_CSMA_SEED_0 0x2d, 0xff, 0 +#define RG_CSMA_SEED_1 (0x2e) +#define SR_CSMA_SEED_1 0x2e, 0x07, 0 +#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3 +#define SR_AACK_DIS_ACK 0x2e, 0x10, 4 +#define SR_AACK_SET_PD 0x2e, 0x20, 5 +#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6 +#define RG_CSMA_BE (0x2f) +#define SR_MIN_BE 0x2f, 0x0f, 0 +#define SR_MAX_BE 0x2f, 0xf0, 4 + +#define CMD_REG 0x80 +#define CMD_REG_MASK 0x3f +#define CMD_WRITE 0x40 +#define CMD_FB 0x20 + +#define IRQ_BAT_LOW (1 << 7) +#define IRQ_TRX_UR (1 << 6) +#define IRQ_AMI (1 << 5) +#define IRQ_CCA_ED (1 << 4) +#define IRQ_TRX_END (1 << 3) +#define IRQ_RX_START (1 << 2) +#define IRQ_PLL_UNL (1 << 1) +#define IRQ_PLL_LOCK (1 << 0) + +#define STATE_P_ON 0x00 /* BUSY */ +#define STATE_BUSY_RX 0x01 +#define STATE_BUSY_TX 0x02 +#define STATE_FORCE_TRX_OFF 0x03 +#define STATE_FORCE_TX_ON 0x04 /* IDLE */ +/* 0x05 */ /* INVALID_PARAMETER */ 
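+/*
+ * The STATE_* codes double as TRX_CMD commands and TRX_STATUS readbacks:
+ * at86rf230_state() writes one of them through SR_TRX_CMD, then polls
+ * SR_TRX_STATUS until the transceiver reports it; the FORCE_* commands
+ * settle in TX_ON and TRX_OFF respectively.
+ */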
+#define STATE_RX_ON 0x06 +/* 0x07 */ /* SUCCESS */ +#define STATE_TRX_OFF 0x08 +#define STATE_TX_ON 0x09 +/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */ +#define STATE_SLEEP 0x0F +#define STATE_BUSY_RX_AACK 0x11 +#define STATE_BUSY_TX_ARET 0x12 +#define STATE_BUSY_RX_AACK_ON 0x16 +#define STATE_BUSY_TX_ARET_ON 0x19 +#define STATE_RX_ON_NOCLK 0x1C +#define STATE_RX_AACK_ON_NOCLK 0x1D +#define STATE_BUSY_RX_AACK_NOCLK 0x1E +#define STATE_TRANSITION_IN_PROGRESS 0x1F + +static int +__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data) +{ + u8 *buf = lp->buf; + int status; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 2, + .tx_buf = buf, + }; + + buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE; + buf[1] = data; + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + status = spi_sync(lp->spi, &msg); + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + if (msg.status) + status = msg.status; + + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + + return status; +} + +static int +__at86rf230_read_subreg(struct at86rf230_local *lp, + u8 addr, u8 mask, int shift, u8 *data) +{ + u8 *buf = lp->buf; + int status; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 2, + .tx_buf = buf, + .rx_buf = buf, + }; + + buf[0] = (addr & CMD_REG_MASK) | CMD_REG; + buf[1] = 0xff; + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + status = spi_sync(lp->spi, &msg); + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + if (msg.status) + status = msg.status; + + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + + if (status == 0) + *data = buf[1]; + + return status; +} + +static int +at86rf230_read_subreg(struct at86rf230_local *lp, + u8 addr, u8 mask, int shift, u8 *data) +{ + int status; + + mutex_lock(&lp->bmux); + status = __at86rf230_read_subreg(lp, addr, mask, shift, data); + mutex_unlock(&lp->bmux); + + return status; +} + +static int +at86rf230_write_subreg(struct at86rf230_local *lp, + u8 addr, u8 mask, int shift, u8 data) +{ + int status; + u8 val; + + mutex_lock(&lp->bmux); + status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val); + if (status) + goto out; + + val &= ~mask; + val |= (data << shift) & mask; + + status = __at86rf230_write(lp, addr, val); +out: + mutex_unlock(&lp->bmux); + + return status; +} + +static int +at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len) +{ + u8 *buf = lp->buf; + int status; + struct spi_message msg; + struct spi_transfer xfer_head = { + .len = 2, + .tx_buf = buf, + + }; + struct spi_transfer xfer_buf = { + .len = len, + .tx_buf = data, + }; + + mutex_lock(&lp->bmux); + buf[0] = CMD_WRITE | CMD_FB; + buf[1] = len + 2; /* 2 bytes for CRC that isn't written */ + + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + + spi_message_init(&msg); + spi_message_add_tail(&xfer_head, &msg); + spi_message_add_tail(&xfer_buf, &msg); + + status = spi_sync(lp->spi, &msg); + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + if (msg.status) + status = msg.status; + + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + dev_vdbg(&lp->spi->dev, "buf[0] = 
%02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + + mutex_unlock(&lp->bmux); + return status; +} + +static int +at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi) +{ + u8 *buf = lp->buf; + int status; + struct spi_message msg; + struct spi_transfer xfer_head = { + .len = 2, + .tx_buf = buf, + .rx_buf = buf, + }; + struct spi_transfer xfer_head1 = { + .len = 2, + .tx_buf = buf, + .rx_buf = buf, + }; + struct spi_transfer xfer_buf = { + .len = 0, + .rx_buf = data, + }; + + mutex_lock(&lp->bmux); + + buf[0] = CMD_FB; + buf[1] = 0x00; + + spi_message_init(&msg); + spi_message_add_tail(&xfer_head, &msg); + + status = spi_sync(lp->spi, &msg); + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + + xfer_buf.len = *(buf + 1) + 1; + *len = buf[1]; + + buf[0] = CMD_FB; + buf[1] = 0x00; + + spi_message_init(&msg); + spi_message_add_tail(&xfer_head1, &msg); + spi_message_add_tail(&xfer_buf, &msg); + + status = spi_sync(lp->spi, &msg); + + if (msg.status) + status = msg.status; + + dev_vdbg(&lp->spi->dev, "status = %d\n", status); + dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]); + dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); + + if (!status) { + /* only pass the LQI up on a successful transfer */ + if (lqi && (*len > lp->buf[1])) + *lqi = data[lp->buf[1]]; + } + mutex_unlock(&lp->bmux); + + return status; +} + +static int +at86rf230_ed(struct ieee802154_dev *dev, u8 *level) +{ + might_sleep(); + BUG_ON(!level); + *level = 0xbe; + return 0; +} + +static int +at86rf230_state(struct ieee802154_dev *dev, int state) +{ + struct at86rf230_local *lp = dev->priv; + int rc; + u8 val; + u8 desired_status; + + might_sleep(); + + if (state == STATE_FORCE_TX_ON) + desired_status = STATE_TX_ON; + else if (state == STATE_FORCE_TRX_OFF) + desired_status = STATE_TRX_OFF; + else + desired_status = state; + + do { + rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val); + if (rc) + goto err; + } while (val == STATE_TRANSITION_IN_PROGRESS); + + if (val == desired_status) + return 0; + + /* state is equal to phy states */ + rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state); + if (rc) + goto err; + + do { + rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val); + if (rc) + goto err; + } while (val == STATE_TRANSITION_IN_PROGRESS); + + if (val == desired_status) + return 0; + + pr_err("unexpected state change: %d, asked for %d\n", val, state); + return -EBUSY; + +err: + pr_err("error: %d\n", rc); + return rc; +} + +static int +at86rf230_start(struct ieee802154_dev *dev) +{ + struct at86rf230_local *lp = dev->priv; + int rc; + + rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1); + if (rc) + return rc; + + return at86rf230_state(dev, STATE_RX_ON); +} + +static void +at86rf230_stop(struct ieee802154_dev *dev) +{ + at86rf230_state(dev, STATE_FORCE_TRX_OFF); +} + +static int +at86rf230_channel(struct ieee802154_dev *dev, int page, int channel) +{ + struct at86rf230_local *lp = dev->priv; + int rc; + + might_sleep(); + + if (page != 0 || channel < 11 || channel > 26) { + WARN_ON(1); + return -EINVAL; + } + + rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel); + if (rc) + return rc; + msleep(1); /* Wait for PLL */ + dev->phy->current_channel = channel; + + return 0; +} + +static int +at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) +{ + struct at86rf230_local *lp = dev->priv; + int rc; + unsigned long flags; + + spin_lock(&lp->lock); + if (lp->irq_disabled) { + spin_unlock(&lp->lock); + return -EBUSY; + } + spin_unlock(&lp->lock); + + might_sleep(); + + rc = at86rf230_state(dev, STATE_FORCE_TX_ON); + if (rc) + goto err; + + 
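+ /*
+  * TX handshake: mark the transmission as pending under the lock, push
+  * the frame and the BUSY_TX command out over SPI, then sleep until the
+  * IRQ worker signals tx_complete on the TRX_END interrupt.
+  */
+ 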
spin_lock_irqsave(&lp->lock, flags); + lp->is_tx = 1; + INIT_COMPLETION(lp->tx_complete); + spin_unlock_irqrestore(&lp->lock, flags); + + rc = at86rf230_write_fbuf(lp, skb->data, skb->len); + if (rc) + goto err_rx; + + rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX); + if (rc) + goto err_rx; + + rc = wait_for_completion_interruptible(&lp->tx_complete); + if (rc < 0) + goto err_rx; + + rc = at86rf230_start(dev); + + return rc; + +err_rx: + at86rf230_start(dev); +err: + pr_err("error: %d\n", rc); + + spin_lock_irqsave(&lp->lock, flags); + lp->is_tx = 0; + spin_unlock_irqrestore(&lp->lock, flags); + + return rc; +} + +static int at86rf230_rx(struct at86rf230_local *lp) +{ + u8 len = 128, lqi = 0; + struct sk_buff *skb; + + skb = alloc_skb(len, GFP_KERNEL); + + if (!skb) + return -ENOMEM; + + if (at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi)) + goto err; + + if (len < 2) + goto err; + + skb_trim(skb, len - 2); /* We do not put CRC into the frame */ + + ieee802154_rx_irqsafe(lp->dev, skb, lqi); + + dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi); + + return 0; +err: + pr_debug("frame read error or frame too small\n"); + + kfree_skb(skb); + return -EINVAL; +} + +static struct ieee802154_ops at86rf230_ops = { + .owner = THIS_MODULE, + .xmit = at86rf230_xmit, + .ed = at86rf230_ed, + .set_channel = at86rf230_channel, + .start = at86rf230_start, + .stop = at86rf230_stop, +}; + +static void at86rf230_irqwork(struct work_struct *work) +{ + struct at86rf230_local *lp = + container_of(work, struct at86rf230_local, irqwork); + u8 status = 0, val; + int rc; + unsigned long flags; + + rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val); + if (!rc) + status |= val; + + status &= ~IRQ_PLL_LOCK; /* ignore */ + status &= ~IRQ_RX_START; /* ignore */ + status &= ~IRQ_AMI; /* ignore */ + status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/ + + if (status & IRQ_TRX_END) { + spin_lock_irqsave(&lp->lock, flags); + status &= ~IRQ_TRX_END; + if (lp->is_tx) { + lp->is_tx = 0; + spin_unlock_irqrestore(&lp->lock, flags); + complete(&lp->tx_complete); + } else { + spin_unlock_irqrestore(&lp->lock, flags); + at86rf230_rx(lp); + } + } + + spin_lock_irqsave(&lp->lock, flags); + lp->irq_disabled = 0; + spin_unlock_irqrestore(&lp->lock, flags); + + enable_irq(lp->spi->irq); +} + +static irqreturn_t at86rf230_isr(int irq, void *data) +{ + struct at86rf230_local *lp = data; + + disable_irq_nosync(irq); + + spin_lock(&lp->lock); + lp->irq_disabled = 1; + spin_unlock(&lp->lock); + + schedule_work(&lp->irqwork); + + return IRQ_HANDLED; +} + + +static int at86rf230_hw_init(struct at86rf230_local *lp) +{ + u8 status; + int rc; + + rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status); + if (rc) + return rc; + + dev_info(&lp->spi->dev, "Status: %02x\n", status); + if (status == STATE_P_ON) { + rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF); + if (rc) + return rc; + msleep(1); + rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status); + if (rc) + return rc; + dev_info(&lp->spi->dev, "Status: %02x\n", status); + } + + rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR | + * IRQ_CCA_ED | + * IRQ_TRX_END | + * IRQ_PLL_UNL | + * IRQ_PLL_LOCK + */ + if (rc) + return rc; + + /* CLKM changes are applied immediately */ + rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00); + if (rc) + return rc; + + /* Turn CLKM Off */ + rc = at86rf230_write_subreg(lp, SR_CLKM_CTRL, 0x00); + if (rc) + return rc; + /* Wait the next SLEEP cycle */ + msleep(100); + + rc = at86rf230_write_subreg(lp, SR_TRX_CMD, 
STATE_TX_ON); + if (rc) + return rc; + msleep(1); + + rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status); + if (rc) + return rc; + dev_info(&lp->spi->dev, "Status: %02x\n", status); + + rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status); + if (rc) + return rc; + if (!status) { + dev_err(&lp->spi->dev, "DVDD error\n"); + return -EINVAL; + } + + rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status); + if (rc) + return rc; + if (!status) { + dev_err(&lp->spi->dev, "AVDD error\n"); + return -EINVAL; + } + + return 0; +} + +static int at86rf230_suspend(struct spi_device *spi, pm_message_t message) +{ + return 0; +} + +static int at86rf230_resume(struct spi_device *spi) +{ + return 0; +} + +static int at86rf230_fill_data(struct spi_device *spi) +{ + struct at86rf230_local *lp = spi_get_drvdata(spi); + struct at86rf230_platform_data *pdata = spi->dev.platform_data; + + if (!pdata) { + dev_err(&spi->dev, "no platform_data\n"); + return -EINVAL; + } + + lp->rstn = pdata->rstn; + lp->slp_tr = pdata->slp_tr; + lp->dig2 = pdata->dig2; + + return 0; +} + +static int __devinit at86rf230_probe(struct spi_device *spi) +{ + struct ieee802154_dev *dev; + struct at86rf230_local *lp; + u8 man_id_0, man_id_1; + int rc; + const char *chip; + int supported = 0; + + if (!spi->irq) { + dev_err(&spi->dev, "no IRQ specified\n"); + return -EINVAL; + } + + dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops); + if (!dev) + return -ENOMEM; + + lp = dev->priv; + lp->dev = dev; + + lp->spi = spi; + + dev->parent = &spi->dev; + dev->extra_tx_headroom = 0; + /* We support only the 2.4 GHz band */ + dev->phy->channels_supported[0] = 0x7FFF800; + dev->flags = IEEE802154_HW_OMIT_CKSUM; + + mutex_init(&lp->bmux); + INIT_WORK(&lp->irqwork, at86rf230_irqwork); + spin_lock_init(&lp->lock); + init_completion(&lp->tx_complete); + + spi_set_drvdata(spi, lp); + + rc = at86rf230_fill_data(spi); + if (rc) + goto err_fill; + + rc = gpio_request(lp->rstn, "rstn"); + if (rc) + goto err_rstn; + + if (gpio_is_valid(lp->slp_tr)) { + rc = gpio_request(lp->slp_tr, "slp_tr"); + if (rc) + goto err_slp_tr; + } + + rc = gpio_direction_output(lp->rstn, 1); + if (rc) + goto err_gpio_dir; + + if (gpio_is_valid(lp->slp_tr)) { + rc = gpio_direction_output(lp->slp_tr, 0); + if (rc) + goto err_gpio_dir; + } + + /* Reset */ + msleep(1); + gpio_set_value(lp->rstn, 0); + msleep(1); + gpio_set_value(lp->rstn, 1); + msleep(1); + + rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0); + if (rc) + goto err_gpio_dir; + rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1); + if (rc) + goto err_gpio_dir; + + if (man_id_1 != 0x00 || man_id_0 != 0x1f) { + dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n", + man_id_1, man_id_0); + rc = -EINVAL; + goto err_gpio_dir; + } + + rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part); + if (rc) + goto err_gpio_dir; + + rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers); + if (rc) + goto err_gpio_dir; + + switch (lp->part) { + case 2: + chip = "at86rf230"; + /* supported = 1; FIXME: should be easy to support; */ + break; + case 3: + chip = "at86rf231"; + supported = 1; + break; + default: + chip = "UNKNOWN"; + break; + } + + dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers); + if (!supported) { + rc = -ENOTSUPP; + goto err_gpio_dir; + } + + rc = at86rf230_hw_init(lp); + if (rc) + goto err_gpio_dir; + + rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED, + dev_name(&spi->dev), lp); + if (rc) + goto err_gpio_dir; + + rc = ieee802154_register_device(lp->dev); + 
if (rc) + goto err_irq; + + return rc; + +err_irq: + free_irq(spi->irq, lp); + flush_work(&lp->irqwork); +err_gpio_dir: + if (gpio_is_valid(lp->slp_tr)) + gpio_free(lp->slp_tr); +err_slp_tr: + gpio_free(lp->rstn); +err_rstn: +err_fill: + spi_set_drvdata(spi, NULL); + mutex_destroy(&lp->bmux); + ieee802154_free_device(lp->dev); + return rc; +} + +static int __devexit at86rf230_remove(struct spi_device *spi) +{ + struct at86rf230_local *lp = spi_get_drvdata(spi); + + ieee802154_unregister_device(lp->dev); + + free_irq(spi->irq, lp); + flush_work(&lp->irqwork); + + if (gpio_is_valid(lp->slp_tr)) + gpio_free(lp->slp_tr); + gpio_free(lp->rstn); + + spi_set_drvdata(spi, NULL); + mutex_destroy(&lp->bmux); + ieee802154_free_device(lp->dev); + + dev_dbg(&spi->dev, "unregistered at86rf230\n"); + return 0; +} + +static struct spi_driver at86rf230_driver = { + .driver = { + .name = "at86rf230", + .owner = THIS_MODULE, + }, + .probe = at86rf230_probe, + .remove = __devexit_p(at86rf230_remove), + .suspend = at86rf230_suspend, + .resume = at86rf230_resume, +}; + +module_spi_driver(at86rf230_driver); + +MODULE_DESCRIPTION("AT86RF230 Transceiver Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c new file mode 100644 index 000000000000..7d39add7d467 --- /dev/null +++ b/drivers/net/ieee802154/fakehard.c @@ -0,0 +1,448 @@ +/* + * Sample driver for HardMAC IEEE 802.15.4 devices + * + * Copyright (C) 2009 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Written by: + * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com> + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/if_arp.h> + +#include <net/af_ieee802154.h> +#include <net/ieee802154_netdev.h> +#include <net/ieee802154.h> +#include <net/nl802154.h> +#include <net/wpan-phy.h> + +struct fakehard_priv { + struct wpan_phy *phy; +}; + +static struct wpan_phy *fake_to_phy(const struct net_device *dev) +{ + struct fakehard_priv *priv = netdev_priv(dev); + return priv->phy; +} + +/** + * fake_get_phy - Return a phy corresponding to this device. + * @dev: The network device for which to return the wpan-phy object + * + * This function returns a wpan-phy object corresponding to the passed + * network device. The reference counter of the wpan-phy object is + * incremented, so once the wpan-phy is no longer needed, drop the + * reference with a wpan_phy_put() call. + */ +static struct wpan_phy *fake_get_phy(const struct net_device *dev) +{ + struct wpan_phy *phy = fake_to_phy(dev); + return to_phy(get_device(&phy->dev)); +} + +/** + * fake_get_pan_id - Retrieve the PAN ID of the device. + * @dev: The network device to retrieve the PAN of. + * + * Return the ID of the PAN from the PIB. 
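+ * This sample driver keeps no PIB; it always reports the fixed PAN ID
+ * 0xeba1.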
+ */ +static u16 fake_get_pan_id(const struct net_device *dev) +{ + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return 0xeba1; +} + +/** + * fake_get_short_addr - Retrieve the short address of the device. + * @dev: The network device to retrieve the short address of. + * + * Returns the IEEE 802.15.4 short-form address cached for this + * device. If the device has not yet had a short address assigned + * then this should return 0xFFFF to indicate a lack of association. + */ +static u16 fake_get_short_addr(const struct net_device *dev) +{ + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return 0x1; +} + +/** + * fake_get_dsn - Retrieve the DSN of the device. + * @dev: The network device to retrieve the DSN for. + * + * Returns the IEEE 802.15.4 DSN for the network device. + * The DSN is the sequence number which will be added to each + * packet or MAC command frame by the MAC during transmission. + * + * DSN means 'Data Sequence Number'. + * + * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006 + * document. + */ +static u8 fake_get_dsn(const struct net_device *dev) +{ + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return 0x00; /* the DSN is implemented in HW, so just return 0 */ +} + +/** + * fake_get_bsn - Retrieve the BSN of the device. + * @dev: The network device to retrieve the BSN for. + * + * Returns the IEEE 802.15.4 BSN for the network device. + * The BSN is the sequence number which will be added to each + * beacon frame sent by the MAC. + * + * BSN means 'Beacon Sequence Number'. + * + * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006 + * document. + */ +static u8 fake_get_bsn(const struct net_device *dev) +{ + BUG_ON(dev->type != ARPHRD_IEEE802154); + + return 0x00; /* the BSN is implemented in HW, so just return 0 */ +} + +/** + * fake_assoc_req - Make an association request to the HW. + * @dev: The network device which we are associating to a network. + * @addr: The coordinator with which we wish to associate. + * @channel: The channel on which to associate. + * @page: The channel page on which to associate. + * @cap: The capability information field to use in the association. + * + * Start an association with a coordinator. The coordinator's address + * and PAN ID can be found in @addr. + * + * Note: This is in section 7.3.1 and 7.5.3.1 of the IEEE + * 802.15.4-2006 document. + */ +static int fake_assoc_req(struct net_device *dev, + struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap) +{ + struct wpan_phy *phy = fake_to_phy(dev); + + mutex_lock(&phy->pib_lock); + phy->current_channel = channel; + phy->current_page = page; + mutex_unlock(&phy->pib_lock); + + /* We simply emulate it here */ + return ieee802154_nl_assoc_confirm(dev, fake_get_short_addr(dev), + IEEE802154_SUCCESS); +} + +/** + * fake_assoc_resp - Send an association response to a device. + * @dev: The network device on which to send the response. + * @addr: The address of the device to respond to. + * @short_addr: The assigned short address for the device (if any). + * @status: The result of the association request. + * + * Queue the association response of the coordinator to another + * device's attempt to associate with the network which we + * coordinate. This is then added to the indirect-send queue to be + * transmitted to the end device when it polls for data. + * + * Note: This is in section 7.3.2 and 7.5.3.1 of the IEEE + * 802.15.4-2006 document. 
+ */ +static int fake_assoc_resp(struct net_device *dev, + struct ieee802154_addr *addr, u16 short_addr, u8 status) +{ + return 0; +} + +/** + * fake_disassoc_req - Disassociate a device from a network. + * @dev: The network device on which we're disassociating a device. + * @addr: The device to disassociate from the network. + * @reason: The reason to give to the device for being disassociated. + * + * This sends a disassociation notification to the device being + * disassociated from the network. + * + * Note: This is in section 7.5.3.2 of the IEEE 802.15.4-2006 + * document, with the reason described in 7.3.3.2. + */ +static int fake_disassoc_req(struct net_device *dev, + struct ieee802154_addr *addr, u8 reason) +{ + return ieee802154_nl_disassoc_confirm(dev, IEEE802154_SUCCESS); +} + +/** + * fake_start_req - Start an IEEE 802.15.4 PAN. + * @dev: The network device on which to start the PAN. + * @addr: The coordinator address to use when starting the PAN. + * @channel: The channel on which to start the PAN. + * @page: The channel page on which to start the PAN. + * @bcn_ord: Beacon order. + * @sf_ord: Superframe order. + * @pan_coord: Whether we are starting the PAN as its coordinator + * (non-zero) or merely requesting a coordinator realignment. + * @blx: Battery Life Extension feature bitfield. + * @coord_realign: Whether a coordinator realignment command should be + * transmitted. + * + * If pan_coord is non-zero then this starts a network with the + * provided parameters, otherwise it attempts a coordinator + * realignment of the stated network instead. + * + * Note: This is in section 7.5.2.3 of the IEEE 802.15.4-2006 + * document, with 7.3.8 describing coordinator realignment. + */ +static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr, + u8 channel, u8 page, + u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx, + u8 coord_realign) +{ + struct wpan_phy *phy = fake_to_phy(dev); + + mutex_lock(&phy->pib_lock); + phy->current_channel = channel; + phy->current_page = page; + mutex_unlock(&phy->pib_lock); + + /* We don't emulate beacons here at all, so START should fail */ + ieee802154_nl_start_confirm(dev, IEEE802154_INVALID_PARAMETER); + return 0; +} + +/** + * fake_scan_req - Start a channel scan. + * @dev: The network device on which to perform a channel scan. + * @type: The type of scan to perform. + * @channels: The channel bitmask to scan. + * @page: The channel page on which to scan. + * @duration: How long to spend on each channel. + * + * This starts either a passive (energy) scan or an active (PAN) scan + * on the channels indicated in the @channels bitmask. The duration of + * the scan is measured in terms of superframe duration. Specifically, + * the scan will spend aBaseSuperframeDuration * (2^n + 1) symbols on + * each channel, where n is the value of @duration. + * + * Note: This is in section 7.5.2.1 of the IEEE 802.15.4-2006 document. + */ +static int fake_scan_req(struct net_device *dev, u8 type, u32 channels, + u8 page, u8 duration) +{ + u8 edl[27] = {}; + return ieee802154_nl_scan_confirm(dev, IEEE802154_SUCCESS, type, + channels, page, + type == IEEE802154_MAC_SCAN_ED ? 
edl : NULL); +} + +static struct ieee802154_mlme_ops fake_mlme = { + .assoc_req = fake_assoc_req, + .assoc_resp = fake_assoc_resp, + .disassoc_req = fake_disassoc_req, + .start_req = fake_start_req, + .scan_req = fake_scan_req, + + .get_phy = fake_get_phy, + + .get_pan_id = fake_get_pan_id, + .get_short_addr = fake_get_short_addr, + .get_dsn = fake_get_dsn, + .get_bsn = fake_get_bsn, +}; + +static int ieee802154_fake_open(struct net_device *dev) +{ + netif_start_queue(dev); + return 0; +} + +static int ieee802154_fake_close(struct net_device *dev) +{ + netif_stop_queue(dev); + return 0; +} + +static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + /* FIXME: do hardware work here ... */ + + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + + +static int ieee802154_fake_ioctl(struct net_device *dev, struct ifreq *ifr, + int cmd) +{ + struct sockaddr_ieee802154 *sa = + (struct sockaddr_ieee802154 *)&ifr->ifr_addr; + u16 pan_id, short_addr; + + switch (cmd) { + case SIOCGIFADDR: + /* FIXME: fixed here, get from device IRL */ + pan_id = fake_get_pan_id(dev); + short_addr = fake_get_short_addr(dev); + if (pan_id == IEEE802154_PANID_BROADCAST || + short_addr == IEEE802154_ADDR_BROADCAST) + return -EADDRNOTAVAIL; + + sa->family = AF_IEEE802154; + sa->addr.addr_type = IEEE802154_ADDR_SHORT; + sa->addr.pan_id = pan_id; + sa->addr.short_addr = short_addr; + return 0; + } + return -ENOIOCTLCMD; +} + +static int ieee802154_fake_mac_addr(struct net_device *dev, void *p) +{ + return -EBUSY; /* HW address is built into the device */ +} + +static const struct net_device_ops fake_ops = { + .ndo_open = ieee802154_fake_open, + .ndo_stop = ieee802154_fake_close, + .ndo_start_xmit = ieee802154_fake_xmit, + .ndo_do_ioctl = ieee802154_fake_ioctl, + .ndo_set_mac_address = ieee802154_fake_mac_addr, +}; + +static void ieee802154_fake_destruct(struct net_device *dev) +{ + struct wpan_phy *phy = fake_to_phy(dev); + + wpan_phy_unregister(phy); + free_netdev(dev); + wpan_phy_free(phy); +} + +static void ieee802154_fake_setup(struct net_device *dev) +{ + dev->addr_len = IEEE802154_ADDR_LEN; + memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN); + dev->features = NETIF_F_HW_CSUM; + dev->needed_tailroom = 2; /* FCS */ + dev->mtu = 127; + dev->tx_queue_len = 10; + dev->type = ARPHRD_IEEE802154; + dev->flags = IFF_NOARP | IFF_BROADCAST; + dev->watchdog_timeo = 0; + dev->destructor = ieee802154_fake_destruct; +} + + +static int __devinit ieee802154fake_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct fakehard_priv *priv; + struct wpan_phy *phy = wpan_phy_alloc(0); + int err; + + if (!phy) + return -ENOMEM; + + dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d", ieee802154_fake_setup); + if (!dev) { + wpan_phy_free(phy); + return -ENOMEM; + } + + memcpy(dev->dev_addr, "\xba\xbe\xca\xfe\xde\xad\xbe\xef", + dev->addr_len); + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); + + /* + * For now we'd like to emulate a 2.4 GHz-only device, + * both O-QPSK and CSS + */ + /* 2.4 GHz O-QPSK 802.15.4-2003 */ + phy->channels_supported[0] |= 0x7FFF800; + /* 2.4 GHz CSS 802.15.4a-2007 */ + phy->channels_supported[3] |= 0x3fff; + + phy->transmit_power = 0xbf; + + dev->netdev_ops = &fake_ops; + dev->ml_priv = &fake_mlme; + + priv = netdev_priv(dev); + priv->phy = phy; + + wpan_phy_set_dev(phy, &pdev->dev); + SET_NETDEV_DEV(dev, &phy->dev); + + platform_set_drvdata(pdev, dev); + + err = wpan_phy_register(phy); 
+ if (err) + goto out; + + err = register_netdev(dev); + if (err < 0) + goto out_phy; + + dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n"); + return 0; + +out_phy: + wpan_phy_unregister(phy); +out: + free_netdev(dev); + wpan_phy_free(phy); + return err; +} + +static int __devexit ieee802154fake_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + unregister_netdev(dev); + return 0; +} + +static struct platform_device *ieee802154fake_dev; + +static struct platform_driver ieee802154fake_driver = { + .probe = ieee802154fake_probe, + .remove = __devexit_p(ieee802154fake_remove), + .driver = { + .name = "ieee802154hardmac", + .owner = THIS_MODULE, + }, +}; + +static __init int fake_init(void) +{ + ieee802154fake_dev = platform_device_register_simple( + "ieee802154hardmac", -1, NULL, 0); + return platform_driver_register(&ieee802154fake_driver); +} + +static __exit void fake_exit(void) +{ + platform_driver_unregister(&ieee802154fake_driver); + platform_device_unregister(ieee802154fake_dev); +} + +module_init(fake_init); +module_exit(fake_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c new file mode 100644 index 000000000000..e7456fcd0913 --- /dev/null +++ b/drivers/net/ieee802154/fakelb.c @@ -0,0 +1,294 @@ +/* + * Loopback IEEE 802.15.4 interface + * + * Copyright 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Written by: + * Sergey Lapin <slapin@ossfans.org> + * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> + */ + +#include <linux/module.h> +#include <linux/timer.h> +#include <linux/platform_device.h> +#include <linux/netdevice.h> +#include <linux/spinlock.h> +#include <net/mac802154.h> +#include <net/wpan-phy.h> + +static int numlbs = 1; + +struct fakelb_dev_priv { + struct ieee802154_dev *dev; + + struct list_head list; + struct fakelb_priv *fake; + + spinlock_t lock; + bool working; +}; + +struct fakelb_priv { + struct list_head list; + rwlock_t lock; +}; + +static int +fakelb_hw_ed(struct ieee802154_dev *dev, u8 *level) +{ + might_sleep(); + BUG_ON(!level); + *level = 0xbe; + + return 0; +} + +static int +fakelb_hw_channel(struct ieee802154_dev *dev, int page, int channel) +{ + pr_debug("set channel to %d\n", channel); + + might_sleep(); + dev->phy->current_page = page; + dev->phy->current_channel = channel; + + return 0; +} + +static void +fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb) +{ + struct sk_buff *newskb; + + spin_lock(&priv->lock); + if (priv->working) { + newskb = pskb_copy(skb, GFP_ATOMIC); + if (newskb) + ieee802154_rx_irqsafe(priv->dev, newskb, 0xcc); + } + spin_unlock(&priv->lock); +} + +static int +fakelb_hw_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) +{ + struct fakelb_dev_priv *priv = dev->priv; + struct fakelb_priv *fake = priv->fake; + + might_sleep(); + + read_lock_bh(&fake->lock); + if (priv->list.next == priv->list.prev) { + /* we are the only device */ + fakelb_hw_deliver(priv, skb); + } else { + struct fakelb_dev_priv *dp; + list_for_each_entry(dp, &priv->fake->list, list) { + if (dp != priv && + (dp->dev->phy->current_channel == + priv->dev->phy->current_channel)) + fakelb_hw_deliver(dp, skb); + } + } + read_unlock_bh(&fake->lock); + + return 0; +} + +static int +fakelb_hw_start(struct ieee802154_dev *dev) { + struct fakelb_dev_priv *priv = dev->priv; + int ret = 0; + + spin_lock(&priv->lock); + if (priv->working) + ret = -EBUSY; + else + priv->working = 1; + spin_unlock(&priv->lock); + + return ret; +} + +static void +fakelb_hw_stop(struct ieee802154_dev *dev) { + struct fakelb_dev_priv *priv = dev->priv; + + spin_lock(&priv->lock); + priv->working = 0; + spin_unlock(&priv->lock); +} + +static struct ieee802154_ops fakelb_ops = { + .owner = THIS_MODULE, + .xmit = fakelb_hw_xmit, + .ed = fakelb_hw_ed, + .set_channel = fakelb_hw_channel, + .start = fakelb_hw_start, + .stop = fakelb_hw_stop, +}; + +/* Number of dummy devices to be set up by this module. 
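 * Each fake device registers its own wpan-phy; fakelb_hw_xmit() then
 * delivers every transmitted frame to all other devices currently on
 * the same channel, emulating a shared radio medium.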
*/ +module_param(numlbs, int, 0); +MODULE_PARM_DESC(numlbs, "number of pseudo devices"); + +static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake) +{ + struct fakelb_dev_priv *priv; + int err; + struct ieee802154_dev *ieee; + + ieee = ieee802154_alloc_device(sizeof(*priv), &fakelb_ops); + if (!ieee) + return -ENOMEM; + + priv = ieee->priv; + priv->dev = ieee; + + /* 868 MHz BPSK 802.15.4-2003 */ + ieee->phy->channels_supported[0] |= 1; + /* 915 MHz BPSK 802.15.4-2003 */ + ieee->phy->channels_supported[0] |= 0x7fe; + /* 2.4 GHz O-QPSK 802.15.4-2003 */ + ieee->phy->channels_supported[0] |= 0x7FFF800; + /* 868 MHz ASK 802.15.4-2006 */ + ieee->phy->channels_supported[1] |= 1; + /* 915 MHz ASK 802.15.4-2006 */ + ieee->phy->channels_supported[1] |= 0x7fe; + /* 868 MHz O-QPSK 802.15.4-2006 */ + ieee->phy->channels_supported[2] |= 1; + /* 915 MHz O-QPSK 802.15.4-2006 */ + ieee->phy->channels_supported[2] |= 0x7fe; + /* 2.4 GHz CSS 802.15.4a-2007 */ + ieee->phy->channels_supported[3] |= 0x3fff; + /* UWB Sub-gigahertz 802.15.4a-2007 */ + ieee->phy->channels_supported[4] |= 1; + /* UWB Low band 802.15.4a-2007 */ + ieee->phy->channels_supported[4] |= 0x1e; + /* UWB High band 802.15.4a-2007 */ + ieee->phy->channels_supported[4] |= 0xffe0; + /* 750 MHz O-QPSK 802.15.4c-2009 */ + ieee->phy->channels_supported[5] |= 0xf; + /* 750 MHz MPSK 802.15.4c-2009 */ + ieee->phy->channels_supported[5] |= 0xf0; + /* 950 MHz BPSK 802.15.4d-2009 */ + ieee->phy->channels_supported[6] |= 0x3ff; + /* 950 MHz GFSK 802.15.4d-2009 */ + ieee->phy->channels_supported[6] |= 0x3ffc00; + + INIT_LIST_HEAD(&priv->list); + priv->fake = fake; + + spin_lock_init(&priv->lock); + + ieee->parent = dev; + + err = ieee802154_register_device(ieee); + if (err) + goto err_reg; + + write_lock_bh(&fake->lock); + list_add_tail(&priv->list, &fake->list); + write_unlock_bh(&fake->lock); + + return 0; + +err_reg: + ieee802154_free_device(priv->dev); + return err; +} + +static void fakelb_del(struct fakelb_dev_priv *priv) +{ + write_lock_bh(&priv->fake->lock); + list_del(&priv->list); + write_unlock_bh(&priv->fake->lock); + + ieee802154_unregister_device(priv->dev); + ieee802154_free_device(priv->dev); +} + +static int __devinit fakelb_probe(struct platform_device *pdev) +{ + struct fakelb_priv *priv; + struct fakelb_dev_priv *dp, *temp; + int err = -ENOMEM; + int i; + + priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL); + if (!priv) + goto err_alloc; + + INIT_LIST_HEAD(&priv->list); + rwlock_init(&priv->lock); + + for (i = 0; i < numlbs; i++) { + err = fakelb_add_one(&pdev->dev, priv); + if (err < 0) + goto err_slave; + } + + platform_set_drvdata(pdev, priv); + dev_info(&pdev->dev, "added ieee802154 hardware\n"); + return 0; + +err_slave: + list_for_each_entry_safe(dp, temp, &priv->list, list) + fakelb_del(dp); + kfree(priv); +err_alloc: + return err; +} + +static int __devexit fakelb_remove(struct platform_device *pdev) +{ + struct fakelb_priv *priv = platform_get_drvdata(pdev); + struct fakelb_dev_priv *dp, *temp; + + list_for_each_entry_safe(dp, temp, &priv->list, list) + fakelb_del(dp); + kfree(priv); + + return 0; +} + +static struct platform_device *ieee802154fake_dev; + +static struct platform_driver ieee802154fake_driver = { + .probe = fakelb_probe, + .remove = __devexit_p(fakelb_remove), + .driver = { + .name = "ieee802154fakelb", + .owner = THIS_MODULE, + }, +}; + +static __init int fakelb_init_module(void) +{ + ieee802154fake_dev = platform_device_register_simple( + "ieee802154fakelb", -1, NULL, 0); + return 
platform_driver_register(&ieee802154fake_driver); +} + +static __exit void fake_remove_module(void) +{ + platform_driver_unregister(&ieee802154fake_driver); + platform_device_unregister(ieee802154fake_dev); +} + +module_init(fakelb_init_module); +module_exit(fake_remove_module); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c new file mode 100644 index 000000000000..0e53d4f431d2 --- /dev/null +++ b/drivers/net/ieee802154/mrf24j40.c @@ -0,0 +1,767 @@ +/* + * Driver for Microchip MRF24J40 802.15.4 Wireless-PAN Networking controller + * + * Copyright (C) 2012 Alan Ott <alan@signal11.us> + * Signal 11 Software + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/spi/spi.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <net/wpan-phy.h> +#include <net/mac802154.h> + +/* MRF24J40 Short Address Registers */ +#define REG_RXMCR 0x00 /* Receive MAC control */ +#define REG_PANIDL 0x01 /* PAN ID (low) */ +#define REG_PANIDH 0x02 /* PAN ID (high) */ +#define REG_SADRL 0x03 /* Short address (low) */ +#define REG_SADRH 0x04 /* Short address (high) */ +#define REG_EADR0 0x05 /* Long address (low) (high is EADR7) */ +#define REG_TXMCR 0x11 /* Transmit MAC control */ +#define REG_PACON0 0x16 /* Power Amplifier Control */ +#define REG_PACON1 0x17 /* Power Amplifier Control */ +#define REG_PACON2 0x18 /* Power Amplifier Control */ +#define REG_TXNCON 0x1B /* Transmit Normal FIFO Control */ +#define REG_TXSTAT 0x24 /* TX MAC Status Register */ +#define REG_SOFTRST 0x2A /* Soft Reset */ +#define REG_TXSTBL 0x2E /* TX Stabilization */ +#define REG_INTSTAT 0x31 /* Interrupt Status */ +#define REG_INTCON 0x32 /* Interrupt Control */ +#define REG_RFCTL 0x36 /* RF Control Mode Register */ +#define REG_BBREG1 0x39 /* Baseband Registers */ +#define REG_BBREG2 0x3A /* */ +#define REG_BBREG6 0x3E /* */ +#define REG_CCAEDTH 0x3F /* Energy Detection Threshold */ + +/* MRF24J40 Long Address Registers */ +#define REG_RFCON0 0x200 /* RF Control Registers */ +#define REG_RFCON1 0x201 +#define REG_RFCON2 0x202 +#define REG_RFCON3 0x203 +#define REG_RFCON5 0x205 +#define REG_RFCON6 0x206 +#define REG_RFCON7 0x207 +#define REG_RFCON8 0x208 +#define REG_RSSI 0x210 +#define REG_SLPCON0 0x211 /* Sleep Clock Control Registers */ +#define REG_SLPCON1 0x220 +#define REG_WAKETIMEL 0x222 /* Wake-up Time Match Value Low */ +#define REG_WAKETIMEH 0x223 /* Wake-up Time Match Value High */ +#define REG_RX_FIFO 0x300 /* Receive FIFO */ + +/* Device configuration: Only channels 11-26 on page 0 are supported. 
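 * CHANNEL_MASK below therefore sets one bit per channel, bits 11-26:
 * ((u32)1 << 27) - ((u32)1 << 11) = 0x07FFF800, the same 2.4 GHz
 * O-QPSK mask the at86rf230 and fakehard drivers in this patch use.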
*/ +#define MRF24J40_CHAN_MIN 11 +#define MRF24J40_CHAN_MAX 26 +#define CHANNEL_MASK (((u32)1 << (MRF24J40_CHAN_MAX + 1)) \ + - ((u32)1 << MRF24J40_CHAN_MIN)) + +#define TX_FIFO_SIZE 128 /* From datasheet */ +#define RX_FIFO_SIZE 144 /* From datasheet */ +#define SET_CHANNEL_DELAY_US 192 /* From datasheet */ + +/* Device Private Data */ +struct mrf24j40 { + struct spi_device *spi; + struct ieee802154_dev *dev; + + struct mutex buffer_mutex; /* only used to protect buf */ + struct completion tx_complete; + struct work_struct irqwork; + u8 *buf; /* 3 bytes. Used for SPI single-register transfers. */ +}; + +/* Read/Write SPI Commands for Short and Long Address registers. */ +#define MRF24J40_READSHORT(reg) ((reg) << 1) +#define MRF24J40_WRITESHORT(reg) ((reg) << 1 | 1) +#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5) +#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4) + +/* Maximum speed to run the device at. TODO: Get the real max value from + * someone at Microchip since it isn't in the datasheet. */ +#define MAX_SPI_SPEED_HZ 1000000 + +#define printdev(X) (&X->spi->dev) + +static int write_short_reg(struct mrf24j40 *devrec, u8 reg, u8 value) +{ + int ret; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 2, + .tx_buf = devrec->buf, + .rx_buf = devrec->buf, + }; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + mutex_lock(&devrec->buffer_mutex); + devrec->buf[0] = MRF24J40_WRITESHORT(reg); + devrec->buf[1] = value; + + ret = spi_sync(devrec->spi, &msg); + if (ret) + dev_err(printdev(devrec), + "SPI write Failed for short register 0x%hhx\n", reg); + + mutex_unlock(&devrec->buffer_mutex); + return ret; +} + +static int read_short_reg(struct mrf24j40 *devrec, u8 reg, u8 *val) +{ + int ret = -1; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 2, + .tx_buf = devrec->buf, + .rx_buf = devrec->buf, + }; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + mutex_lock(&devrec->buffer_mutex); + devrec->buf[0] = MRF24J40_READSHORT(reg); + devrec->buf[1] = 0; + + ret = spi_sync(devrec->spi, &msg); + if (ret) + dev_err(printdev(devrec), + "SPI read Failed for short register 0x%hhx\n", reg); + else + *val = devrec->buf[1]; + + mutex_unlock(&devrec->buffer_mutex); + return ret; +} + +static int read_long_reg(struct mrf24j40 *devrec, u16 reg, u8 *value) +{ + int ret; + u16 cmd; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 3, + .tx_buf = devrec->buf, + .rx_buf = devrec->buf, + }; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + cmd = MRF24J40_READLONG(reg); + mutex_lock(&devrec->buffer_mutex); + devrec->buf[0] = cmd >> 8 & 0xff; + devrec->buf[1] = cmd & 0xff; + devrec->buf[2] = 0; + + ret = spi_sync(devrec->spi, &msg); + if (ret) + dev_err(printdev(devrec), + "SPI read Failed for long register 0x%hx\n", reg); + else + *value = devrec->buf[2]; + + mutex_unlock(&devrec->buffer_mutex); + return ret; +} + +static int write_long_reg(struct mrf24j40 *devrec, u16 reg, u8 val) +{ + int ret; + u16 cmd; + struct spi_message msg; + struct spi_transfer xfer = { + .len = 3, + .tx_buf = devrec->buf, + .rx_buf = devrec->buf, + }; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + cmd = MRF24J40_WRITELONG(reg); + mutex_lock(&devrec->buffer_mutex); + devrec->buf[0] = cmd >> 8 & 0xff; + devrec->buf[1] = cmd & 0xff; + devrec->buf[2] = val; + + ret = spi_sync(devrec->spi, &msg); + if (ret) + dev_err(printdev(devrec), + "SPI write Failed for long register 0x%hx\n", reg); + + 
mutex_unlock(&devrec->buffer_mutex); + return ret; +} + +/* This function relies on an undocumented write method. Once a write command + and address is set, as many bytes of data as desired can be clocked into + the device. The datasheet only shows setting one byte at a time. */ +static int write_tx_buf(struct mrf24j40 *devrec, u16 reg, + const u8 *data, size_t length) +{ + int ret; + u16 cmd; + u8 lengths[2]; + struct spi_message msg; + struct spi_transfer addr_xfer = { + .len = 2, + .tx_buf = devrec->buf, + }; + struct spi_transfer lengths_xfer = { + .len = 2, + .tx_buf = &lengths, /* TODO: Is DMA really required for SPI? */ + }; + struct spi_transfer data_xfer = { + .len = length, + .tx_buf = data, + }; + + /* Range check the length. 2 bytes are used for the length fields.*/ + if (length > TX_FIFO_SIZE-2) { + dev_err(printdev(devrec), "write_tx_buf() was passed too large a buffer. Performing short write.\n"); + length = TX_FIFO_SIZE-2; + } + + spi_message_init(&msg); + spi_message_add_tail(&addr_xfer, &msg); + spi_message_add_tail(&lengths_xfer, &msg); + spi_message_add_tail(&data_xfer, &msg); + + cmd = MRF24J40_WRITELONG(reg); + mutex_lock(&devrec->buffer_mutex); + devrec->buf[0] = cmd >> 8 & 0xff; + devrec->buf[1] = cmd & 0xff; + lengths[0] = 0x0; /* Header Length. Set to 0 for now. TODO */ + lengths[1] = length; /* Total length */ + + ret = spi_sync(devrec->spi, &msg); + if (ret) + dev_err(printdev(devrec), "SPI write Failed for TX buf\n"); + + mutex_unlock(&devrec->buffer_mutex); + return ret; +} + +static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec, + u8 *data, u8 *len, u8 *lqi) +{ + u8 rx_len; + u8 addr[2]; + u8 lqi_rssi[2]; + u16 cmd; + int ret; + struct spi_message msg; + struct spi_transfer addr_xfer = { + .len = 2, + .tx_buf = &addr, + }; + struct spi_transfer data_xfer = { + .len = 0x0, /* set below */ + .rx_buf = data, + }; + struct spi_transfer status_xfer = { + .len = 2, + .rx_buf = &lqi_rssi, + }; + + /* Get the length of the data in the RX FIFO. The length in this + * register excludes the 1-byte length field at the beginning. */ + ret = read_long_reg(devrec, REG_RX_FIFO, &rx_len); + if (ret) + goto out; + + /* Range check the RX FIFO length, accounting for the one-byte + * length field at the beginning. */ + if (rx_len > RX_FIFO_SIZE-1) { + dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n"); + rx_len = RX_FIFO_SIZE-1; + } + + if (rx_len > *len) { + /* Passed in buffer wasn't big enough. Should never happen. */ + dev_err(printdev(devrec), "Buffer not big enough. Performing short read\n"); + rx_len = *len; + } + + /* Set up the commands to read the data. 
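 * The transfer starts at REG_RX_FIFO + 1 because the length byte at
 * REG_RX_FIFO was already consumed above; MRF24J40_READLONG(0x301)
 * evaluates to 0x8000 | (0x301 << 5) = 0xe020, which is split into the
 * two address bytes sent ahead of the data.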
*/ + cmd = MRF24J40_READLONG(REG_RX_FIFO+1); + addr[0] = cmd >> 8 & 0xff; + addr[1] = cmd & 0xff; + data_xfer.len = rx_len; + + spi_message_init(&msg); + spi_message_add_tail(&addr_xfer, &msg); + spi_message_add_tail(&data_xfer, &msg); + spi_message_add_tail(&status_xfer, &msg); + + ret = spi_sync(devrec->spi, &msg); + if (ret) { + dev_err(printdev(devrec), "SPI RX Buffer Read Failed.\n"); + goto out; + } + + *lqi = lqi_rssi[0]; + *len = rx_len; + +#ifdef DEBUG + print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ", + DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0); + printk(KERN_DEBUG "mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n", + lqi_rssi[0], lqi_rssi[1]); +#endif + +out: + return ret; +} + +static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb) +{ + struct mrf24j40 *devrec = dev->priv; + u8 val; + int ret = 0; + + dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len); + + ret = write_tx_buf(devrec, 0x000, skb->data, skb->len); + if (ret) + goto err; + + /* Set TXNTRIG bit of TXNCON to send packet */ + ret = read_short_reg(devrec, REG_TXNCON, &val); + if (ret) + goto err; + val |= 0x1; + val &= ~0x4; + write_short_reg(devrec, REG_TXNCON, val); + + INIT_COMPLETION(devrec->tx_complete); + + /* Wait for the device to send the TX complete interrupt. */ + ret = wait_for_completion_interruptible_timeout( + &devrec->tx_complete, + 5 * HZ); + if (ret == -ERESTARTSYS) + goto err; + if (ret == 0) { + ret = -ETIMEDOUT; + goto err; + } + + /* Check for send error from the device. */ + ret = read_short_reg(devrec, REG_TXSTAT, &val); + if (ret) + goto err; + if (val & 0x1) { + dev_err(printdev(devrec), "Error Sending. Retry count exceeded\n"); + ret = -ECOMM; /* TODO: Better error code ? */ + } else + dev_dbg(printdev(devrec), "Packet Sent\n"); + +err: + + return ret; +} + +static int mrf24j40_ed(struct ieee802154_dev *dev, u8 *level) +{ + /* TODO: */ + printk(KERN_WARNING "mrf24j40: ed not implemented\n"); + *level = 0; + return 0; +} + +static int mrf24j40_start(struct ieee802154_dev *dev) +{ + struct mrf24j40 *devrec = dev->priv; + u8 val; + int ret; + + dev_dbg(printdev(devrec), "start\n"); + + ret = read_short_reg(devrec, REG_INTCON, &val); + if (ret) + return ret; + val &= ~(0x1|0x8); /* Clear TXNIE and RXIE. Enable interrupts */ + write_short_reg(devrec, REG_INTCON, val); + + return 0; +} + +static void mrf24j40_stop(struct ieee802154_dev *dev) +{ + struct mrf24j40 *devrec = dev->priv; + u8 val; + int ret; + dev_dbg(printdev(devrec), "stop\n"); + + ret = read_short_reg(devrec, REG_INTCON, &val); + if (ret) + return; + val |= 0x1|0x8; /* Set TXNIE and RXIE. 
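
The transmit path above (mrf24j40_tx) uses a trigger-and-wait handshake: re-arm a struct completion, set TXNTRIG, then sleep until the interrupt-driven workqueue completes it. A stripped-down sketch of that pattern, with hypothetical names:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* Hypothetical device context mirroring devrec->tx_complete above. */
    struct xcvr {
            struct completion tx_complete;
    };

    static int xcvr_send_and_wait(struct xcvr *x)
    {
            long ret;

            /* Re-arm before triggering so a fast IRQ cannot be missed.
             * (INIT_COMPLETION() is the era-correct macro; later kernels
             * spell it reinit_completion().) */
            INIT_COMPLETION(x->tx_complete);

            /* ... set the TXNTRIG equivalent in hardware here ... */

            ret = wait_for_completion_interruptible_timeout(&x->tx_complete,
                                                            5 * HZ);
            if (ret == -ERESTARTSYS)
                    return -ERESTARTSYS;    /* interrupted by a signal */
            if (ret == 0)
                    return -ETIMEDOUT;      /* no TX-complete interrupt */
            return 0;                       /* completed in time */
    }
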
Disable Interrupts */ + write_short_reg(devrec, REG_INTCON, val); + + return; +} + +static int mrf24j40_set_channel(struct ieee802154_dev *dev, + int page, int channel) +{ + struct mrf24j40 *devrec = dev->priv; + u8 val; + int ret; + + dev_dbg(printdev(devrec), "Set Channel %d\n", channel); + + WARN_ON(page != 0); + WARN_ON(channel < MRF24J40_CHAN_MIN); + WARN_ON(channel > MRF24J40_CHAN_MAX); + + /* Set Channel TODO */ + val = (channel-11) << 4 | 0x03; + write_long_reg(devrec, REG_RFCON0, val); + + /* RF Reset */ + ret = read_short_reg(devrec, REG_RFCTL, &val); + if (ret) + return ret; + val |= 0x04; + write_short_reg(devrec, REG_RFCTL, val); + val &= ~0x04; + write_short_reg(devrec, REG_RFCTL, val); + + udelay(SET_CHANNEL_DELAY_US); /* per datasheet */ + + return 0; +} + +static int mrf24j40_filter(struct ieee802154_dev *dev, + struct ieee802154_hw_addr_filt *filt, + unsigned long changed) +{ + struct mrf24j40 *devrec = dev->priv; + + dev_dbg(printdev(devrec), "filter\n"); + + if (changed & IEEE802515_AFILT_SADDR_CHANGED) { + /* Short Addr */ + u8 addrh, addrl; + addrh = filt->short_addr >> 8 & 0xff; + addrl = filt->short_addr & 0xff; + + write_short_reg(devrec, REG_SADRH, addrh); + write_short_reg(devrec, REG_SADRL, addrl); + dev_dbg(printdev(devrec), + "Set short addr to %04hx\n", filt->short_addr); + } + + if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) { + /* Device Address */ + int i; + for (i = 0; i < 8; i++) + write_short_reg(devrec, REG_EADR0+i, + filt->ieee_addr[i]); + +#ifdef DEBUG + printk(KERN_DEBUG "Set long addr to: "); + for (i = 0; i < 8; i++) + printk("%02hhx ", filt->ieee_addr[i]); + printk(KERN_DEBUG "\n"); +#endif + } + + if (changed & IEEE802515_AFILT_PANID_CHANGED) { + /* PAN ID */ + u8 panidl, panidh; + panidh = filt->pan_id >> 8 & 0xff; + panidl = filt->pan_id & 0xff; + write_short_reg(devrec, REG_PANIDH, panidh); + write_short_reg(devrec, REG_PANIDL, panidl); + + dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id); + } + + if (changed & IEEE802515_AFILT_PANC_CHANGED) { + /* Pan Coordinator */ + u8 val; + int ret; + + ret = read_short_reg(devrec, REG_RXMCR, &val); + if (ret) + return ret; + if (filt->pan_coord) + val |= 0x8; + else + val &= ~0x8; + write_short_reg(devrec, REG_RXMCR, val); + + /* REG_SLOTTED is maintained as default (unslotted/CSMA-CA). + * REG_ORDER is maintained as default (no beacon/superframe). + */ + + dev_dbg(printdev(devrec), "Set Pan Coord to %s\n", + filt->pan_coord ? "on" : "off"); + } + + return 0; +} + +static int mrf24j40_handle_rx(struct mrf24j40 *devrec) +{ + u8 len = RX_FIFO_SIZE; + u8 lqi = 0; + u8 val; + int ret = 0; + struct sk_buff *skb; + + /* Turn off reception of packets off the air. This prevents the + * device from overwriting the buffer while we're reading it. */ + ret = read_short_reg(devrec, REG_BBREG1, &val); + if (ret) + goto out; + val |= 4; /* SET RXDECINV */ + write_short_reg(devrec, REG_BBREG1, val); + + skb = alloc_skb(len, GFP_KERNEL); + if (!skb) { + ret = -ENOMEM; + goto out; + } + + ret = mrf24j40_read_rx_buf(devrec, skb_put(skb, len), &len, &lqi); + if (ret < 0) { + dev_err(printdev(devrec), "Failure reading RX FIFO\n"); + kfree_skb(skb); + ret = -EINVAL; + goto out; + } + + /* Cut off the checksum */ + skb_trim(skb, len-2); + + /* TODO: Other drivers call ieee802154_rx_irqsafe() here (e.g. cc2420, + * also from a workqueue). I think irqsafe is not necessary here. + * Can someone confirm? 
*/ + ieee802154_rx_irqsafe(devrec->dev, skb, lqi); + + dev_dbg(printdev(devrec), "RX Handled\n"); + +out: + /* Turn back on reception of packets off the air. */ + ret = read_short_reg(devrec, REG_BBREG1, &val); + if (ret) + return ret; + val &= ~0x4; /* Clear RXDECINV */ + write_short_reg(devrec, REG_BBREG1, val); + + return ret; +} + +static struct ieee802154_ops mrf24j40_ops = { + .owner = THIS_MODULE, + .xmit = mrf24j40_tx, + .ed = mrf24j40_ed, + .start = mrf24j40_start, + .stop = mrf24j40_stop, + .set_channel = mrf24j40_set_channel, + .set_hw_addr_filt = mrf24j40_filter, +}; + +static irqreturn_t mrf24j40_isr(int irq, void *data) +{ + struct mrf24j40 *devrec = data; + + disable_irq_nosync(irq); + + schedule_work(&devrec->irqwork); + + return IRQ_HANDLED; +} + +static void mrf24j40_isrwork(struct work_struct *work) +{ + struct mrf24j40 *devrec = container_of(work, struct mrf24j40, irqwork); + u8 intstat; + int ret; + + /* Read the interrupt status */ + ret = read_short_reg(devrec, REG_INTSTAT, &intstat); + if (ret) + goto out; + + /* Check for TX complete */ + if (intstat & 0x1) + complete(&devrec->tx_complete); + + /* Check for Rx */ + if (intstat & 0x8) + mrf24j40_handle_rx(devrec); + +out: + enable_irq(devrec->spi->irq); +} + +static int __devinit mrf24j40_probe(struct spi_device *spi) +{ + int ret = -ENOMEM; + u8 val; + struct mrf24j40 *devrec; + + printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq); + + devrec = kzalloc(sizeof(struct mrf24j40), GFP_KERNEL); + if (!devrec) + goto err_devrec; + devrec->buf = kzalloc(3, GFP_KERNEL); + if (!devrec->buf) + goto err_buf; + + spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */ + if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) + spi->max_speed_hz = MAX_SPI_SPEED_HZ; + + mutex_init(&devrec->buffer_mutex); + init_completion(&devrec->tx_complete); + INIT_WORK(&devrec->irqwork, mrf24j40_isrwork); + devrec->spi = spi; + dev_set_drvdata(&spi->dev, devrec); + + /* Register with the 802154 subsystem */ + + devrec->dev = ieee802154_alloc_device(0, &mrf24j40_ops); + if (!devrec->dev) + goto err_alloc_dev; + + devrec->dev->priv = devrec; + devrec->dev->parent = &devrec->spi->dev; + devrec->dev->phy->channels_supported[0] = CHANNEL_MASK; + devrec->dev->flags = IEEE802154_HW_OMIT_CKSUM|IEEE802154_HW_AACK; + + dev_dbg(printdev(devrec), "registered mrf24j40\n"); + ret = ieee802154_register_device(devrec->dev); + if (ret) + goto err_register_device; + + /* Initialize the device. + From datasheet section 3.2: Initialization. */ + write_short_reg(devrec, REG_SOFTRST, 0x07); + write_short_reg(devrec, REG_PACON2, 0x98); + write_short_reg(devrec, REG_TXSTBL, 0x95); + write_long_reg(devrec, REG_RFCON0, 0x03); + write_long_reg(devrec, REG_RFCON1, 0x01); + write_long_reg(devrec, REG_RFCON2, 0x80); + write_long_reg(devrec, REG_RFCON6, 0x90); + write_long_reg(devrec, REG_RFCON7, 0x80); + write_long_reg(devrec, REG_RFCON8, 0x10); + write_long_reg(devrec, REG_SLPCON1, 0x21); + write_short_reg(devrec, REG_BBREG2, 0x80); + write_short_reg(devrec, REG_CCAEDTH, 0x60); + write_short_reg(devrec, REG_BBREG6, 0x40); + write_short_reg(devrec, REG_RFCTL, 0x04); + write_short_reg(devrec, REG_RFCTL, 0x0); + udelay(192); + + /* Set RX Mode. 
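
The interrupt handling above follows a common SPI-device idiom: the hard IRQ handler cannot touch the bus (spi_sync() sleeps), so it masks its own line and defers to a workqueue, which re-enables the line when done. A minimal sketch of that handoff, names hypothetical:

    #include <linux/interrupt.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct dev_ctx {
            int irq;
            struct work_struct work;
    };

    static irqreturn_t hard_handler(int irq, void *data)
    {
            struct dev_ctx *ctx = data;

            disable_irq_nosync(irq);        /* no re-entry while work runs */
            schedule_work(&ctx->work);
            return IRQ_HANDLED;
    }

    static void deferred_work(struct work_struct *work)
    {
            struct dev_ctx *ctx = container_of(work, struct dev_ctx, work);

            /* ... spi_sync() traffic happens here, in process context ... */
            enable_irq(ctx->irq);
    }
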
RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */ + ret = read_short_reg(devrec, REG_RXMCR, &val); + if (ret) + goto err_read_reg; + val &= ~0x3; /* Clear RX mode (normal) */ + write_short_reg(devrec, REG_RXMCR, val); + + ret = request_irq(spi->irq, + mrf24j40_isr, + IRQF_TRIGGER_FALLING, + dev_name(&spi->dev), + devrec); + + if (ret) { + dev_err(printdev(devrec), "Unable to get IRQ"); + goto err_irq; + } + + return 0; + +err_irq: +err_read_reg: + ieee802154_unregister_device(devrec->dev); +err_register_device: + ieee802154_free_device(devrec->dev); +err_alloc_dev: + kfree(devrec->buf); +err_buf: + kfree(devrec); +err_devrec: + return ret; +} + +static int __devexit mrf24j40_remove(struct spi_device *spi) +{ + struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev); + + dev_dbg(printdev(devrec), "remove\n"); + + free_irq(spi->irq, devrec); + flush_work_sync(&devrec->irqwork); /* TODO: Is this the right call? */ + ieee802154_unregister_device(devrec->dev); + ieee802154_free_device(devrec->dev); + /* TODO: Will ieee802154_free_device() wait until ->xmit() is + * complete? */ + + /* Clean up the SPI stuff. */ + dev_set_drvdata(&spi->dev, NULL); + kfree(devrec->buf); + kfree(devrec); + return 0; +} + +static const struct spi_device_id mrf24j40_ids[] = { + { "mrf24j40", 0 }, + { "mrf24j40ma", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(spi, mrf24j40_ids); + +static struct spi_driver mrf24j40_driver = { + .driver = { + .name = "mrf24j40", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .id_table = mrf24j40_ids, + .probe = mrf24j40_probe, + .remove = __devexit_p(mrf24j40_remove), +}; + +static int __init mrf24j40_init(void) +{ + return spi_register_driver(&mrf24j40_driver); +} + +static void __exit mrf24j40_exit(void) +{ + spi_unregister_driver(&mrf24j40_driver); +} + +module_init(mrf24j40_init); +module_exit(mrf24j40_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Alan Ott"); +MODULE_DESCRIPTION("MRF24J40 SPI 802.15.4 Controller Driver"); diff --git a/drivers/net/irda/bfin_sir.c index a561ae44a9ac..c6a0299aa9f9 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -158,7 +158,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed) /* If we don't add the 'RPOLC', we can't catch the receive interrupt. * It's related to the HW layout and the IR transceiver. */ - val |= IREN | RPOLC; + val |= UMOD_IRDA | RPOLC; UART_PUT_GCTL(port, val); return ret; } @@ -432,7 +432,7 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev bfin_sir_stop_rx(port); val = UART_GET_GCTL(port); - val &= ~(UCEN | IREN | RPOLC); + val &= ~(UCEN | UMOD_MASK | RPOLC); UART_PUT_GCTL(port, val); #ifdef CONFIG_SIR_BFIN_DMA @@ -518,10 +518,10 @@ static void bfin_sir_send_work(struct work_struct *work) * reset all the UART. 
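
Two of the IrDA hunks just below (ks959-sir.c and ksdazzle-sir.c) fix a classic error-path bug: jumping to a cleanup label while err still holds 0, so a failed open is reported as success. In sketch form, with a hypothetical open_layer() standing in for irlap_open():

    #include <errno.h>

    extern void *open_layer(void);  /* hypothetical irlap_open() stand-in */

    static int net_open(void)
    {
            int err = 0;            /* still 0 after earlier successful steps */
            void *lap = open_layer();

            if (!lap) {
                    err = -ENOMEM;  /* the line the hunks add; without it the
                                     * goto below returns 0 on failure */
                    goto free_mem;
            }
            return 0;

    free_mem:
            /* ... undo earlier allocations ... */
            return err;
    }
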
*/ val = UART_GET_GCTL(port); - val &= ~(IREN | RPOLC); + val &= ~(UMOD_MASK | RPOLC); UART_PUT_GCTL(port, val); SSYNC(); - val |= IREN | RPOLC; + val |= UMOD_IRDA | RPOLC; UART_PUT_GCTL(port, val); SSYNC(); /* bfin_sir_set_speed(port, self->speed); */ diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c index 824e2a93fe8a..5f3aeac3f86d 100644 --- a/drivers/net/irda/ks959-sir.c +++ b/drivers/net/irda/ks959-sir.c @@ -542,6 +542,7 @@ static int ks959_net_open(struct net_device *netdev) sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); if (!kingsun->irlap) { + err = -ENOMEM; dev_err(&kingsun->usbdev->dev, "irlap_open failed\n"); goto free_mem; } diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c index 5a278ab83c2f..2d4b6a1ab202 100644 --- a/drivers/net/irda/ksdazzle-sir.c +++ b/drivers/net/irda/ksdazzle-sir.c @@ -436,6 +436,7 @@ static int ksdazzle_net_open(struct net_device *netdev) sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); if (!kingsun->irlap) { + err = -ENOMEM; dev_err(&kingsun->usbdev->dev, "irlap_open failed\n"); goto free_mem; } diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index e2a06fd996d5..81f8f9e31db5 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -157,7 +157,7 @@ static const struct net_device_ops loopback_ops = { */ static void loopback_setup(struct net_device *dev) { - dev->mtu = (16 * 1024) + 20 + 20 + 12; + dev->mtu = 64 * 1024; dev->hard_header_len = ETH_HLEN; /* 14 */ dev->addr_len = ETH_ALEN; /* 6 */ dev->tx_queue_len = 0; @@ -197,6 +197,7 @@ static __net_init int loopback_net_init(struct net *net) if (err) goto out_free_netdev; + BUG_ON(dev->ifindex != LOOPBACK_IFINDEX); net->loopback_dev = dev; return 0; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 66a9bfe7b1c8..815dfcfbc7b9 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -548,7 +548,7 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev, static int macvlan_fdb_add(struct ndmsg *ndm, struct net_device *dev, - unsigned char *addr, + const unsigned char *addr, u16 flags) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -567,7 +567,7 @@ static int macvlan_fdb_add(struct ndmsg *ndm, static int macvlan_fdb_del(struct ndmsg *ndm, struct net_device *dev, - unsigned char *addr) + const unsigned char *addr) { struct macvlan_dev *vlan = netdev_priv(dev); int err = -EINVAL; diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 0737bd4d1669..0f0f9ce3a776 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -94,7 +94,8 @@ static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q) int i; for (i = 0; i < MAX_MACVTAP_QUEUES; i++) { - if (rcu_dereference(vlan->taps[i]) == q) + if (rcu_dereference_protected(vlan->taps[i], + lockdep_is_held(&macvtap_lock)) == q) return i; } diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index f9347ea3d381..b3321129a83c 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -640,15 +640,9 @@ static int netconsole_netdev_event(struct notifier_block *this, * rtnl_lock already held */ if (nt->np.dev) { - spin_unlock_irqrestore( - &target_list_lock, - flags); __netpoll_cleanup(&nt->np); - spin_lock_irqsave(&target_list_lock, - flags); dev_put(nt->np.dev); nt->np.dev = NULL; - netconsole_target_put(nt); } nt->enabled = 0; stopped = true; diff --git a/drivers/net/phy/Kconfig 
b/drivers/net/phy/Kconfig index 3090dc65a6f1..983bbf4d5ef6 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -159,6 +159,19 @@ config MDIO_BUS_MUX_GPIO several child MDIO busses to a parent bus. Child bus selection is under the control of GPIO lines. +config MDIO_BUS_MUX_MMIOREG + tristate "Support for MMIO device-controlled MDIO bus multiplexers" + depends on OF_MDIO + select MDIO_BUS_MUX + help + This module provides a driver for MDIO bus multiplexers that + are controlled via a simple memory-mapped device, like an FPGA. + The multiplexer connects one of several child MDIO busses to a + parent bus. Child bus selection is under the control of one of + the FPGA's registers. + + Currently, only 8-bit registers are supported. + endif # PHYLIB config MICREL_KS8995MA diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 6d2dc6c94f2e..426674debae4 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -28,3 +28,4 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o +obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index b0da0226661f..24e05c43bff8 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -980,7 +980,7 @@ static int dp83640_probe(struct phy_device *phydev) if (choose_this_phy(clock, phydev)) { clock->chosen = dp83640; - clock->ptp_clock = ptp_clock_register(&clock->caps); + clock->ptp_clock = ptp_clock_register(&clock->caps, &phydev->dev); if (IS_ERR(clock->ptp_clock)) { err = PTR_ERR(clock->ptp_clock); goto no_register; diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index 6d1e3fcc43e2..ec40ba882f61 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -122,6 +122,123 @@ static int lxt971_config_intr(struct phy_device *phydev) return err; } +/* + * The A2 version of the LXT973 chip has an erratum: it randomly returns + * the contents of the previous even register when you read an odd register + */ + +static int lxt973a2_update_link(struct phy_device *phydev) +{ + int status; + int control; + int retry = 8; /* we try 8 times */ + + /* Do a fake read */ + status = phy_read(phydev, MII_BMSR); + + if (status < 0) + return status; + + control = phy_read(phydev, MII_BMCR); + if (control < 0) + return control; + + do { + /* Read link and autonegotiation status */ + status = phy_read(phydev, MII_BMSR); + } while (status >= 0 && retry-- && status == control); + + if (status < 0) + return status; + + if ((status & BMSR_LSTATUS) == 0) + phydev->link = 0; + else + phydev->link = 1; + + return 0; +} + +int lxt973a2_read_status(struct phy_device *phydev) +{ + int adv; + int err; + int lpa; + int lpagb = 0; + + /* Update the link, but return if there was an error */ + err = lxt973a2_update_link(phydev); + if (err) + return err; + + if (AUTONEG_ENABLE == phydev->autoneg) { + int retry = 1; + + adv = phy_read(phydev, MII_ADVERTISE); + + if (adv < 0) + return adv; + + do { + lpa = phy_read(phydev, MII_LPA); + + if (lpa < 0) + return lpa; + + /* If both registers are equal, it is suspect but not + * impossible, hence a new try + */ + } while (lpa == adv && retry--); + + lpa &= adv; + + phydev->speed = SPEED_10; + phydev->duplex = DUPLEX_HALF; + phydev->pause = phydev->asym_pause = 0; + + if (lpagb & (LPA_1000FULL | LPA_1000HALF)) { + phydev->speed = SPEED_1000; + + if (lpagb & LPA_1000FULL) + phydev->duplex = 
DUPLEX_FULL; + } else if (lpa & (LPA_100FULL | LPA_100HALF)) { + phydev->speed = SPEED_100; + + if (lpa & LPA_100FULL) + phydev->duplex = DUPLEX_FULL; + } else { + if (lpa & LPA_10FULL) + phydev->duplex = DUPLEX_FULL; + } + + if (phydev->duplex == DUPLEX_FULL) { + phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0; + phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0; + } + } else { + int bmcr = phy_read(phydev, MII_BMCR); + + if (bmcr < 0) + return bmcr; + + if (bmcr & BMCR_FULLDPLX) + phydev->duplex = DUPLEX_FULL; + else + phydev->duplex = DUPLEX_HALF; + + if (bmcr & BMCR_SPEED1000) + phydev->speed = SPEED_1000; + else if (bmcr & BMCR_SPEED100) + phydev->speed = SPEED_100; + else + phydev->speed = SPEED_10; + + phydev->pause = phydev->asym_pause = 0; + } + + return 0; +} + static int lxt973_probe(struct phy_device *phydev) { int val = phy_read(phydev, MII_LXT973_PCR); @@ -175,6 +292,16 @@ static struct phy_driver lxt97x_driver[] = { .driver = { .owner = THIS_MODULE,}, }, { .phy_id = 0x00137a10, + .name = "LXT973-A2", + .phy_id_mask = 0xffffffff, + .features = PHY_BASIC_FEATURES, + .flags = 0, + .probe = lxt973_probe, + .config_aneg = lxt973_config_aneg, + .read_status = lxt973a2_read_status, + .driver = { .owner = THIS_MODULE,}, +}, { + .phy_id = 0x00137a10, .name = "LXT973", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 7189adf54bd1..899274f2f9b1 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -28,17 +28,38 @@ #include <linux/gpio.h> #include <linux/mdio-gpio.h> -#ifdef CONFIG_OF_GPIO #include <linux/of_gpio.h> #include <linux/of_mdio.h> -#include <linux/of_platform.h> -#endif struct mdio_gpio_info { struct mdiobb_ctrl ctrl; int mdc, mdio; }; +static void *mdio_gpio_of_get_data(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct mdio_gpio_platform_data *pdata; + int ret; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + ret = of_get_gpio(np, 0); + if (ret < 0) + return NULL; + + pdata->mdc = ret; + + ret = of_get_gpio(np, 1); + if (ret < 0) + return NULL; + pdata->mdio = ret; + + return pdata; +} + static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir) { struct mdio_gpio_info *bitbang = @@ -162,10 +183,15 @@ static void __devexit mdio_gpio_bus_destroy(struct device *dev) static int __devinit mdio_gpio_probe(struct platform_device *pdev) { - struct mdio_gpio_platform_data *pdata = pdev->dev.platform_data; + struct mdio_gpio_platform_data *pdata; struct mii_bus *new_bus; int ret; + if (pdev->dev.of_node) + pdata = mdio_gpio_of_get_data(pdev); + else + pdata = pdev->dev.platform_data; + if (!pdata) return -ENODEV; @@ -173,7 +199,11 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev) if (!new_bus) return -ENODEV; - ret = mdiobus_register(new_bus); + if (pdev->dev.of_node) + ret = of_mdiobus_register(new_bus, pdev->dev.of_node); + else + ret = mdiobus_register(new_bus); + if (ret) mdio_gpio_bus_deinit(&pdev->dev); @@ -187,112 +217,30 @@ static int __devexit mdio_gpio_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_OF_GPIO - -static int __devinit mdio_ofgpio_probe(struct platform_device *ofdev) -{ - struct mdio_gpio_platform_data *pdata; - struct mii_bus *new_bus; - int ret; - - pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; - - ret = of_get_gpio(ofdev->dev.of_node, 0); - if (ret < 0) - goto out_free; - pdata->mdc = ret; - - ret = 
of_get_gpio(ofdev->dev.of_node, 1); - if (ret < 0) - goto out_free; - pdata->mdio = ret; - - new_bus = mdio_gpio_bus_init(&ofdev->dev, pdata, pdata->mdc); - if (!new_bus) - goto out_free; - - ret = of_mdiobus_register(new_bus, ofdev->dev.of_node); - if (ret) - mdio_gpio_bus_deinit(&ofdev->dev); - - return ret; - -out_free: - kfree(pdata); - return -ENODEV; -} - -static int __devexit mdio_ofgpio_remove(struct platform_device *ofdev) -{ - mdio_gpio_bus_destroy(&ofdev->dev); - kfree(ofdev->dev.platform_data); - - return 0; -} - -static struct of_device_id mdio_ofgpio_match[] = { - { - .compatible = "virtual,mdio-gpio", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, mdio_ofgpio_match); - -static struct platform_driver mdio_ofgpio_driver = { - .driver = { - .name = "mdio-ofgpio", - .owner = THIS_MODULE, - .of_match_table = mdio_ofgpio_match, - }, - .probe = mdio_ofgpio_probe, - .remove = __devexit_p(mdio_ofgpio_remove), +static struct of_device_id mdio_gpio_of_match[] = { + { .compatible = "virtual,mdio-gpio", }, + { /* sentinel */ } }; -static inline int __init mdio_ofgpio_init(void) -{ - return platform_driver_register(&mdio_ofgpio_driver); -} - -static inline void mdio_ofgpio_exit(void) -{ - platform_driver_unregister(&mdio_ofgpio_driver); -} -#else -static inline int __init mdio_ofgpio_init(void) { return 0; } -static inline void mdio_ofgpio_exit(void) { } -#endif /* CONFIG_OF_GPIO */ - static struct platform_driver mdio_gpio_driver = { .probe = mdio_gpio_probe, .remove = __devexit_p(mdio_gpio_remove), .driver = { .name = "mdio-gpio", .owner = THIS_MODULE, + .of_match_table = mdio_gpio_of_match, }, }; static int __init mdio_gpio_init(void) { - int ret; - - ret = mdio_ofgpio_init(); - if (ret) - return ret; - - ret = platform_driver_register(&mdio_gpio_driver); - if (ret) - mdio_ofgpio_exit(); - - return ret; + return platform_driver_register(&mdio_gpio_driver); } module_init(mdio_gpio_init); static void __exit mdio_gpio_exit(void) { platform_driver_unregister(&mdio_gpio_driver); - mdio_ofgpio_exit(); } module_exit(mdio_gpio_exit); diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index e0cc4ef33dee..eefe49e8713c 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -101,7 +101,6 @@ err: n--; gpio_free(s->gpio[n]); } - devm_kfree(&pdev->dev, s); return r; } diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c new file mode 100644 index 000000000000..9061ba622ac4 --- /dev/null +++ b/drivers/net/phy/mdio-mux-mmioreg.c @@ -0,0 +1,171 @@ +/* + * Simple memory-mapped device MDIO MUX driver + * + * Author: Timur Tabi <timur@freescale.com> + * + * Copyright 2012 Freescale Semiconductor, Inc. + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/platform_device.h> +#include <linux/device.h> +#include <linux/of_address.h> +#include <linux/of_mdio.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/phy.h> +#include <linux/mdio-mux.h> + +struct mdio_mux_mmioreg_state { + void *mux_handle; + phys_addr_t phys; + uint8_t mask; +}; + +/* + * MDIO multiplexing switch function + * + * This function is called by the mdio-mux layer when it thinks the mdio bus + * multiplexer needs to switch. + * + * 'current_child' is the current value of the mux register (masked via + * s->mask). 
+ * + * 'desired_child' is the value of the 'reg' property of the target child MDIO + * node. + * + * The first time this function is called, current_child == -1. + * + * If current_child == desired_child, then the mux is already set to the + * correct bus. + */ +static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child, + void *data) +{ + struct mdio_mux_mmioreg_state *s = data; + + if (current_child ^ desired_child) { + void *p = ioremap(s->phys, 1); + uint8_t x, y; + + if (!p) + return -ENOMEM; + + x = ioread8(p); + y = (x & ~s->mask) | desired_child; + if (x != y) { + iowrite8((x & ~s->mask) | desired_child, p); + pr_debug("%s: %02x -> %02x\n", __func__, x, y); + } + + iounmap(p); + } + + return 0; +} + +static int __devinit mdio_mux_mmioreg_probe(struct platform_device *pdev) +{ + struct device_node *np2, *np = pdev->dev.of_node; + struct mdio_mux_mmioreg_state *s; + struct resource res; + const __be32 *iprop; + int len, ret; + + dev_dbg(&pdev->dev, "probing node %s\n", np->full_name); + + s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + ret = of_address_to_resource(np, 0, &res); + if (ret) { + dev_err(&pdev->dev, "could not obtain memory map for node %s\n", + np->full_name); + return ret; + } + s->phys = res.start; + + if (resource_size(&res) != sizeof(uint8_t)) { + dev_err(&pdev->dev, "only 8-bit registers are supported\n"); + return -EINVAL; + } + + iprop = of_get_property(np, "mux-mask", &len); + if (!iprop || len != sizeof(uint32_t)) { + dev_err(&pdev->dev, "missing or invalid mux-mask property\n"); + return -ENODEV; + } + if (be32_to_cpup(iprop) > 255) { + dev_err(&pdev->dev, "only 8-bit registers are supported\n"); + return -EINVAL; + } + s->mask = be32_to_cpup(iprop); + + /* + * Verify that the 'reg' property of each child MDIO bus does not + * set any bits outside of the 'mask'. 
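
The switch function above is a masked read-modify-write on one 8-bit register; the probe code that follows validates each child's 'reg' value against the mask, which is why the OR cannot disturb foreign bits. A compact sketch of the idiom (this version masks the child value defensively, a step the driver itself leaves to the probe-time check):

    #include <stdint.h>
    #include <stdio.h>

    /* Only bits inside 'mask' may change; the driver writes the result
     * back with iowrite8() only when it differs from the current value. */
    static uint8_t mux_select(uint8_t reg, uint8_t mask, uint8_t child)
    {
            return (uint8_t)((reg & ~mask) | (child & mask));
    }

    int main(void)
    {
            /* 0xf0 with mask 0x03, selecting child 0x02 -> 0xf2 */
            printf("0x%02x\n", mux_select(0xf0, 0x03, 0x02));
            return 0;
    }
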
+ */ + for_each_available_child_of_node(np, np2) { + iprop = of_get_property(np2, "reg", &len); + if (!iprop || len != sizeof(uint32_t)) { + dev_err(&pdev->dev, "mdio-mux child node %s is " + "missing a 'reg' property\n", np2->full_name); + return -ENODEV; + } + if (be32_to_cpup(iprop) & ~s->mask) { + dev_err(&pdev->dev, "mdio-mux child node %s has " + "a 'reg' value with unmasked bits\n", + np2->full_name); + return -ENODEV; + } + } + + ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn, + &s->mux_handle, s); + if (ret) { + dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n", + np->full_name); + return ret; + } + + pdev->dev.platform_data = s; + + return 0; +} + +static int __devexit mdio_mux_mmioreg_remove(struct platform_device *pdev) +{ + struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev); + + mdio_mux_uninit(s->mux_handle); + + return 0; +} + +static struct of_device_id mdio_mux_mmioreg_match[] = { + { + .compatible = "mdio-mux-mmioreg", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match); + +static struct platform_driver mdio_mux_mmioreg_driver = { + .driver = { + .name = "mdio-mux-mmioreg", + .owner = THIS_MODULE, + .of_match_table = mdio_mux_mmioreg_match, + }, + .probe = mdio_mux_mmioreg_probe, + .remove = __devexit_p(mdio_mux_mmioreg_remove), +}; + +module_platform_driver(mdio_mux_mmioreg_driver); + +MODULE_AUTHOR("Timur Tabi <timur@freescale.com>"); +MODULE_DESCRIPTION("Memory-mapped device MDIO MUX driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 5c120189ec86..4d4d25efc1e1 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -132,7 +132,7 @@ int mdio_mux_init(struct device *dev, pb->mii_bus = parent_bus; ret_val = -ENODEV; - for_each_child_of_node(dev->of_node, child_bus_node) { + for_each_available_child_of_node(dev->of_node, child_bus_node) { u32 v; r = of_property_read_u32(child_bus_node, "reg", &v); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7ca2ff97c368..ef9ea9248223 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1035,66 +1035,6 @@ static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad, bus->write(bus, addr, MII_MMD_DATA, data); } -static u32 phy_eee_to_adv(u16 eee_adv) -{ - u32 adv = 0; - - if (eee_adv & MDIO_EEE_100TX) - adv |= ADVERTISED_100baseT_Full; - if (eee_adv & MDIO_EEE_1000T) - adv |= ADVERTISED_1000baseT_Full; - if (eee_adv & MDIO_EEE_10GT) - adv |= ADVERTISED_10000baseT_Full; - if (eee_adv & MDIO_EEE_1000KX) - adv |= ADVERTISED_1000baseKX_Full; - if (eee_adv & MDIO_EEE_10GKX4) - adv |= ADVERTISED_10000baseKX4_Full; - if (eee_adv & MDIO_EEE_10GKR) - adv |= ADVERTISED_10000baseKR_Full; - - return adv; -} - -static u32 phy_eee_to_supported(u16 eee_caported) -{ - u32 supported = 0; - - if (eee_caported & MDIO_EEE_100TX) - supported |= SUPPORTED_100baseT_Full; - if (eee_caported & MDIO_EEE_1000T) - supported |= SUPPORTED_1000baseT_Full; - if (eee_caported & MDIO_EEE_10GT) - supported |= SUPPORTED_10000baseT_Full; - if (eee_caported & MDIO_EEE_1000KX) - supported |= SUPPORTED_1000baseKX_Full; - if (eee_caported & MDIO_EEE_10GKX4) - supported |= SUPPORTED_10000baseKX4_Full; - if (eee_caported & MDIO_EEE_10GKR) - supported |= SUPPORTED_10000baseKR_Full; - - return supported; -} - -static u16 phy_adv_to_eee(u32 adv) -{ - u16 reg = 0; - - if (adv & ADVERTISED_100baseT_Full) - reg |= MDIO_EEE_100TX; - if (adv & ADVERTISED_1000baseT_Full) - reg |= MDIO_EEE_1000T; - if (adv & 
ADVERTISED_10000baseT_Full) - reg |= MDIO_EEE_10GT; - if (adv & ADVERTISED_1000baseKX_Full) - reg |= MDIO_EEE_1000KX; - if (adv & ADVERTISED_10000baseKX4_Full) - reg |= MDIO_EEE_10GKX4; - if (adv & ADVERTISED_10000baseKR_Full) - reg |= MDIO_EEE_10GKR; - - return reg; -} - /** * phy_init_eee - init and check the EEE feature * @phydev: target phy_device struct @@ -1132,7 +1072,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) if (eee_cap < 0) return eee_cap; - cap = phy_eee_to_supported(eee_cap); + cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap); if (!cap) goto eee_exit; @@ -1149,8 +1089,8 @@ if (eee_adv < 0) return eee_adv; - adv = phy_eee_to_adv(eee_adv); - lp = phy_eee_to_adv(eee_lp); + adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); + lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); idx = phy_find_setting(phydev->speed, phydev->duplex); if ((lp & adv & settings[idx].setting)) goto eee_exit; @@ -1210,21 +1150,21 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data) MDIO_MMD_PCS, phydev->addr); if (val < 0) return val; - data->supported = phy_eee_to_supported(val); + data->supported = mmd_eee_cap_to_ethtool_sup_t(val); /* Get advertisement EEE */ val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN, phydev->addr); if (val < 0) return val; - data->advertised = phy_eee_to_adv(val); + data->advertised = mmd_eee_adv_to_ethtool_adv_t(val); /* Get LP advertisement EEE */ val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN, phydev->addr); if (val < 0) return val; - data->lp_advertised = phy_eee_to_adv(val); + data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); return 0; } @@ -1241,7 +1181,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) { int val; - val = phy_adv_to_eee(data->advertised); + val = ethtool_adv_to_mmd_eee_adv_t(data->advertised); phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN, phydev->addr, val); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 5c0557222f20..eb3f5cefeba3 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -94,6 +94,18 @@ struct ppp_file { #define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel) /* + * Data structure to hold primary network stats for which + * we want to use 64-bit storage. Other network stats + * are stored in dev->stats of the ppp structure. + */ +struct ppp_link_stats { + u64 rx_packets; + u64 tx_packets; + u64 rx_bytes; + u64 tx_bytes; +}; + +/* * Data structure describing one ppp unit. * A ppp unit corresponds to a ppp network interface device * and represents a multilink bundle. 
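
The ppp_link_stats change here keeps packet and byte counters in a private 64-bit structure and snapshots them under the same locks the hot paths hold, so 32-bit readers never observe a torn u64. A minimal sketch of that ndo_get_stats64 pattern, with a hypothetical priv and plain spinlocks in place of the ppp recv/xmit locks:

    #include <linux/netdevice.h>
    #include <linux/spinlock.h>

    /* Hypothetical private stats, mirroring struct ppp_link_stats above. */
    struct priv {
            spinlock_t rx_lock, tx_lock;
            u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
    };

    /* Snapshot 64-bit counters under the writers' locks so the copy
     * is consistent even on 32-bit hosts. */
    static struct rtnl_link_stats64 *
    priv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *s)
    {
            struct priv *p = netdev_priv(dev);

            spin_lock_bh(&p->rx_lock);
            s->rx_packets = p->rx_packets;
            s->rx_bytes = p->rx_bytes;
            spin_unlock_bh(&p->rx_lock);

            spin_lock_bh(&p->tx_lock);
            s->tx_packets = p->tx_packets;
            s->tx_bytes = p->tx_bytes;
            spin_unlock_bh(&p->tx_lock);

            return s;
    }
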
@@ -136,6 +148,7 @@ struct ppp { unsigned pass_len, active_len; #endif /* CONFIG_PPP_FILTER */ struct net *ppp_net; /* the net we belong to */ + struct ppp_link_stats stats64; /* 64 bit network stats */ }; /* @@ -1021,9 +1034,34 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return err; } +struct rtnl_link_stats64* +ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) +{ + struct ppp *ppp = netdev_priv(dev); + + ppp_recv_lock(ppp); + stats64->rx_packets = ppp->stats64.rx_packets; + stats64->rx_bytes = ppp->stats64.rx_bytes; + ppp_recv_unlock(ppp); + + ppp_xmit_lock(ppp); + stats64->tx_packets = ppp->stats64.tx_packets; + stats64->tx_bytes = ppp->stats64.tx_bytes; + ppp_xmit_unlock(ppp); + + stats64->rx_errors = dev->stats.rx_errors; + stats64->tx_errors = dev->stats.tx_errors; + stats64->rx_dropped = dev->stats.rx_dropped; + stats64->tx_dropped = dev->stats.tx_dropped; + stats64->rx_length_errors = dev->stats.rx_length_errors; + + return stats64; +} + static const struct net_device_ops ppp_netdev_ops = { - .ndo_start_xmit = ppp_start_xmit, - .ndo_do_ioctl = ppp_net_ioctl, + .ndo_start_xmit = ppp_start_xmit, + .ndo_do_ioctl = ppp_net_ioctl, + .ndo_get_stats64 = ppp_get_stats64, }; static void ppp_setup(struct net_device *dev) @@ -1157,8 +1195,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) #endif /* CONFIG_PPP_FILTER */ } - ++ppp->dev->stats.tx_packets; - ppp->dev->stats.tx_bytes += skb->len - 2; + ++ppp->stats64.tx_packets; + ppp->stats64.tx_bytes += skb->len - 2; switch (proto) { case PPP_IP: @@ -1745,8 +1783,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) break; } - ++ppp->dev->stats.rx_packets; - ppp->dev->stats.rx_bytes += skb->len - 2; + ++ppp->stats64.rx_packets; + ppp->stats64.rx_bytes += skb->len - 2; npi = proto_to_npindex(proto); if (npi < 0) { @@ -2570,12 +2608,12 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st) struct slcompress *vj = ppp->vj; memset(st, 0, sizeof(*st)); - st->p.ppp_ipackets = ppp->dev->stats.rx_packets; + st->p.ppp_ipackets = ppp->stats64.rx_packets; st->p.ppp_ierrors = ppp->dev->stats.rx_errors; - st->p.ppp_ibytes = ppp->dev->stats.rx_bytes; - st->p.ppp_opackets = ppp->dev->stats.tx_packets; + st->p.ppp_ibytes = ppp->stats64.rx_bytes; + st->p.ppp_opackets = ppp->stats64.tx_packets; st->p.ppp_oerrors = ppp->dev->stats.tx_errors; - st->p.ppp_obytes = ppp->dev->stats.tx_bytes; + st->p.ppp_obytes = ppp->stats64.tx_bytes; if (!vj) return; st->vj.vjs_packets = vj->sls_o_compressed + vj->sls_o_uncompressed; diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 1c98321b56cc..162464fe86bf 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) if (sk_pppox(po)->sk_state & PPPOX_DEAD) goto tx_error; - rt = ip_route_output_ports(&init_net, &fl4, NULL, + rt = ip_route_output_ports(sock_net(sk), &fl4, NULL, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, IPPROTO_GRE, @@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.private = sk; po->chan.ops = &pptp_chan_ops; - rt = ip_route_output_ports(&init_net, &fl4, sk, + rt = ip_route_output_ports(sock_net(sk), &fl4, sk, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig index 6a7260b03a1e..6b08bd419fba 100644 --- a/drivers/net/team/Kconfig +++ b/drivers/net/team/Kconfig @@ -21,7 +21,7 @@ 
config NET_TEAM_MODE_BROADCAST ---help--- Basic mode where packets are transmitted always by all suitable ports. - All added ports are setup to have team's mac address. + All added ports are setup to have team's device address. To compile this team mode as a module, choose M here: the module will be called team_mode_broadcast. @@ -33,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN Basic mode where port used for transmitting packets is selected in round-robin fashion using packet counter. - All added ports are setup to have team's mac address. + All added ports are setup to have team's device address. To compile this team mode as a module, choose M here: the module will be called team_mode_roundrobin. diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 87707ab39430..9ce0c51a04d5 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -54,29 +54,29 @@ static struct team_port *team_port_get_rtnl(const struct net_device *dev) } /* - * Since the ability to change mac address for open port device is tested in + * Since the ability to change device address for open port device is tested in * team_port_add, this function can be called without control of return value */ -static int __set_port_mac(struct net_device *port_dev, - const unsigned char *dev_addr) +static int __set_port_dev_addr(struct net_device *port_dev, + const unsigned char *dev_addr) { struct sockaddr addr; - memcpy(addr.sa_data, dev_addr, ETH_ALEN); - addr.sa_family = ARPHRD_ETHER; + memcpy(addr.sa_data, dev_addr, port_dev->addr_len); + addr.sa_family = port_dev->type; return dev_set_mac_address(port_dev, &addr); } -static int team_port_set_orig_mac(struct team_port *port) +static int team_port_set_orig_dev_addr(struct team_port *port) { - return __set_port_mac(port->dev, port->orig.dev_addr); + return __set_port_dev_addr(port->dev, port->orig.dev_addr); } -int team_port_set_team_mac(struct team_port *port) +int team_port_set_team_dev_addr(struct team_port *port) { - return __set_port_mac(port->dev, port->team->dev->dev_addr); + return __set_port_dev_addr(port->dev, port->team->dev->dev_addr); } -EXPORT_SYMBOL(team_port_set_team_mac); +EXPORT_SYMBOL(team_port_set_team_dev_addr); static void team_refresh_port_linkup(struct team_port *port) { @@ -658,6 +658,122 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) } +/************************************* + * Multiqueue Tx port select override + *************************************/ + +static int team_queue_override_init(struct team *team) +{ + struct list_head *listarr; + unsigned int queue_cnt = team->dev->num_tx_queues - 1; + unsigned int i; + + if (!queue_cnt) + return 0; + listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL); + if (!listarr) + return -ENOMEM; + team->qom_lists = listarr; + for (i = 0; i < queue_cnt; i++) + INIT_LIST_HEAD(listarr++); + return 0; +} + +static void team_queue_override_fini(struct team *team) +{ + kfree(team->qom_lists); +} + +static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id) +{ + return &team->qom_lists[queue_id - 1]; +} + +/* + * note: already called with rcu_read_lock + */ +static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb) +{ + struct list_head *qom_list; + struct team_port *port; + + if (!team->queue_override_enabled || !skb->queue_mapping) + return false; + qom_list = __team_get_qom_list(team, skb->queue_mapping); + list_for_each_entry_rcu(port, qom_list, qom_list) { + if (!team_dev_queue_xmit(team, port, skb)) + return true; + 
} + return false; +} + +static void __team_queue_override_port_del(struct team *team, + struct team_port *port) +{ + list_del_rcu(&port->qom_list); + synchronize_rcu(); + INIT_LIST_HEAD(&port->qom_list); +} + +static bool team_queue_override_port_has_gt_prio_than(struct team_port *port, + struct team_port *cur) +{ + if (port->priority < cur->priority) + return true; + if (port->priority > cur->priority) + return false; + if (port->index < cur->index) + return true; + return false; +} + +static void __team_queue_override_port_add(struct team *team, + struct team_port *port) +{ + struct team_port *cur; + struct list_head *qom_list; + struct list_head *node; + + if (!port->queue_id || !team_port_enabled(port)) + return; + + qom_list = __team_get_qom_list(team, port->queue_id); + node = qom_list; + list_for_each_entry(cur, qom_list, qom_list) { + if (team_queue_override_port_has_gt_prio_than(port, cur)) + break; + node = &cur->qom_list; + } + list_add_tail_rcu(&port->qom_list, node); +} + +static void __team_queue_override_enabled_check(struct team *team) +{ + struct team_port *port; + bool enabled = false; + + list_for_each_entry(port, &team->port_list, list) { + if (!list_empty(&port->qom_list)) { + enabled = true; + break; + } + } + if (enabled == team->queue_override_enabled) + return; + netdev_dbg(team->dev, "%s queue override\n", + enabled ? "Enabling" : "Disabling"); + team->queue_override_enabled = enabled; +} + +static void team_queue_override_port_refresh(struct team *team, + struct team_port *port) +{ + __team_queue_override_port_del(team, port); + __team_queue_override_port_add(team, port); + __team_queue_override_enabled_check(team); +} + + /**************** * Port handling ****************/ @@ -688,6 +804,7 @@ static void team_port_enable(struct team *team, hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); team_adjust_ops(team); + team_queue_override_port_refresh(team, port); if (team->ops.port_enabled) team->ops.port_enabled(team, port); } @@ -716,6 +833,7 @@ static void team_port_disable(struct team *team, hlist_del_rcu(&port->hlist); __reconstruct_port_hlist(team, port->index); port->index = -1; + team_queue_override_port_refresh(team, port); __team_adjust_ops(team, team->en_port_count - 1); /* * Wait until readers see adjusted ops. 
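
__team_queue_override_port_add() above keeps each qom list ordered, walking it and inserting the new port ahead of the first entry it outranks. The same idiom on a plain singly linked list, with the comparator mirroring the hunk (a lower priority value ranks first in this version; port index breaks ties):

    #include <stdbool.h>
    #include <stddef.h>

    struct node {
            struct node *next;
            int priority;
            int index;
    };

    /* Mirrors team_queue_override_port_has_gt_prio_than() above. */
    static bool outranks(const struct node *a, const struct node *b)
    {
            if (a->priority != b->priority)
                    return a->priority < b->priority;
            return a->index < b->index;
    }

    /* Insert n before the first entry it outranks; the list stays sorted. */
    static void ordered_insert(struct node **head, struct node *n)
    {
            struct node **pos = head;

            while (*pos && !outranks(n, *pos))
                    pos = &(*pos)->next;
            n->next = *pos;
            *pos = n;
    }
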
This ensures that @@ -795,16 +913,17 @@ static void team_port_leave(struct team *team, struct team_port *port) } #ifdef CONFIG_NET_POLL_CONTROLLER -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team *team, struct team_port *port, + gfp_t gfp) { struct netpoll *np; int err; - np = kzalloc(sizeof(*np), GFP_KERNEL); + np = kzalloc(sizeof(*np), gfp); if (!np) return -ENOMEM; - err = __netpoll_setup(np, port->dev); + err = __netpoll_setup(np, port->dev, gfp); if (err) { kfree(np); return err; @@ -833,7 +952,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team) } #else -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team *team, struct team_port *port, + gfp_t gfp) { return 0; } @@ -846,7 +966,10 @@ static struct netpoll_info *team_netpoll_info(struct team *team) } #endif -static void __team_port_change_check(struct team_port *port, bool linkup); +static void __team_port_change_port_added(struct team_port *port, bool linkup); + +static int team_dev_type_check_change(struct net_device *dev, + struct net_device *port_dev); static int team_port_add(struct team *team, struct net_device *port_dev) { @@ -855,9 +978,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev) char *portname = port_dev->name; int err; - if (port_dev->flags & IFF_LOOPBACK || - port_dev->type != ARPHRD_ETHER) { - netdev_err(dev, "Device %s is of an unsupported type\n", + if (port_dev->flags & IFF_LOOPBACK) { + netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n", portname); return -EINVAL; } @@ -868,6 +990,17 @@ static int team_port_add(struct team *team, struct net_device *port_dev) return -EBUSY; } + if (port_dev->features & NETIF_F_VLAN_CHALLENGED && + vlan_uses_dev(dev)) { + netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n", + portname); + return -EPERM; + } + + err = team_dev_type_check_change(dev, port_dev); + if (err) + return err; + if (port_dev->flags & IFF_UP) { netdev_err(dev, "Device %s is up. 
Set it down before adding it as a team port\n", portname); @@ -881,6 +1014,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) port->dev = port_dev; port->team = team; + INIT_LIST_HEAD(&port->qom_list); port->orig.mtu = port_dev->mtu; err = dev_set_mtu(port_dev, dev->mtu); @@ -889,7 +1023,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_set_mtu; } - memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN); + memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len); err = team_port_enter(team, port); if (err) { @@ -913,7 +1047,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) } if (team_netpoll_info(team)) { - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(team, port, GFP_KERNEL); if (err) { netdev_err(dev, "Failed to enable netpoll on device %s\n", portname); @@ -946,7 +1080,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) team_port_enable(team, port); list_add_tail_rcu(&port->list, &team->port_list); __team_compute_features(team); - __team_port_change_check(port, !!netif_carrier_ok(port_dev)); + __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); __team_options_change_check(team); netdev_info(dev, "Port device %s added\n", portname); @@ -970,7 +1104,7 @@ err_vids_add: err_dev_open: team_port_leave(team, port); - team_port_set_orig_mac(port); + team_port_set_orig_dev_addr(port); err_port_enter: dev_set_mtu(port_dev, port->orig.mtu); @@ -981,6 +1115,8 @@ err_set_mtu: return err; } +static void __team_port_change_port_removed(struct team_port *port); + static int team_port_del(struct team *team, struct net_device *port_dev) { struct net_device *dev = team->dev; @@ -997,8 +1133,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev) __team_option_inst_mark_removed_port(team, port); __team_options_change_check(team); __team_option_inst_del_port(team, port); - port->removed = true; - __team_port_change_check(port, false); + __team_port_change_port_removed(port); team_port_disable(team, port); list_del_rcu(&port->list); netdev_rx_handler_unregister(port_dev); @@ -1007,7 +1142,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev) vlan_vids_del_by_dev(port_dev, dev); dev_close(port_dev); team_port_leave(team, port); - team_port_set_orig_mac(port); + team_port_set_orig_dev_addr(port); dev_set_mtu(port_dev, port->orig.mtu); synchronize_rcu(); kfree(port); @@ -1092,6 +1227,49 @@ static int team_user_linkup_en_option_set(struct team *team, return 0; } +static int team_priority_option_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + ctx->data.s32_val = port->priority; + return 0; +} + +static int team_priority_option_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + port->priority = ctx->data.s32_val; + team_queue_override_port_refresh(team, port); + return 0; +} + +static int team_queue_id_option_get(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + ctx->data.u32_val = port->queue_id; + return 0; +} + +static int team_queue_id_option_set(struct team *team, + struct team_gsetter_ctx *ctx) +{ + struct team_port *port = ctx->info->port; + + if (port->queue_id == ctx->data.u32_val) + return 0; + if (ctx->data.u32_val >= team->dev->real_num_tx_queues) + return -EINVAL; + port->queue_id = ctx->data.u32_val; + 
team_queue_override_port_refresh(team, port); + return 0; +} + + static const struct team_option team_options[] = { { .name = "mode", @@ -1120,6 +1298,20 @@ static const struct team_option team_options[] = { .getter = team_user_linkup_en_option_get, .setter = team_user_linkup_en_option_set, }, + { + .name = "priority", + .type = TEAM_OPTION_TYPE_S32, + .per_port = true, + .getter = team_priority_option_get, + .setter = team_priority_option_set, + }, + { + .name = "queue_id", + .type = TEAM_OPTION_TYPE_U32, + .per_port = true, + .getter = team_queue_id_option_get, + .setter = team_queue_id_option_set, + }, }; static struct lock_class_key team_netdev_xmit_lock_key; @@ -1155,6 +1347,9 @@ static int team_init(struct net_device *dev) for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) INIT_HLIST_HEAD(&team->en_port_hlist[i]); INIT_LIST_HEAD(&team->port_list); + err = team_queue_override_init(team); + if (err) + goto err_team_queue_override_init; team_adjust_ops(team); @@ -1170,6 +1365,8 @@ static int team_init(struct net_device *dev) return 0; err_options_register: + team_queue_override_fini(team); +err_team_queue_override_init: free_percpu(team->pcpu_stats); return err; @@ -1187,6 +1384,7 @@ static void team_uninit(struct net_device *dev) __team_change_mode(team, NULL); /* cleanup */ __team_options_unregister(team, team_options, ARRAY_SIZE(team_options)); + team_queue_override_fini(team); mutex_unlock(&team->lock); } @@ -1216,10 +1414,12 @@ static int team_close(struct net_device *dev) static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) { struct team *team = netdev_priv(dev); - bool tx_success = false; + bool tx_success; unsigned int len = skb->len; - tx_success = team->ops.transmit(team, skb); + tx_success = team_queue_override_transmit(team, skb); + if (!tx_success) + tx_success = team->ops.transmit(team, skb); if (tx_success) { struct team_pcpu_stats *pcpu_stats; @@ -1293,17 +1493,18 @@ static void team_set_rx_mode(struct net_device *dev) static int team_set_mac_address(struct net_device *dev, void *p) { + struct sockaddr *addr = p; struct team *team = netdev_priv(dev); struct team_port *port; - int err; - err = eth_mac_addr(dev, p); - if (err) - return err; + if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + dev->addr_assign_type &= ~NET_ADDR_RANDOM; rcu_read_lock(); list_for_each_entry_rcu(port, &team->port_list, list) - if (team->ops.port_change_mac) - team->ops.port_change_mac(team, port); + if (team->ops.port_change_dev_addr) + team->ops.port_change_dev_addr(team, port); rcu_read_unlock(); return 0; } @@ -1443,7 +1644,7 @@ static void team_netpoll_cleanup(struct net_device *dev) } static int team_netpoll_setup(struct net_device *dev, - struct netpoll_info *npifo) + struct netpoll_info *npifo, gfp_t gfp) { struct team *team = netdev_priv(dev); struct team_port *port; @@ -1451,7 +1652,7 @@ static int team_netpoll_setup(struct net_device *dev, mutex_lock(&team->lock); list_for_each_entry(port, &team->port_list, list) { - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(team, port, gfp); if (err) { __team_netpoll_cleanup(team); break; @@ -1534,6 +1735,45 @@ static const struct net_device_ops team_netdev_ops = { * rt netlink interface ***********************/ +static void team_setup_by_port(struct net_device *dev, + struct net_device *port_dev) +{ + dev->header_ops = port_dev->header_ops; + dev->type = port_dev->type; + dev->hard_header_len = 
port_dev->hard_header_len; + dev->addr_len = port_dev->addr_len; + dev->mtu = port_dev->mtu; + memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); + memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len); + dev->addr_assign_type &= ~NET_ADDR_RANDOM; +} + +static int team_dev_type_check_change(struct net_device *dev, + struct net_device *port_dev) +{ + struct team *team = netdev_priv(dev); + char *portname = port_dev->name; + int err; + + if (dev->type == port_dev->type) + return 0; + if (!list_empty(&team->port_list)) { + netdev_err(dev, "Device %s is of different type\n", portname); + return -EBUSY; + } + err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev); + err = notifier_to_errno(err); + if (err) { + netdev_err(dev, "Refused to change device type\n"); + return err; + } + dev_uc_flush(dev); + dev_mc_flush(dev); + team_setup_by_port(dev, port_dev); + call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev); + return 0; +} + static void team_setup(struct net_device *dev) { ether_setup(dev); @@ -1648,7 +1888,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) if (!msg) return -ENOMEM; - hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, + hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &team_nl_family, 0, TEAM_CMD_NOOP); if (IS_ERR(hdr)) { err = PTR_ERR(hdr); @@ -1657,7 +1897,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) genlmsg_end(msg, hdr); - return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid); + return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid); err_msg_put: nlmsg_free(msg); @@ -1714,7 +1954,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team, if (err < 0) goto err_fill; - err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid); + err = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid); return err; err_fill: @@ -1723,11 +1963,11 @@ err_fill: } typedef int team_nl_send_func_t(struct sk_buff *skb, - struct team *team, u32 pid); + struct team *team, u32 portid); -static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid) +static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid) { - return genlmsg_unicast(dev_net(team->dev), skb, pid); + return genlmsg_unicast(dev_net(team->dev), skb, portid); } static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team, @@ -1787,6 +2027,12 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team, nla_put_flag(skb, TEAM_ATTR_OPTION_DATA)) goto nest_cancel; break; + case TEAM_OPTION_TYPE_S32: + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32)) + goto nest_cancel; + if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val)) + goto nest_cancel; + break; default: BUG(); } @@ -1806,13 +2052,13 @@ nest_cancel: } static int __send_and_alloc_skb(struct sk_buff **pskb, - struct team *team, u32 pid, + struct team *team, u32 portid, team_nl_send_func_t *send_func) { int err; if (*pskb) { - err = send_func(*pskb, team, pid); + err = send_func(*pskb, team, portid); if (err) return err; } @@ -1822,7 +2068,7 @@ static int __send_and_alloc_skb(struct sk_buff **pskb, return 0; } -static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq, +static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq, int flags, team_nl_send_func_t *send_func, struct list_head *sel_opt_inst_list) { @@ -1839,11 +2085,11 @@ static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq, struct 
team_option_inst, tmp_list); start_again: - err = __send_and_alloc_skb(&skb, team, pid, send_func); + err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) return err; - hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI, + hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI, TEAM_CMD_OPTIONS_GET); if (IS_ERR(hdr)) return PTR_ERR(hdr); @@ -1876,15 +2122,15 @@ start_again: goto start_again; send_done: - nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); + nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); if (!nlh) { - err = __send_and_alloc_skb(&skb, team, pid, send_func); + err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) goto errout; goto send_done; } - return send_func(skb, team, pid); + return send_func(skb, team, portid); nla_put_failure: err = -EMSGSIZE; @@ -1907,7 +2153,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) list_for_each_entry(opt_inst, &team->option_inst_list, list) list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); - err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq, + err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq, NLM_F_ACK, team_nl_send_unicast, &sel_opt_inst_list); @@ -1975,6 +2221,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) case NLA_FLAG: opt_type = TEAM_OPTION_TYPE_BOOL; break; + case NLA_S32: + opt_type = TEAM_OPTION_TYPE_S32; + break; default: goto team_put; } @@ -2031,6 +2280,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) case TEAM_OPTION_TYPE_BOOL: ctx.data.bool_val = attr_data ? true : false; break; + case TEAM_OPTION_TYPE_S32: + ctx.data.s32_val = nla_get_s32(attr_data); + break; default: BUG(); } @@ -2055,7 +2307,7 @@ team_put: } static int team_nl_fill_port_list_get(struct sk_buff *skb, - u32 pid, u32 seq, int flags, + u32 portid, u32 seq, int flags, struct team *team, bool fillall) { @@ -2063,7 +2315,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb, void *hdr; struct team_port *port; - hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, + hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags, TEAM_CMD_PORT_LIST_GET); if (IS_ERR(hdr)) return PTR_ERR(hdr); @@ -2112,7 +2364,7 @@ static int team_nl_fill_port_list_get_all(struct sk_buff *skb, struct genl_info *info, int flags, struct team *team) { - return team_nl_fill_port_list_get(skb, info->snd_pid, + return team_nl_fill_port_list_get(skb, info->snd_portid, info->snd_seq, NLM_F_ACK, team, true); } @@ -2165,7 +2417,7 @@ static struct genl_multicast_group team_change_event_mcgrp = { }; static int team_nl_send_multicast(struct sk_buff *skb, - struct team *team, u32 pid) + struct team *team, u32 portid) { return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, team_change_event_mcgrp.id, GFP_KERNEL); @@ -2243,19 +2495,17 @@ static void __team_options_change_check(struct team *team) list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); } err = team_nl_send_event_options_get(team, &sel_opt_inst_list); - if (err) + if (err && err != -ESRCH) netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n", err); } /* rtnl lock is held */ -static void __team_port_change_check(struct team_port *port, bool linkup) + +static void __team_port_change_send(struct team_port *port, bool linkup) { int err; - if (!port->removed && port->state.linkup == linkup) - return; - port->changed = true; port->state.linkup = linkup; 
team_refresh_port_linkup(port); @@ -2274,10 +2524,27 @@ static void __team_port_change_check(struct team_port *port, bool linkup) send_event: err = team_nl_send_event_port_list_get(port->team); - if (err) - netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n", - port->dev->name); + if (err && err != -ESRCH) + netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n", + port->dev->name, err); + +} +static void __team_port_change_check(struct team_port *port, bool linkup) +{ + if (port->state.linkup != linkup) + __team_port_change_send(port, linkup); +} + +static void __team_port_change_port_added(struct team_port *port, bool linkup) +{ + __team_port_change_send(port, linkup); +} + +static void __team_port_change_port_removed(struct team_port *port) +{ + port->removed = true; + __team_port_change_send(port, false); } static void team_port_change_check(struct team_port *port, bool linkup) diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c index c96e4d2967f0..9db0171e9366 100644 --- a/drivers/net/team/team_mode_broadcast.c +++ b/drivers/net/team/team_mode_broadcast.c @@ -48,18 +48,18 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb) static int bc_port_enter(struct team *team, struct team_port *port) { - return team_port_set_team_mac(port); + return team_port_set_team_dev_addr(port); } -static void bc_port_change_mac(struct team *team, struct team_port *port) +static void bc_port_change_dev_addr(struct team *team, struct team_port *port) { - team_port_set_team_mac(port); + team_port_set_team_dev_addr(port); } static const struct team_mode_ops bc_mode_ops = { .transmit = bc_transmit, .port_enter = bc_port_enter, - .port_change_mac = bc_port_change_mac, + .port_change_dev_addr = bc_port_change_dev_addr, }; static const struct team_mode bc_mode = { diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c index ad7ed0ec544c..105135aa8f05 100644 --- a/drivers/net/team/team_mode_roundrobin.c +++ b/drivers/net/team/team_mode_roundrobin.c @@ -66,18 +66,18 @@ drop: static int rr_port_enter(struct team *team, struct team_port *port) { - return team_port_set_team_mac(port); + return team_port_set_team_dev_addr(port); } -static void rr_port_change_mac(struct team *team, struct team_port *port) +static void rr_port_change_dev_addr(struct team *team, struct team_port *port) { - team_port_set_team_mac(port); + team_port_set_team_dev_addr(port); } static const struct team_mode_ops rr_mode_ops = { .transmit = rr_transmit, .port_enter = rr_port_enter, - .port_change_mac = rr_port_change_mac, + .port_change_dev_addr = rr_port_change_dev_addr, }; static const struct team_mode rr_mode = { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 926d4db5cb38..498dc0d4ba5e 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -120,8 +120,8 @@ struct tun_sock; struct tun_struct { struct tun_file *tfile; unsigned int flags; - uid_t owner; - gid_t group; + kuid_t owner; + kgid_t group; struct net_device *dev; netdev_features_t set_features; @@ -187,7 +187,6 @@ static void __tun_detach(struct tun_struct *tun) netif_tx_lock_bh(tun->dev); netif_carrier_off(tun->dev); tun->tfile = NULL; - tun->socket.file = NULL; netif_tx_unlock_bh(tun->dev); /* Drop read queue */ @@ -1032,8 +1031,8 @@ static void tun_setup(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); - tun->owner = -1; - tun->group = -1; + tun->owner = INVALID_UID; + tun->group = 
INVALID_GID; dev->ethtool_ops = &tun_ethtool_ops; dev->destructor = tun_free_netdev; @@ -1156,14 +1155,20 @@ static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); - return sprintf(buf, "%d\n", tun->owner); + return uid_valid(tun->owner)? + sprintf(buf, "%u\n", + from_kuid_munged(current_user_ns(), tun->owner)): + sprintf(buf, "-1\n"); } static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, char *buf) { struct tun_struct *tun = netdev_priv(to_net_dev(dev)); - return sprintf(buf, "%d\n", tun->group); + return gid_valid(tun->group) ? + sprintf(buf, "%u\n", + from_kgid_munged(current_user_ns(), tun->group)): + sprintf(buf, "-1\n"); } static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); @@ -1190,8 +1195,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) else return -EINVAL; - if (((tun->owner != -1 && cred->euid != tun->owner) || - (tun->group != -1 && !in_egroup_p(tun->group))) && + if (((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || + (gid_valid(tun->group) && !in_egroup_p(tun->group))) && !capable(CAP_NET_ADMIN)) return -EPERM; err = security_tun_dev_attach(tun->socket.sk); @@ -1375,6 +1380,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, void __user* argp = (void __user*)arg; struct sock_fprog fprog; struct ifreq ifr; + kuid_t owner; + kgid_t group; int sndbuf; int vnet_hdr_sz; int ret; @@ -1448,16 +1455,26 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, case TUNSETOWNER: /* Set owner of the device */ - tun->owner = (uid_t) arg; - - tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner); + owner = make_kuid(current_user_ns(), arg); + if (!uid_valid(owner)) { + ret = -EINVAL; + break; + } + tun->owner = owner; + tun_debug(KERN_INFO, tun, "owner set to %d\n", + from_kuid(&init_user_ns, tun->owner)); break; case TUNSETGROUP: /* Set group of the device */ - tun->group= (gid_t) arg; - - tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group); + group = make_kgid(current_user_ns(), arg); + if (!gid_valid(group)) { + ret = -EINVAL; + break; + } + tun->group = group; + tun_debug(KERN_INFO, tun, "group set to %d\n", + from_kgid(&init_user_ns, tun->group)); break; case TUNSETLINK: diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 4fd48df6b989..8d5fdf103bcd 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -221,7 +221,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf) /* Get the MAC address */ ret = asix_read_cmd(dev, AX88172_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); if (ret < 0) { - dbg("read AX_CMD_READ_NODE_ID failed: %d", ret); + netdev_dbg(dev->net, "read AX_CMD_READ_NODE_ID failed: %d\n", + ret); goto out; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); @@ -303,7 +304,7 @@ static int ax88772_reset(struct usbnet *dev) ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL); if (ret < 0) { - dbg("Select PHY #1 failed: %d", ret); + netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); goto out; } @@ -331,13 +332,13 @@ static int ax88772_reset(struct usbnet *dev) msleep(150); rx_ctl = asix_read_rx_ctl(dev); - dbg("RX_CTL is 0x%04x after software reset", rx_ctl); + netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl); ret = asix_write_rx_ctl(dev, 0x0000); if (ret < 0) goto out; rx_ctl = asix_read_rx_ctl(dev); - dbg("RX_CTL is 0x%04x setting to 0x0000", 
rx_ctl); + netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl); ret = asix_sw_reset(dev, AX_SWRESET_PRL); if (ret < 0) @@ -364,7 +365,7 @@ static int ax88772_reset(struct usbnet *dev) AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT, AX88772_IPG2_DEFAULT, 0, NULL); if (ret < 0) { - dbg("Write IPG,IPG1,IPG2 failed: %d", ret); + netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret); goto out; } @@ -381,10 +382,13 @@ static int ax88772_reset(struct usbnet *dev) goto out; rx_ctl = asix_read_rx_ctl(dev); - dbg("RX_CTL is 0x%04x after all initializations", rx_ctl); + netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n", + rx_ctl); rx_ctl = asix_read_medium_status(dev); - dbg("Medium Status is 0x%04x after all initializations", rx_ctl); + netdev_dbg(dev->net, + "Medium Status is 0x%04x after all initializations\n", + rx_ctl); return 0; @@ -416,7 +420,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) /* Get the MAC address */ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); if (ret < 0) { - dbg("Failed to read MAC address: %d", ret); + netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); return ret; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); @@ -439,7 +443,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) /* Reset the PHY to normal operation mode */ ret = asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL); if (ret < 0) { - dbg("Select PHY #1 failed: %d", ret); + netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret); return ret; } @@ -459,7 +463,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) /* Read PHYID register *AFTER* the PHY was reset properly */ phyid = asix_get_phyid(dev); - dbg("PHYID=0x%08x", phyid); + netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid); /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */ if (dev->driver_info->flags & FLAG_FRAMING_AX) { @@ -575,13 +579,13 @@ static int ax88178_reset(struct usbnet *dev) u32 phyid; asix_read_cmd(dev, AX_CMD_READ_GPIOS, 0, 0, 1, &status); - dbg("GPIO Status: 0x%04x", status); + netdev_dbg(dev->net, "GPIO Status: 0x%04x\n", status); asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0, 0, 0, NULL); asix_read_cmd(dev, AX_CMD_READ_EEPROM, 0x0017, 0, 2, &eeprom); asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0, 0, 0, NULL); - dbg("EEPROM index 0x17 is 0x%04x", eeprom); + netdev_dbg(dev->net, "EEPROM index 0x17 is 0x%04x\n", eeprom); if (eeprom == cpu_to_le16(0xffff)) { data->phymode = PHY_MODE_MARVELL; @@ -592,7 +596,7 @@ static int ax88178_reset(struct usbnet *dev) data->ledmode = le16_to_cpu(eeprom) >> 8; gpio0 = (le16_to_cpu(eeprom) & 0x80) ? 
0 : 1; } - dbg("GPIO0: %d, PhyMode: %d", gpio0, data->phymode); + netdev_dbg(dev->net, "GPIO0: %d, PhyMode: %d\n", gpio0, data->phymode); /* Power up external GigaPHY through AX88178 GPIO pin */ asix_write_gpio(dev, AX_GPIO_RSE | AX_GPIO_GPO_1 | AX_GPIO_GPO1EN, 40); @@ -601,14 +605,14 @@ static int ax88178_reset(struct usbnet *dev) asix_write_gpio(dev, 0x001c, 300); asix_write_gpio(dev, 0x003c, 30); } else { - dbg("gpio phymode == 1 path"); + netdev_dbg(dev->net, "gpio phymode == 1 path\n"); asix_write_gpio(dev, AX_GPIO_GPO1EN, 30); asix_write_gpio(dev, AX_GPIO_GPO1EN | AX_GPIO_GPO_1, 30); } /* Read PHYID register *AFTER* powering up PHY */ phyid = asix_get_phyid(dev); - dbg("PHYID=0x%08x", phyid); + netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid); /* Set AX88178 to enable MII/GMII/RGMII interface for external PHY */ asix_write_cmd(dev, AX_CMD_SW_PHY_SELECT, 0, 0, 0, NULL); @@ -770,7 +774,7 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf) /* Get the MAC address */ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf); if (ret < 0) { - dbg("Failed to read MAC address: %d", ret); + netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret); return ret; } memcpy(dev->net->dev_addr, buf, ETH_ALEN); diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index 26c5bebd9eca..18d9579123ea 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -236,7 +236,8 @@ static void catc_rx_done(struct urb *urb) } if (status) { - dbg("rx_done, status %d, length %d", status, urb->actual_length); + dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n", + status, urb->actual_length); return; } @@ -275,10 +276,11 @@ static void catc_rx_done(struct urb *urb) if (atomic_read(&catc->recq_sz)) { int state; atomic_dec(&catc->recq_sz); - dbg("getting extra packet"); + netdev_dbg(catc->netdev, "getting extra packet\n"); urb->dev = catc->usbdev; if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { - dbg("submit(rx_urb) status %d", state); + netdev_dbg(catc->netdev, + "submit(rx_urb) status %d\n", state); } } else { clear_bit(RX_RUNNING, &catc->flags); @@ -317,18 +319,20 @@ static void catc_irq_done(struct urb *urb) return; /* -EPIPE: should clear the halt */ default: /* error */ - dbg("irq_done, status %d, data %02x %02x.", status, data[0], data[1]); + dev_dbg(&urb->dev->dev, + "irq_done, status %d, data %02x %02x.\n", + status, data[0], data[1]); goto resubmit; } if (linksts == LinkGood) { netif_carrier_on(catc->netdev); - dbg("link ok"); + netdev_dbg(catc->netdev, "link ok\n"); } if (linksts == LinkBad) { netif_carrier_off(catc->netdev); - dbg("link bad"); + netdev_dbg(catc->netdev, "link bad\n"); } if (hasdata) { @@ -385,7 +389,7 @@ static void catc_tx_done(struct urb *urb) int r, status = urb->status; if (status == -ECONNRESET) { - dbg("Tx Reset."); + dev_dbg(&urb->dev->dev, "Tx Reset.\n"); urb->status = 0; catc->netdev->trans_start = jiffies; catc->netdev->stats.tx_errors++; @@ -395,7 +399,8 @@ static void catc_tx_done(struct urb *urb) } if (status) { - dbg("tx_done, status %d, length %d", status, urb->actual_length); + dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n", + status, urb->actual_length); return; } @@ -511,7 +516,8 @@ static void catc_ctrl_done(struct urb *urb) int status = urb->status; if (status) - dbg("ctrl_done, status %d, len %d.", status, urb->actual_length); + dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n", + status, urb->actual_length); spin_lock_irqsave(&catc->ctrl_lock, flags); @@ -667,7 +673,9 @@ static void 
catc_set_multicast_list(struct net_device *netdev) f5u011_mchash_async(catc, catc->multicast); if (catc->rxmode[0] != rx) { catc->rxmode[0] = rx; - dbg("Setting RX mode to %2.2X %2.2X", catc->rxmode[0], catc->rxmode[1]); + netdev_dbg(catc->netdev, + "Setting RX mode to %2.2X %2.2X\n", + catc->rxmode[0], catc->rxmode[1]); f5u011_rxmode_async(catc, catc->rxmode); } } @@ -766,6 +774,7 @@ static const struct net_device_ops catc_netdev_ops = { static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id) { + struct device *dev = &intf->dev; struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *netdev; struct catc *catc; @@ -774,7 +783,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { - dev_err(&intf->dev, "Can't set altsetting 1.\n"); + dev_err(dev, "Can't set altsetting 1.\n"); return -EIO; } @@ -817,7 +826,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && le16_to_cpu(usbdev->descriptor.idProduct) == 0xa && le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) { - dbg("Testing for f5u011"); + dev_dbg(dev, "Testing for f5u011\n"); catc->is_f5u011 = 1; atomic_set(&catc->recq_sz, 0); pktsz = RX_PKT_SZ; @@ -838,7 +847,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id catc->irq_buf, 2, catc_irq_done, catc, 1); if (!catc->is_f5u011) { - dbg("Checking memory size\n"); + dev_dbg(dev, "Checking memory size\n"); i = 0x12345678; catc_write_mem(catc, 0x7a80, &i, 4); @@ -850,7 +859,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id case 0x12345678: catc_set_reg(catc, TxBufCount, 8); catc_set_reg(catc, RxBufCount, 32); - dbg("64k Memory\n"); + dev_dbg(dev, "64k Memory\n"); break; default: dev_warn(&intf->dev, @@ -858,49 +867,49 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); - dbg("32k Memory\n"); + dev_dbg(dev, "32k Memory\n"); break; } - dbg("Getting MAC from SEEROM."); + dev_dbg(dev, "Getting MAC from SEEROM.\n"); catc_get_mac(catc, netdev->dev_addr); - dbg("Setting MAC into registers."); + dev_dbg(dev, "Setting MAC into registers.\n"); for (i = 0; i < 6; i++) catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]); - dbg("Filling the multicast list."); + dev_dbg(dev, "Filling the multicast list.\n"); memset(broadcast, 0xff, 6); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); catc_write_mem(catc, 0xfa80, catc->multicast, 64); - dbg("Clearing error counters."); + dev_dbg(dev, "Clearing error counters.\n"); for (i = 0; i < 8; i++) catc_set_reg(catc, EthStats + i, 0); catc->last_stats = jiffies; - dbg("Enabling."); + dev_dbg(dev, "Enabling.\n"); catc_set_reg(catc, MaxBurst, RX_MAX_BURST); catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits); catc_set_reg(catc, LEDCtrl, LEDLink); catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast); } else { - dbg("Performing reset\n"); + dev_dbg(dev, "Performing reset\n"); catc_reset(catc); catc_get_mac(catc, netdev->dev_addr); - dbg("Setting RX Mode"); + dev_dbg(dev, "Setting RX Mode\n"); catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast; catc->rxmode[1] = 0; f5u011_rxmode(catc, catc->rxmode); } - dbg("Init done."); + dev_dbg(dev, "Init 
done.\n"); printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n", netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate", usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr); diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 64610048ce87..7d78669000d7 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -232,6 +232,7 @@ static int usbpn_open(struct net_device *dev) struct urb *req = usb_alloc_urb(0, GFP_KERNEL); if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) { + usb_free_urb(req); usbpn_close(dev); return -ENOMEM; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f4ce5957df32..4cd582a4f625 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1225,6 +1225,26 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long) &wwan_info, }, + /* Dell branded MBM devices like DW5550 */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x413c, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + + /* Toshiba branded MBM devices */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x0930, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + /* Generic CDC-NCM devices */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c index 49ab45e17fe8..1e207f086b75 100644 --- a/drivers/net/usb/cx82310_eth.c +++ b/drivers/net/usb/cx82310_eth.c @@ -302,18 +302,9 @@ static const struct driver_info cx82310_info = { .tx_fixup = cx82310_tx_fixup, }; -#define USB_DEVICE_CLASS(vend, prod, cl, sc, pr) \ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \ - USB_DEVICE_ID_MATCH_DEV_INFO, \ - .idVendor = (vend), \ - .idProduct = (prod), \ - .bDeviceClass = (cl), \ - .bDeviceSubClass = (sc), \ - .bDeviceProtocol = (pr) - static const struct usb_device_id products[] = { { - USB_DEVICE_CLASS(0x0572, 0xcb01, 0xff, 0, 0), + USB_DEVICE_AND_INTERFACE_INFO(0x0572, 0xcb01, 0xff, 0, 0), .driver_info = (unsigned long) &cx82310_info }, { }, diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c index db3c8021f2a3..a7e3f4e55bf3 100644 --- a/drivers/net/usb/gl620a.c +++ b/drivers/net/usb/gl620a.c @@ -91,7 +91,9 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) // get the packet count of the received skb count = le32_to_cpu(header->packet_count); if (count > GL_MAX_TRANSMIT_PACKETS) { - dbg("genelink: invalid received packet count %u", count); + netdev_dbg(dev->net, + "genelink: invalid received packet count %u\n", + count); return 0; } @@ -107,7 +109,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) // this may be a broken packet if (size > GL_MAX_PACKET_LEN) { - dbg("genelink: invalid rx length %d", size); + netdev_dbg(dev->net, "genelink: invalid rx length %d\n", + size); return 0; } @@ -133,7 +136,8 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb) skb_pull(skb, 4); if (skb->len > GL_MAX_PACKET_LEN) { - dbg("genelink: invalid rx length %d", skb->len); + netdev_dbg(dev->net, "genelink: invalid rx length %d\n", + skb->len); return 0; } return 1; diff --git 
a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index c3d03490c97d..c75e11e1b385 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -267,19 +267,16 @@ static int kaweth_control(struct kaweth_device *kaweth, struct usb_ctrlrequest *dr; int retval; - dbg("kaweth_control()"); + netdev_dbg(kaweth->net, "kaweth_control()\n"); if(in_interrupt()) { - dbg("in_interrupt()"); + netdev_dbg(kaweth->net, "in_interrupt()\n"); return -EBUSY; } dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC); - - if (!dr) { - dbg("kmalloc() failed"); + if (!dr) return -ENOMEM; - } dr->bRequestType = requesttype; dr->bRequest = request; @@ -305,7 +302,7 @@ static int kaweth_read_configuration(struct kaweth_device *kaweth) { int retval; - dbg("Reading kaweth configuration"); + netdev_dbg(kaweth->net, "Reading kaweth configuration\n"); retval = kaweth_control(kaweth, usb_rcvctrlpipe(kaweth->dev, 0), @@ -327,7 +324,7 @@ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size) { int retval; - dbg("Setting URB size to %d", (unsigned)urb_size); + netdev_dbg(kaweth->net, "Setting URB size to %d\n", (unsigned)urb_size); retval = kaweth_control(kaweth, usb_sndctrlpipe(kaweth->dev, 0), @@ -349,7 +346,7 @@ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait) { int retval; - dbg("Set SOFS wait to %d", (unsigned)sofs_wait); + netdev_dbg(kaweth->net, "Set SOFS wait to %d\n", (unsigned)sofs_wait); retval = kaweth_control(kaweth, usb_sndctrlpipe(kaweth->dev, 0), @@ -372,7 +369,8 @@ static int kaweth_set_receive_filter(struct kaweth_device *kaweth, { int retval; - dbg("Set receive filter to %d", (unsigned)receive_filter); + netdev_dbg(kaweth->net, "Set receive filter to %d\n", + (unsigned)receive_filter); retval = kaweth_control(kaweth, usb_sndctrlpipe(kaweth->dev, 0), @@ -421,12 +419,13 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth, kaweth->firmware_buf[4] = type; kaweth->firmware_buf[5] = interrupt; - dbg("High: %i, Low:%i", kaweth->firmware_buf[3], + netdev_dbg(kaweth->net, "High: %i, Low:%i\n", kaweth->firmware_buf[3], kaweth->firmware_buf[2]); - dbg("Downloading firmware at %p to kaweth device at %p", - fw->data, kaweth); - dbg("Firmware length: %d", data_len); + netdev_dbg(kaweth->net, + "Downloading firmware at %p to kaweth device at %p\n", + fw->data, kaweth); + netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len); return kaweth_control(kaweth, usb_sndctrlpipe(kaweth->dev, 0), @@ -454,7 +453,7 @@ static int kaweth_trigger_firmware(struct kaweth_device *kaweth, kaweth->firmware_buf[6] = 0x00; kaweth->firmware_buf[7] = 0x00; - dbg("Triggering firmware"); + netdev_dbg(kaweth->net, "Triggering firmware\n"); return kaweth_control(kaweth, usb_sndctrlpipe(kaweth->dev, 0), @@ -474,11 +473,11 @@ static int kaweth_reset(struct kaweth_device *kaweth) { int result; - dbg("kaweth_reset(%p)", kaweth); + netdev_dbg(kaweth->net, "kaweth_reset(%p)\n", kaweth); result = usb_reset_configuration(kaweth->dev); mdelay(10); - dbg("kaweth_reset() returns %d.",result); + netdev_dbg(kaweth->net, "kaweth_reset() returns %d.\n", result); return result; } @@ -595,6 +594,7 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth); ****************************************************************/ static void kaweth_usb_receive(struct urb *urb) { + struct device *dev = &urb->dev->dev; struct kaweth_device *kaweth = urb->context; struct net_device *net = kaweth->net; int status = urb->status; @@ -610,25 +610,25 @@ static void 
kaweth_usb_receive(struct urb *urb) kaweth->stats.rx_errors++; kaweth->end = 1; wake_up(&kaweth->term_wait); - dbg("Status was -EPIPE."); + dev_dbg(dev, "Status was -EPIPE.\n"); return; } if (unlikely(status == -ECONNRESET || status == -ESHUTDOWN)) { /* we are killed - set a flag and wake the disconnect handler */ kaweth->end = 1; wake_up(&kaweth->term_wait); - dbg("Status was -ECONNRESET or -ESHUTDOWN."); + dev_dbg(dev, "Status was -ECONNRESET or -ESHUTDOWN.\n"); return; } if (unlikely(status == -EPROTO || status == -ETIME || status == -EILSEQ)) { kaweth->stats.rx_errors++; - dbg("Status was -EPROTO, -ETIME, or -EILSEQ."); + dev_dbg(dev, "Status was -EPROTO, -ETIME, or -EILSEQ.\n"); return; } if (unlikely(status == -EOVERFLOW)) { kaweth->stats.rx_errors++; - dbg("Status was -EOVERFLOW."); + dev_dbg(dev, "Status was -EOVERFLOW.\n"); } spin_lock(&kaweth->device_lock); if (IS_BLOCKED(kaweth->status)) { @@ -687,7 +687,7 @@ static int kaweth_open(struct net_device *net) struct kaweth_device *kaweth = netdev_priv(net); int res; - dbg("Opening network device."); + netdev_dbg(kaweth->net, "Opening network device.\n"); res = usb_autopm_get_interface(kaweth->intf); if (res) { @@ -787,7 +787,8 @@ static void kaweth_usb_transmit_complete(struct urb *urb) if (unlikely(status != 0)) if (status != -ENOENT) - dbg("%s: TX status %d.", kaweth->net->name, status); + dev_dbg(&urb->dev->dev, "%s: TX status %d.\n", + kaweth->net->name, status); netif_wake_queue(kaweth->net); dev_kfree_skb_irq(skb); @@ -871,7 +872,7 @@ static void kaweth_set_rx_mode(struct net_device *net) KAWETH_PACKET_FILTER_BROADCAST | KAWETH_PACKET_FILTER_MULTICAST; - dbg("Setting Rx mode to %d", packet_filter_bitmap); + netdev_dbg(net, "Setting Rx mode to %d\n", packet_filter_bitmap); netif_stop_queue(net); @@ -916,7 +917,8 @@ static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth) result); } else { - dbg("Set Rx mode to %d", packet_filter_bitmap); + netdev_dbg(kaweth->net, "Set Rx mode to %d\n", + packet_filter_bitmap); } } @@ -951,7 +953,7 @@ static int kaweth_suspend(struct usb_interface *intf, pm_message_t message) struct kaweth_device *kaweth = usb_get_intfdata(intf); unsigned long flags; - dbg("Suspending device"); + dev_dbg(&intf->dev, "Suspending device\n"); spin_lock_irqsave(&kaweth->device_lock, flags); kaweth->status |= KAWETH_STATUS_SUSPENDING; spin_unlock_irqrestore(&kaweth->device_lock, flags); @@ -968,7 +970,7 @@ static int kaweth_resume(struct usb_interface *intf) struct kaweth_device *kaweth = usb_get_intfdata(intf); unsigned long flags; - dbg("Resuming device"); + dev_dbg(&intf->dev, "Resuming device\n"); spin_lock_irqsave(&kaweth->device_lock, flags); kaweth->status &= ~KAWETH_STATUS_SUSPENDING; spin_unlock_irqrestore(&kaweth->device_lock, flags); @@ -1003,36 +1005,37 @@ static int kaweth_probe( const struct usb_device_id *id /* from id_table */ ) { - struct usb_device *dev = interface_to_usbdev(intf); + struct device *dev = &intf->dev; + struct usb_device *udev = interface_to_usbdev(intf); struct kaweth_device *kaweth; struct net_device *netdev; const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; int result = 0; - dbg("Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x", - dev->devnum, - le16_to_cpu(dev->descriptor.idVendor), - le16_to_cpu(dev->descriptor.idProduct), - le16_to_cpu(dev->descriptor.bcdDevice)); + dev_dbg(dev, + "Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n", + udev->devnum, le16_to_cpu(udev->descriptor.idVendor), + 
le16_to_cpu(udev->descriptor.idProduct), + le16_to_cpu(udev->descriptor.bcdDevice)); - dbg("Device at %p", dev); + dev_dbg(dev, "Device at %p\n", udev); - dbg("Descriptor length: %x type: %x", - (int)dev->descriptor.bLength, - (int)dev->descriptor.bDescriptorType); + dev_dbg(dev, "Descriptor length: %x type: %x\n", + (int)udev->descriptor.bLength, + (int)udev->descriptor.bDescriptorType); netdev = alloc_etherdev(sizeof(*kaweth)); if (!netdev) return -ENOMEM; kaweth = netdev_priv(netdev); - kaweth->dev = dev; + kaweth->dev = udev; kaweth->net = netdev; spin_lock_init(&kaweth->device_lock); init_waitqueue_head(&kaweth->term_wait); - dbg("Resetting."); + dev_dbg(dev, "Resetting.\n"); kaweth_reset(kaweth); @@ -1041,17 +1044,17 @@ static int kaweth_probe( * downloaded. Don't try to do it again, or we'll hang the device. */ - if (le16_to_cpu(dev->descriptor.bcdDevice) >> 8) { - dev_info(&intf->dev, "Firmware present in device.\n"); + if (le16_to_cpu(udev->descriptor.bcdDevice) >> 8) { + dev_info(dev, "Firmware present in device.\n"); } else { /* Download the firmware */ - dev_info(&intf->dev, "Downloading firmware...\n"); + dev_info(dev, "Downloading firmware...\n"); kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL); if ((result = kaweth_download_firmware(kaweth, "kaweth/new_code.bin", 100, 2)) < 0) { - dev_err(&intf->dev, "Error downloading firmware (%d)\n", + dev_err(dev, "Error downloading firmware (%d)\n", result); goto err_fw; } @@ -1060,8 +1063,7 @@ static int kaweth_probe( "kaweth/new_code_fix.bin", 100, 3)) < 0) { - dev_err(&intf->dev, - "Error downloading firmware fix (%d)\n", + dev_err(dev, "Error downloading firmware fix (%d)\n", result); goto err_fw; } @@ -1070,8 +1072,7 @@ static int kaweth_probe( "kaweth/trigger_code.bin", 126, 2)) < 0) { - dev_err(&intf->dev, - "Error downloading trigger code (%d)\n", + dev_err(dev, "Error downloading trigger code (%d)\n", result); goto err_fw; @@ -1081,19 +1082,18 @@ static int kaweth_probe( "kaweth/trigger_code_fix.bin", 126, 3)) < 0) { - dev_err(&intf->dev, "Error downloading trigger code fix (%d)\n", result); + dev_err(dev, "Error downloading trigger code fix (%d)\n", result); goto err_fw; } if ((result = kaweth_trigger_firmware(kaweth, 126)) < 0) { - dev_err(&intf->dev, "Error triggering firmware (%d)\n", - result); + dev_err(dev, "Error triggering firmware (%d)\n", result); goto err_fw; } /* Device will now disappear for a moment... */ - dev_info(&intf->dev, "Firmware loaded. I'll be back...\n"); + dev_info(dev, "Firmware loaded. 
I'll be back...\n"); err_fw: free_page((unsigned long)kaweth->firmware_buf); free_netdev(netdev); @@ -1103,29 +1103,29 @@ err_fw: result = kaweth_read_configuration(kaweth); if(result < 0) { - dev_err(&intf->dev, "Error reading configuration (%d), no net device created\n", result); + dev_err(dev, "Error reading configuration (%d), no net device created\n", result); goto err_free_netdev; } - dev_info(&intf->dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask); - dev_info(&intf->dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1)); - dev_info(&intf->dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size)); - dev_info(&intf->dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr); + dev_info(dev, "Statistics collection: %x\n", kaweth->configuration.statistics_mask); + dev_info(dev, "Multicast filter limit: %x\n", kaweth->configuration.max_multicast_filters & ((1 << 15) - 1)); + dev_info(dev, "MTU: %d\n", le16_to_cpu(kaweth->configuration.segment_size)); + dev_info(dev, "Read MAC address %pM\n", kaweth->configuration.hw_addr); if(!memcmp(&kaweth->configuration.hw_addr, &bcast_addr, sizeof(bcast_addr))) { - dev_err(&intf->dev, "Firmware not functioning properly, no net device created\n"); + dev_err(dev, "Firmware not functioning properly, no net device created\n"); goto err_free_netdev; } if(kaweth_set_urb_size(kaweth, KAWETH_BUF_SIZE) < 0) { - dbg("Error setting URB size"); + dev_dbg(dev, "Error setting URB size\n"); goto err_free_netdev; } if(kaweth_set_sofs_wait(kaweth, KAWETH_SOFS_TO_WAIT) < 0) { - dev_err(&intf->dev, "Error setting SOFS wait\n"); + dev_err(dev, "Error setting SOFS wait\n"); goto err_free_netdev; } @@ -1135,11 +1135,11 @@ err_fw: KAWETH_PACKET_FILTER_MULTICAST); if(result < 0) { - dev_err(&intf->dev, "Error setting receive filter\n"); + dev_err(dev, "Error setting receive filter\n"); goto err_free_netdev; } - dbg("Initializing net device."); + dev_dbg(dev, "Initializing net device.\n"); kaweth->intf = intf; @@ -1181,20 +1181,20 @@ err_fw: #if 0 // dma_supported() is deeply broken on almost all architectures - if (dma_supported (&intf->dev, 0xffffffffffffffffULL)) + if (dma_supported (dev, 0xffffffffffffffffULL)) kaweth->net->features |= NETIF_F_HIGHDMA; #endif - SET_NETDEV_DEV(netdev, &intf->dev); + SET_NETDEV_DEV(netdev, dev); if (register_netdev(netdev) != 0) { - dev_err(&intf->dev, "Error registering netdev.\n"); + dev_err(dev, "Error registering netdev.\n"); goto err_intfdata; } - dev_info(&intf->dev, "kaweth interface created at %s\n", + dev_info(dev, "kaweth interface created at %s\n", kaweth->net->name); - dbg("Kaweth probe returning."); + dev_dbg(dev, "Kaweth probe returning.\n"); return 0; @@ -1232,7 +1232,7 @@ static void kaweth_disconnect(struct usb_interface *intf) } netdev = kaweth->net; - dbg("Unregistering net device"); + netdev_dbg(kaweth->net, "Unregistering net device\n"); unregister_netdev(netdev); usb_free_urb(kaweth->rx_urb); diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c index 28c4d513ba85..c062a3e8295c 100644 --- a/drivers/net/usb/net1080.c +++ b/drivers/net/usb/net1080.c @@ -155,12 +155,10 @@ static void nc_dump_registers(struct usbnet *dev) u8 reg; u16 *vp = kmalloc(sizeof (u16)); - if (!vp) { - dbg("no memory?"); + if (!vp) return; - } - dbg("%s registers:", dev->net->name); + netdev_dbg(dev->net, "registers:\n"); for (reg = 0; reg < 0x20; reg++) { int retval; @@ -172,11 +170,10 @@ static void nc_dump_registers(struct usbnet *dev) retval = 
nc_register_read(dev, reg, vp); if (retval < 0) - dbg("%s reg [0x%x] ==> error %d", - dev->net->name, reg, retval); + netdev_dbg(dev->net, "reg [0x%x] ==> error %d\n", + reg, retval); else - dbg("%s reg [0x%x] = 0x%x", - dev->net->name, reg, *vp); + netdev_dbg(dev->net, "reg [0x%x] = 0x%x\n", reg, *vp); } kfree(vp); } @@ -300,15 +297,15 @@ static int net1080_reset(struct usbnet *dev) // nc_dump_registers(dev); if ((retval = nc_register_read(dev, REG_STATUS, vp)) < 0) { - dbg("can't read %s-%s status: %d", - dev->udev->bus->bus_name, dev->udev->devpath, retval); + netdev_dbg(dev->net, "can't read %s-%s status: %d\n", + dev->udev->bus->bus_name, dev->udev->devpath, retval); goto done; } status = *vp; nc_dump_status(dev, status); if ((retval = nc_register_read(dev, REG_USBCTL, vp)) < 0) { - dbg("can't read USBCTL, %d", retval); + netdev_dbg(dev->net, "can't read USBCTL, %d\n", retval); goto done; } usbctl = *vp; @@ -318,7 +315,7 @@ static int net1080_reset(struct usbnet *dev) USBCTL_FLUSH_THIS | USBCTL_FLUSH_OTHER); if ((retval = nc_register_read(dev, REG_TTL, vp)) < 0) { - dbg("can't read TTL, %d", retval); + netdev_dbg(dev->net, "can't read TTL, %d\n", retval); goto done; } ttl = *vp; @@ -326,7 +323,7 @@ static int net1080_reset(struct usbnet *dev) nc_register_write(dev, REG_TTL, MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) ); - dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS); + netdev_dbg(dev->net, "assigned TTL, %d ms\n", NC_READ_TTL_MS); netif_info(dev, link, dev->net, "port %c, peer %sconnected\n", (status & STATUS_PORT_A) ? 'A' : 'B', @@ -350,7 +347,7 @@ static int net1080_check_connect(struct usbnet *dev) status = *vp; kfree(vp); if (retval != 0) { - dbg("%s net1080_check_conn read - %d", dev->net->name, retval); + netdev_dbg(dev->net, "net1080_check_conn read - %d\n", retval); return retval; } if ((status & STATUS_CONN_OTHER) != STATUS_CONN_OTHER) @@ -420,11 +417,9 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) u16 hdr_len, packet_len; if (!(skb->len & 0x01)) { -#ifdef DEBUG - struct net_device *net = dev->net; - dbg("rx framesize %d range %d..%d mtu %d", skb->len, - net->hard_header_len, dev->hard_mtu, net->mtu); -#endif + netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n", + skb->len, dev->net->hard_header_len, dev->hard_mtu, + dev->net->mtu); dev->net->stats.rx_frame_errors++; nc_ensure_sync(dev); return 0; @@ -435,17 +430,17 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) packet_len = le16_to_cpup(&header->packet_len); if (FRAMED_SIZE(packet_len) > NC_MAX_PACKET) { dev->net->stats.rx_frame_errors++; - dbg("packet too big, %d", packet_len); + netdev_dbg(dev->net, "packet too big, %d\n", packet_len); nc_ensure_sync(dev); return 0; } else if (hdr_len < MIN_HEADER) { dev->net->stats.rx_frame_errors++; - dbg("header too short, %d", hdr_len); + netdev_dbg(dev->net, "header too short, %d\n", hdr_len); nc_ensure_sync(dev); return 0; } else if (hdr_len > MIN_HEADER) { // out of band data for us? - dbg("header OOB, %d bytes", hdr_len - MIN_HEADER); + netdev_dbg(dev->net, "header OOB, %d bytes\n", hdr_len - MIN_HEADER); nc_ensure_sync(dev); // switch (vendor/product ids) { ... 
} } @@ -458,23 +453,23 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb) if ((packet_len & 0x01) == 0) { if (skb->data [packet_len] != PAD_BYTE) { dev->net->stats.rx_frame_errors++; - dbg("bad pad"); + netdev_dbg(dev->net, "bad pad\n"); return 0; } skb_trim(skb, skb->len - 1); } if (skb->len != packet_len) { dev->net->stats.rx_frame_errors++; - dbg("bad packet len %d (expected %d)", - skb->len, packet_len); + netdev_dbg(dev->net, "bad packet len %d (expected %d)\n", + skb->len, packet_len); nc_ensure_sync(dev); return 0; } if (header->packet_id != get_unaligned(&trailer->packet_id)) { dev->net->stats.rx_fifo_errors++; - dbg("(2+ dropped) rx packet_id mismatch 0x%x 0x%x", - le16_to_cpu(header->packet_id), - le16_to_cpu(trailer->packet_id)); + netdev_dbg(dev->net, "(2+ dropped) rx packet_id mismatch 0x%x 0x%x\n", + le16_to_cpu(header->packet_id), + le16_to_cpu(trailer->packet_id)); return 0; } #if 0 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 2ea126a16d79..ca253206b73f 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -108,7 +108,7 @@ static int qmi_wwan_register_subdriver(struct usbnet *dev) atomic_set(&info->pmcount, 0); /* register subdriver */ - subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power); + subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 4096, &qmi_wwan_cdc_wdm_manage_power); if (IS_ERR(subdriver)) { dev_err(&info->control->dev, "subdriver registration failed\n"); rv = PTR_ERR(subdriver); @@ -139,10 +139,18 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); - /* require a single interrupt status endpoint for subdriver */ + /* control and data is shared? */ + if (intf->cur_altsetting->desc.bNumEndpoints == 3) { + info->control = intf; + info->data = intf; + goto shared; + } + + /* else require a single interrupt status endpoint on control intf */ if (intf->cur_altsetting->desc.bNumEndpoints != 1) goto err; + /* and a number of CDC descriptors */ while (len > 3) { struct usb_descriptor_header *h = (void *)buf; @@ -231,8 +239,9 @@ next_desc: if (status < 0) goto err; +shared: status = qmi_wwan_register_subdriver(dev); - if (status < 0) { + if (status < 0 && info->control != info->data) { usb_set_intfdata(info->data, NULL); usb_driver_release_interface(driver, info->data); } @@ -241,38 +250,6 @@ err: return status; } -/* Some devices combine the "control" and "data" functions into a - * single interface with all three endpoints: interrupt + bulk in and - * out - */ -static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) -{ - int rv; - struct qmi_wwan_state *info = (void *)&dev->data; - - /* ZTE makes devices where the interface descriptors and endpoint - * configurations of two or more interfaces are identical, even - * though the functions are completely different. 
If set, then - * driver_info->data is a bitmap of acceptable interface numbers - * allowing us to bind to one such interface without binding to - * all of them - */ - if (dev->driver_info->data && - !test_bit(intf->cur_altsetting->desc.bInterfaceNumber, &dev->driver_info->data)) { - dev_info(&intf->dev, "not on our whitelist - ignored"); - rv = -ENODEV; - goto err; - } - - /* control and data is shared */ - info->control = intf; - info->data = intf; - rv = qmi_wwan_register_subdriver(dev); - -err: - return rv; -} - static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) { struct qmi_wwan_state *info = (void *)&dev->data; @@ -315,7 +292,7 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message) if (ret < 0) goto err; - if (info->subdriver && info->subdriver->suspend) + if (intf == info->control && info->subdriver && info->subdriver->suspend) ret = info->subdriver->suspend(intf, message); if (ret < 0) usbnet_resume(intf); @@ -328,13 +305,14 @@ static int qmi_wwan_resume(struct usb_interface *intf) struct usbnet *dev = usb_get_intfdata(intf); struct qmi_wwan_state *info = (void *)&dev->data; int ret = 0; + bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume); - if (info->subdriver && info->subdriver->resume) + if (callsub) ret = info->subdriver->resume(intf); if (ret < 0) goto err; ret = usbnet_resume(intf); - if (ret < 0 && info->subdriver && info->subdriver->resume && info->subdriver->suspend) + if (ret < 0 && callsub && info->subdriver->suspend) info->subdriver->suspend(intf, PMSG_SUSPEND); err: return ret; @@ -348,225 +326,71 @@ static const struct driver_info qmi_wwan_info = { .manage_power = qmi_wwan_manage_power, }; -static const struct driver_info qmi_wwan_shared = { - .description = "WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, -}; - -static const struct driver_info qmi_wwan_force_int0 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(0), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int1 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(1), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int2 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(2), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int3 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(3), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int4 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(4), /* interface whitelist bitmap */ -}; - -/* Sierra Wireless provide equally useless interface descriptors - * Devices in QMI mode can be switched between two different - * configurations: - * a) USB interface #8 is QMI/wwan - * b) USB 
interfaces #8, #19 and #20 are QMI/wwan - * - * Both configurations provide a number of other interfaces (serial++), - * some of which have the same endpoint configuration as we expect, so - * a whitelist or blacklist is necessary. - * - * FIXME: The below whitelist should include BIT(20). It does not - * because I cannot get it to work... - */ -static const struct driver_info qmi_wwan_sierra = { - .description = "Sierra Wireless wwan/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ -}; - #define HUAWEI_VENDOR_ID 0x12D1 +/* map QMI/wwan function by a fixed interface number */ +#define QMI_FIXED_INTF(vend, prod, num) \ + USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ + .driver_info = (unsigned long)&qmi_wwan_info + /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */ #define QMI_GOBI1K_DEVICE(vend, prod) \ - USB_DEVICE(vend, prod), \ - .driver_info = (unsigned long)&qmi_wwan_force_int3 + QMI_FIXED_INTF(vend, prod, 3) -/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */ +/* Gobi 2000/3000 QMI/wwan interface number is 0 according to qcserial */ #define QMI_GOBI_DEVICE(vend, prod) \ - USB_DEVICE(vend, prod), \ - .driver_info = (unsigned long)&qmi_wwan_force_int0 + QMI_FIXED_INTF(vend, prod, 0) static const struct usb_device_id products[] = { + /* 1. CDC ECM like devices match on the control interface */ { /* Huawei E392, E398 and possibly others sharing both device id and more... */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 9), .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57), .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* Huawei E392, E398 and possibly others in "Windows mode" - * using a combined control and data interface without any CDC - * functional descriptors - */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 17, - .driver_info = (unsigned long)&qmi_wwan_shared, - }, - { /* Pantech UML290 */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x106c, - .idProduct = 0x3718, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xf0, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_shared, - }, - { /* ZTE MF820D */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0167, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE MF821D */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0326, - 
.bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3520-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0055, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int1, - }, - { /* ZTE (Vodafone) K3565-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0063, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3570-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1008, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3571-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1010, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3765-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x2002, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, + + /* 2. Combined interface devices matching on class+protocol */ + { /* Huawei E367 and possibly others in "Windows mode" */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 7), + .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* ZTE (Vodafone) K4505-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0104, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, + { /* Huawei E392, E398 and possibly others in "Windows mode" */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), + .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* ZTE MF60 */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1402, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int2, + { /* Pantech UML290, P4200 and more */ + USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), + .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* Sierra Wireless MC77xx in QMI mode */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x1199, - .idProduct = 0x68a2, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_sierra, + { /* Pantech UML290 - newer firmware */ + USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff), + .driver_info = (unsigned long)&qmi_wwan_info, }, - /* Gobi 1000 devices */ + /* 3. 
Combined interface devices matching on interface number */ + {QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0157, 5)}, /* ZTE MF683 */ + {QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */ + {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ + {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ + {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ + {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ + + /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ - {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ @@ -579,9 +403,11 @@ static const struct usb_device_id products[] = { {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ - /* Gobi 2000 and 3000 devices */ + /* 5. Gobi 2000 and 3000 devices */ {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ + {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ + {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */ {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ @@ -589,6 +415,8 @@ static const struct usb_device_id products[] = { {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ + {QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ + {QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */ {QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ @@ -600,11 +428,17 @@ static const struct usb_device_id products[] = { {QMI_GOBI_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */ + {QMI_FIXED_INTF(0x1199, 0x9011, 5)}, /* alternate interface number!? 
*/ {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ + {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ + {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ + {QMI_GOBI_DEVICE(0x12d1, 0x14f1)}, /* Sony Gobi 3000 Composite */ + {QMI_GOBI_DEVICE(0x1410, 0xa021)}, /* Foxconn Gobi 3000 Modem device (Novatel E396) */ + { } /* END */ }; MODULE_DEVICE_TABLE(usb, products); @@ -620,7 +454,7 @@ static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id */ if (!id->driver_info) { dev_dbg(&intf->dev, "setting defaults for dynamic device id\n"); - id->driver_info = (unsigned long)&qmi_wwan_shared; + id->driver_info = (unsigned long)&qmi_wwan_info; } return usbnet_probe(intf, id); diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 0e2c92e0e532..5f39a3b225ef 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -275,7 +275,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p) return -EBUSY; memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); - dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr); + netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr); /* Set the IDR registers. */ set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); #ifdef EEPROM_WRITE @@ -503,12 +503,12 @@ static void intr_callback(struct urb *urb) if ((d[INT_MSR] & MSR_LINK) == 0) { if (netif_carrier_ok(dev->netdev)) { netif_carrier_off(dev->netdev); - dbg("%s: LINK LOST\n", __func__); + netdev_dbg(dev->netdev, "%s: LINK LOST\n", __func__); } } else { if (!netif_carrier_ok(dev->netdev)) { netif_carrier_on(dev->netdev); - dbg("%s: LINK CAME BACK\n", __func__); + netdev_dbg(dev->netdev, "%s: LINK CAME BACK\n", __func__); } } diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index d75d1f56becf..c27d27701aee 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -68,16 +68,8 @@ static atomic_t iface_counter = ATOMIC_INIT(0); */ #define SIERRA_NET_USBCTL_BUF_LEN 1024 -/* list of interface numbers - used for constructing interface lists */ -struct sierra_net_iface_info { - const u32 infolen; /* number of interface numbers on list */ - const u8 *ifaceinfo; /* pointer to the array holding the numbers */ -}; - -struct sierra_net_info_data { - u16 rx_urb_size; - struct sierra_net_iface_info whitelist; -}; +/* Overriding the default usbnet rx_urb_size */ +#define SIERRA_NET_RX_URB_SIZE (8 * 1024) /* Private data structure */ struct sierra_net_data { @@ -567,7 +559,7 @@ static void sierra_net_defer_kevent(struct usbnet *dev, int work) /* * Sync Retransmit Timer Handler. 
On expiry, kick the work queue */ -void sierra_sync_timer(unsigned long syncdata) +static void sierra_sync_timer(unsigned long syncdata) { struct usbnet *dev = (struct usbnet *)syncdata; @@ -637,21 +629,6 @@ static int sierra_net_change_mtu(struct net_device *net, int new_mtu) return usbnet_change_mtu(net, new_mtu); } -static int is_whitelisted(const u8 ifnum, - const struct sierra_net_iface_info *whitelist) -{ - if (whitelist) { - const u8 *list = whitelist->ifaceinfo; - int i; - - for (i = 0; i < whitelist->infolen; i++) { - if (list[i] == ifnum) - return 1; - } - } - return 0; -} - static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) { int result = 0; @@ -678,7 +655,7 @@ static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) return -EIO; } - *datap = *attrdata; + *datap = le16_to_cpu(*attrdata); kfree(attrdata); return result; @@ -700,17 +677,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = { 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00}; - struct sierra_net_info_data *data = - (struct sierra_net_info_data *)dev->driver_info->data; - dev_dbg(&dev->udev->dev, "%s", __func__); ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; - /* We only accept certain interfaces */ - if (!is_whitelisted(ifacenum, &data->whitelist)) { - dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum); - return -ENODEV; - } numendpoints = intf->cur_altsetting->desc.bNumEndpoints; /* We have three endpoints, bulk in and out, and a status */ if (numendpoints != 3) { @@ -752,9 +721,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) sierra_net_set_ctx_index(priv, 0); /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ - dev->rx_urb_size = data->rx_urb_size; + dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE; if (dev->udev->speed != USB_SPEED_HIGH) - dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size); + dev->rx_urb_size = min_t(size_t, 4096, SIERRA_NET_RX_URB_SIZE); dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN; dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; @@ -869,7 +838,7 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) netdev_err(dev->net, "HIP/ETH: Invalid pkt\n"); dev->net->stats.rx_frame_errors++; - /* dev->net->stats.rx_errors incremented by caller */; + /* dev->net->stats.rx_errors incremented by caller */ return 0; } @@ -893,8 +862,8 @@ static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb) } /* ---------------------------- Transmit data path ----------------------*/ -struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, - gfp_t flags) +static struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, + struct sk_buff *skb, gfp_t flags) { struct sierra_net_data *priv = sierra_net_get_private(dev); u16 len; @@ -945,15 +914,6 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, return NULL; } -static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; -static const struct sierra_net_info_data sierra_net_info_data_direct_ip = { - .rx_urb_size = 8 * 1024, - .whitelist = { - .infolen = ARRAY_SIZE(sierra_net_ifnum_list), - .ifaceinfo = sierra_net_ifnum_list - } -}; - static const struct driver_info sierra_net_info_direct_ip = { .description = "Sierra Wireless USB-to-WWAN Modem", .flags = FLAG_WWAN | FLAG_SEND_ZLP, @@ -962,18 +922,21 @@ static const struct driver_info sierra_net_info_direct_ip = { .status = sierra_net_status, .rx_fixup = 
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8531c1caac28..fc9f578a1e25 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1201,19 +1201,26 @@ deferred:
 }
 EXPORT_SYMBOL_GPL(usbnet_start_xmit);
 
-static void rx_alloc_submit(struct usbnet *dev, gfp_t flags)
+static int rx_alloc_submit(struct usbnet *dev, gfp_t flags)
 {
 	struct urb	*urb;
 	int		i;
+	int		ret = 0;
 
 	/* don't refill the queue all at once */
 	for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
 		urb = usb_alloc_urb(0, flags);
 		if (urb != NULL) {
-			if (rx_submit(dev, urb, flags) == -ENOLINK)
-				return;
+			ret = rx_submit(dev, urb, flags);
+			if (ret)
+				goto err;
+		} else {
+			ret = -ENOMEM;
+			goto err;
 		}
 	}
+err:
+	return ret;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -1257,7 +1264,8 @@ static void usbnet_bh (unsigned long param)
 		int	temp = dev->rxq.qlen;
 
 		if (temp < RX_QLEN(dev)) {
-			rx_alloc_submit(dev, GFP_ATOMIC);
+			if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK)
+				return;
 			if (temp != dev->rxq.qlen)
 				netif_dbg(dev, link, dev->net,
 					  "rxqlen %d --> %d\n",
@@ -1573,7 +1581,7 @@ int usbnet_resume (struct usb_interface *intf)
 		    netif_device_present(dev->net) &&
 		    !timer_pending(&dev->delay) &&
 		    !test_bit(EVENT_RX_HALT, &dev->flags))
-			rx_alloc_submit(dev, GFP_KERNEL);
+			rx_alloc_submit(dev, GFP_NOIO);
 
 		if (!(dev->txq.qlen >= TX_QLEN(dev)))
 			netif_tx_wake_all_queues(dev->net);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5852361032c4..e522ff70444c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -348,6 +348,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 	if (tbp[IFLA_ADDRESS] == NULL)
 		eth_hw_addr_random(peer);
 
+	if (ifmp && (dev->ifindex != 0))
+		peer->ifindex = ifmp->ifi_index;
+
 	err = register_netdevice(peer);
 	put_net(net);
 	net = NULL;
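[Editor's sketch] The usbnet hunks above turn rx_alloc_submit() from void into int so callers can tell a fatal submit error such as -ENOLINK (treated by usbnet_bh() as "stop refilling") apart from silent failure, and an allocation failure now surfaces as -ENOMEM. A standalone sketch of that error-propagation shape; example_submit() and example_refill() are hypothetical stand-ins, not usbnet API:

#include <linux/errno.h>
#include <linux/gfp.h>

extern int example_submit(gfp_t flags);	/* stand-in for rx_submit() */

/* Stop refilling on the first error and report it to the caller,
 * rather than swallowing allocation/submission failures.
 */
static int example_refill(int budget, gfp_t flags)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = example_submit(flags);
		if (ret)	/* includes fatal -ENOLINK: caller decides */
			break;
	}
	return ret;
}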
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 83d2b0c34c5e..81a64c58e8ad 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -993,7 +993,7 @@ static void virtnet_config_changed_work(struct work_struct *work)
 		goto done;
 
 	if (v & VIRTIO_NET_S_ANNOUNCE) {
-		netif_notify_peers(vi->dev);
+		netdev_notify_peers(vi->dev);
 		virtnet_ack_link_announce(vi);
 	}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 93e0cfb739b8..ce9d4f2c9776 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -3019,6 +3019,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	netdev->watchdog_timeo = 5 * HZ;
 
 	INIT_WORK(&adapter->work, vmxnet3_reset_work);
+	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
 
 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
 		int i;
@@ -3043,7 +3044,6 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 		goto err_register;
 	}
 
-	set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
 	vmxnet3_check_link(adapter, false);
 	atomic_inc(&devices_found);
 	return 0;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 9eb6479306d6..ef36cafd44b7 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -774,14 +774,15 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev,
 	}
 	/* Global interrupt queue */
 	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
+
+	rc = -ENOMEM;
+
 	priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev,
 		IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma);
 	if (!priv->iqcfg)
 		goto err_free_irq_5;
 	writel(priv->iqcfg_dma, ioaddr + IQCFG);
 
-	rc = -ENOMEM;
-
 	/*
 	 * SCC 0-3 private rx/tx irq structures
 	 * IQRX/TXi needs to be set soon. Learned it the hard way...
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index aaaca9aa2293..3f575afd8cfc 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -10,6 +10,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/module.h>
 #include <linux/bitops.h>
 #include <linux/cdev.h>
 #include <linux/dma-mapping.h>
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 025426132754..9c34d2fccfac 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -222,7 +222,6 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
 	struct sk_buff *skb;
 	const struct i2400m_tlv_detailed_device_info *ddi;
 	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
-	const unsigned char zeromac[ETH_ALEN] = { 0 };
 
 	d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
 	skb = i2400m_get_device_info(i2400m);
@@ -244,7 +243,7 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
 		 "to that of boot mode's\n");
 	dev_warn(dev, "device reports %pM\n", ddi->mac_address);
 	dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
-	if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
+	if (is_zero_ether_addr(ddi->mac_address))
 		dev_err(dev, "device reports an invalid MAC address, "
 			"not updating\n");
 	else {
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 283237f6f074..def12b38cbf7 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -326,8 +326,10 @@ int i2400m_barker_db_init(const char *_options)
 		unsigned barker;
 
 		options_orig = kstrdup(_options, GFP_KERNEL);
-		if (options_orig == NULL)
+		if (options_orig == NULL) {
+			result = -ENOMEM;
 			goto error_parse;
+		}
 		options = options_orig;
 
 		while ((token = strsep(&options, ",")) != NULL) {
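[Editor's sketch] The i2400m driver.c hunk above replaces an open-coded memcmp() against a local all-zero buffer with is_zero_ether_addr() from etherdevice.h, dropping the stack array entirely. A minimal sketch of the same check; the wrapper function is illustrative, not from the patch:

#include <linux/etherdevice.h>

/* is_zero_ether_addr() tests for the all-zeros address that is never
 * a valid station address, mirroring the i2400m hunk above; the
 * related is_valid_ether_addr() also rejects multicast addresses.
 */
static bool example_addr_usable(const u8 *mac)
{
	return !is_zero_ether_addr(mac);
}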
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index ac1eda64739d..3cd05a7173f6 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -231,8 +231,10 @@ static int adhoc;
 
 static int probe = 1;
 
+static kuid_t proc_kuid;
 static int proc_uid /* = 0 */;
 
+static kgid_t proc_kgid;
 static int proc_gid /* = 0 */;
 
 static int airo_perm = 0555;
@@ -4498,78 +4500,79 @@ struct proc_data {
 static int setup_proc_entry( struct net_device *dev,
 			     struct airo_info *apriv ) {
 	struct proc_dir_entry *entry;
+
 	/* First setup the device directory */
 	strcpy(apriv->proc_name,dev->name);
 	apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm,
 					    airo_entry);
 	if (!apriv->proc_entry)
 		goto fail;
-	apriv->proc_entry->uid = proc_uid;
-	apriv->proc_entry->gid = proc_gid;
+	apriv->proc_entry->uid = proc_kuid;
+	apriv->proc_entry->gid = proc_kgid;
 
 	/* Setup the StatsDelta */
 	entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm,
 				 apriv->proc_entry, &proc_statsdelta_ops, dev);
 	if (!entry)
 		goto fail_stats_delta;
-	entry->uid = proc_uid;
-	entry->gid = proc_gid;
+	entry->uid = proc_kuid;
+	entry->gid = proc_kgid;
 
 	/* Setup the Stats */
 	entry = proc_create_data("Stats", S_IRUGO & proc_perm,
 				 apriv->proc_entry, &proc_stats_ops, dev);
 	if (!entry)
 		goto fail_stats;
-	entry->uid = proc_uid;
-	entry->gid = proc_gid;
+	entry->uid = proc_kuid;
+	entry->gid = proc_kgid;
 
 	/* Setup the Status */
 	entry = proc_create_data("Status", S_IRUGO & proc_perm,
 				 apriv->proc_entry, &proc_status_ops, dev);
 	if (!entry)
 		goto fail_status;
-	entry->uid = proc_uid;
-	entry->gid = proc_gid;
+	entry->uid = proc_kuid;
+	entry->gid = proc_kgid;
 
 	/* Setup the Config */
 	entry = proc_create_data("Config", proc_perm,
 				 apriv->proc_entry, &proc_config_ops, dev);
 	if (!entry)
 		goto fail_config;
-	entry->uid = proc_uid;
-	entry->gid = proc_gid;
+	entry->uid = proc_kuid;
+	entry->gid = proc_kgid;
 
 	/* Setup the SSID */
 	entry = proc_create_data("SSID", proc_perm,
 				 apriv->proc_entry, &proc_SSID_ops, dev);
 	if (!entry)
 		goto fail_ssid;
-	entry->uid = proc_uid;
-	entry->gid = proc_gid;
+	entry->uid = proc_kuid;
+	entry->gid = proc_kgid;
 
 	/* Setup the APList */
 	entry = proc_create_data("APList", proc_perm,
 				 apriv->proc_entry, &proc_APList_ops, dev);
 	if (!entry)
 		goto fail_aplist;
-	entry->uid = proc_uid;
-	entry->gid = proc_gid;
+	entry->uid = proc_kuid;
+	entry->gid = proc_kgid;
 
 	/* Setup the BSSList */
 	entry = proc_create_data("BSSList", proc_perm,
 				 apriv->proc_entry, &proc_BSSList_ops, dev);
 	if (!entry)
 		goto fail_bsslist;
-	entry->uid = proc_uid;
-	entry->gid = proc_gid;
+	entry->uid = proc_kuid;
+	entry->gid = proc_kgid;
 
 	/* Setup the WepKey */
 	entry = proc_create_data("WepKey", proc_perm,
 				 apriv->proc_entry, &proc_wepkey_ops, dev);
 	if (!entry)
 		goto fail_wepkey;
-	entry->uid = proc_uid;
-	entry->gid = proc_gid;
+	entry->uid = proc_kuid;
+	entry->gid = proc_kgid;
 
 	return 0;
 
@@ -5696,11 +5699,16 @@ static int __init airo_init_module( void )
 {
 	int i;
 
+	proc_kuid = make_kuid(&init_user_ns, proc_uid);
+	proc_kgid = make_kgid(&init_user_ns, proc_gid);
+	if (!uid_valid(proc_kuid) || !gid_valid(proc_kgid))
+		return -EINVAL;
+
 	airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL);
 
 	if (airo_entry) {
-		airo_entry->uid = proc_uid;
-		airo_entry->gid = proc_gid;
+		airo_entry->uid = proc_kuid;
+		airo_entry->gid = proc_kgid;
 	}
 
 	for (i = 0; i < 4 && io[i] && irq[i]; i++) {
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 55a77e41170a..27980778d992 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -10,6 +10,7 @@
 #include <linux/netdevice.h>
 #include <linux/slab.h>
 #include <linux/usb.h>
+#include <linux/olpc-ec.h>
 
 #ifdef CONFIG_OLPC
 #include <asm/olpc.h>
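[Editor's sketch] The airo.c hunks above convert the raw integer uid/gid module parameters into typed kuid_t/kgid_t values before they are stored in proc entries, and refuse to load if the mapping into the initial user namespace is invalid. A reduced sketch of that conversion-and-validation step; the variable and function names are illustrative:

#include <linux/errno.h>
#include <linux/uidgid.h>
#include <linux/user_namespace.h>

static int example_uid;	/* raw value, e.g. from a module parameter */
static int example_gid;

/* Map the raw ids into the initial user namespace and fail module
 * init when the mapping does not exist, the shape of the airo change.
 */
static int example_map_ids(kuid_t *kuid, kgid_t *kgid)
{
	*kuid = make_kuid(&init_user_ns, example_uid);
	*kgid = make_kgid(&init_user_ns, example_gid);
	if (!uid_valid(*kuid) || !gid_valid(*kgid))
		return -EINVAL;
	return 0;
}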
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index e603adbfb985..429ca3215fdb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -38,7 +38,7 @@ MODULE_AUTHOR("Jouni Malinen");
 MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
 MODULE_LICENSE("GPL");
 
-static u32 wmediumd_pid;
+static u32 wmediumd_portid;
 
 static int radios = 2;
 module_param(radios, int, 0444);
@@ -545,7 +545,7 @@ static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
 
 static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
 				       struct sk_buff *my_skb,
-				       int dst_pid)
+				       int dst_portid)
 {
 	struct sk_buff *skb;
 	struct mac80211_hwsim_data *data = hw->priv;
@@ -619,7 +619,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
 		goto nla_put_failure;
 
 	genlmsg_end(skb, msg_head);
-	genlmsg_unicast(&init_net, skb, dst_pid);
+	genlmsg_unicast(&init_net, skb, dst_portid);
 
 	/* Enqueue the packet */
 	skb_queue_tail(&data->pending, my_skb);
@@ -715,7 +715,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
 {
 	bool ack;
 	struct ieee80211_tx_info *txi;
-	u32 _pid;
+	u32 _portid;
 
 	mac80211_hwsim_monitor_rx(hw, skb);
 
@@ -726,10 +726,10 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
 	}
 
 	/* wmediumd mode check */
-	_pid = ACCESS_ONCE(wmediumd_pid);
+	_portid = ACCESS_ONCE(wmediumd_portid);
 
-	if (_pid)
-		return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+	if (_portid)
+		return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
 
 	/* NO wmediumd detected, perfect medium simulation */
 	ack = mac80211_hwsim_tx_frame_no_nl(hw, skb);
@@ -814,7 +814,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
 	struct ieee80211_hw *hw = arg;
 	struct sk_buff *skb;
 	struct ieee80211_tx_info *info;
-	u32 _pid;
+	u32 _portid;
 
 	hwsim_check_magic(vif);
 
@@ -831,10 +831,10 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
 	mac80211_hwsim_monitor_rx(hw, skb);
 
 	/* wmediumd mode check */
-	_pid = ACCESS_ONCE(wmediumd_pid);
+	_portid = ACCESS_ONCE(wmediumd_portid);
 
-	if (_pid)
-		return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
+	if (_portid)
+		return mac80211_hwsim_tx_frame_nl(hw, skb, _portid);
 
 	mac80211_hwsim_tx_frame_no_nl(hw, skb);
 	dev_kfree_skb(skb);
@@ -1315,7 +1315,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
 	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
 	struct sk_buff *skb;
 	struct ieee80211_pspoll *pspoll;
-	u32 _pid;
+	u32 _portid;
 
 	if (!vp->assoc)
 		return;
@@ -1336,10 +1336,10 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
 	memcpy(pspoll->ta, mac, ETH_ALEN);
 
 	/* wmediumd mode check */
-	_pid = ACCESS_ONCE(wmediumd_pid);
+	_portid = ACCESS_ONCE(wmediumd_portid);
 
-	if (_pid)
-		return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+	if (_portid)
+		return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
 
 	if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
 		printk(KERN_DEBUG "%s: PS-poll frame not ack'ed\n", __func__);
@@ -1353,7 +1353,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
 	struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
 	struct sk_buff *skb;
 	struct ieee80211_hdr *hdr;
-	u32 _pid;
+	u32 _portid;
 
 	if (!vp->assoc)
 		return;
@@ -1375,10 +1375,10 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
 	memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
 
 	/* wmediumd mode check */
-	_pid = ACCESS_ONCE(wmediumd_pid);
+	_portid = ACCESS_ONCE(wmediumd_portid);
 
-	if (_pid)
-		return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
+	if (_portid)
+		return mac80211_hwsim_tx_frame_nl(data->hw, skb, _portid);
 
 	if (!mac80211_hwsim_tx_frame_no_nl(data->hw, skb))
 		printk(KERN_DEBUG "%s: nullfunc frame not ack'ed\n", __func__);
@@ -1632,10 +1632,10 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
 	if (info == NULL)
 		goto out;
 
-	wmediumd_pid = info->snd_pid;
+	wmediumd_portid = info->snd_portid;
 
 	printk(KERN_DEBUG "mac80211_hwsim: received a REGISTER, "
-	       "switching to wmediumd mode with pid %d\n", info->snd_pid);
+	       "switching to wmediumd mode with pid %d\n", info->snd_portid);
 
 	return 0;
 out:
@@ -1672,10 +1672,10 @@ static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
 	if (state != NETLINK_URELEASE)
 		return NOTIFY_DONE;
 
-	if (notify->pid == wmediumd_pid) {
+	if (notify->portid == wmediumd_portid) {
 		printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink"
 		       " socket, switching to perfect channel medium\n");
-		wmediumd_pid = 0;
+		wmediumd_portid = 0;
 	}
 	return NOTIFY_DONE;
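[Editor's sketch] The hwsim diff above is part of the tree-wide netlink rename from "pid" to "portid": the value identifies a netlink socket, not a process. Note the recurring ACCESS_ONCE() pattern, which snapshots the shared variable into a local so the NULL test and the use cannot observe two different values if wmediumd unregisters concurrently. A compact sketch of that pattern; all names below are hypothetical:

#include <linux/compiler.h>
#include <linux/types.h>

static u32 example_portid;	/* cleared from another context */

extern void example_unicast(u32 portid);
extern void example_broadcast(void);

/* Read the shared variable exactly once; testing and dereferencing
 * via two separate reads could race with example_portid being reset.
 */
static void example_deliver(void)
{
	u32 portid = ACCESS_ONCE(example_portid);

	if (portid)
		example_unicast(portid);
	else
		example_broadcast();
}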
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 7f207b6e9552..effb044a8a9d 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -42,7 +42,7 @@ MODULE_FIRMWARE("isl3887usb");
  * whenever you add a new device.
  */
 
-static struct usb_device_id p54u_table[] __devinitdata = {
+static struct usb_device_id p54u_table[] = {
 	/* Version 1 devices (pci chip + net2280) */
 	{USB_DEVICE(0x0411, 0x0050)},	/* Buffalo WLI2-USB2-G54 */
 	{USB_DEVICE(0x045e, 0x00c2)},	/* Microsoft MN-710 */
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 05d8ca045afd..7811b6315973 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
 MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver");
 MODULE_LICENSE("GPL");
 
-static struct usb_device_id rtl8187_table[] = {
+static struct usb_device_id rtl8187_table[] = {
 	/* Asus */
 	{USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187},
 	/* Belkin */
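[Editor's sketch] The p54usb and rtl8187 hunks above drop __devinitdata from the USB id tables, likely as part of the cleanup retiring the __devinit* annotations: a table that probe() may consult at any hot-plug time must not live in memory that can be discarded after init. A minimal sketch of the safe form; the table name and IDs are placeholders:

#include <linux/module.h>
#include <linux/usb.h>

/* Keep id tables in ordinary data (const where the driver allows):
 * probe() can run long after init sections have been freed, so
 * __devinitdata is unsafe here.
 */
static const struct usb_device_id example_table[] = {
	{ USB_DEVICE(0x0411, 0x0050) },	/* placeholder entry */
	{ }
};
MODULE_DEVICE_TABLE(usb, example_table);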
*/ skb->protocol = eth_type_trans(skb, dev); @@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget) struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; - unsigned int len; int err; spin_lock(&np->rx_lock); @@ -955,24 +947,13 @@ err: } } - NETFRONT_SKB_CB(skb)->page = - skb_frag_page(&skb_shinfo(skb)->frags[0]); - NETFRONT_SKB_CB(skb)->offset = rx->offset; - - len = rx->status; - if (len > RX_COPY_THRESHOLD) - len = RX_COPY_THRESHOLD; - skb_put(skb, len); + NETFRONT_SKB_CB(skb)->pull_to = rx->status; + if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) + NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; - if (rx->status > len) { - skb_shinfo(skb)->frags[0].page_offset = - rx->offset + len; - skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len); - skb->data_len = rx->status - len; - } else { - __skb_fill_page_desc(skb, 0, NULL, 0, 0); - skb_shinfo(skb)->nr_frags = 0; - } + skb_shinfo(skb)->frags[0].page_offset = rx->offset; + skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); + skb->data_len = rx->status; i = xennet_fill_frags(np, skb, &tmpq); @@ -999,7 +980,7 @@ err: * receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ - skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); + skb->truesize += skb->data_len - RX_COPY_THRESHOLD; skb->len += skb->data_len; if (rx->flags & XEN_NETRXF_csum_blank) @@ -1731,7 +1712,7 @@ static void netback_changed(struct xenbus_device *dev, break; case XenbusStateConnected: - netif_notify_peers(netdev); + netdev_notify_peers(netdev); break; case XenbusStateClosing: |