path: root/include/linux/netdevice.h
author    Chris Zankel <chris@zankel.net>    2014-02-24 12:34:36 +0400
committer Chris Zankel <chris@zankel.net>    2014-02-24 12:34:36 +0400
commit    b3fdfc1b4b641d372e35ced98814289bc60bc5d1 (patch)
tree      5f11d5ba885031dde45690745646519fb887f447 /include/linux/netdevice.h
parent    c0e50d41126e4786d9cf1105bdf783e55c99f915 (diff)
parent    f63b6d7555cd4064554b39da4d44c4cbbc9d6a4a (diff)
download  linux-b3fdfc1b4b641d372e35ced98814289bc60bc5d1.tar.xz
Merge tag 'xtensa-for-next-20140221-1' into for_next
Xtensa fixes for 3.14:

- allow booting xtfpga on boards with new uBoot and >128MBytes memory;
- drop nonexistent GPIO32 support from fsf variant;
- don't select USE_GENERIC_SMP_HELPERS;
- enable common clock framework support, set up ethoc clock on xtfpga;
- wire up sched_setattr and sched_getattr syscalls.

Signed-off-by: Chris Zankel <chris@zankel.net>
Diffstat (limited to 'include/linux/netdevice.h')
-rw-r--r--  include/linux/netdevice.h  190
1 file changed, 150 insertions(+), 40 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d9a550bf3e8e..440a02ee6f92 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -668,15 +668,28 @@ extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
u16 filter_id);
#endif
+#endif /* CONFIG_RPS */
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
+#ifdef CONFIG_RPS
struct rps_map __rcu *rps_map;
struct rps_dev_flow_table __rcu *rps_flow_table;
+#endif
struct kobject kobj;
struct net_device *dev;
} ____cacheline_aligned_in_smp;
-#endif /* CONFIG_RPS */
+
+/*
+ * RX queue sysfs structures and functions.
+ */
+struct rx_queue_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr, char *buf);
+ ssize_t (*store)(struct netdev_rx_queue *queue,
+ struct rx_queue_attribute *attr, const char *buf, size_t len);
+};
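
The rx_queue_attribute wrapper introduced here follows the usual sysfs pattern, except that the callbacks receive the netdev_rx_queue directly. A minimal sketch of a read-only per-queue attribute and the group a driver could hang off the sysfs_rx_queue_group pointer added further down in this patch; all names are hypothetical, not part of the patch:

static ssize_t example_rxq_index_show(struct netdev_rx_queue *queue,
				      struct rx_queue_attribute *attr,
				      char *buf)
{
	/* report which hardware ring this sysfs directory refers to */
	return sprintf(buf, "%u\n", get_netdev_rx_queue_index(queue));
}

static struct rx_queue_attribute example_rxq_index_attr =
	__ATTR(index, S_IRUGO, example_rxq_index_show, NULL);

static struct attribute *example_rxq_attrs[] = {
	&example_rxq_index_attr.attr,
	NULL
};

static const struct attribute_group example_rxq_group = {
	.attrs = example_rxq_attrs,
};

/* a driver would set dev->sysfs_rx_queue_group = &example_rxq_group
 * before register_netdev() so the attributes appear under each rx-<n>/ */
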
#ifdef CONFIG_XPS
/*
@@ -769,7 +782,8 @@ struct netdev_phys_port_id {
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
* Required can not be NULL.
*
- * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
+ * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
+ * void *accel_priv);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
@@ -990,7 +1004,8 @@ struct net_device_ops {
netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
struct net_device *dev);
u16 (*ndo_select_queue)(struct net_device *dev,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ void *accel_priv);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
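
With the extra accel_priv argument, a driver that transmits on behalf of an accelerated upper device (see dev_queue_xmit_accel() below) can pick a queue from that private context and otherwise fall back to the core helper. A hedged sketch with made-up driver names:

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv)
{
	/* accel_priv is only non-NULL when the skb came in through
	 * dev_queue_xmit_accel(); treat it as the forwarding context */
	if (accel_priv)
		return example_fwd_queue(dev, accel_priv);	/* hypothetical */

	/* normal path: let the core hash/priority logic pick the queue */
	return __netdev_pick_tx(dev, skb);
}
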
@@ -1283,6 +1298,9 @@ struct net_device {
#if IS_ENABLED(CONFIG_NET_DSA)
struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
#endif
+#if IS_ENABLED(CONFIG_TIPC)
+ struct tipc_bearer __rcu *tipc_ptr; /* TIPC specific data */
+#endif
void *atalk_ptr; /* AppleTalk link */
struct in_device __rcu *ip_ptr; /* IPv4 specific data */
struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
@@ -1308,7 +1326,7 @@ struct net_device {
unicast) */
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
struct netdev_rx_queue *_rx;
/* Number of RX queues allocated at register_netdev() time */
@@ -1406,7 +1424,7 @@ struct net_device {
union {
void *ml_priv;
struct pcpu_lstats __percpu *lstats; /* loopback stats */
- struct pcpu_tstats __percpu *tstats; /* tunnel stats */
+ struct pcpu_sw_netstats __percpu *tstats;
struct pcpu_dstats __percpu *dstats; /* dummy stats */
struct pcpu_vstats __percpu *vstats; /* veth stats */
};
@@ -1419,6 +1437,8 @@ struct net_device {
struct device dev;
/* space for optional device, statistics, and wireless sysfs groups */
const struct attribute_group *sysfs_groups[4];
+ /* space for optional per-rx queue attributes */
+ const struct attribute_group *sysfs_rx_queue_group;
/* rtnetlink link ops */
const struct rtnl_link_ops *rtnl_link_ops;
@@ -1441,7 +1461,7 @@ struct net_device {
/* max exchange id for FCoE LRO by ddp */
unsigned int fcoe_ddp_xid;
#endif
-#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map __rcu *priomap;
#endif
/* phy device may attach itself for hardware timestamping */
@@ -1529,7 +1549,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
}
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ void *accel_priv);
u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
/*
@@ -1629,7 +1650,10 @@ struct napi_gro_cb {
int data_offset;
/* This is non-zero if the packet cannot be merged with the new skb. */
- int flush;
+ u16 flush;
+
+ /* Save the IP ID here and check when we get to the transport layer */
+ u16 flush_id;
/* Number of segments aggregated. */
u16 count;
@@ -1646,7 +1670,13 @@ struct napi_gro_cb {
unsigned long age;
/* Used in ipv6_gro_receive() */
- int proto;
+ u16 proto;
+
+ /* Used in udp_gro_receive */
+ u16 udp_mark;
+
+ /* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ __wsum csum;
/* used in skb_gro_receive() slow path */
struct sk_buff *last;
@@ -1673,7 +1703,7 @@ struct offload_callbacks {
int (*gso_send_check)(struct sk_buff *skb);
struct sk_buff **(*gro_receive)(struct sk_buff **head,
struct sk_buff *skb);
- int (*gro_complete)(struct sk_buff *skb);
+ int (*gro_complete)(struct sk_buff *skb, int nhoff);
};
struct packet_offload {
@@ -1682,6 +1712,20 @@ struct packet_offload {
struct list_head list;
};
+struct udp_offload {
+ __be16 port;
+ struct offload_callbacks callbacks;
+};
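
struct udp_offload lets a UDP-encapsulated protocol attach GRO handlers to a specific destination port, reusing the same offload_callbacks as an EtherType handler (including the new nhoff argument to gro_complete). A hedged sketch of an instance; the callbacks are placeholders and the registration helper lives outside this header:

static struct udp_offload example_tunnel_offload = {
	.port = 0,	/* set to the htons()'d tunnel port before registration */
	.callbacks = {
		.gro_receive  = example_tunnel_gro_receive,	/* hypothetical */
		.gro_complete = example_tunnel_gro_complete,	/* hypothetical */
	},
};
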
+
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_sw_netstats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
+
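
pcpu_sw_netstats is the generic replacement for the old pcpu_tstats block: frequently updated counters live per CPU and are bumped under u64_stats_sync so that 64-bit reads stay consistent on 32-bit machines. A minimal sketch of the RX-side update, assuming dev->tstats was allocated with alloc_percpu():

static void example_count_rx(struct net_device *dev, struct sk_buff *skb)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);
}
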
#include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update the rtnetlink
@@ -1697,7 +1741,7 @@ struct packet_offload {
#define NETDEV_CHANGE 0x0004 /* Notify device state change */
#define NETDEV_REGISTER 0x0005
#define NETDEV_UNREGISTER 0x0006
-#define NETDEV_CHANGEMTU 0x0007
+#define NETDEV_CHANGEMTU 0x0007 /* notify after mtu change happened */
#define NETDEV_CHANGEADDR 0x0008
#define NETDEV_GOING_DOWN 0x0009
#define NETDEV_CHANGENAME 0x000A
@@ -1713,6 +1757,7 @@ struct packet_offload {
#define NETDEV_JOIN 0x0014
#define NETDEV_CHANGEUPPER 0x0015
#define NETDEV_RESEND_IGMP 0x0016
+#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
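
NETDEV_PRECHANGEMTU is delivered before the MTU is actually updated (NETDEV_CHANGEMTU still follows afterwards), which gives interested subsystems a chance to prepare for, or veto, the change. A hedged sketch of a notifier handling both events; the handler name is made up:

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_PRECHANGEMTU:
		/* dev->mtu still holds the old value at this point */
		break;
	case NETDEV_CHANGEMTU:
		/* dev->mtu has already been updated */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};
/* registered once with register_netdevice_notifier(&example_netdev_nb) */
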
@@ -1738,8 +1783,6 @@ netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
return info->dev;
}
-int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
- struct netdev_notifier_info *info);
int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
@@ -1806,7 +1849,6 @@ void dev_remove_pack(struct packet_type *pt);
void __dev_remove_pack(struct packet_type *pt);
void dev_add_offload(struct packet_offload *po);
void dev_remove_offload(struct packet_offload *po);
-void __dev_remove_offload(struct packet_offload *po);
struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
unsigned short mask);
@@ -1819,6 +1861,7 @@ int dev_close(struct net_device *dev);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct sk_buff *newskb);
int dev_queue_xmit(struct sk_buff *skb);
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
void unregister_netdevice_many(struct list_head *head);
@@ -1891,6 +1934,14 @@ static inline void *skb_gro_network_header(struct sk_buff *skb)
skb_network_offset(skb);
}
+static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
+ const void *start, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
+ csum_partial(start, len, 0));
+}
+
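
skb_gro_postpull_rcsum() keeps a CHECKSUM_COMPLETE value consistent while GRO walks past encapsulation headers: whatever bytes are pulled must also be subtracted from the accumulated checksum. A hedged two-line fragment of how a tunnel gro_receive callback might use it (hdr and hlen stand for the already-parsed outer header and are assumed locals):

	skb_gro_pull(skb, hlen);		/* advance past the outer header */
	skb_gro_postpull_rcsum(skb, hdr, hlen);	/* and discount it from the csum */
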
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
@@ -1912,6 +1963,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
+static inline int dev_rebuild_header(struct sk_buff *skb)
+{
+ const struct net_device *dev = skb->dev;
+
+ if (!dev->header_ops || !dev->header_ops->rebuild)
+ return 0;
+ return dev->header_ops->rebuild(skb);
+}
+
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
@@ -2338,7 +2398,7 @@ static inline bool netif_is_multiqueue(const struct net_device *dev)
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
@@ -2357,7 +2417,7 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
from_dev->real_num_tx_queues);
if (err)
return err;
-#ifdef CONFIG_RPS
+#ifdef CONFIG_SYSFS
return netif_set_real_num_rx_queues(to_dev,
from_dev->real_num_rx_queues);
#else
@@ -2365,20 +2425,67 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
#endif
}
+#ifdef CONFIG_SYSFS
+static inline unsigned int get_netdev_rx_queue_index(
+ struct netdev_rx_queue *queue)
+{
+ struct net_device *dev = queue->dev;
+ int index = queue - dev->_rx;
+
+ BUG_ON(index >= dev->num_rx_queues);
+ return index;
+}
+#endif
+
#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
int netif_get_num_default_rss_queues(void);
-/* Use this variant when it is known for sure that it
- * is executing from hardware interrupt context or with hardware interrupts
- * disabled.
- */
-void dev_kfree_skb_irq(struct sk_buff *skb);
+enum skb_free_reason {
+ SKB_REASON_CONSUMED,
+ SKB_REASON_DROPPED,
+};
+
+void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
+void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
-/* Use this variant in places where it could be invoked
- * from either hardware interrupt or other context, with hardware interrupts
- * either disabled or enabled.
+/*
+ * It is not allowed to call kfree_skb() or consume_skb() from hardware
+ * interrupt context or with hardware interrupts being disabled.
+ * (in_irq() || irqs_disabled())
+ *
+ * We provide four helpers that can be used in following contexts :
+ *
+ * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
+ * replacing kfree_skb(skb)
+ *
+ * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
+ * Typically used in place of consume_skb(skb) in TX completion path
+ *
+ * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
+ * replacing kfree_skb(skb)
+ *
+ * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
+ * and consumed a packet. Used in place of consume_skb(skb)
*/
-void dev_kfree_skb_any(struct sk_buff *skb);
+static inline void dev_kfree_skb_irq(struct sk_buff *skb)
+{
+ __dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
+}
+
+static inline void dev_consume_skb_irq(struct sk_buff *skb)
+{
+ __dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
+}
+
+static inline void dev_kfree_skb_any(struct sk_buff *skb)
+{
+ __dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
+}
+
+static inline void dev_consume_skb_any(struct sk_buff *skb)
+{
+ __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
+}
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
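
Following the rules in the comment above, a TX reclaim routine that may run from hard-irq, NAPI or process context would now free successfully transmitted buffers with dev_consume_skb_any() (so they are not counted as drops) and keep dev_kfree_skb_any() for genuine error paths. A hedged sketch with made-up driver types:

static void example_tx_clean(struct example_ring *ring)	/* hypothetical */
{
	struct sk_buff *skb;

	while ((skb = example_next_done_skb(ring)) != NULL)	/* hypothetical */
		dev_consume_skb_any(skb);	/* transmitted: consumed, not dropped */
}
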
@@ -2387,6 +2494,8 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
gro_result_t napi_gro_frags(struct napi_struct *napi);
+struct packet_offload *gro_find_receive_by_type(__be16 type);
+struct packet_offload *gro_find_complete_by_type(__be16 type);
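
gro_find_receive_by_type()/gro_find_complete_by_type() let a tunnel offload look up the inner EtherType's handlers instead of re-implementing the dispatch. A hedged fragment of how a gro_receive callback might chain to the inner protocol (type, head and skb are assumed to come from the surrounding callback):

	struct packet_offload *ptype;
	struct sk_buff **pp = NULL;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);	/* 'type' is the inner EtherType */
	if (ptype)
		pp = ptype->callbacks.gro_receive(head, skb);
	rcu_read_unlock();
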
static inline void napi_free_frags(struct napi_struct *napi)
{
@@ -2417,7 +2526,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier);
int dev_get_phys_port_id(struct net_device *dev,
struct netdev_phys_port_id *ppid);
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
- struct netdev_queue *txq, void *accel_priv);
+ struct netdev_queue *txq);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
extern int netdev_budget;
@@ -2772,17 +2881,10 @@ int register_netdev(struct net_device *dev);
void unregister_netdev(struct net_device *dev);
/* General hardware address lists handling functions */
-int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type);
-void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
- struct netdev_hw_addr_list *from_list,
- int addr_len, unsigned char addr_type);
int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list, int addr_len);
-void __hw_addr_flush(struct netdev_hw_addr_list *list);
void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
@@ -2790,10 +2892,6 @@ int dev_addr_add(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
int dev_addr_del(struct net_device *dev, const unsigned char *addr,
unsigned char addr_type);
-int dev_addr_add_multiple(struct net_device *to_dev,
- struct net_device *from_dev, unsigned char addr_type);
-int dev_addr_del_multiple(struct net_device *to_dev,
- struct net_device *from_dev, unsigned char addr_type);
void dev_addr_flush(struct net_device *dev);
int dev_addr_init(struct net_device *dev);
@@ -2840,7 +2938,6 @@ extern int weight_p;
extern int bpf_jit_enable;
bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
-bool netdev_has_any_upper_dev(struct net_device *dev);
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
struct list_head **iter);
@@ -2869,6 +2966,7 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
void *netdev_adjacent_get_private(struct list_head *adj_list);
+void *netdev_lower_get_first_private_rcu(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
@@ -2879,8 +2977,7 @@ int netdev_master_upper_dev_link_private(struct net_device *dev,
void *private);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev);
-void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
- struct net_device *lower_dev);
+void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
void *netdev_lower_dev_get_private(struct net_device *dev,
struct net_device *lower_dev);
int skb_checksum_help(struct sk_buff *skb);
@@ -3008,6 +3105,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
+static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
+ int pulled_hlen, u16 mac_offset,
+ int mac_len)
+{
+ skb->protocol = protocol;
+ skb->encapsulation = 1;
+ skb_push(skb, pulled_hlen);
+ skb_reset_transport_header(skb);
+ skb->mac_header = mac_offset;
+ skb->network_header = skb->mac_header + mac_len;
+ skb->mac_len = mac_len;
+}
+
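
skb_gso_error_unwind() restores the outer headers a tunnel segmentation path had already pulled, so that on failure the skb is handed back in the state the caller expects. A hedged fragment of a tunnel gso_segment error path; the local variables (protocol, tnl_hlen, mac_offset, mac_len, features) are assumptions:

	segs = skb_mac_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs))
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, mac_len);
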
static inline bool netif_is_macvlan(struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN;