author    | Johannes Berg <johannes.berg@intel.com> | 2015-06-10 13:44:58 +0300
committer | Johannes Berg <johannes.berg@intel.com> | 2015-06-10 13:45:09 +0300
commit    | 206c59d1d7d42bcafc1d7f1e476e87e4427e2345 (patch)
tree      | a2f99470bd0fe43f5cf57812fca969bb3ca3c451 /include/net
parent    | 5ec596c41bba6f4e3eeef5dc089afc8eaa702a7e (diff)
parent    | c3eee1fb1d308564ada5f7ea57bc51efc6130b37 (diff)
download  | linux-206c59d1d7d42bcafc1d7f1e476e87e4427e2345.tar.xz
Merge remote-tracking branch 'net-next/master' into mac80211-next
Merge back net-next to get wireless driver changes (from Kalle)
to be able to create the API change across all trees properly.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'include/net')
35 files changed, 889 insertions, 420 deletions
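The largest include/net change pulled in by this merge is the replacement of the old flow_keys API (include/net/flow_keys.h, removed in the diff below) with the new flow dissector API (include/net/flow_dissector.h, added below). As context only, and not part of the commit itself, here is a minimal sketch of how a caller might use the new helpers; the function name example_flow_hash is invented for illustration, while skb_flow_dissect_flow_keys() and flow_hash_from_keys() are taken from the header added in this diff.

```c
/*
 * Context-only sketch, not part of this merge: a hypothetical caller of the
 * flow dissector API added in include/net/flow_dissector.h.
 */
#include <linux/skbuff.h>
#include <net/flow_dissector.h>

static u32 example_flow_hash(struct sk_buff *skb)
{
	struct flow_keys keys;

	/* Dissect L3/L4 headers into the control/basic/addrs/ports keys. */
	if (!skb_flow_dissect_flow_keys(skb, &keys))
		return 0;	/* unrecognized protocol, nothing dissected */

	/*
	 * keys.addrs.v4addrs or keys.addrs.v6addrs is valid depending on
	 * keys.control.addr_type; keys.ports holds the transport ports.
	 */
	return flow_hash_from_keys(&keys);
}
```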
diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 80456f72d70a..def59d3a34d5 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -142,6 +142,7 @@ void ipv6_mc_unmap(struct inet6_dev *idev); void ipv6_mc_remap(struct inet6_dev *idev); void ipv6_mc_init_dev(struct inet6_dev *idev); void ipv6_mc_destroy_dev(struct inet6_dev *idev); +int ipv6_mc_check_mld(struct sk_buff *skb, struct sk_buff **skb_trimmed); void addrconf_dad_failure(struct inet6_ifaddr *ifp); bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group, diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index 172632dd9930..db639a4c5ab8 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h @@ -74,7 +74,7 @@ void vsock_pending_work(struct work_struct *work); struct sock *__vsock_create(struct net *net, struct socket *sock, struct sock *parent, - gfp_t priority, unsigned short type); + gfp_t priority, unsigned short type, int kern); /**** TRANSPORT ****/ diff --git a/include/net/bond_options.h b/include/net/bond_options.h index ea6546d2c946..c28aca25320e 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -63,6 +63,9 @@ enum { BOND_OPT_LP_INTERVAL, BOND_OPT_SLAVES, BOND_OPT_TLB_DYNAMIC_LB, + BOND_OPT_AD_ACTOR_SYS_PRIO, + BOND_OPT_AD_ACTOR_SYSTEM, + BOND_OPT_AD_USER_PORT_KEY, BOND_OPT_LAST }; diff --git a/include/net/bonding.h b/include/net/bonding.h index fda6feeb6c1f..20defc0353d1 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -30,13 +30,6 @@ #include <net/bond_alb.h> #include <net/bond_options.h> -#define DRV_VERSION "3.7.1" -#define DRV_RELDATE "April 27, 2011" -#define DRV_NAME "bonding" -#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" - -#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" - #define BOND_MAX_ARP_TARGETS 16 #define BOND_DEFAULT_MIIMON 100 @@ -143,6 +136,9 @@ struct bond_params { int packets_per_slave; int tlb_dynamic_lb; struct reciprocal_value reciprocal_packets_per_slave; + u16 ad_actor_sys_prio; + u16 ad_user_port_key; + u8 ad_actor_system[ETH_ALEN]; }; struct bond_parm_tbl { diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h index eeda67652766..290a9a69af07 100644 --- a/include/net/cfg802154.h +++ b/include/net/cfg802154.h @@ -30,11 +30,13 @@ struct wpan_phy_cca; struct cfg802154_ops { struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, const char *name, + unsigned char name_assign_type, int type); void (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, struct net_device *dev); int (*add_virtual_intf)(struct wpan_phy *wpan_phy, const char *name, + unsigned char name_assign_type, enum nl802154_iftype type, __le64 extended_addr); int (*del_virtual_intf)(struct wpan_phy *wpan_phy, @@ -42,6 +44,8 @@ struct cfg802154_ops { int (*set_channel)(struct wpan_phy *wpan_phy, u8 page, u8 channel); int (*set_cca_mode)(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca); + int (*set_cca_ed_level)(struct wpan_phy *wpan_phy, s32 ed_level); + int (*set_tx_power)(struct wpan_phy *wpan_phy, s32 power); int (*set_pan_id)(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, __le16 pan_id); int (*set_short_addr)(struct wpan_phy *wpan_phy, @@ -59,14 +63,66 @@ struct cfg802154_ops { struct wpan_dev *wpan_dev, bool mode); }; +static inline bool +wpan_phy_supported_bool(bool b, enum nl802154_supported_bool_states st) +{ + switch (st) { + case NL802154_SUPPORTED_BOOL_TRUE: + return b; + case NL802154_SUPPORTED_BOOL_FALSE: + return !b; 
+ case NL802154_SUPPORTED_BOOL_BOTH: + return true; + default: + WARN_ON(1); + } + + return false; +} + +struct wpan_phy_supported { + u32 channels[IEEE802154_MAX_PAGE + 1], + cca_modes, cca_opts, iftypes; + enum nl802154_supported_bool_states lbt; + u8 min_minbe, max_minbe, min_maxbe, max_maxbe, + min_csma_backoffs, max_csma_backoffs; + s8 min_frame_retries, max_frame_retries; + size_t tx_powers_size, cca_ed_levels_size; + const s32 *tx_powers, *cca_ed_levels; +}; + struct wpan_phy_cca { enum nl802154_cca_modes mode; enum nl802154_cca_opts opt; }; -struct wpan_phy { - struct mutex pib_lock; +static inline bool +wpan_phy_cca_cmp(const struct wpan_phy_cca *a, const struct wpan_phy_cca *b) +{ + if (a->mode != b->mode) + return false; + + if (a->mode == NL802154_CCA_ENERGY_CARRIER) + return a->opt == b->opt; + return true; +} + +/** + * @WPAN_PHY_FLAG_TRANSMIT_POWER: Indicates that transceiver will support + * transmit power setting. + * @WPAN_PHY_FLAG_CCA_ED_LEVEL: Indicates that transceiver will support cca ed + * level setting. + * @WPAN_PHY_FLAG_CCA_MODE: Indicates that transceiver will support cca mode + * setting. + */ +enum wpan_phy_flags { + WPAN_PHY_FLAG_TXPOWER = BIT(1), + WPAN_PHY_FLAG_CCA_ED_LEVEL = BIT(2), + WPAN_PHY_FLAG_CCA_MODE = BIT(3), +}; + +struct wpan_phy { /* If multiple wpan_phys are registered and you're handed e.g. * a regular netdev with assigned ieee802154_ptr, you won't * know whether it points to a wpan_phy your driver has registered @@ -75,6 +131,8 @@ struct wpan_phy { */ const void *privid; + u32 flags; + /* * This is a PIB according to 802.15.4-2011. * We do not provide timing-related variables, as they @@ -82,12 +140,14 @@ struct wpan_phy { */ u8 current_channel; u8 current_page; - u32 channels_supported[IEEE802154_MAX_PAGE + 1]; - s8 transmit_power; + struct wpan_phy_supported supported; + /* current transmit_power in mBm */ + s32 transmit_power; struct wpan_phy_cca cca; __le64 perm_extended_addr; + /* current cca ed threshold in mBm */ s32 cca_ed_level; /* PHY depended MAC PIB values */ @@ -119,9 +179,9 @@ struct wpan_dev { __le64 extended_addr; /* MAC BSN field */ - u8 bsn; + atomic_t bsn; /* MAC DSN field */ - u8 dsn; + atomic_t dsn; u8 min_be; u8 max_be; diff --git a/include/net/checksum.h b/include/net/checksum.h index 0a55ac715077..2d1d73cb773e 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -122,7 +122,9 @@ static inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) { - *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from), to)); + __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); + + *sum = csum_fold(csum_add(tmp, (__force __wsum)to)); } /* Implements RFC 1624 (Incremental Internet Checksum) diff --git a/include/net/codel.h b/include/net/codel.h index aeee28081245..267e70210061 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -7,7 +7,7 @@ * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com> * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net> * Copyright (C) 2012 Michael D. 
Taht <dave.taht@bufferbloat.net> - * Copyright (C) 2012 Eric Dumazet <edumazet@google.com> + * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -119,12 +119,16 @@ static inline u32 codel_time_to_us(codel_time_t val) /** * struct codel_params - contains codel parameters * @target: target queue size (in time units) + * @ce_threshold: threshold for marking packets with ECN CE * @interval: width of moving time window + * @mtu: device mtu, or minimal queue backlog in bytes. * @ecn: is Explicit Congestion Notification enabled */ struct codel_params { codel_time_t target; + codel_time_t ce_threshold; codel_time_t interval; + u32 mtu; bool ecn; }; @@ -159,17 +163,24 @@ struct codel_vars { * @maxpacket: largest packet we've seen so far * @drop_count: temp count of dropped packets in dequeue() * ecn_mark: number of packets we ECN marked instead of dropping + * ce_mark: number of packets CE marked because sojourn time was above ce_threshold */ struct codel_stats { u32 maxpacket; u32 drop_count; u32 ecn_mark; + u32 ce_mark; }; -static void codel_params_init(struct codel_params *params) +#define CODEL_DISABLED_THRESHOLD INT_MAX + +static void codel_params_init(struct codel_params *params, + const struct Qdisc *sch) { params->interval = MS2TIME(100); params->target = MS2TIME(5); + params->mtu = psched_mtu(qdisc_dev(sch)); + params->ce_threshold = CODEL_DISABLED_THRESHOLD; params->ecn = false; } @@ -180,7 +191,7 @@ static void codel_vars_init(struct codel_vars *vars) static void codel_stats_init(struct codel_stats *stats) { - stats->maxpacket = 256; + stats->maxpacket = 0; } /* @@ -234,7 +245,7 @@ static bool codel_should_drop(const struct sk_buff *skb, stats->maxpacket = qdisc_pkt_len(skb); if (codel_time_before(vars->ldelay, params->target) || - sch->qstats.backlog <= stats->maxpacket) { + sch->qstats.backlog <= params->mtu) { /* went below - stay below for at least interval */ vars->first_above_time = 0; return false; @@ -350,6 +361,9 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, vars->rec_inv_sqrt); } end: + if (skb && codel_time_after(vars->ldelay, params->ce_threshold) && + INET_ECN_set_ce(skb)) + stats->ce_mark++; return skb; } #endif diff --git a/include/net/dst.h b/include/net/dst.h index 0fb99a26e973..2bc73f8a00a9 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -109,7 +109,6 @@ u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); extern const u32 dst_default_metrics[]; #define DST_METRICS_READ_ONLY 0x1UL -#define DST_METRICS_FORCE_OVERWRITE 0x2UL #define DST_METRICS_FLAGS 0x3UL #define __DST_METRICS_PTR(Y) \ ((u32 *)((Y) & ~DST_METRICS_FLAGS)) @@ -120,11 +119,6 @@ static inline bool dst_metrics_read_only(const struct dst_entry *dst) return dst->_metrics & DST_METRICS_READ_ONLY; } -static inline void dst_metrics_set_force_overwrite(struct dst_entry *dst) -{ - dst->_metrics |= DST_METRICS_FORCE_OVERWRITE; -} - void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old); static inline void dst_destroy_metrics_generic(struct dst_entry *dst) @@ -355,18 +349,6 @@ static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev, __skb_tunnel_rx(skb, dev, net); } -/* Children define the path of the packet through the - * Linux networking. Thus, destinations are stackable. 
- */ - -static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb) -{ - struct dst_entry *child = dst_clone(skb_dst(skb)->child); - - skb_dst_drop(skb); - return child; -} - int dst_discard_sk(struct sock *sk, struct sk_buff *skb); static inline int dst_discard(struct sk_buff *skb) { diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h new file mode 100644 index 000000000000..1a8c22419936 --- /dev/null +++ b/include/net/flow_dissector.h @@ -0,0 +1,220 @@ +#ifndef _NET_FLOW_DISSECTOR_H +#define _NET_FLOW_DISSECTOR_H + +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/in6.h> +#include <uapi/linux/if_ether.h> + +/** + * struct flow_dissector_key_control: + * @thoff: Transport header offset + */ +struct flow_dissector_key_control { + u16 thoff; + u16 addr_type; +}; + +/** + * struct flow_dissector_key_basic: + * @thoff: Transport header offset + * @n_proto: Network header protocol (eg. IPv4/IPv6) + * @ip_proto: Transport header protocol (eg. TCP/UDP) + */ +struct flow_dissector_key_basic { + __be16 n_proto; + u8 ip_proto; + u8 padding; +}; + +struct flow_dissector_key_tags { + u32 vlan_id:12, + flow_label:20; +}; + +struct flow_dissector_key_keyid { + __be32 keyid; +}; + +/** + * struct flow_dissector_key_ipv4_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct flow_dissector_key_ipv4_addrs { + /* (src,dst) must be grouped, in the same way than in IP header */ + __be32 src; + __be32 dst; +}; + +/** + * struct flow_dissector_key_ipv6_addrs: + * @src: source ip address + * @dst: destination ip address + */ +struct flow_dissector_key_ipv6_addrs { + /* (src,dst) must be grouped, in the same way than in IP header */ + struct in6_addr src; + struct in6_addr dst; +}; + +/** + * struct flow_dissector_key_tipc_addrs: + * @srcnode: source node address + */ +struct flow_dissector_key_tipc_addrs { + __be32 srcnode; +}; + +/** + * struct flow_dissector_key_addrs: + * @v4addrs: IPv4 addresses + * @v6addrs: IPv6 addresses + */ +struct flow_dissector_key_addrs { + union { + struct flow_dissector_key_ipv4_addrs v4addrs; + struct flow_dissector_key_ipv6_addrs v6addrs; + struct flow_dissector_key_tipc_addrs tipcaddrs; + }; +}; + +/** + * flow_dissector_key_tp_ports: + * @ports: port numbers of Transport header + * src: source port number + * dst: destination port number + */ +struct flow_dissector_key_ports { + union { + __be32 ports; + struct { + __be16 src; + __be16 dst; + }; + }; +}; + + +/** + * struct flow_dissector_key_eth_addrs: + * @src: source Ethernet address + * @dst: destination Ethernet address + */ +struct flow_dissector_key_eth_addrs { + /* (dst,src) must be grouped, in the same way than in ETH header */ + unsigned char dst[ETH_ALEN]; + unsigned char src[ETH_ALEN]; +}; + +enum flow_dissector_key_id { + FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */ + FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */ + FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */ + FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */ + FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ + FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ + FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ + FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */ + FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */ + FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */ + 
FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */ + + FLOW_DISSECTOR_KEY_MAX, +}; + +struct flow_dissector_key { + enum flow_dissector_key_id key_id; + size_t offset; /* offset of struct flow_dissector_key_* + in target the struct */ +}; + +struct flow_dissector { + unsigned int used_keys; /* each bit repesents presence of one key id */ + unsigned short int offset[FLOW_DISSECTOR_KEY_MAX]; +}; + +void skb_flow_dissector_init(struct flow_dissector *flow_dissector, + const struct flow_dissector_key *key, + unsigned int key_count); + +bool __skb_flow_dissect(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container, + void *data, __be16 proto, int nhoff, int hlen); + +static inline bool skb_flow_dissect(const struct sk_buff *skb, + struct flow_dissector *flow_dissector, + void *target_container) +{ + return __skb_flow_dissect(skb, flow_dissector, target_container, + NULL, 0, 0, 0); +} + +struct flow_keys { + struct flow_dissector_key_control control; +#define FLOW_KEYS_HASH_START_FIELD basic + struct flow_dissector_key_basic basic; + struct flow_dissector_key_tags tags; + struct flow_dissector_key_keyid keyid; + struct flow_dissector_key_ports ports; + struct flow_dissector_key_addrs addrs; +}; + +#define FLOW_KEYS_HASH_OFFSET \ + offsetof(struct flow_keys, FLOW_KEYS_HASH_START_FIELD) + +__be32 flow_get_u32_src(const struct flow_keys *flow); +__be32 flow_get_u32_dst(const struct flow_keys *flow); + +extern struct flow_dissector flow_keys_dissector; +extern struct flow_dissector flow_keys_buf_dissector; + +static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb, + struct flow_keys *flow) +{ + memset(flow, 0, sizeof(*flow)); + return __skb_flow_dissect(skb, &flow_keys_dissector, flow, + NULL, 0, 0, 0); +} + +static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow, + void *data, __be16 proto, + int nhoff, int hlen) +{ + memset(flow, 0, sizeof(*flow)); + return __skb_flow_dissect(NULL, &flow_keys_buf_dissector, flow, + data, proto, nhoff, hlen); +} + +__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, + void *data, int hlen_proto); + +static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, + int thoff, u8 ip_proto) +{ + return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0); +} + +u32 flow_hash_from_keys(struct flow_keys *keys); +void __skb_get_hash(struct sk_buff *skb); +u32 skb_get_poff(const struct sk_buff *skb); +u32 __skb_get_poff(const struct sk_buff *skb, void *data, + const struct flow_keys *keys, int hlen); + +/* struct flow_keys_digest: + * + * This structure is used to hold a digest of the full flow keys. This is a + * larger "hash" of a flow to allow definitively matching specific flows where + * the 32 bit skb->hash is not large enough. The size is limited to 16 bytes so + * that it can by used in CB of skb (see sch_choke for an example). 
+ */ +#define FLOW_KEYS_DIGEST_LEN 16 +struct flow_keys_digest { + u8 data[FLOW_KEYS_DIGEST_LEN]; +}; + +void make_flow_keys_digest(struct flow_keys_digest *digest, + const struct flow_keys *flow); + +#endif diff --git a/include/net/flow_keys.h b/include/net/flow_keys.h deleted file mode 100644 index dc8fd81412bf..000000000000 --- a/include/net/flow_keys.h +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef _NET_FLOW_KEYS_H -#define _NET_FLOW_KEYS_H - -/* struct flow_keys: - * @src: source ip address in case of IPv4 - * For IPv6 it contains 32bit hash of src address - * @dst: destination ip address in case of IPv4 - * For IPv6 it contains 32bit hash of dst address - * @ports: port numbers of Transport header - * port16[0]: src port number - * port16[1]: dst port number - * @thoff: Transport header offset - * @n_proto: Network header protocol (eg. IPv4/IPv6) - * @ip_proto: Transport header protocol (eg. TCP/UDP) - * All the members, except thoff, are in network byte order. - */ -struct flow_keys { - /* (src,dst) must be grouped, in the same way than in IP header */ - __be32 src; - __be32 dst; - union { - __be32 ports; - __be16 port16[2]; - }; - u16 thoff; - __be16 n_proto; - u8 ip_proto; -}; - -bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow, - void *data, __be16 proto, int nhoff, int hlen); -static inline bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow) -{ - return __skb_flow_dissect(skb, flow, NULL, 0, 0, 0); -} -__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto, - void *data, int hlen_proto); -static inline __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto) -{ - return __skb_flow_get_ports(skb, thoff, ip_proto, NULL, 0); -} -u32 flow_hash_from_keys(struct flow_keys *keys); -unsigned int flow_get_hlen(const unsigned char *data, unsigned int max_len, - __be16 protocol); -#endif diff --git a/include/net/geneve.h b/include/net/geneve.h index 14fb8d3390b4..2a0543a1899d 100644 --- a/include/net/geneve.h +++ b/include/net/geneve.h @@ -62,6 +62,11 @@ struct genevehdr { struct geneve_opt options[]; }; +static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb) +{ + return (struct genevehdr *)(udp_hdr(skb) + 1); +} + #ifdef CONFIG_INET struct geneve_sock; diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 94a297052442..0a87975128ec 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -422,16 +422,6 @@ struct ieee802154_mlme_ops { struct ieee802154_mac_params *params); struct ieee802154_llsec_ops *llsec; - - /* The fields below are required. */ - - /* - * FIXME: these should become the part of PIB/MIB interface. 
- * However we still don't have IB interface of any kind - */ - __le16 (*get_pan_id)(const struct net_device *dev); - __le16 (*get_short_addr)(const struct net_device *dev); - u8 (*get_dsn)(const struct net_device *dev); }; static inline struct ieee802154_mlme_ops * @@ -440,10 +430,4 @@ ieee802154_mlme_ops(const struct net_device *dev) return dev->ml_priv; } -static inline struct ieee802154_reduced_mlme_ops * -ieee802154_reduced_mlme_ops(const struct net_device *dev) -{ - return dev->ml_priv; -} - #endif diff --git a/include/net/inet_common.h b/include/net/inet_common.h index 4a92423eefa5..279f83591971 100644 --- a/include/net/inet_common.h +++ b/include/net/inet_common.h @@ -41,7 +41,7 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, static inline void inet_ctl_sock_destroy(struct sock *sk) { - sk_release_kernel(sk); + sock_release(sk->sk_socket); } #endif diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 7b5887cd1172..0320bbb7d7b5 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -98,7 +98,8 @@ struct inet_connection_sock { const struct tcp_congestion_ops *icsk_ca_ops; const struct inet_connection_sock_af_ops *icsk_af_ops; unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu); - __u8 icsk_ca_state:7, + __u8 icsk_ca_state:6, + icsk_ca_setsockopt:1, icsk_ca_dst_locked:1; __u8 icsk_retransmits; __u8 icsk_pending; @@ -129,9 +130,10 @@ struct inet_connection_sock { u32 probe_timestamp; } icsk_mtup; - u32 icsk_ca_priv[16]; u32 icsk_user_timeout; -#define ICSK_CA_PRIV_SIZE (16 * sizeof(u32)) + + u64 icsk_ca_priv[64 / sizeof(u64)]; +#define ICSK_CA_PRIV_SIZE (8 * sizeof(u64)) }; #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ @@ -279,12 +281,6 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk, void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, unsigned long timeout); -static inline void inet_csk_reqsk_queue_removed(struct sock *sk, - struct request_sock *req) -{ - reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); -} - static inline void inet_csk_reqsk_queue_added(struct sock *sk, const unsigned long timeout) { @@ -306,19 +302,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue); } -static inline void inet_csk_reqsk_queue_unlink(struct sock *sk, - struct request_sock *req) -{ - reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req); -} - -static inline void inet_csk_reqsk_queue_drop(struct sock *sk, - struct request_sock *req) -{ - inet_csk_reqsk_queue_unlink(sk, req); - inet_csk_reqsk_queue_removed(sk, req); - reqsk_put(req); -} +void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); void inet_csk_destroy_sock(struct sock *sk); void inet_csk_prepare_forced_close(struct sock *sk); diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 8d1765577acc..e1300b3dd597 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -43,7 +43,7 @@ enum { * @len: total length of the original datagram * @meat: length of received fragments so far * @flags: fragment queue flags - * @max_size: (ipv4 only) maximum received fragment size with IP_DF set + * @max_size: maximum received fragment size * @net: namespace that this frag belongs to */ struct inet_frag_queue { diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 73fe0f9525d9..b73c88a19dd4 100644 --- a/include/net/inet_hashtables.h +++ 
b/include/net/inet_hashtables.h @@ -24,7 +24,6 @@ #include <linux/spinlock.h> #include <linux/types.h> #include <linux/wait.h> -#include <linux/vmalloc.h> #include <net/inet_connection_sock.h> #include <net/inet_sock.h> @@ -148,8 +147,6 @@ struct inet_hashinfo { */ struct inet_listen_hashbucket listening_hash[INET_LHTABLE_SIZE] ____cacheline_aligned_in_smp; - - atomic_t bsockets; }; static inline struct inet_ehash_bucket *inet_ehash_bucket( @@ -166,52 +163,12 @@ static inline spinlock_t *inet_ehash_lockp( return &hashinfo->ehash_locks[hash & hashinfo->ehash_locks_mask]; } -static inline int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) -{ - unsigned int i, size = 256; -#if defined(CONFIG_PROVE_LOCKING) - unsigned int nr_pcpus = 2; -#else - unsigned int nr_pcpus = num_possible_cpus(); -#endif - if (nr_pcpus >= 4) - size = 512; - if (nr_pcpus >= 8) - size = 1024; - if (nr_pcpus >= 16) - size = 2048; - if (nr_pcpus >= 32) - size = 4096; - if (sizeof(spinlock_t) != 0) { -#ifdef CONFIG_NUMA - if (size * sizeof(spinlock_t) > PAGE_SIZE) - hashinfo->ehash_locks = vmalloc(size * sizeof(spinlock_t)); - else -#endif - hashinfo->ehash_locks = kmalloc(size * sizeof(spinlock_t), - GFP_KERNEL); - if (!hashinfo->ehash_locks) - return ENOMEM; - for (i = 0; i < size; i++) - spin_lock_init(&hashinfo->ehash_locks[i]); - } - hashinfo->ehash_locks_mask = size - 1; - return 0; -} +int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo); static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) { - if (hashinfo->ehash_locks) { -#ifdef CONFIG_NUMA - unsigned int size = (hashinfo->ehash_locks_mask + 1) * - sizeof(spinlock_t); - if (size > PAGE_SIZE) - vfree(hashinfo->ehash_locks); - else -#endif - kfree(hashinfo->ehash_locks); - hashinfo->ehash_locks = NULL; - } + kvfree(hashinfo->ehash_locks); + hashinfo->ehash_locks = NULL; } struct inet_bind_bucket * diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index b6c3737da4e9..47eb67b08abd 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -187,6 +187,7 @@ struct inet_sock { transparent:1, mc_all:1, nodefrag:1; + __u8 bind_address_no_port:1; __u8 rcv_tos; __u8 convert_csum; int uc_index; diff --git a/include/net/ip.h b/include/net/ip.h index d14af7edd197..0750a186ea63 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -31,7 +31,7 @@ #include <net/route.h> #include <net/snmp.h> #include <net/flow.h> -#include <net/flow_keys.h> +#include <net/flow_dissector.h> struct sock; @@ -45,6 +45,7 @@ struct inet_skb_parm { #define IPSKB_FRAG_COMPLETE BIT(3) #define IPSKB_REROUTED BIT(4) #define IPSKB_DOREDIRECT BIT(5) +#define IPSKB_FRAG_PMTU BIT(6) u16 frag_max_size; }; @@ -108,9 +109,8 @@ int ip_local_deliver(struct sk_buff *skb); int ip_mr_input(struct sk_buff *skb); int ip_output(struct sock *sk, struct sk_buff *skb); int ip_mc_output(struct sock *sk, struct sk_buff *skb); -int ip_fragment(struct sock *sk, struct sk_buff *skb, - int (*output)(struct sock *, struct sk_buff *)); -int ip_do_nat(struct sk_buff *skb); +int ip_do_fragment(struct sock *sk, struct sk_buff *skb, + int (*output)(struct sock *, struct sk_buff *)); void ip_send_check(struct iphdr *ip); int __ip_local_out(struct sk_buff *skb); int ip_local_out_sk(struct sock *sk, struct sk_buff *skb); @@ -355,15 +355,32 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto) skb->len, proto, 0); } +/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store + * Equivalent to : flow->v4addrs.src = iph->saddr; + * 
flow->v4addrs.dst = iph->daddr; + */ +static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow, + const struct iphdr *iph) +{ + BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) != + offsetof(typeof(flow->addrs), v4addrs.src) + + sizeof(flow->addrs.v4addrs.src)); + memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs)); + flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; +} + static inline void inet_set_txhash(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct flow_keys keys; - keys.src = inet->inet_saddr; - keys.dst = inet->inet_daddr; - keys.port16[0] = inet->inet_sport; - keys.port16[1] = inet->inet_dport; + memset(&keys, 0, sizeof(keys)); + + keys.addrs.v4addrs.src = inet->inet_saddr; + keys.addrs.v4addrs.dst = inet->inet_daddr; + keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; + keys.ports.src = inet->inet_sport; + keys.ports.dst = inet->inet_dport; sk->sk_txhash = flow_hash_from_keys(&keys); } @@ -478,6 +495,16 @@ enum ip_defrag_users { IP_DEFRAG_MACVLAN, }; +/* Return true if the value of 'user' is between 'lower_bond' + * and 'upper_bond' inclusively. + */ +static inline bool ip_defrag_user_in_between(u32 user, + enum ip_defrag_users lower_bond, + enum ip_defrag_users upper_bond) +{ + return user >= lower_bond && user <= upper_bond; +} + int ip_defrag(struct sk_buff *skb, u32 user); #ifdef CONFIG_INET struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user); diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 20e80fa7bbdd..3b76849c190f 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -120,45 +120,19 @@ struct rt6_info { struct rt6key rt6i_src; struct rt6key rt6i_prefsrc; + struct list_head rt6i_uncached; + struct uncached_list *rt6i_uncached_list; + struct inet6_dev *rt6i_idev; - unsigned long _rt6i_peer; + struct rt6_info * __percpu *rt6i_pcpu; u32 rt6i_metric; + u32 rt6i_pmtu; /* more non-fragment space at head required */ unsigned short rt6i_nfheader_len; u8 rt6i_protocol; }; -static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt) -{ - return inetpeer_ptr(rt->_rt6i_peer); -} - -static inline bool rt6_has_peer(struct rt6_info *rt) -{ - return inetpeer_ptr_is_peer(rt->_rt6i_peer); -} - -static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer) -{ - __inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer); -} - -static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer) -{ - return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer); -} - -static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base) -{ - inetpeer_init_ptr(&rt->_rt6i_peer, base); -} - -static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort) -{ - inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer); -} - static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) { return ((struct rt6_info *)dst)->rt6i_idev; @@ -189,13 +163,12 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout) rt0->rt6i_flags |= RTF_EXPIRES; } -static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) +static inline u32 rt6_get_cookie(const struct rt6_info *rt) { - struct dst_entry *new = (struct dst_entry *) from; + if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE)) + rt = (struct rt6_info *)(rt->dst.from); - rt->rt6i_flags &= ~RTF_EXPIRES; - dst_hold(new); - rt->dst.from = new; + return rt->rt6i_node ? 
rt->rt6i_node->fn_sernum : 0; } static inline void ip6_rt_put(struct rt6_info *rt) diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 5e192068e6cb..297629aadb19 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -145,7 +145,7 @@ static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst, #ifdef CONFIG_IPV6_SUBTREES np->saddr_cache = saddr; #endif - np->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; + np->dst_cookie = rt6_get_cookie(rt); } static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, @@ -163,11 +163,14 @@ static inline bool ipv6_unicast_destination(const struct sk_buff *skb) return rt->rt6i_flags & RTF_LOCAL; } -static inline bool ipv6_anycast_destination(const struct sk_buff *skb) +static inline bool ipv6_anycast_destination(const struct dst_entry *dst, + const struct in6_addr *daddr) { - struct rt6_info *rt = (struct rt6_info *) skb_dst(skb); + struct rt6_info *rt = (struct rt6_info *)dst; - return rt->rt6i_flags & RTF_ANYCAST; + return rt->rt6i_flags & RTF_ANYCAST || + (rt->rt6i_dst.plen != 128 && + ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)); } int ip6_fragment(struct sock *sk, struct sk_buff *skb, @@ -194,9 +197,15 @@ static inline bool ip6_sk_ignore_df(const struct sock *sk) inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT; } -static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt) +static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, + struct in6_addr *daddr) { - return &rt->rt6i_gateway; + if (rt->rt6i_flags & RTF_GATEWAY) + return &rt->rt6i_gateway; + else if (unlikely(rt->rt6i_flags & RTF_CACHE)) + return &rt->rt6i_dst.addr; + else + return daddr; } #endif diff --git a/include/net/ipv6.h b/include/net/ipv6.h index eec8ad3c9843..82dbdb092a5d 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -19,7 +19,7 @@ #include <net/if_inet6.h> #include <net/ndisc.h> #include <net/flow.h> -#include <net/flow_keys.h> +#include <net/flow_dissector.h> #include <net/snmp.h> #define SIN6_LEN_RFC2133 24 @@ -239,8 +239,10 @@ struct ip6_flowlabel { struct net *fl_net; }; -#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF) -#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF) +#define IPV6_FLOWINFO_MASK cpu_to_be32(0x0FFFFFFF) +#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF) +#define IPV6_FLOWLABEL_STATELESS_FLAG cpu_to_be32(0x00080000) + #define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK) #define IPV6_TCLASS_SHIFT 20 @@ -669,8 +671,9 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); } -void ipv6_select_ident(struct net *net, struct frag_hdr *fhdr, - struct rt6_info *rt); +__be32 ipv6_select_ident(struct net *net, + const struct in6_addr *daddr, + const struct in6_addr *saddr); void ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb); int ip6_dst_hoplimit(struct dst_entry *dst); @@ -689,6 +692,20 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, return hlimit; } +/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store + * Equivalent to : flow->v6addrs.src = iph->saddr; + * flow->v6addrs.dst = iph->daddr; + */ +static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow, + const struct ipv6hdr *iph) +{ + BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) != + offsetof(typeof(flow->addrs), v6addrs.src) + + sizeof(flow->addrs.v6addrs.src)); + memcpy(&flow->addrs.v6addrs, &iph->saddr, 
sizeof(flow->addrs.v6addrs)); + flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; +} + #if IS_ENABLED(CONFIG_IPV6) static inline void ip6_set_txhash(struct sock *sk) { @@ -696,10 +713,15 @@ static inline void ip6_set_txhash(struct sock *sk) struct ipv6_pinfo *np = inet6_sk(sk); struct flow_keys keys; - keys.src = (__force __be32)ipv6_addr_hash(&np->saddr); - keys.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr); - keys.port16[0] = inet->inet_sport; - keys.port16[1] = inet->inet_dport; + memset(&keys, 0, sizeof(keys)); + + memcpy(&keys.addrs.v6addrs.src, &np->saddr, + sizeof(keys.addrs.v6addrs.src)); + memcpy(&keys.addrs.v6addrs.dst, &sk->sk_v6_daddr, + sizeof(keys.addrs.v6addrs.dst)); + keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + keys.ports.src = inet->inet_sport; + keys.ports.dst = inet->inet_dport; sk->sk_txhash = flow_hash_from_keys(&keys); } @@ -719,6 +741,9 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb, hash ^= hash >> 12; flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK; + + if (net->ipv6.sysctl.flowlabel_state_ranges) + flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG; } return flowlabel; diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index 0134681acc4c..fe994d2e5286 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -96,7 +96,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb) } struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, - struct proto *prot); + struct proto *prot, int kern); void llc_sk_free(struct sock *sk); void llc_sk_reset(struct sock *sk); diff --git a/include/net/mac802154.h b/include/net/mac802154.h index e18e7fd43f47..9605c7f7453f 100644 --- a/include/net/mac802154.h +++ b/include/net/mac802154.h @@ -89,41 +89,26 @@ struct ieee802154_hw { #define IEEE802154_HW_TX_OMIT_CKSUM 0x00000001 /* Indicates that receiver will autorespond with ACK frames. */ #define IEEE802154_HW_AACK 0x00000002 -/* Indicates that transceiver will support transmit power setting. */ -#define IEEE802154_HW_TXPOWER 0x00000004 /* Indicates that transceiver will support listen before transmit. */ -#define IEEE802154_HW_LBT 0x00000008 -/* Indicates that transceiver will support cca mode setting. */ -#define IEEE802154_HW_CCA_MODE 0x00000010 -/* Indicates that transceiver will support cca ed level setting. */ -#define IEEE802154_HW_CCA_ED_LEVEL 0x00000020 +#define IEEE802154_HW_LBT 0x00000004 /* Indicates that transceiver will support csma (max_be, min_be, csma retries) * settings. */ -#define IEEE802154_HW_CSMA_PARAMS 0x00000040 +#define IEEE802154_HW_CSMA_PARAMS 0x00000008 /* Indicates that transceiver will support ARET frame retries setting. */ -#define IEEE802154_HW_FRAME_RETRIES 0x00000080 +#define IEEE802154_HW_FRAME_RETRIES 0x00000010 /* Indicates that transceiver will support hardware address filter setting. */ -#define IEEE802154_HW_AFILT 0x00000100 +#define IEEE802154_HW_AFILT 0x00000020 /* Indicates that transceiver will support promiscuous mode setting. */ -#define IEEE802154_HW_PROMISCUOUS 0x00000200 +#define IEEE802154_HW_PROMISCUOUS 0x00000040 /* Indicates that receiver omits FCS. */ -#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000400 +#define IEEE802154_HW_RX_OMIT_CKSUM 0x00000080 /* Indicates that receiver will not filter frames with bad checksum. */ -#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000800 +#define IEEE802154_HW_RX_DROP_BAD_CKSUM 0x00000100 /* Indicates that receiver omits FCS and xmitter will add FCS on it's own. 
*/ #define IEEE802154_HW_OMIT_CKSUM (IEEE802154_HW_TX_OMIT_CKSUM | \ IEEE802154_HW_RX_OMIT_CKSUM) -/* This groups the most common CSMA support fields into one. */ -#define IEEE802154_HW_CSMA (IEEE802154_HW_CCA_MODE | \ - IEEE802154_HW_CCA_ED_LEVEL | \ - IEEE802154_HW_CSMA_PARAMS) - -/* This groups the most common ARET support fields into one. */ -#define IEEE802154_HW_ARET (IEEE802154_HW_CSMA | \ - IEEE802154_HW_FRAME_RETRIES) - /* struct ieee802154_ops - callbacks from mac802154 to the driver * * This structure contains various callbacks that the driver may @@ -171,7 +156,7 @@ struct ieee802154_hw { * Returns either zero, or negative errno. * * set_txpower: - * Set radio transmit power in dB. Called with pib_lock held. + * Set radio transmit power in mBm. Called with pib_lock held. * Returns either zero, or negative errno. * * set_lbt @@ -184,7 +169,7 @@ struct ieee802154_hw { * Returns either zero, or negative errno. * * set_cca_ed_level - * Sets the CCA energy detection threshold in dBm. Called with pib_lock + * Sets the CCA energy detection threshold in mBm. Called with pib_lock * held. * Returns either zero, or negative errno. * @@ -213,12 +198,11 @@ struct ieee802154_ops { int (*set_hw_addr_filt)(struct ieee802154_hw *hw, struct ieee802154_hw_addr_filt *filt, unsigned long changed); - int (*set_txpower)(struct ieee802154_hw *hw, s8 dbm); + int (*set_txpower)(struct ieee802154_hw *hw, s32 mbm); int (*set_lbt)(struct ieee802154_hw *hw, bool on); int (*set_cca_mode)(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca); - int (*set_cca_ed_level)(struct ieee802154_hw *hw, - s32 level); + int (*set_cca_ed_level)(struct ieee802154_hw *hw, s32 mbm); int (*set_csma_params)(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries); int (*set_frame_retries)(struct ieee802154_hw *hw, @@ -247,19 +231,109 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src) __put_unaligned_memmove64(swab64p(le64_src), be64_dst); } -/* Basic interface to register ieee802154 device */ +/** + * ieee802154_alloc_hw - Allocate a new hardware device + * + * This must be called once for each hardware device. The returned pointer + * must be used to refer to this device when calling other functions. + * mac802154 allocates a private data area for the driver pointed to by + * @priv in &struct ieee802154_hw, the size of this area is given as + * @priv_data_len. + * + * @priv_data_len: length of private data + * @ops: callbacks for this device + * + * Return: A pointer to the new hardware device, or %NULL on error. + */ struct ieee802154_hw * ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops); + +/** + * ieee802154_free_hw - free hardware descriptor + * + * This function frees everything that was allocated, including the + * private data for the driver. You must call ieee802154_unregister_hw() + * before calling this function. + * + * @hw: the hardware to free + */ void ieee802154_free_hw(struct ieee802154_hw *hw); + +/** + * ieee802154_register_hw - Register hardware device + * + * You must call this function before any other functions in + * mac802154. Note that before a hardware can be registered, you + * need to fill the contained wpan_phy's information. + * + * @hw: the device to register as returned by ieee802154_alloc_hw() + * + * Return: 0 on success. An error code otherwise. 
+ */ int ieee802154_register_hw(struct ieee802154_hw *hw); + +/** + * ieee802154_unregister_hw - Unregister a hardware device + * + * This function instructs mac802154 to free allocated resources + * and unregister netdevices from the networking subsystem. + * + * @hw: the hardware to unregister + */ void ieee802154_unregister_hw(struct ieee802154_hw *hw); +/** + * ieee802154_rx - receive frame + * + * Use this function to hand received frames to mac802154. The receive + * buffer in @skb must start with an IEEE 802.15.4 header. In case of a + * paged @skb is used, the driver is recommended to put the ieee802154 + * header of the frame on the linear part of the @skb to avoid memory + * allocation and/or memcpy by the stack. + * + * This function may not be called in IRQ context. Calls to this function + * for a single hardware must be synchronized against each other. + * + * @hw: the hardware this frame came in on + * @skb: the buffer to receive, owned by mac802154 after this call + */ void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb); + +/** + * ieee802154_rx_irqsafe - receive frame + * + * Like ieee802154_rx() but can be called in IRQ context + * (internally defers to a tasklet.) + * + * @hw: the hardware this frame came in on + * @skb: the buffer to receive, owned by mac802154 after this call + * @lqi: link quality indicator + */ void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb, u8 lqi); - +/** + * ieee802154_wake_queue - wake ieee802154 queue + * @hw: pointer as obtained from ieee802154_alloc_hw(). + * + * Drivers should use this function instead of netif_wake_queue. + */ void ieee802154_wake_queue(struct ieee802154_hw *hw); + +/** + * ieee802154_stop_queue - stop ieee802154 queue + * @hw: pointer as obtained from ieee802154_alloc_hw(). + * + * Drivers should use this function instead of netif_stop_queue. + */ void ieee802154_stop_queue(struct ieee802154_hw *hw); + +/** + * ieee802154_xmit_complete - frame transmission complete + * + * @hw: pointer as obtained from ieee802154_alloc_hw(). 
+ * @skb: buffer for transmission + * @ifs_handling: indicate interframe space handling + */ void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb, bool ifs_handling); diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index f733656404de..72eb23723294 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -58,6 +58,7 @@ struct net { struct list_head exit_list; /* Use only net_mutex */ struct user_namespace *user_ns; /* Owning user namespace */ + spinlock_t nsid_lock; struct idr netns_ids; struct ns_common ns; @@ -271,7 +272,9 @@ static inline struct net *read_pnet(const possible_net_t *pnet) #define __net_initconst __initconst #endif +int peernet2id_alloc(struct net *net, struct net *peer); int peernet2id(struct net *net, struct net *peer); +bool peernet_has_id(struct net *net, struct net *peer); struct net *get_net_ns_by_id(struct net *net, int id); struct pernet_operations { diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index e6bcf55dcf20..3d6f48ca40a7 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -819,6 +819,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, * @use: number of chain references to this table * @flags: table flag (see enum nft_table_flags) * @name: name of the table + * @dev: this table is bound to this device (if any) */ struct nft_table { struct list_head list; @@ -828,6 +829,11 @@ struct nft_table { u32 use; u16 flags; char name[NFT_TABLE_MAXNAMELEN]; + struct net_device *dev; +}; + +enum nft_af_flags { + NFT_AF_NEEDS_DEV = (1 << 0), }; /** @@ -838,6 +844,7 @@ struct nft_table { * @nhooks: number of hooks in this family * @owner: module owner * @tables: used internally + * @flags: family flags * @nops: number of hook ops in this family * @hook_ops_init: initialization function for chain hook ops * @hooks: hookfn overrides for packet validation @@ -848,6 +855,7 @@ struct nft_af_info { unsigned int nhooks; struct module *owner; struct list_head tables; + u32 flags; unsigned int nops; void (*hook_ops_init)(struct nf_hook_ops *, unsigned int); diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 614a49be68a9..c68926b4899c 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -19,6 +19,7 @@ struct sock; struct local_ports { seqlock_t lock; int range[2]; + bool warned; }; struct ping_group_range { @@ -77,6 +78,8 @@ struct netns_ipv4 { struct local_ports ip_local_ports; int sysctl_tcp_ecn; + int sysctl_tcp_ecn_fallback; + int sysctl_ip_no_pmtu_disc; int sysctl_ip_fwd_use_pmtu; int sysctl_ip_nonlocal_bind; diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index d2527bf81142..8d93544a2d2b 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -34,6 +34,7 @@ struct netns_sysctl_ipv6 { int fwmark_reflect; int idgen_retries; int idgen_delay; + int flowlabel_state_ranges; }; struct netns_ipv6 { diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index eee608b12cc9..c80781146019 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -13,6 +13,7 @@ struct netns_nftables { struct nft_af_info *inet; struct nft_af_info *arp; struct nft_af_info *bridge; + struct nft_af_info *netdev; unsigned int base_seq; u8 gencursor; }; diff --git a/include/net/nl802154.h b/include/net/nl802154.h index f8b5bc997959..0badebd1de7f 100644 --- a/include/net/nl802154.h +++ b/include/net/nl802154.h @@ -100,6 +100,8 @@ enum nl802154_attrs { 
NL802154_ATTR_EXTENDED_ADDR, + NL802154_ATTR_WPAN_PHY_CAPS, + /* add attributes here, update the policy in nl802154.c */ __NL802154_ATTR_AFTER_LAST, @@ -120,6 +122,61 @@ enum nl802154_iftype { }; /** + * enum nl802154_wpan_phy_capability_attr - wpan phy capability attributes + * + * @__NL802154_CAP_ATTR_INVALID: attribute number 0 is reserved + * @NL802154_CAP_ATTR_CHANNELS: a nested attribute for nl802154_channel_attr + * @NL802154_CAP_ATTR_TX_POWERS: a nested attribute for + * nl802154_wpan_phy_tx_power + * @NL802154_CAP_ATTR_MIN_CCA_ED_LEVEL: minimum value for cca_ed_level + * @NL802154_CAP_ATTR_MAX_CCA_ED_LEVEL: maxmimum value for cca_ed_level + * @NL802154_CAP_ATTR_CCA_MODES: nl802154_cca_modes flags + * @NL802154_CAP_ATTR_CCA_OPTS: nl802154_cca_opts flags + * @NL802154_CAP_ATTR_MIN_MINBE: minimum of minbe value + * @NL802154_CAP_ATTR_MAX_MINBE: maximum of minbe value + * @NL802154_CAP_ATTR_MIN_MAXBE: minimum of maxbe value + * @NL802154_CAP_ATTR_MAX_MINBE: maximum of maxbe value + * @NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS: minimum of csma backoff value + * @NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS: maximum of csma backoffs value + * @NL802154_CAP_ATTR_MIN_FRAME_RETRIES: minimum of frame retries value + * @NL802154_CAP_ATTR_MAX_FRAME_RETRIES: maximum of frame retries value + * @NL802154_CAP_ATTR_IFTYPES: nl802154_iftype flags + * @NL802154_CAP_ATTR_LBT: nl802154_supported_bool_states flags + * @NL802154_CAP_ATTR_MAX: highest cap attribute currently defined + * @__NL802154_CAP_ATTR_AFTER_LAST: internal use + */ +enum nl802154_wpan_phy_capability_attr { + __NL802154_CAP_ATTR_INVALID, + + NL802154_CAP_ATTR_IFTYPES, + + NL802154_CAP_ATTR_CHANNELS, + NL802154_CAP_ATTR_TX_POWERS, + + NL802154_CAP_ATTR_CCA_ED_LEVELS, + NL802154_CAP_ATTR_CCA_MODES, + NL802154_CAP_ATTR_CCA_OPTS, + + NL802154_CAP_ATTR_MIN_MINBE, + NL802154_CAP_ATTR_MAX_MINBE, + + NL802154_CAP_ATTR_MIN_MAXBE, + NL802154_CAP_ATTR_MAX_MAXBE, + + NL802154_CAP_ATTR_MIN_CSMA_BACKOFFS, + NL802154_CAP_ATTR_MAX_CSMA_BACKOFFS, + + NL802154_CAP_ATTR_MIN_FRAME_RETRIES, + NL802154_CAP_ATTR_MAX_FRAME_RETRIES, + + NL802154_CAP_ATTR_LBT, + + /* keep last */ + __NL802154_CAP_ATTR_AFTER_LAST, + NL802154_CAP_ATTR_MAX = __NL802154_CAP_ATTR_AFTER_LAST - 1 +}; + +/** * enum nl802154_cca_modes - cca modes * * @__NL802154_CCA_INVALID: cca mode number 0 is reserved @@ -162,4 +219,26 @@ enum nl802154_cca_opts { NL802154_CCA_OPT_ATTR_MAX = __NL802154_CCA_OPT_ATTR_AFTER_LAST - 1 }; +/** + * enum nl802154_supported_bool_states - bool states for bool capability entry + * + * @NL802154_SUPPORTED_BOOL_FALSE: indicates to set false + * @NL802154_SUPPORTED_BOOL_TRUE: indicates to set true + * @__NL802154_SUPPORTED_BOOL_INVALD: reserved + * @NL802154_SUPPORTED_BOOL_BOTH: indicates to set true and false + * @__NL802154_SUPPORTED_BOOL_AFTER_LAST: Internal + * @NL802154_SUPPORTED_BOOL_MAX: highest value for bool states + */ +enum nl802154_supported_bool_states { + NL802154_SUPPORTED_BOOL_FALSE, + NL802154_SUPPORTED_BOOL_TRUE, + /* to handle them in a mask */ + __NL802154_SUPPORTED_BOOL_INVALD, + NL802154_SUPPORTED_BOOL_BOTH, + + /* keep last */ + __NL802154_SUPPORTED_BOOL_AFTER_LAST, + NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1 +}; + #endif /* __NL802154_H */ diff --git a/include/net/request_sock.h b/include/net/request_sock.h index fe41f3ceb008..87935cad2f7b 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -64,6 +64,7 @@ struct request_sock { struct timer_list rsk_timer; const struct request_sock_ops *rsk_ops; struct 
sock *sk; + u32 *saved_syn; u32 secid; u32 peer_secid; }; @@ -77,7 +78,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener) req->rsk_ops = ops; sock_hold(sk_listener); req->rsk_listener = sk_listener; - + req->saved_syn = NULL; /* Following is temporary. It is coupled with debugging * helpers in reqsk_put() & reqsk_free() */ @@ -104,6 +105,7 @@ static inline void reqsk_free(struct request_sock *req) req->rsk_ops->destructor(req); if (req->rsk_listener) sock_put(req->rsk_listener); + kfree(req->saved_syn); kmem_cache_free(req->rsk_ops->slab, req); } @@ -212,24 +214,6 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue) return queue->rskq_accept_head == NULL; } -static inline void reqsk_queue_unlink(struct request_sock_queue *queue, - struct request_sock *req) -{ - struct listen_sock *lopt = queue->listen_opt; - struct request_sock **prev; - - spin_lock(&queue->syn_wait_lock); - - prev = &lopt->syn_table[req->rsk_hash]; - while (*prev != req) - prev = &(*prev)->dl_next; - *prev = req->dl_next; - - spin_unlock(&queue->syn_wait_lock); - if (del_timer(&req->rsk_timer)) - reqsk_put(req); -} - static inline void reqsk_queue_add(struct request_sock_queue *queue, struct request_sock *req, struct sock *parent, diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 6d778efcfdfd..2738f6f87908 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -501,12 +501,6 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch) return sch->enqueue(skb, sch); } -static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch) -{ - qdisc_skb_cb(skb)->pkt_len = skb->len; - return qdisc_enqueue(skb, sch) & NET_XMIT_MASK; -} - static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) { return q->flags & TCQ_F_CPUSTATS; @@ -745,23 +739,6 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen) return rtab->data[slot]; } -#ifdef CONFIG_NET_CLS_ACT -static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask, - int action) -{ - struct sk_buff *n; - - n = skb_clone(skb, gfp_mask); - - if (n) { - n->tc_verd = SET_TC_VERD(n->tc_verd, 0); - n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd); - n->tc_verd = CLR_TC_MUNGED(n->tc_verd); - } - return n; -} -#endif - struct psched_ratecfg { u64 rate_bytes_ps; /* bytes per second */ u32 mult; diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index c56a438c3a1e..ce13cf20f625 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -574,11 +574,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr) /* Map v4 address to v4-mapped v6 address */ static inline void sctp_v4_map_v6(union sctp_addr *addr) { + __be16 port; + + port = addr->v4.sin_port; + addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr; + addr->v6.sin6_port = port; addr->v6.sin6_family = AF_INET6; addr->v6.sin6_flowinfo = 0; addr->v6.sin6_scope_id = 0; - addr->v6.sin6_port = addr->v4.sin_port; - addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr; addr->v6.sin6_addr.s6_addr32[0] = 0; addr->v6.sin6_addr.s6_addr32[1] = 0; addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); diff --git a/include/net/sock.h b/include/net/sock.h index 3a4898ec8c67..26c1c3171e00 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -184,6 +184,7 @@ struct sock_common { unsigned char skc_reuse:4; unsigned char skc_reuseport:1; unsigned char skc_ipv6only:1; + unsigned char skc_net_refcnt:1; int skc_bound_dev_if; union { struct hlist_node 
skc_bind_node; @@ -323,6 +324,7 @@ struct sock { #define sk_reuse __sk_common.skc_reuse #define sk_reuseport __sk_common.skc_reuseport #define sk_ipv6only __sk_common.skc_ipv6only +#define sk_net_refcnt __sk_common.skc_net_refcnt #define sk_bound_dev_if __sk_common.skc_bound_dev_if #define sk_bind_node __sk_common.skc_bind_node #define sk_prot __sk_common.skc_prot @@ -1366,7 +1368,7 @@ static inline struct inode *SOCK_INODE(struct socket *socket) * Functions for memory accounting */ int __sk_mem_schedule(struct sock *sk, int size, int kind); -void __sk_mem_reclaim(struct sock *sk); +void __sk_mem_reclaim(struct sock *sk, int amount); #define SK_MEM_QUANTUM ((int)PAGE_SIZE) #define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM) @@ -1407,7 +1409,7 @@ static inline void sk_mem_reclaim(struct sock *sk) if (!sk_has_account(sk)) return; if (sk->sk_forward_alloc >= SK_MEM_QUANTUM) - __sk_mem_reclaim(sk); + __sk_mem_reclaim(sk, sk->sk_forward_alloc); } static inline void sk_mem_reclaim_partial(struct sock *sk) @@ -1415,7 +1417,7 @@ static inline void sk_mem_reclaim_partial(struct sock *sk) if (!sk_has_account(sk)) return; if (sk->sk_forward_alloc > SK_MEM_QUANTUM) - __sk_mem_reclaim(sk); + __sk_mem_reclaim(sk, sk->sk_forward_alloc - 1); } static inline void sk_mem_charge(struct sock *sk, int size) @@ -1514,9 +1516,8 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow) struct sock *sk_alloc(struct net *net, int family, gfp_t priority, - struct proto *prot); + struct proto *prot, int kern); void sk_free(struct sock *sk); -void sk_release_kernel(struct sock *sk); struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority); struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, @@ -2024,7 +2025,8 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk) } } -struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp); +struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp, + bool force_schedule); /** * sk_page_frag - return an appropriate page_frag @@ -2192,22 +2194,6 @@ void sock_net_set(struct sock *sk, struct net *net) write_pnet(&sk->sk_net, net); } -/* - * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace. - * They should not hold a reference to a namespace in order to allow - * to stop it. 
- * Sockets after sk_change_net should be released using sk_release_kernel - */ -static inline void sk_change_net(struct sock *sk, struct net *net) -{ - struct net *current_net = sock_net(sk); - - if (!net_eq(current_net, net)) { - put_net(current_net); - sock_net_set(sk, net); - } -} - static inline struct sock *skb_steal_sock(struct sk_buff *skb) { if (skb->sk) { diff --git a/include/net/switchdev.h b/include/net/switchdev.h index d2e69ee3019a..437f8fe75705 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -14,154 +14,261 @@ #include <linux/netdevice.h> #include <linux/notifier.h> +#define SWITCHDEV_F_NO_RECURSE BIT(0) + +enum switchdev_trans { + SWITCHDEV_TRANS_NONE, + SWITCHDEV_TRANS_PREPARE, + SWITCHDEV_TRANS_ABORT, + SWITCHDEV_TRANS_COMMIT, +}; + +enum switchdev_attr_id { + SWITCHDEV_ATTR_UNDEFINED, + SWITCHDEV_ATTR_PORT_PARENT_ID, + SWITCHDEV_ATTR_PORT_STP_STATE, + SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS, +}; + +struct switchdev_attr { + enum switchdev_attr_id id; + enum switchdev_trans trans; + u32 flags; + union { + struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */ + u8 stp_state; /* PORT_STP_STATE */ + unsigned long brport_flags; /* PORT_BRIDGE_FLAGS */ + } u; +}; + struct fib_info; +enum switchdev_obj_id { + SWITCHDEV_OBJ_UNDEFINED, + SWITCHDEV_OBJ_PORT_VLAN, + SWITCHDEV_OBJ_IPV4_FIB, + SWITCHDEV_OBJ_PORT_FDB, +}; + +struct switchdev_obj { + enum switchdev_obj_id id; + enum switchdev_trans trans; + int (*cb)(struct net_device *dev, struct switchdev_obj *obj); + union { + struct switchdev_obj_vlan { /* PORT_VLAN */ + u16 flags; + u16 vid_start; + u16 vid_end; + } vlan; + struct switchdev_obj_ipv4_fib { /* IPV4_FIB */ + u32 dst; + int dst_len; + struct fib_info *fi; + u8 tos; + u8 type; + u32 nlflags; + u32 tb_id; + } ipv4_fib; + struct switchdev_obj_fdb { /* PORT_FDB */ + const unsigned char *addr; + u16 vid; + } fdb; + } u; +}; + /** * struct switchdev_ops - switchdev operations * - * @swdev_parent_id_get: Called to get an ID of the switch chip this port - * is part of. If driver implements this, it indicates that it - * represents a port of a switch chip. + * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr). + * + * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr). * - * @swdev_port_stp_update: Called to notify switch device port of bridge - * port STP state change. + * @switchdev_port_obj_add: Add an object to port (see switchdev_obj). * - * @swdev_fib_ipv4_add: Called to add/modify IPv4 route to switch device. + * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj). * - * @swdev_fib_ipv4_del: Called to delete IPv4 route from switch device. + * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj). 
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d2e69ee3019a..437f8fe75705 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -14,154 +14,261 @@
#include <linux/netdevice.h>
#include <linux/notifier.h>

+#define SWITCHDEV_F_NO_RECURSE		BIT(0)
+
+enum switchdev_trans {
+	SWITCHDEV_TRANS_NONE,
+	SWITCHDEV_TRANS_PREPARE,
+	SWITCHDEV_TRANS_ABORT,
+	SWITCHDEV_TRANS_COMMIT,
+};
+
+enum switchdev_attr_id {
+	SWITCHDEV_ATTR_UNDEFINED,
+	SWITCHDEV_ATTR_PORT_PARENT_ID,
+	SWITCHDEV_ATTR_PORT_STP_STATE,
+	SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
+};
+
+struct switchdev_attr {
+	enum switchdev_attr_id id;
+	enum switchdev_trans trans;
+	u32 flags;
+	union {
+		struct netdev_phys_item_id ppid;	/* PORT_PARENT_ID */
+		u8 stp_state;				/* PORT_STP_STATE */
+		unsigned long brport_flags;		/* PORT_BRIDGE_FLAGS */
+	} u;
+};
+
struct fib_info;

+enum switchdev_obj_id {
+	SWITCHDEV_OBJ_UNDEFINED,
+	SWITCHDEV_OBJ_PORT_VLAN,
+	SWITCHDEV_OBJ_IPV4_FIB,
+	SWITCHDEV_OBJ_PORT_FDB,
+};
+
+struct switchdev_obj {
+	enum switchdev_obj_id id;
+	enum switchdev_trans trans;
+	int (*cb)(struct net_device *dev, struct switchdev_obj *obj);
+	union {
+		struct switchdev_obj_vlan {		/* PORT_VLAN */
+			u16 flags;
+			u16 vid_start;
+			u16 vid_end;
+		} vlan;
+		struct switchdev_obj_ipv4_fib {		/* IPV4_FIB */
+			u32 dst;
+			int dst_len;
+			struct fib_info *fi;
+			u8 tos;
+			u8 type;
+			u32 nlflags;
+			u32 tb_id;
+		} ipv4_fib;
+		struct switchdev_obj_fdb {		/* PORT_FDB */
+			const unsigned char *addr;
+			u16 vid;
+		} fdb;
+	} u;
+};
+
/**
 * struct switchdev_ops - switchdev operations
 *
- * @swdev_parent_id_get: Called to get an ID of the switch chip this port
- *	is part of. If driver implements this, it indicates that it
- *	represents a port of a switch chip.
+ * @switchdev_port_attr_get: Get a port attribute (see switchdev_attr).
+ *
+ * @switchdev_port_attr_set: Set a port attribute (see switchdev_attr).
 *
- * @swdev_port_stp_update: Called to notify switch device port of bridge
- *	port STP state change.
+ * @switchdev_port_obj_add: Add an object to port (see switchdev_obj).
 *
- * @swdev_fib_ipv4_add: Called to add/modify IPv4 route to switch device.
+ * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj).
 *
- * @swdev_fib_ipv4_del: Called to delete IPv4 route from switch device.
+ * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj).
 */
-struct swdev_ops {
-	int	(*swdev_parent_id_get)(struct net_device *dev,
-				       struct netdev_phys_item_id *psid);
-	int	(*swdev_port_stp_update)(struct net_device *dev, u8 state);
-	int	(*swdev_fib_ipv4_add)(struct net_device *dev, __be32 dst,
-				      int dst_len, struct fib_info *fi,
-				      u8 tos, u8 type, u32 nlflags,
-				      u32 tb_id);
-	int	(*swdev_fib_ipv4_del)(struct net_device *dev, __be32 dst,
-				      int dst_len, struct fib_info *fi,
-				      u8 tos, u8 type, u32 tb_id);
+struct switchdev_ops {
+	int	(*switchdev_port_attr_get)(struct net_device *dev,
+					   struct switchdev_attr *attr);
+	int	(*switchdev_port_attr_set)(struct net_device *dev,
+					   struct switchdev_attr *attr);
+	int	(*switchdev_port_obj_add)(struct net_device *dev,
+					  struct switchdev_obj *obj);
+	int	(*switchdev_port_obj_del)(struct net_device *dev,
+					  struct switchdev_obj *obj);
+	int	(*switchdev_port_obj_dump)(struct net_device *dev,
+					   struct switchdev_obj *obj);
};

-enum netdev_switch_notifier_type {
-	NETDEV_SWITCH_FDB_ADD = 1,
-	NETDEV_SWITCH_FDB_DEL,
+enum switchdev_notifier_type {
+	SWITCHDEV_FDB_ADD = 1,
+	SWITCHDEV_FDB_DEL,
};

-struct netdev_switch_notifier_info {
+struct switchdev_notifier_info {
	struct net_device *dev;
};

-struct netdev_switch_notifier_fdb_info {
-	struct netdev_switch_notifier_info info; /* must be first */
+struct switchdev_notifier_fdb_info {
+	struct switchdev_notifier_info info; /* must be first */
	const unsigned char *addr;
	u16 vid;
};

static inline struct net_device *
-netdev_switch_notifier_info_to_dev(const struct netdev_switch_notifier_info *info)
+switchdev_notifier_info_to_dev(const struct switchdev_notifier_info *info)
{
	return info->dev;
}

#ifdef CONFIG_NET_SWITCHDEV

-int netdev_switch_parent_id_get(struct net_device *dev,
-				struct netdev_phys_item_id *psid);
-int netdev_switch_port_stp_update(struct net_device *dev, u8 state);
-int register_netdev_switch_notifier(struct notifier_block *nb);
-int unregister_netdev_switch_notifier(struct notifier_block *nb);
-int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
-				 struct netdev_switch_notifier_info *info);
-int netdev_switch_port_bridge_setlink(struct net_device *dev,
-				      struct nlmsghdr *nlh, u16 flags);
-int netdev_switch_port_bridge_dellink(struct net_device *dev,
-				      struct nlmsghdr *nlh, u16 flags);
-int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
-					       struct nlmsghdr *nlh, u16 flags);
-int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
-					       struct nlmsghdr *nlh, u16 flags);
-int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
-			       u8 tos, u8 type, u32 nlflags, u32 tb_id);
-int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
-			       u8 tos, u8 type, u32 tb_id);
-void netdev_switch_fib_ipv4_abort(struct fib_info *fi);
+int switchdev_port_attr_get(struct net_device *dev,
+			    struct switchdev_attr *attr);
+int switchdev_port_attr_set(struct net_device *dev,
+			    struct switchdev_attr *attr);
+int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj);
+int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj);
+int register_switchdev_notifier(struct notifier_block *nb);
+int unregister_switchdev_notifier(struct notifier_block *nb);
+int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
+			     struct switchdev_notifier_info *info);
+int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+				  struct net_device *dev, u32 filter_mask,
+				  int nlflags);
+int switchdev_port_bridge_setlink(struct net_device *dev,
+				  struct nlmsghdr *nlh, u16 flags);
+int switchdev_port_bridge_dellink(struct net_device *dev,
+				  struct nlmsghdr *nlh, u16 flags);
+int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
+			   u8 tos, u8 type, u32 nlflags, u32 tb_id);
+int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
+			   u8 tos, u8 type, u32 tb_id);
+void switchdev_fib_ipv4_abort(struct fib_info *fi);
+int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+			   struct net_device *dev, const unsigned char *addr,
+			   u16 vid, u16 nlm_flags);
+int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+			   struct net_device *dev, const unsigned char *addr,
+			   u16 vid);
+int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
+			    struct net_device *dev,
+			    struct net_device *filter_dev, int idx);

#else

-static inline int netdev_switch_parent_id_get(struct net_device *dev,
-					      struct netdev_phys_item_id *psid)
+static inline int switchdev_port_attr_get(struct net_device *dev,
+					  struct switchdev_attr *attr)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_attr_set(struct net_device *dev,
+					  struct switchdev_attr *attr)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_obj_add(struct net_device *dev,
+					 struct switchdev_obj *obj)
{
	return -EOPNOTSUPP;
}

-static inline int netdev_switch_port_stp_update(struct net_device *dev,
-						 u8 state)
+static inline int switchdev_port_obj_del(struct net_device *dev,
+					 struct switchdev_obj *obj)
{
	return -EOPNOTSUPP;
}

-static inline int register_netdev_switch_notifier(struct notifier_block *nb)
+static inline int switchdev_port_obj_dump(struct net_device *dev,
+					  struct switchdev_obj *obj)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int register_switchdev_notifier(struct notifier_block *nb)
{
	return 0;
}

-static inline int unregister_netdev_switch_notifier(struct notifier_block *nb)
+static inline int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return 0;
}

-static inline int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
-					       struct netdev_switch_notifier_info *info)
+static inline int call_switchdev_notifiers(unsigned long val,
+					   struct net_device *dev,
+					   struct switchdev_notifier_info *info)
{
	return NOTIFY_DONE;
}

-static inline int netdev_switch_port_bridge_setlink(struct net_device *dev,
-						     struct nlmsghdr *nlh,
-						     u16 flags)
+static inline int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid,
+						u32 seq, struct net_device *dev,
+						u32 filter_mask, int nlflags)
{
	return -EOPNOTSUPP;
}

-static inline int netdev_switch_port_bridge_dellink(struct net_device *dev,
-						     struct nlmsghdr *nlh,
-						     u16 flags)
+static inline int switchdev_port_bridge_setlink(struct net_device *dev,
+						struct nlmsghdr *nlh,
+						u16 flags)
{
	return -EOPNOTSUPP;
}

-static inline int ndo_dflt_netdev_switch_port_bridge_dellink(struct net_device *dev,
-							      struct nlmsghdr *nlh,
-							      u16 flags)
+static inline int switchdev_port_bridge_dellink(struct net_device *dev,
+						struct nlmsghdr *nlh,
+						u16 flags)
{
-	return 0;
+	return -EOPNOTSUPP;
}

-static inline int ndo_dflt_netdev_switch_port_bridge_setlink(struct net_device *dev,
-							      struct nlmsghdr *nlh,
-							      u16 flags)
+static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
+					 struct fib_info *fi,
+					 u8 tos, u8 type,
+					 u32 nlflags, u32 tb_id)
{
	return 0;
}

-static inline int netdev_switch_fib_ipv4_add(u32 dst, int dst_len,
-					     struct fib_info *fi,
-					     u8 tos, u8 type,
-					     u32 nlflags, u32 tb_id)
+static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
+					 struct fib_info *fi,
+					 u8 tos, u8 type, u32 tb_id)
{
	return 0;
}

-static inline int netdev_switch_fib_ipv4_del(u32 dst, int dst_len,
-					     struct fib_info *fi,
-					     u8 tos, u8 type, u32 tb_id)
+static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
{
-	return 0;
}

-static inline void netdev_switch_fib_ipv4_abort(struct fib_info *fi)
+static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+					 struct net_device *dev,
+					 const unsigned char *addr,
+					 u16 vid, u16 nlm_flags)
{
+	return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+					 struct net_device *dev,
+					 const unsigned char *addr, u16 vid)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int switchdev_port_fdb_dump(struct sk_buff *skb,
+					   struct netlink_callback *cb,
+					   struct net_device *dev,
+					   struct net_device *filter_dev,
+					   int idx)
+{
+	return -EOPNOTSUPP;
}

#endif
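Note on the switchdev.h rewrite above: the per-operation hooks are folded into attribute/object get, set, add, del and dump calls. A sketch of how a caller of the removed netdev_switch_parent_id_get() maps onto the new attribute-based API; the wrapper name is made up for illustration:

/* Hypothetical wrapper: fetch the switch ID of a port through
 * switchdev_port_attr_get() instead of the removed
 * netdev_switch_parent_id_get().
 */
static int example_port_parent_id(struct net_device *dev,
				  struct netdev_phys_item_id *psid)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_PORT_PARENT_ID,
	};
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	*psid = attr.u.ppid;	/* PORT_PARENT_ID result lands in u.ppid */
	return 0;
}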
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 051dc5c2802d..978cebedd3fc 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -286,6 +286,14 @@ extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

+/* optimized version of sk_under_memory_pressure() for TCP sockets */
+static inline bool tcp_under_memory_pressure(const struct sock *sk)
+{
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		return !!sk->sk_cgrp->memory_pressure;
+
+	return tcp_memory_pressure;
+}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
@@ -311,6 +319,8 @@ static inline bool tcp_out_of_memory(struct sock *sk)
	return false;
}

+void sk_forced_mem_schedule(struct sock *sk, int size);
+
static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
@@ -326,18 +336,6 @@ static inline bool tcp_too_many_orphans(struct sock *sk, int shift)

bool tcp_check_oom(struct sock *sk, int shift);

-/* syncookies: remember time of last synqueue overflow */
-static inline void tcp_synq_overflow(struct sock *sk)
-{
-	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
-}
-
-/* syncookies: no recent synqueue overflow on this listening socket? */
-static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
-{
-	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
-	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
-}

extern struct proto tcp_prot;

@@ -471,6 +469,9 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
+struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
+				 struct request_sock *req,
+				 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
@@ -483,13 +484,35 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
-#define MAX_SYNCOOKIE_AGE 2
+#define MAX_SYNCOOKIE_AGE	2
+#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
+#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
+
+/* syncookies: remember time of last synqueue overflow
+ * But do not dirty this field too often (once per second is enough)
+ */
+static inline void tcp_synq_overflow(struct sock *sk)
+{
+	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+	unsigned long now = jiffies;
+
+	if (time_after(now, last_overflow + HZ))
+		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
+}
+
+/* syncookies: no recent synqueue overflow on this listening socket? */
+static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
+{
+	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+
+	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

-	do_div(val, 60 * HZ);
+	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

@@ -527,7 +550,7 @@ int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int,
		 gfp_t);
void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
-int tcp_write_wakeup(struct sock *);
+int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
@@ -576,7 +599,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
}

/* tcp.c */
-void tcp_get_info(const struct sock *, struct tcp_info *);
+void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
@@ -692,6 +715,8 @@ static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

+#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
+
/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
@@ -804,6 +829,8 @@ enum tcp_ca_ack_event_flags {
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2

+union tcp_cc_info;
+
struct tcp_congestion_ops {
	struct list_head	list;
	u32 key;
@@ -829,7 +856,8 @@ struct tcp_congestion_ops {
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
-	int (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
+	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
+			   union tcp_cc_info *info);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
@@ -1040,14 +1068,31 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
	return tp->is_cwnd_limited;
}

-static inline void tcp_check_probe_timer(struct sock *sk)
+/* Something is really bad, we could not queue an additional packet,
+ * because qdisc is full or receiver sent a 0 window.
+ * We do not want to add fuel to the fire, or abort too early,
+ * so make sure the timer we arm now is at least 200ms in the future,
+ * regardless of current icsk_rto value (as it could be ~2ms)
+ */
+static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	const struct inet_connection_sock *icsk = inet_csk(sk);
+	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
+}
+
+/* Variant of inet_csk_rto_backoff() used for zero window probes */
+static inline unsigned long tcp_probe0_when(const struct sock *sk,
+					    unsigned long max_when)
+{
+	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
+
+	return (unsigned long)min_t(u64, when, max_when);
+}

-	if (!tp->packets_out && !icsk->icsk_pending)
+static inline void tcp_check_probe_timer(struct sock *sk)
+{
+	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-					  icsk->icsk_rto, TCP_RTO_MAX);
+					  tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
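Note on the tcp.h probe-timer hunk above: the raw icsk_rto timeout is replaced by tcp_probe0_base()/tcp_probe0_when(), so a zero-window probe is never armed sooner than TCP_RTO_MIN. A sketch of how a rearm path might use the new helpers; the function name is illustrative only, not part of this patch:

/* Hypothetical rearm path for the zero-window probe timer: the exponential
 * backoff is applied on top of tcp_probe0_base(), and tcp_probe0_when()
 * clamps the result to the supplied maximum before the timer is reset.
 */
static void example_rearm_probe0(struct sock *sk)
{
	unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
}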