Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c | 3
-rw-r--r--  net/Kconfig | 7
-rw-r--r--  net/atm/clip.c | 6
-rw-r--r--  net/atm/common.c | 2
-rw-r--r--  net/atm/mpc.c | 2
-rw-r--r--  net/bluetooth/6lowpan.c | 229
-rw-r--r--  net/bluetooth/af_bluetooth.c | 3
-rw-r--r--  net/bluetooth/amp.c | 13
-rw-r--r--  net/bluetooth/hci_conn.c | 92
-rw-r--r--  net/bluetooth/hci_core.c | 122
-rw-r--r--  net/bluetooth/hci_event.c | 44
-rw-r--r--  net/bluetooth/hidp/core.c | 10
-rw-r--r--  net/bluetooth/l2cap_core.c | 406
-rw-r--r--  net/bluetooth/l2cap_sock.c | 23
-rw-r--r--  net/bluetooth/lib.c | 14
-rw-r--r--  net/bluetooth/mgmt.c | 161
-rw-r--r--  net/bluetooth/smp.c | 903
-rw-r--r--  net/bluetooth/smp.h | 20
-rw-r--r--  net/bridge/Makefile | 4
-rw-r--r--  net/bridge/br.c | 14
-rw-r--r--  net/bridge/br_device.c | 12
-rw-r--r--  net/bridge/br_forward.c | 2
-rw-r--r--  net/bridge/br_if.c | 20
-rw-r--r--  net/bridge/br_input.c | 1
-rw-r--r--  net/bridge/br_multicast.c | 2
-rw-r--r--  net/bridge/br_netfilter.c | 121
-rw-r--r--  net/bridge/br_netlink.c | 116
-rw-r--r--  net/bridge/br_nf_core.c | 96
-rw-r--r--  net/bridge/br_private.h | 36
-rw-r--r--  net/bridge/br_stp.c | 15
-rw-r--r--  net/bridge/br_stp_if.c | 4
-rw-r--r--  net/bridge/br_stp_timer.c | 4
-rw-r--r--  net/bridge/br_sysfs_br.c | 21
-rw-r--r--  net/bridge/br_vlan.c | 147
-rw-r--r--  net/bridge/netfilter/ebtables.c | 15
-rw-r--r--  net/bridge/netfilter/nf_tables_bridge.c | 2
-rw-r--r--  net/bridge/netfilter/nft_reject_bridge.c | 95
-rw-r--r--  net/core/dev.c | 459
-rw-r--r--  net/core/dev_ioctl.c | 7
-rw-r--r--  net/core/ethtool.c | 82
-rw-r--r--  net/core/filter.c | 45
-rw-r--r--  net/core/flow_dissector.c | 115
-rw-r--r--  net/core/gen_estimator.c | 29
-rw-r--r--  net/core/gen_stats.c | 112
-rw-r--r--  net/core/net_namespace.c | 2
-rw-r--r--  net/core/netpoll.c | 7
-rw-r--r--  net/core/pktgen.c | 76
-rw-r--r--  net/core/rtnetlink.c | 66
-rw-r--r--  net/core/secure_seq.c | 6
-rw-r--r--  net/core/skbuff.c | 395
-rw-r--r--  net/core/sock.c | 110
-rw-r--r--  net/core/timestamping.c | 43
-rw-r--r--  net/core/utils.c | 12
-rw-r--r--  net/dccp/ccid.c | 2
-rw-r--r--  net/dccp/ipv6.c | 2
-rw-r--r--  net/dccp/proto.c | 2
-rw-r--r--  net/decnet/af_decnet.c | 3
-rw-r--r--  net/decnet/dn_dev.c | 3
-rw-r--r--  net/decnet/dn_timer.c | 3
-rw-r--r--  net/dsa/Kconfig | 3
-rw-r--r--  net/dsa/Makefile | 1
-rw-r--r--  net/dsa/dsa.c | 186
-rw-r--r--  net/dsa/dsa_priv.h | 29
-rw-r--r--  net/dsa/slave.c | 304
-rw-r--r--  net/dsa/tag_brcm.c | 171
-rw-r--r--  net/dsa/tag_dsa.c | 9
-rw-r--r--  net/dsa/tag_edsa.c | 9
-rw-r--r--  net/dsa/tag_trailer.c | 9
-rw-r--r--  net/ethernet/eth.c | 34
-rw-r--r--  net/ieee802154/6lowpan_rtnl.c | 127
-rw-r--r--  net/ieee802154/reassembly.c | 4
-rw-r--r--  net/ipv4/Kconfig | 51
-rw-r--r--  net/ipv4/Makefile | 3
-rw-r--r--  net/ipv4/af_inet.c | 47
-rw-r--r--  net/ipv4/ah4.c | 2
-rw-r--r--  net/ipv4/arp.c | 6
-rw-r--r--  net/ipv4/cipso_ipv4.c | 2
-rw-r--r--  net/ipv4/fib_frontend.c | 14
-rw-r--r--  net/ipv4/fib_semantics.c | 8
-rw-r--r--  net/ipv4/fou.c | 514
-rw-r--r--  net/ipv4/geneve.c | 373
-rw-r--r--  net/ipv4/gre_demux.c | 9
-rw-r--r--  net/ipv4/gre_offload.c | 55
-rw-r--r--  net/ipv4/icmp.c | 64
-rw-r--r--  net/ipv4/igmp.c | 35
-rw-r--r--  net/ipv4/inet_hashtables.c | 2
-rw-r--r--  net/ipv4/inetpeer.c | 21
-rw-r--r--  net/ipv4/ip_fragment.c | 4
-rw-r--r--  net/ipv4/ip_gre.c | 94
-rw-r--r--  net/ipv4/ip_options.c | 6
-rw-r--r--  net/ipv4/ip_output.c | 10
-rw-r--r--  net/ipv4/ip_sockglue.c | 19
-rw-r--r--  net/ipv4/ip_tunnel.c | 106
-rw-r--r--  net/ipv4/ip_vti.c | 2
-rw-r--r--  net/ipv4/ipconfig.c | 3
-rw-r--r--  net/ipv4/ipip.c | 82
-rw-r--r--  net/ipv4/netfilter/Kconfig | 39
-rw-r--r--  net/ipv4/netfilter/Makefile | 5
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_MASQUERADE.c | 108
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c | 2
-rw-r--r--  net/ipv4/netfilter/iptable_nat.c | 233
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_l3proto_ipv4.c | 199
-rw-r--r--  net/ipv4/netfilter/nf_nat_masquerade_ipv4.c | 153
-rw-r--r--  net/ipv4/netfilter/nf_reject_ipv4.c | 127
-rw-r--r--  net/ipv4/netfilter/nft_chain_nat_ipv4.c | 157
-rw-r--r--  net/ipv4/netfilter/nft_masq_ipv4.c | 77
-rw-r--r--  net/ipv4/netfilter/nft_reject_ipv4.c | 1
-rw-r--r--  net/ipv4/ping.c | 2
-rw-r--r--  net/ipv4/protocol.c | 1
-rw-r--r--  net/ipv4/route.c | 14
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 40
-rw-r--r--  net/ipv4/tcp.c | 36
-rw-r--r--  net/ipv4/tcp_bic.c | 11
-rw-r--r--  net/ipv4/tcp_cong.c | 55
-rw-r--r--  net/ipv4/tcp_cubic.c | 18
-rw-r--r--  net/ipv4/tcp_dctcp.c | 344
-rw-r--r--  net/ipv4/tcp_diag.c | 5
-rw-r--r--  net/ipv4/tcp_fastopen.c | 2
-rw-r--r--  net/ipv4/tcp_highspeed.c | 145
-rw-r--r--  net/ipv4/tcp_htcp.c | 6
-rw-r--r--  net/ipv4/tcp_hybla.c | 1
-rw-r--r--  net/ipv4/tcp_illinois.c | 3
-rw-r--r--  net/ipv4/tcp_input.c | 285
-rw-r--r--  net/ipv4/tcp_ipv4.c | 68
-rw-r--r--  net/ipv4/tcp_minisocks.c | 13
-rw-r--r--  net/ipv4/tcp_offload.c | 72
-rw-r--r--  net/ipv4/tcp_output.c | 124
-rw-r--r--  net/ipv4/tcp_probe.c | 6
-rw-r--r--  net/ipv4/tcp_scalable.c | 2
-rw-r--r--  net/ipv4/tcp_timer.c | 52
-rw-r--r--  net/ipv4/tcp_vegas.c | 3
-rw-r--r--  net/ipv4/tcp_veno.c | 1
-rw-r--r--  net/ipv4/tcp_westwood.c | 35
-rw-r--r--  net/ipv4/tcp_yeah.c | 9
-rw-r--r--  net/ipv4/udp.c | 13
-rw-r--r--  net/ipv4/udp_offload.c | 171
-rw-r--r--  net/ipv4/udp_tunnel.c | 138
-rw-r--r--  net/ipv6/Makefile | 4
-rw-r--r--  net/ipv6/addrconf.c | 17
-rw-r--r--  net/ipv6/af_inet6.c | 20
-rw-r--r--  net/ipv6/ah6.c | 23
-rw-r--r--  net/ipv6/anycast.c | 108
-rw-r--r--  net/ipv6/datagram.c | 23
-rw-r--r--  net/ipv6/esp6.c | 15
-rw-r--r--  net/ipv6/exthdrs.c | 2
-rw-r--r--  net/ipv6/icmp.c | 34
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 6
-rw-r--r--  net/ipv6/inet6_hashtables.c | 7
-rw-r--r--  net/ipv6/ip6_fib.c | 142
-rw-r--r--  net/ipv6/ip6_flowlabel.c | 19
-rw-r--r--  net/ipv6/ip6_gre.c | 10
-rw-r--r--  net/ipv6/ip6_icmp.c | 2
-rw-r--r--  net/ipv6/ip6_input.c | 6
-rw-r--r--  net/ipv6/ip6_offload.c | 34
-rw-r--r--  net/ipv6/ip6_output.c | 27
-rw-r--r--  net/ipv6/ip6_tunnel.c | 34
-rw-r--r--  net/ipv6/ip6_udp_tunnel.c | 107
-rw-r--r--  net/ipv6/ip6_vti.c | 2
-rw-r--r--  net/ipv6/ip6mr.c | 4
-rw-r--r--  net/ipv6/ipcomp6.c | 6
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 26
-rw-r--r--  net/ipv6/mcast.c | 302
-rw-r--r--  net/ipv6/mip6.c | 10
-rw-r--r--  net/ipv6/ndisc.c | 17
-rw-r--r--  net/ipv6/netfilter/Kconfig | 43
-rw-r--r--  net/ipv6/netfilter/Makefile | 5
-rw-r--r--  net/ipv6/netfilter/ip6t_MASQUERADE.c | 76
-rw-r--r--  net/ipv6/netfilter/ip6table_nat.c | 233
-rw-r--r--  net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | 2
-rw-r--r--  net/ipv6/netfilter/nf_nat_l3proto_ipv6.c | 199
-rw-r--r--  net/ipv6/netfilter/nf_nat_masquerade_ipv6.c | 120
-rw-r--r--  net/ipv6/netfilter/nf_reject_ipv6.c | 163
-rw-r--r--  net/ipv6/netfilter/nft_chain_nat_ipv6.c | 165
-rw-r--r--  net/ipv6/netfilter/nft_masq_ipv6.c | 77
-rw-r--r--  net/ipv6/output_core.c | 2
-rw-r--r--  net/ipv6/proc.c | 2
-rw-r--r--  net/ipv6/protocol.c | 1
-rw-r--r--  net/ipv6/raw.c | 8
-rw-r--r--  net/ipv6/reassembly.c | 12
-rw-r--r--  net/ipv6/route.c | 22
-rw-r--r--  net/ipv6/sit.c | 123
-rw-r--r--  net/ipv6/syncookies.c | 4
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 10
-rw-r--r--  net/ipv6/tcp_ipv6.c | 32
-rw-r--r--  net/ipv6/tcpv6_offload.c | 69
-rw-r--r--  net/ipv6/tunnel6.c | 4
-rw-r--r--  net/ipv6/udp.c | 26
-rw-r--r--  net/ipv6/udp_offload.c | 92
-rw-r--r--  net/ipv6/xfrm6_input.c | 6
-rw-r--r--  net/ipv6/xfrm6_output.c | 1
-rw-r--r--  net/ipv6/xfrm6_policy.c | 22
-rw-r--r--  net/ipv6/xfrm6_state.c | 14
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 4
-rw-r--r--  net/irda/irlan/irlan_common.c | 4
-rw-r--r--  net/iucv/iucv.c | 9
-rw-r--r--  net/l2tp/l2tp_core.c | 24
-rw-r--r--  net/mac80211/agg-rx.c | 5
-rw-r--r--  net/mac80211/cfg.c | 114
-rw-r--r--  net/mac80211/chan.c | 191
-rw-r--r--  net/mac80211/debugfs.c | 6
-rw-r--r--  net/mac80211/debugfs_netdev.c | 4
-rw-r--r--  net/mac80211/debugfs_sta.c | 4
-rw-r--r--  net/mac80211/driver-ops.h | 2
-rw-r--r--  net/mac80211/ibss.c | 3
-rw-r--r--  net/mac80211/ieee80211_i.h | 9
-rw-r--r--  net/mac80211/iface.c | 15
-rw-r--r--  net/mac80211/key.c | 15
-rw-r--r--  net/mac80211/main.c | 1
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 4
-rw-r--r--  net/mac80211/mesh_plink.c | 14
-rw-r--r--  net/mac80211/mlme.c | 162
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 98
-rw-r--r--  net/mac80211/rc80211_minstrel.h | 43
-rw-r--r--  net/mac80211/rc80211_minstrel_debugfs.c | 19
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 303
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.h | 41
-rw-r--r--  net/mac80211/rc80211_minstrel_ht_debugfs.c | 10
-rw-r--r--  net/mac80211/rx.c | 13
-rw-r--r--  net/mac80211/scan.c | 3
-rw-r--r--  net/mac80211/sta_info.c | 5
-rw-r--r--  net/mac80211/sta_info.h | 9
-rw-r--r--  net/mac80211/status.c | 22
-rw-r--r--  net/mac80211/tdls.c | 7
-rw-r--r--  net/mac80211/trace.h | 4
-rw-r--r--  net/mac80211/tx.c | 32
-rw-r--r--  net/mac80211/util.c | 26
-rw-r--r--  net/mac80211/wme.c | 5
-rw-r--r--  net/mac80211/wpa.c | 7
-rw-r--r--  net/mac802154/rx.c | 5
-rw-r--r--  net/mac802154/tx.c | 15
-rw-r--r--  net/mac802154/wpan.c | 10
-rw-r--r--  net/mpls/mpls_gso.c | 7
-rw-r--r--  net/netfilter/Kconfig | 9
-rw-r--r--  net/netfilter/Makefile | 1
-rw-r--r--  net/netfilter/ipset/Kconfig | 9
-rw-r--r--  net/netfilter/ipset/Makefile | 1
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_gen.h | 4
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ip.c | 15
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ipmac.c | 15
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_port.c | 15
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 53
-rw-r--r--  net/netfilter/ipset/ip_set_hash_gen.h | 30
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ip.c | 22
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipmark.c | 14
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipport.c | 22
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportip.c | 22
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportnet.c | 14
-rw-r--r--  net/netfilter/ipset/ip_set_hash_mac.c | 173
-rw-r--r--  net/netfilter/ipset/ip_set_hash_net.c | 16
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netiface.c | 20
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netnet.c | 29
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netport.c | 16
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netportnet.c | 22
-rw-r--r--  net/netfilter/ipset/ip_set_list_set.c | 23
-rw-r--r--  net/netfilter/ipvs/Kconfig | 10
-rw-r--r--  net/netfilter/ipvs/Makefile | 1
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 74
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 15
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 223
-rw-r--r--  net/netfilter/ipvs/ip_vs_dh.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_fo.c | 79
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c | 6
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c | 12
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c | 12
-rw-r--r--  net/netfilter/ipvs/ip_vs_lc.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_nq.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_tcp.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_rr.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_sed.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_sh.c | 8
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 13
-rw-r--r--  net/netfilter/ipvs/ip_vs_wlc.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_wrr.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 388
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_generic.c | 26
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 2
-rw-r--r--  net/netfilter/nf_log_common.c | 2
-rw-r--r--  net/netfilter/nf_nat_core.c | 5
-rw-r--r--  net/netfilter/nf_queue.c | 4
-rw-r--r--  net/netfilter/nf_tables_api.c | 601
-rw-r--r--  net/netfilter/nfnetlink.c | 6
-rw-r--r--  net/netfilter/nfnetlink_acct.c | 54
-rw-r--r--  net/netfilter/nfnetlink_log.c | 8
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c | 12
-rw-r--r--  net/netfilter/nft_compat.c | 116
-rw-r--r--  net/netfilter/nft_masq.c | 59
-rw-r--r--  net/netfilter/nft_meta.c | 45
-rw-r--r--  net/netfilter/nft_nat.c | 16
-rw-r--r--  net/netfilter/nft_reject.c | 37
-rw-r--r--  net/netfilter/nft_reject_inet.c | 94
-rw-r--r--  net/netfilter/x_tables.c | 30
-rw-r--r--  net/netfilter/xt_HMARK.c | 2
-rw-r--r--  net/netfilter/xt_RATEEST.c | 2
-rw-r--r--  net/netfilter/xt_cluster.c | 3
-rw-r--r--  net/netfilter/xt_connbytes.c | 2
-rw-r--r--  net/netfilter/xt_hashlimit.c | 4
-rw-r--r--  net/netfilter/xt_physdev.c | 3
-rw-r--r--  net/netfilter/xt_set.c | 191
-rw-r--r--  net/netfilter/xt_string.c | 1
-rw-r--r--  net/netlabel/netlabel_user.c | 6
-rw-r--r--  net/nfc/digital_dep.c | 101
-rw-r--r--  net/nfc/nci/core.c | 21
-rw-r--r--  net/nfc/nci/data.c | 7
-rw-r--r--  net/nfc/nci/ntf.c | 40
-rw-r--r--  net/openvswitch/Kconfig | 11
-rw-r--r--  net/openvswitch/Makefile | 4
-rw-r--r--  net/openvswitch/actions.c | 261
-rw-r--r--  net/openvswitch/datapath.c | 96
-rw-r--r--  net/openvswitch/datapath.h | 23
-rw-r--r--  net/openvswitch/flow.c | 123
-rw-r--r--  net/openvswitch/flow.h | 54
-rw-r--r--  net/openvswitch/flow_netlink.c | 292
-rw-r--r--  net/openvswitch/flow_netlink.h | 4
-rw-r--r--  net/openvswitch/vport-geneve.c | 235
-rw-r--r--  net/openvswitch/vport-gre.c | 33
-rw-r--r--  net/openvswitch/vport-vxlan.c | 27
-rw-r--r--  net/openvswitch/vport.c | 45
-rw-r--r--  net/openvswitch/vport.h | 14
-rw-r--r--  net/packet/af_packet.c | 12
-rw-r--r--  net/phonet/pn_dev.c | 6
-rw-r--r--  net/rose/rose_link.c | 3
-rw-r--r--  net/rxrpc/ar-error.c | 14
-rw-r--r--  net/rxrpc/ar-input.c | 9
-rw-r--r--  net/sched/act_api.c | 9
-rw-r--r--  net/sched/act_police.c | 6
-rw-r--r--  net/sched/cls_api.c | 32
-rw-r--r--  net/sched/cls_basic.c | 89
-rw-r--r--  net/sched/cls_bpf.c | 102
-rw-r--r--  net/sched/cls_cgroup.c | 79
-rw-r--r--  net/sched/cls_flow.c | 151
-rw-r--r--  net/sched/cls_fw.c | 120
-rw-r--r--  net/sched/cls_route.c | 241
-rw-r--r--  net/sched/cls_rsvp.h | 208
-rw-r--r--  net/sched/cls_tcindex.c | 273
-rw-r--r--  net/sched/cls_u32.c | 407
-rw-r--r--  net/sched/em_canid.c | 4
-rw-r--r--  net/sched/em_ipset.c | 7
-rw-r--r--  net/sched/em_meta.c | 4
-rw-r--r--  net/sched/em_nbyte.c | 2
-rw-r--r--  net/sched/em_text.c | 4
-rw-r--r--  net/sched/ematch.c | 10
-rw-r--r--  net/sched/sch_api.c | 65
-rw-r--r--  net/sched/sch_atm.c | 28
-rw-r--r--  net/sched/sch_cbq.c | 35
-rw-r--r--  net/sched/sch_choke.c | 29
-rw-r--r--  net/sched/sch_codel.c | 2
-rw-r--r--  net/sched/sch_drr.c | 27
-rw-r--r--  net/sched/sch_dsmark.c | 11
-rw-r--r--  net/sched/sch_fifo.c | 2
-rw-r--r--  net/sched/sch_fq.c | 14
-rw-r--r--  net/sched/sch_fq_codel.c | 24
-rw-r--r--  net/sched/sch_generic.c | 82
-rw-r--r--  net/sched/sch_gred.c | 4
-rw-r--r--  net/sched/sch_hfsc.c | 32
-rw-r--r--  net/sched/sch_hhf.c | 8
-rw-r--r--  net/sched/sch_htb.c | 48
-rw-r--r--  net/sched/sch_ingress.c | 10
-rw-r--r--  net/sched/sch_mq.c | 6
-rw-r--r--  net/sched/sch_mqprio.c | 20
-rw-r--r--  net/sched/sch_multiq.c | 17
-rw-r--r--  net/sched/sch_netem.c | 15
-rw-r--r--  net/sched/sch_pie.c | 2
-rw-r--r--  net/sched/sch_prio.c | 20
-rw-r--r--  net/sched/sch_qfq.c | 25
-rw-r--r--  net/sched/sch_red.c | 8
-rw-r--r--  net/sched/sch_sfb.c | 25
-rw-r--r--  net/sched/sch_sfq.c | 35
-rw-r--r--  net/sched/sch_tbf.c | 17
-rw-r--r--  net/sched/sch_teql.c | 20
-rw-r--r--  net/sctp/input.c | 8
-rw-r--r--  net/sctp/protocol.c | 2
-rw-r--r--  net/socket.c | 7
-rw-r--r--  net/tipc/Makefile | 2
-rw-r--r--  net/tipc/bcast.c | 20
-rw-r--r--  net/tipc/bcast.h | 2
-rw-r--r--  net/tipc/config.c | 4
-rw-r--r--  net/tipc/core.c | 9
-rw-r--r--  net/tipc/core.h | 6
-rw-r--r--  net/tipc/link.c | 120
-rw-r--r--  net/tipc/link.h | 7
-rw-r--r--  net/tipc/msg.c | 38
-rw-r--r--  net/tipc/msg.h | 5
-rw-r--r--  net/tipc/name_distr.c | 140
-rw-r--r--  net/tipc/name_distr.h | 1
-rw-r--r--  net/tipc/name_table.c | 9
-rw-r--r--  net/tipc/net.c | 3
-rw-r--r--  net/tipc/node.c | 95
-rw-r--r--  net/tipc/node.h | 8
-rw-r--r--  net/tipc/port.c | 514
-rw-r--r--  net/tipc/port.h | 190
-rw-r--r--  net/tipc/ref.c | 266
-rw-r--r--  net/tipc/ref.h | 48
-rw-r--r--  net/tipc/socket.c | 884
-rw-r--r--  net/tipc/socket.h | 55
-rw-r--r--  net/tipc/subscr.c | 1
-rw-r--r--  net/tipc/sysctl.c | 7
-rw-r--r--  net/unix/garbage.c | 2
-rw-r--r--  net/wimax/id-table.c | 2
-rw-r--r--  net/wimax/op-msg.c | 9
-rw-r--r--  net/wimax/op-reset.c | 3
-rw-r--r--  net/wimax/op-rfkill.c | 3
-rw-r--r--  net/wimax/op-state-get.c | 3
-rw-r--r--  net/wimax/stack.c | 7
-rw-r--r--  net/wimax/wimax-internal.h | 6
-rw-r--r--  net/wireless/chan.c | 1
-rw-r--r--  net/wireless/core.c | 16
-rw-r--r--  net/wireless/ibss.c | 4
-rw-r--r--  net/wireless/mlme.c | 8
-rw-r--r--  net/wireless/nl80211.c | 249
-rw-r--r--  net/wireless/nl80211.h | 3
-rw-r--r--  net/wireless/rdev-ops.h | 31
-rw-r--r--  net/wireless/reg.c | 82
-rw-r--r--  net/wireless/scan.c | 22
-rw-r--r--  net/wireless/sme.c | 6
-rw-r--r--  net/wireless/trace.h | 45
-rw-r--r--  net/wireless/util.c | 3
-rw-r--r--  net/wireless/wext-compat.c | 2
-rw-r--r--  net/wireless/wext-sme.c | 2
-rw-r--r--  net/xfrm/xfrm_hash.h | 76
-rw-r--r--  net/xfrm/xfrm_output.c | 6
-rw-r--r--  net/xfrm/xfrm_policy.c | 144
-rw-r--r--  net/xfrm/xfrm_state.c | 13
-rw-r--r--  net/xfrm/xfrm_user.c | 83
430 files changed, 15329 insertions(+), 7896 deletions(-)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 35a6b6b15e8a..0d441ec8763e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -799,7 +799,8 @@ void vlan_setup(struct net_device *dev)
ether_setup(dev);
dev->priv_flags |= IFF_802_1Q_VLAN;
- dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ netif_keep_dst(dev);
dev->tx_queue_len = 0;
dev->netdev_ops = &vlan_netdev_ops;
diff --git a/net/Kconfig b/net/Kconfig
index 4051fdfa4367..d6b138e2c263 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -176,10 +176,11 @@ config NETFILTER_ADVANCED
If unsure, say Y.
config BRIDGE_NETFILTER
- bool "Bridged IP/ARP packets filtering"
- depends on BRIDGE && NETFILTER && INET
+ tristate "Bridged IP/ARP packets filtering"
+ depends on BRIDGE
+ depends on NETFILTER && INET
depends on NETFILTER_ADVANCED
- default y
+ default m
---help---
Enabling this option will let arptables resp. iptables see bridged
ARP resp. IP traffic. If you want a bridging firewall, you probably
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 46339040fef0..17e55dfecbe2 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -384,7 +384,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
if (old) {
- pr_warning("XOFF->XOFF transition\n");
+ pr_warn("XOFF->XOFF transition\n");
goto out_release_neigh;
}
dev->stats.tx_packets++;
@@ -447,7 +447,7 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
struct rtable *rt;
if (vcc->push != clip_push) {
- pr_warning("non-CLIP VCC\n");
+ pr_warn("non-CLIP VCC\n");
return -EBADF;
}
clip_vcc = CLIP_VCC(vcc);
@@ -501,7 +501,7 @@ static void clip_setup(struct net_device *dev)
/* without any more elaborate queuing. 100 is a reasonable */
/* compromise between decent burst-tolerance and protection */
/* against memory hogs. */
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ netif_keep_dst(dev);
}
static int clip_create(int number)
diff --git a/net/atm/common.c b/net/atm/common.c
index 7b491006eaf4..6a765156a3f6 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -300,7 +300,7 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
max_sdu = ATM_MAX_AAL34_PDU;
break;
default:
- pr_warning("AAL problems ... (%d)\n", aal);
+ pr_warn("AAL problems ... (%d)\n", aal);
/* fall through */
case ATM_AAL5:
max_sdu = ATM_MAX_AAL5_PDU;
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index e8e0e7a8a23d..0e982222d425 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -599,7 +599,7 @@ static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
}
non_ip:
- return mpc->old_ops->ndo_start_xmit(skb, dev);
+ return __netdev_start_xmit(mpc->old_ops, skb, dev, false);
}
static int atm_mpoa_vcc_attach(struct atm_vcc *vcc, void __user *arg)
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 206b65ccd5b8..c2e0d14433df 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -39,6 +39,7 @@ static struct dentry *lowpan_control_debugfs;
struct skb_cb {
struct in6_addr addr;
+ struct in6_addr gw;
struct l2cap_chan *chan;
int status;
};
@@ -158,6 +159,54 @@ static inline struct lowpan_peer *peer_lookup_conn(struct lowpan_dev *dev,
return NULL;
}
+static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_dev *dev,
+ struct in6_addr *daddr,
+ struct sk_buff *skb)
+{
+ struct lowpan_peer *peer, *tmp;
+ struct in6_addr *nexthop;
+ struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
+ int count = atomic_read(&dev->peer_count);
+
+ BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt);
+
+ /* If we have multiple 6lowpan peers, then check where we should
+ * send the packet. If only one peer exists, then we can send the
+ * packet right away.
+ */
+ if (count == 1)
+ return list_first_entry(&dev->peers, struct lowpan_peer,
+ list);
+
+ if (!rt) {
+ nexthop = &lowpan_cb(skb)->gw;
+
+ if (ipv6_addr_any(nexthop))
+ return NULL;
+ } else {
+ nexthop = rt6_nexthop(rt);
+
+ /* We need to remember the address because it is needed
+ * by bt_xmit() when sending the packet. In bt_xmit(), the
+ * destination routing info is not set.
+ */
+ memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr));
+ }
+
+ BT_DBG("gw %pI6c", nexthop);
+
+ list_for_each_entry_safe(peer, tmp, &dev->peers, list) {
+ BT_DBG("dst addr %pMR dst type %d ip %pI6c",
+ &peer->chan->dst, peer->chan->dst_type,
+ &peer->peer_addr);
+
+ if (!ipv6_addr_cmp(&peer->peer_addr, nexthop))
+ return peer;
+ }
+
+ return NULL;
+}
+
static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn)
{
struct lowpan_dev *entry, *tmp;
@@ -377,58 +426,85 @@ static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
*addr_type = get_addr_type_from_eui64(addr->b[5]);
}
-static int header_create(struct sk_buff *skb, struct net_device *netdev,
- unsigned short type, const void *_daddr,
- const void *_saddr, unsigned int len)
+static int setup_header(struct sk_buff *skb, struct net_device *netdev,
+ bdaddr_t *peer_addr, u8 *peer_addr_type)
{
- struct ipv6hdr *hdr;
+ struct in6_addr ipv6_daddr;
struct lowpan_dev *dev;
struct lowpan_peer *peer;
bdaddr_t addr, *any = BDADDR_ANY;
- u8 *saddr, *daddr = any->b;
- u8 addr_type;
-
- if (type != ETH_P_IPV6)
- return -EINVAL;
-
- hdr = ipv6_hdr(skb);
+ u8 *daddr = any->b;
+ int err, status = 0;
dev = lowpan_dev(netdev);
- if (ipv6_addr_is_multicast(&hdr->daddr)) {
- memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
- sizeof(struct in6_addr));
+ memcpy(&ipv6_daddr, &lowpan_cb(skb)->addr, sizeof(ipv6_daddr));
+
+ if (ipv6_addr_is_multicast(&ipv6_daddr)) {
lowpan_cb(skb)->chan = NULL;
} else {
unsigned long flags;
+ u8 addr_type;
/* Get destination BT device from skb.
* If there is no such peer then discard the packet.
*/
- convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
+ convert_dest_bdaddr(&ipv6_daddr, &addr, &addr_type);
BT_DBG("dest addr %pMR type %d IP %pI6c", &addr,
- addr_type, &hdr->daddr);
+ addr_type, &ipv6_daddr);
read_lock_irqsave(&devices_lock, flags);
peer = peer_lookup_ba(dev, &addr, addr_type);
read_unlock_irqrestore(&devices_lock, flags);
if (!peer) {
- BT_DBG("no such peer %pMR found", &addr);
- return -ENOENT;
+ /* The packet might be sent to 6lowpan interface
+ * because of routing (either via default route
+ * or user set route) so get peer according to
+ * the destination address.
+ */
+ read_lock_irqsave(&devices_lock, flags);
+ peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
+ read_unlock_irqrestore(&devices_lock, flags);
+ if (!peer) {
+ BT_DBG("no such peer %pMR found", &addr);
+ return -ENOENT;
+ }
}
daddr = peer->eui64_addr;
-
- memcpy(&lowpan_cb(skb)->addr, &hdr->daddr,
- sizeof(struct in6_addr));
+ *peer_addr = addr;
+ *peer_addr_type = addr_type;
lowpan_cb(skb)->chan = peer->chan;
+
+ status = 1;
}
- saddr = dev->netdev->dev_addr;
+ lowpan_header_compress(skb, netdev, ETH_P_IPV6, daddr,
+ dev->netdev->dev_addr, skb->len);
+
+ err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
+ if (err < 0)
+ return err;
+
+ return status;
+}
+
+static int header_create(struct sk_buff *skb, struct net_device *netdev,
+ unsigned short type, const void *_daddr,
+ const void *_saddr, unsigned int len)
+{
+ struct ipv6hdr *hdr;
+
+ if (type != ETH_P_IPV6)
+ return -EINVAL;
+
+ hdr = ipv6_hdr(skb);
+
+ memcpy(&lowpan_cb(skb)->addr, &hdr->daddr, sizeof(struct in6_addr));
- return lowpan_header_compress(skb, netdev, type, daddr, saddr, len);
+ return 0;
}
/* Packet to BT LE device */
@@ -470,11 +546,12 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
return err;
}
-static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
+static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
{
struct sk_buff *local_skb;
struct lowpan_dev *entry, *tmp;
unsigned long flags;
+ int err = 0;
read_lock_irqsave(&devices_lock, flags);
@@ -488,55 +565,77 @@ static void send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev)
dev = lowpan_dev(entry->netdev);
list_for_each_entry_safe(pentry, ptmp, &dev->peers, list) {
+ int ret;
+
local_skb = skb_clone(skb, GFP_ATOMIC);
- send_pkt(pentry->chan, local_skb, netdev);
+ BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
+ netdev->name,
+ &pentry->chan->dst, pentry->chan->dst_type,
+ &pentry->peer_addr, pentry->chan);
+ ret = send_pkt(pentry->chan, local_skb, netdev);
+ if (ret < 0)
+ err = ret;
kfree_skb(local_skb);
}
}
read_unlock_irqrestore(&devices_lock, flags);
+
+ return err;
}
static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
int err = 0;
- struct lowpan_dev *dev;
- struct lowpan_peer *peer;
bdaddr_t addr;
u8 addr_type;
+ struct sk_buff *tmpskb;
- if (ipv6_addr_is_multicast(&lowpan_cb(skb)->addr)) {
- /* We need to send the packet to every device
- * behind this interface.
- */
- send_mcast_pkt(skb, netdev);
- } else {
- unsigned long flags;
-
- convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
- dev = lowpan_dev(netdev);
-
- read_lock_irqsave(&devices_lock, flags);
- peer = peer_lookup_ba(dev, &addr, addr_type);
- read_unlock_irqrestore(&devices_lock, flags);
+ /* We must take a copy of the skb before we modify/replace the ipv6
+ * header as the header could be used elsewhere
+ */
+ tmpskb = skb_unshare(skb, GFP_ATOMIC);
+ if (!tmpskb) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+ skb = tmpskb;
- BT_DBG("xmit %s to %pMR type %d IP %pI6c peer %p",
- netdev->name, &addr, addr_type,
- &lowpan_cb(skb)->addr, peer);
+ /* Return values from setup_header()
+ * <0 - error, packet is dropped
+ * 0 - this is a multicast packet
+ * 1 - this is unicast packet
+ */
+ err = setup_header(skb, netdev, &addr, &addr_type);
+ if (err < 0) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
- if (peer && peer->chan)
- err = send_pkt(peer->chan, skb, netdev);
- else
+ if (err) {
+ if (lowpan_cb(skb)->chan) {
+ BT_DBG("xmit %s to %pMR type %d IP %pI6c chan %p",
+ netdev->name, &addr, addr_type,
+ &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan);
+ err = send_pkt(lowpan_cb(skb)->chan, skb, netdev);
+ } else {
err = -ENOENT;
+ }
+ } else {
+ /* We need to send the packet to every device behind this
+ * interface.
+ */
+ err = send_mcast_pkt(skb, netdev);
}
+
dev_kfree_skb(skb);
if (err)
BT_DBG("ERROR: xmit failed (%d)", err);
- return (err < 0) ? NET_XMIT_DROP : err;
+ return err < 0 ? NET_XMIT_DROP : err;
}
static const struct net_device_ops netdev_ops = {
@@ -556,7 +655,8 @@ static void netdev_setup(struct net_device *dev)
dev->needed_tailroom = 0;
dev->mtu = IPV6_MIN_MTU;
dev->tx_queue_len = 0;
- dev->flags = IFF_RUNNING | IFF_POINTOPOINT;
+ dev->flags = IFF_RUNNING | IFF_POINTOPOINT |
+ IFF_MULTICAST;
dev->watchdog_timeo = 0;
dev->netdev_ops = &netdev_ops;
@@ -671,6 +771,14 @@ static struct l2cap_chan *chan_open(struct l2cap_chan *pchan)
return chan;
}
+static void set_ip_addr_bits(u8 addr_type, u8 *addr)
+{
+ if (addr_type == BDADDR_LE_PUBLIC)
+ *addr |= 0x02;
+ else
+ *addr &= ~0x02;
+}
+
static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
struct lowpan_dev *dev)
{
@@ -693,6 +801,11 @@ static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan,
memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
EUI64_ADDR_LEN);
+ /* IPv6 address needs to have the U/L bit set properly so toggle
+ * it back here.
+ */
+ set_ip_addr_bits(chan->dst_type, (u8 *)&peer->peer_addr.s6_addr + 8);
+
write_lock_irqsave(&devices_lock, flags);
INIT_LIST_HEAD(&peer->list);
peer_add(dev, peer);
@@ -772,16 +885,16 @@ static inline void chan_ready_cb(struct l2cap_chan *chan)
ifup(dev->netdev);
}
-static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *chan)
+static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan)
{
- struct l2cap_chan *pchan;
+ struct l2cap_chan *chan;
- pchan = chan_open(chan);
- pchan->ops = chan->ops;
+ chan = chan_open(pchan);
+ chan->ops = pchan->ops;
BT_DBG("chan %p pchan %p", chan, pchan);
- return pchan;
+ return chan;
}
static void delete_netdev(struct work_struct *work)
@@ -876,6 +989,9 @@ static void chan_suspend_cb(struct l2cap_chan *chan)
BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
+ if (!skb)
+ return;
+
lowpan_cb(skb)->status = -EAGAIN;
}
@@ -885,12 +1001,15 @@ static void chan_resume_cb(struct l2cap_chan *chan)
BT_DBG("chan %p conn %p skb %p", chan, chan->conn, skb);
+ if (!skb)
+ return;
+
lowpan_cb(skb)->status = 0;
}
static long chan_get_sndtimeo_cb(struct l2cap_chan *chan)
{
- return msecs_to_jiffies(1000);
+ return L2CAP_CONN_TIMEOUT;
}
static const struct l2cap_ops bt_6lowpan_chan_ops = {
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 4dca0299ed96..339c74ad4553 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -709,8 +709,11 @@ EXPORT_SYMBOL_GPL(bt_debugfs);
static int __init bt_init(void)
{
+ struct sk_buff *skb;
int err;
+ BUILD_BUG_ON(sizeof(struct bt_skb_cb) > sizeof(skb->cb));
+
BT_INFO("Core ver %s", VERSION);
bt_debugfs = debugfs_create_dir("bluetooth", NULL);
diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c
index 016cdb66df6c..2640d78f30b8 100644
--- a/net/bluetooth/amp.c
+++ b/net/bluetooth/amp.c
@@ -149,15 +149,14 @@ static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output)
if (ret) {
BT_DBG("crypto_ahash_setkey failed: err %d", ret);
} else {
- struct {
- struct shash_desc shash;
- char ctx[crypto_shash_descsize(tfm)];
- } desc;
+ char desc[sizeof(struct shash_desc) +
+ crypto_shash_descsize(tfm)] CRYPTO_MINALIGN_ATTR;
+ struct shash_desc *shash = (struct shash_desc *)desc;
- desc.shash.tfm = tfm;
- desc.shash.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ shash->tfm = tfm;
+ shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- ret = crypto_shash_digest(&desc.shash, plaintext, psize,
+ ret = crypto_shash_digest(shash, plaintext, psize,
output);
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index faff6247ac8f..b9517bd17190 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -36,19 +36,25 @@
struct sco_param {
u16 pkt_type;
u16 max_latency;
+ u8 retrans_effort;
+};
+
+static const struct sco_param esco_param_cvsd[] = {
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
+ { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */
+ { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */
+ { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */
};
static const struct sco_param sco_param_cvsd[] = {
- { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
- { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
- { EDR_ESCO_MASK | ESCO_EV3, 0x0007 }, /* S1 */
- { EDR_ESCO_MASK | ESCO_HV3, 0xffff }, /* D1 */
- { EDR_ESCO_MASK | ESCO_HV1, 0xffff }, /* D0 */
+ { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */
+ { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */
};
-static const struct sco_param sco_param_wideband[] = {
- { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
- { EDR_ESCO_MASK | ESCO_EV3, 0x0008 }, /* T1 */
+static const struct sco_param esco_param_msbc[] = {
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
+ { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
};
static void hci_le_create_connection_cancel(struct hci_conn *conn)
@@ -116,23 +122,36 @@ static void hci_reject_sco(struct hci_conn *conn)
{
struct hci_cp_reject_sync_conn_req cp;
- cp.reason = HCI_ERROR_REMOTE_USER_TERM;
+ cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
bacpy(&cp.bdaddr, &conn->dst);
hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
}
-void hci_disconnect(struct hci_conn *conn, __u8 reason)
+int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
struct hci_cp_disconnect cp;
BT_DBG("hcon %p", conn);
+ /* When we are master of an established connection and it enters
+ * the disconnect timeout, then go ahead and try to read the
+ * current clock offset. Processing of the result is done
+ * within the event handling and hci_clock_offset_evt function.
+ */
+ if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
+ struct hci_dev *hdev = conn->hdev;
+ struct hci_cp_read_clock_offset cp;
+
+ cp.handle = cpu_to_le16(conn->handle);
+ hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(cp), &cp);
+ }
+
conn->state = BT_DISCONN;
cp.handle = cpu_to_le16(conn->handle);
cp.reason = reason;
- hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
+ return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
static void hci_amp_disconn(struct hci_conn *conn)
@@ -188,21 +207,26 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
switch (conn->setting & SCO_AIRMODE_MASK) {
case SCO_AIRMODE_TRANSP:
- if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
+ if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
return false;
- cp.retrans_effort = 0x02;
- param = &sco_param_wideband[conn->attempt - 1];
+ param = &esco_param_msbc[conn->attempt - 1];
break;
case SCO_AIRMODE_CVSD:
- if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
- return false;
- cp.retrans_effort = 0x01;
- param = &sco_param_cvsd[conn->attempt - 1];
+ if (lmp_esco_capable(conn->link)) {
+ if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
+ return false;
+ param = &esco_param_cvsd[conn->attempt - 1];
+ } else {
+ if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
+ return false;
+ param = &sco_param_cvsd[conn->attempt - 1];
+ }
break;
default:
return false;
}
+ cp.retrans_effort = param->retrans_effort;
cp.pkt_type = __cpu_to_le16(param->pkt_type);
cp.max_latency = __cpu_to_le16(param->max_latency);
@@ -325,25 +349,6 @@ static void hci_conn_timeout(struct work_struct *work)
hci_amp_disconn(conn);
} else {
__u8 reason = hci_proto_disconn_ind(conn);
-
- /* When we are master of an established connection
- * and it enters the disconnect timeout, then go
- * ahead and try to read the current clock offset.
- *
- * Processing of the result is done within the
- * event handling and hci_clock_offset_evt function.
- */
- if (conn->type == ACL_LINK &&
- conn->role == HCI_ROLE_MASTER) {
- struct hci_dev *hdev = conn->hdev;
- struct hci_cp_read_clock_offset cp;
-
- cp.handle = cpu_to_le16(conn->handle);
-
- hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET,
- sizeof(cp), &cp);
- }
-
hci_disconnect(conn, reason);
}
break;
@@ -595,6 +600,7 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
conn->dst_type);
if (params && params->conn) {
hci_conn_drop(params->conn);
+ hci_conn_put(params->conn);
params->conn = NULL;
}
@@ -1290,11 +1296,16 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
BT_DBG("%s hcon %p", hdev->name, conn);
+ if (test_bit(HCI_CONN_DROP, &conn->flags)) {
+ BT_DBG("Refusing to create new hci_chan");
+ return NULL;
+ }
+
chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan)
return NULL;
- chan->conn = conn;
+ chan->conn = hci_conn_get(conn);
skb_queue_head_init(&chan->data_q);
chan->state = BT_CONNECTED;
@@ -1314,7 +1325,10 @@ void hci_chan_del(struct hci_chan *chan)
synchronize_rcu();
- hci_conn_drop(conn);
+ /* Prevent new hci_chan's to be created for this hci_conn */
+ set_bit(HCI_CONN_DROP, &conn->flags);
+
+ hci_conn_put(conn);
skb_queue_purge(&chan->data_q);
kfree(chan);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 1d9c29a00568..cb05d7f16a34 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1898,6 +1898,8 @@ static int __hci_init(struct hci_dev *hdev)
debugfs_create_u16("discov_interleaved_timeout", 0644,
hdev->debugfs,
&hdev->discov_interleaved_timeout);
+
+ smp_register(hdev);
}
return 0;
@@ -2539,6 +2541,7 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev)
list_for_each_entry(p, &hdev->le_conn_params, list) {
if (p->conn) {
hci_conn_drop(p->conn);
+ hci_conn_put(p->conn);
p->conn = NULL;
}
list_del_init(&p->action);
@@ -3238,7 +3241,7 @@ struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
}
list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
- if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
+ if (smp_irk_matches(hdev, irk->val, rpa)) {
bacpy(&irk->rpa, rpa);
return irk;
}
@@ -3723,6 +3726,18 @@ int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
return 0;
}
+static void hci_conn_params_free(struct hci_conn_params *params)
+{
+ if (params->conn) {
+ hci_conn_drop(params->conn);
+ hci_conn_put(params->conn);
+ }
+
+ list_del(&params->action);
+ list_del(&params->list);
+ kfree(params);
+}
+
/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
@@ -3732,12 +3747,7 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
if (!params)
return;
- if (params->conn)
- hci_conn_drop(params->conn);
-
- list_del(&params->action);
- list_del(&params->list);
- kfree(params);
+ hci_conn_params_free(params);
hci_update_background_scan(hdev);
@@ -3764,13 +3774,8 @@ void hci_conn_params_clear_all(struct hci_dev *hdev)
{
struct hci_conn_params *params, *tmp;
- list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
- if (params->conn)
- hci_conn_drop(params->conn);
- list_del(&params->action);
- list_del(&params->list);
- kfree(params);
- }
+ list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
+ hci_conn_params_free(params);
hci_update_background_scan(hdev);
@@ -3867,6 +3872,7 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
BT_DBG("Deferring random address update");
+ set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
return;
}
@@ -3892,7 +3898,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
!bacmp(&hdev->random_addr, &hdev->rpa))
return 0;
- err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
+ err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
if (err < 0) {
BT_ERR("%s failed to generate new RPA", hdev->name);
return err;
@@ -4100,18 +4106,9 @@ int hci_register_dev(struct hci_dev *hdev)
dev_set_name(&hdev->dev, "%s", hdev->name);
- hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
- CRYPTO_ALG_ASYNC);
- if (IS_ERR(hdev->tfm_aes)) {
- BT_ERR("Unable to create crypto context");
- error = PTR_ERR(hdev->tfm_aes);
- hdev->tfm_aes = NULL;
- goto err_wqueue;
- }
-
error = device_add(&hdev->dev);
if (error < 0)
- goto err_tfm;
+ goto err_wqueue;
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
@@ -4153,8 +4150,6 @@ int hci_register_dev(struct hci_dev *hdev)
return id;
-err_tfm:
- crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
destroy_workqueue(hdev->workqueue);
destroy_workqueue(hdev->req_workqueue);
@@ -4206,8 +4201,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
rfkill_destroy(hdev->rfkill);
}
- if (hdev->tfm_aes)
- crypto_free_blkcipher(hdev->tfm_aes);
+ smp_unregister(hdev);
device_del(&hdev->dev);
@@ -4380,26 +4374,6 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
return remain;
}
-int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
-{
- int rem = 0;
-
- if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
- return -EILSEQ;
-
- while (count) {
- rem = hci_reassembly(hdev, type, data, count, type - 1);
- if (rem < 0)
- return rem;
-
- data += (count - rem);
- count = rem;
- }
-
- return rem;
-}
-EXPORT_SYMBOL(hci_recv_fragment);
-
#define STREAM_REASSEMBLY 0
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
@@ -4553,6 +4527,7 @@ static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
BT_DBG("skb len %d", skb->len);
bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
+ bt_cb(skb)->opcode = opcode;
return skb;
}
@@ -5690,3 +5665,52 @@ void hci_update_background_scan(struct hci_dev *hdev)
if (err)
BT_ERR("Failed to run HCI request: err %d", err);
}
+
+static bool disconnected_whitelist_entries(struct hci_dev *hdev)
+{
+ struct bdaddr_list *b;
+
+ list_for_each_entry(b, &hdev->whitelist, list) {
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
+ if (!conn)
+ return true;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ return true;
+ }
+
+ return false;
+}
+
+void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
+{
+ u8 scan;
+
+ if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
+ return;
+
+ if (!hdev_is_powered(hdev))
+ return;
+
+ if (mgmt_powering_down(hdev))
+ return;
+
+ if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
+ disconnected_whitelist_entries(hdev))
+ scan = SCAN_PAGE;
+ else
+ scan = SCAN_DISABLED;
+
+ if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
+ return;
+
+ if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
+ scan |= SCAN_INQUIRY;
+
+ if (req)
+ hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ else
+ hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index a6000823f0ff..8b0a2a6de419 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2071,6 +2071,8 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
cp.handle = ev->handle;
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
sizeof(cp), &cp);
+
+ hci_update_page_scan(hdev, NULL);
}
/* Set packet type for incoming connection */
@@ -2247,9 +2249,12 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
reason, mgmt_connected);
- if (conn->type == ACL_LINK &&
- test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
- hci_remove_link_key(hdev, &conn->dst);
+ if (conn->type == ACL_LINK) {
+ if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
+ hci_remove_link_key(hdev, &conn->dst);
+
+ hci_update_page_scan(hdev, NULL);
+ }
params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
if (params) {
@@ -2315,8 +2320,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->sec_level = conn->pending_sec_level;
}
} else {
- mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
- ev->status);
+ mgmt_auth_failed(conn, ev->status);
}
clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
@@ -2434,6 +2438,12 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
}
+ /* We should disregard the current RPA and generate a new one
+ * whenever the encryption procedure fails.
+ */
+ if (ev->status && conn->type == LE_LINK)
+ set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
+
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
if (ev->status && conn->state == BT_CONNECTED) {
@@ -3895,8 +3905,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
* event gets always produced as initiator and is also mapped to
* the mgmt_auth_failed event */
if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
- mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
- ev->status);
+ mgmt_auth_failed(conn, ev->status);
hci_conn_drop(conn);
@@ -4188,16 +4197,16 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->dst_type = irk->addr_type;
}
- if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
- addr_type = BDADDR_LE_PUBLIC;
- else
- addr_type = BDADDR_LE_RANDOM;
-
if (ev->status) {
hci_le_conn_failed(conn, ev->status);
goto unlock;
}
+ if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
+ addr_type = BDADDR_LE_PUBLIC;
+ else
+ addr_type = BDADDR_LE_RANDOM;
+
/* Drop the connection if the device is blocked */
if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
hci_conn_drop(conn);
@@ -4220,11 +4229,13 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_proto_connect_cfm(conn, ev->status);
- params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+ params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
+ conn->dst_type);
if (params) {
list_del_init(&params->action);
if (params->conn) {
hci_conn_drop(params->conn);
+ hci_conn_put(params->conn);
params->conn = NULL;
}
}
@@ -4316,7 +4327,7 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
* the parameters get removed and keep the reference
* count consistent once the connection is established.
*/
- params->conn = conn;
+ params->conn = hci_conn_get(conn);
return;
}
@@ -4501,10 +4512,7 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
cp.handle = cpu_to_le16(conn->handle);
- if (ltk->authenticated)
- conn->pending_sec_level = BT_SECURITY_HIGH;
- else
- conn->pending_sec_level = BT_SECURITY_MEDIUM;
+ conn->pending_sec_level = smp_ltk_sec_level(ltk);
conn->enc_key_size = ltk->enc_size;
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 6c7ecf116e74..1b7d605706aa 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -915,7 +915,7 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
/* connection management */
bacpy(&session->bdaddr, bdaddr);
- session->conn = conn;
+ session->conn = l2cap_conn_get(conn);
session->user.probe = hidp_session_probe;
session->user.remove = hidp_session_remove;
session->ctrl_sock = ctrl_sock;
@@ -941,13 +941,13 @@ static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
if (ret)
goto err_free;
- l2cap_conn_get(session->conn);
get_file(session->intr_sock->file);
get_file(session->ctrl_sock->file);
*out = session;
return 0;
err_free:
+ l2cap_conn_put(session->conn);
kfree(session);
return ret;
}
@@ -1327,10 +1327,8 @@ int hidp_connection_add(struct hidp_connadd_req *req,
conn = NULL;
l2cap_chan_lock(chan);
- if (chan->conn) {
- l2cap_conn_get(chan->conn);
- conn = chan->conn;
- }
+ if (chan->conn)
+ conn = l2cap_conn_get(chan->conn);
l2cap_chan_unlock(chan);
if (!conn)
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 46547b920f88..b6f9777e057d 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -210,6 +210,10 @@ int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
write_lock(&chan_list_lock);
+ /* Override the defaults (which are for conn-oriented) */
+ chan->omtu = L2CAP_DEFAULT_MTU;
+ chan->chan_type = L2CAP_CHAN_FIXED;
+
chan->scid = scid;
write_unlock(&chan_list_lock);
@@ -542,7 +546,10 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
l2cap_chan_hold(chan);
- hci_conn_hold(conn->hcon);
+ /* Only keep a reference for fixed channels if they requested it */
+ if (chan->chan_type != L2CAP_CHAN_FIXED ||
+ test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
+ hci_conn_hold(conn->hcon);
list_add(&chan->list, &conn->chan_l);
}
@@ -562,6 +569,8 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
+ chan->ops->teardown(chan, err);
+
if (conn) {
struct amp_mgr *mgr = conn->hcon->amp_mgr;
/* Delete from channel list */
@@ -571,7 +580,12 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
chan->conn = NULL;
- if (chan->scid != L2CAP_CID_A2MP)
+ /* Reference was only held for non-fixed channels or
+ * fixed channels that explicitly requested it using the
+ * FLAG_HOLD_HCI_CONN flag.
+ */
+ if (chan->chan_type != L2CAP_CHAN_FIXED ||
+ test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
hci_conn_drop(conn->hcon);
if (mgr && mgr->bredr_chan == chan)
@@ -585,8 +599,6 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
amp_disconnect_logical_link(hs_hchan);
}
- chan->ops->teardown(chan, err);
-
if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
return;
@@ -619,9 +631,11 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
-void l2cap_conn_update_id_addr(struct hci_conn *hcon)
+static void l2cap_conn_update_id_addr(struct work_struct *work)
{
- struct l2cap_conn *conn = hcon->l2cap_data;
+ struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+ id_addr_update_work);
+ struct hci_conn *hcon = conn->hcon;
struct l2cap_chan *chan;
mutex_lock(&conn->chan_lock);
@@ -1082,6 +1096,9 @@ static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
+ if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
+ return true;
+
return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}
@@ -1266,6 +1283,24 @@ static void l2cap_start_connection(struct l2cap_chan *chan)
}
}
+static void l2cap_request_info(struct l2cap_conn *conn)
+{
+ struct l2cap_info_req req;
+
+ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+ return;
+
+ req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+
+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
+ conn->info_ident = l2cap_get_ident(conn);
+
+ schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
+
+ l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
+ sizeof(req), &req);
+}
+
static void l2cap_do_start(struct l2cap_chan *chan)
{
struct l2cap_conn *conn = chan->conn;
@@ -1275,26 +1310,17 @@ static void l2cap_do_start(struct l2cap_chan *chan)
return;
}
- if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
- if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
- return;
-
- if (l2cap_chan_check_security(chan, true) &&
- __l2cap_no_conn_pending(chan)) {
- l2cap_start_connection(chan);
- }
- } else {
- struct l2cap_info_req req;
- req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
-
- conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
- conn->info_ident = l2cap_get_ident(conn);
+ if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
+ l2cap_request_info(conn);
+ return;
+ }
- schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
+ if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
+ return;
- l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
- sizeof(req), &req);
- }
+ if (l2cap_chan_check_security(chan, true) &&
+ __l2cap_no_conn_pending(chan))
+ l2cap_start_connection(chan);
}
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
@@ -1353,6 +1379,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
l2cap_chan_lock(chan);
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+ l2cap_chan_ready(chan);
l2cap_chan_unlock(chan);
continue;
}
@@ -1417,71 +1444,18 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
mutex_unlock(&conn->chan_lock);
}
-/* Find socket with cid and source/destination bdaddr.
- * Returns closest match, locked.
- */
-static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
- bdaddr_t *src,
- bdaddr_t *dst)
-{
- struct l2cap_chan *c, *c1 = NULL;
-
- read_lock(&chan_list_lock);
-
- list_for_each_entry(c, &chan_list, global_l) {
- if (state && c->state != state)
- continue;
-
- if (c->scid == cid) {
- int src_match, dst_match;
- int src_any, dst_any;
-
- /* Exact match. */
- src_match = !bacmp(&c->src, src);
- dst_match = !bacmp(&c->dst, dst);
- if (src_match && dst_match) {
- read_unlock(&chan_list_lock);
- return c;
- }
-
- /* Closest match */
- src_any = !bacmp(&c->src, BDADDR_ANY);
- dst_any = !bacmp(&c->dst, BDADDR_ANY);
- if ((src_match && dst_any) || (src_any && dst_match) ||
- (src_any && dst_any))
- c1 = c;
- }
- }
-
- read_unlock(&chan_list_lock);
-
- return c1;
-}
-
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
struct hci_conn *hcon = conn->hcon;
struct hci_dev *hdev = hcon->hdev;
- struct l2cap_chan *chan, *pchan;
- u8 dst_type;
- BT_DBG("");
-
- /* Check if we have socket listening on cid */
- pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_ATT,
- &hcon->src, &hcon->dst);
- if (!pchan)
- return;
-
- /* Client ATT sockets should override the server one */
- if (__l2cap_get_chan_by_dcid(conn, L2CAP_CID_ATT))
- return;
-
- dst_type = bdaddr_type(hcon, hcon->dst_type);
+ BT_DBG("%s conn %p", hdev->name, conn);
- /* If device is blocked, do not create a channel for it */
- if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
- return;
+ /* For outgoing pairing which doesn't necessarily have an
+ * associated socket (e.g. mgmt_pair_device).
+ */
+ if (hcon->out)
+ smp_conn_security(hcon, hcon->pending_sec_level);
/* For LE slave connections, make sure the connection interval
* is in the range of the minium and maximum interval that has
@@ -1501,22 +1475,6 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
}
-
- l2cap_chan_lock(pchan);
-
- chan = pchan->ops->new_connection(pchan);
- if (!chan)
- goto clean;
-
- bacpy(&chan->src, &hcon->src);
- bacpy(&chan->dst, &hcon->dst);
- chan->src_type = bdaddr_type(hcon, hcon->src_type);
- chan->dst_type = dst_type;
-
- __l2cap_chan_add(conn, chan);
-
-clean:
- l2cap_chan_unlock(pchan);
}
static void l2cap_conn_ready(struct l2cap_conn *conn)
@@ -1526,17 +1484,11 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
BT_DBG("conn %p", conn);
- /* For outgoing pairing which doesn't necessarily have an
- * associated socket (e.g. mgmt_pair_device).
- */
- if (hcon->out && hcon->type == LE_LINK)
- smp_conn_security(hcon, hcon->pending_sec_level);
+ if (hcon->type == ACL_LINK)
+ l2cap_request_info(conn);
mutex_lock(&conn->chan_lock);
- if (hcon->type == LE_LINK)
- l2cap_le_conn_ready(conn);
-
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
@@ -1549,8 +1501,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
if (hcon->type == LE_LINK) {
l2cap_le_start(chan);
} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
- l2cap_chan_ready(chan);
-
+ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
+ l2cap_chan_ready(chan);
} else if (chan->state == BT_CONNECT) {
l2cap_do_start(chan);
}
@@ -1560,6 +1512,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
mutex_unlock(&conn->chan_lock);
+ if (hcon->type == LE_LINK)
+ l2cap_le_conn_ready(conn);
+
queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
@@ -1695,8 +1650,14 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
if (work_pending(&conn->pending_rx_work))
cancel_work_sync(&conn->pending_rx_work);
+ if (work_pending(&conn->id_addr_update_work))
+ cancel_work_sync(&conn->id_addr_update_work);
+
l2cap_unregister_all_users(conn);
+ /* Force the connection to be immediately dropped */
+ hcon->disc_timeout = 0;
+
mutex_lock(&conn->chan_lock);
/* Kill channels */
@@ -1719,29 +1680,11 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
cancel_delayed_work_sync(&conn->info_timer);
- if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
- cancel_delayed_work_sync(&conn->security_timer);
- smp_chan_destroy(conn);
- }
-
hcon->l2cap_data = NULL;
conn->hchan = NULL;
l2cap_conn_put(conn);
}
-static void security_timeout(struct work_struct *work)
-{
- struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
- security_timer.work);
-
- BT_DBG("conn %p", conn);
-
- if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
- smp_chan_destroy(conn);
- l2cap_conn_del(conn->hcon, ETIMEDOUT);
- }
-}
-
static void l2cap_conn_free(struct kref *ref)
{
struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
@@ -1750,9 +1693,10 @@ static void l2cap_conn_free(struct kref *ref)
kfree(conn);
}
-void l2cap_conn_get(struct l2cap_conn *conn)
+struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
kref_get(&conn->ref);
+ return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
@@ -1794,6 +1738,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
src_match = !bacmp(&c->src, src);
dst_match = !bacmp(&c->dst, dst);
if (src_match && dst_match) {
+ l2cap_chan_hold(c);
read_unlock(&chan_list_lock);
return c;
}
@@ -1807,6 +1752,9 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
}
}
+ if (c1)
+ l2cap_chan_hold(c1);
+
read_unlock(&chan_list_lock);
return c1;
@@ -2027,10 +1975,12 @@ static void l2cap_ertm_resend(struct l2cap_chan *chan)
tx_skb->data + L2CAP_HDR_SIZE);
}
+ /* Update FCS */
if (chan->fcs == L2CAP_FCS_CRC16) {
- u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
- put_unaligned_le16(fcs, skb_put(tx_skb,
- L2CAP_FCS_SIZE));
+ u16 fcs = crc16(0, (u8 *) tx_skb->data,
+ tx_skb->len - L2CAP_FCS_SIZE);
+ put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
+ L2CAP_FCS_SIZE);
}
l2cap_do_send(chan, tx_skb);
@@ -2334,7 +2284,6 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
} else {
sar = L2CAP_SAR_START;
sdu_len = len;
- pdu_len -= L2CAP_SDULEN_SIZE;
}
while (len > 0) {
@@ -2349,10 +2298,8 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
__skb_queue_tail(seg_queue, skb);
len -= pdu_len;
- if (sdu_len) {
+ if (sdu_len)
sdu_len = 0;
- pdu_len += L2CAP_SDULEN_SIZE;
- }
if (len <= pdu_len) {
sar = L2CAP_SAR_END;
@@ -2418,12 +2365,8 @@ static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
- pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
-
- pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
-
sdu_len = len;
- pdu_len -= L2CAP_SDULEN_SIZE;
+ pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
while (len > 0) {
if (len <= pdu_len)
@@ -3884,6 +3827,7 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
response:
l2cap_chan_unlock(pchan);
mutex_unlock(&conn->chan_lock);
+ l2cap_chan_put(pchan);
sendresp:
rsp.scid = cpu_to_le16(scid);
@@ -5487,6 +5431,11 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
l2cap_state_change(chan, BT_CONNECT2);
+ /* The following result value is actually not defined
+ * for LE CoC but we use it to let the function know
+ * that it should bail out after doing its cleanup
+ * instead of sending a response.
+ */
result = L2CAP_CR_PEND;
chan->ops->defer(chan);
} else {
@@ -5497,6 +5446,7 @@ static int l2cap_le_connect_req(struct l2cap_conn *conn,
response_unlock:
l2cap_chan_unlock(pchan);
mutex_unlock(&conn->chan_lock);
+ l2cap_chan_put(pchan);
if (result == L2CAP_CR_PEND)
return 0;
@@ -6845,12 +6795,12 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
struct l2cap_chan *chan;
if (hcon->type != ACL_LINK)
- goto drop;
+ goto free_skb;
chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
ACL_LINK);
if (!chan)
- goto drop;
+ goto free_skb;
BT_DBG("chan %p, len %d", chan, skb->len);
@@ -6864,36 +6814,14 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
bt_cb(skb)->psm = psm;
- if (!chan->ops->recv(chan, skb))
- return;
-
-drop:
- kfree_skb(skb);
-}
-
-static void l2cap_att_channel(struct l2cap_conn *conn,
- struct sk_buff *skb)
-{
- struct hci_conn *hcon = conn->hcon;
- struct l2cap_chan *chan;
-
- if (hcon->type != LE_LINK)
- goto drop;
-
- chan = l2cap_global_chan_by_scid(BT_CONNECTED, L2CAP_CID_ATT,
- &hcon->src, &hcon->dst);
- if (!chan)
- goto drop;
-
- BT_DBG("chan %p, len %d", chan, skb->len);
-
- if (chan->imtu < skb->len)
- goto drop;
-
- if (!chan->ops->recv(chan, skb))
+ if (!chan->ops->recv(chan, skb)) {
+ l2cap_chan_put(chan);
return;
+ }
drop:
+ l2cap_chan_put(chan);
+free_skb:
kfree_skb(skb);
}
@@ -6942,19 +6870,10 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
l2cap_conless_channel(conn, psm, skb);
break;
- case L2CAP_CID_ATT:
- l2cap_att_channel(conn, skb);
- break;
-
case L2CAP_CID_LE_SIGNALING:
l2cap_le_sig_channel(conn, skb);
break;
- case L2CAP_CID_SMP:
- if (smp_sig_channel(conn, skb))
- l2cap_conn_del(conn->hcon, EACCES);
- break;
-
default:
l2cap_data_channel(conn, cid, skb);
break;
@@ -6993,8 +6912,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
kref_init(&conn->ref);
hcon->l2cap_data = conn;
- conn->hcon = hcon;
- hci_conn_get(conn->hcon);
+ conn->hcon = hci_conn_get(hcon);
conn->hchan = hchan;
BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
@@ -7023,13 +6941,11 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
INIT_LIST_HEAD(&conn->chan_l);
INIT_LIST_HEAD(&conn->users);
- if (hcon->type == LE_LINK)
- INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
- else
- INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
+ INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
skb_queue_head_init(&conn->pending_rx);
INIT_WORK(&conn->pending_rx_work, process_pending_rx);
+ INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
@@ -7064,8 +6980,6 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
hci_dev_lock(hdev);
- l2cap_chan_lock(chan);
-
if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
chan->chan_type != L2CAP_CHAN_RAW) {
err = -EINVAL;
@@ -7162,19 +7076,20 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
goto done;
}
+ mutex_lock(&conn->chan_lock);
+ l2cap_chan_lock(chan);
+
if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
hci_conn_drop(hcon);
err = -EBUSY;
- goto done;
+ goto chan_unlock;
}
/* Update source addr of the socket */
bacpy(&chan->src, &hcon->src);
chan->src_type = bdaddr_type(hcon, hcon->src_type);
- l2cap_chan_unlock(chan);
- l2cap_chan_add(conn, chan);
- l2cap_chan_lock(chan);
+ __l2cap_chan_add(conn, chan);
/* l2cap_chan_add takes its own ref so we can drop this one */
hci_conn_drop(hcon);
@@ -7200,8 +7115,10 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
err = 0;
-done:
+chan_unlock:
l2cap_chan_unlock(chan);
+ mutex_unlock(&conn->chan_lock);
+done:
hci_dev_unlock(hdev);
hci_dev_put(hdev);
return err;
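
[Editor's note: the reworked locking above follows the ordering used elsewhere in l2cap_core.c — conn->chan_lock first, then the channel lock — with __l2cap_chan_add() (the variant that assumes chan_lock is already held) replacing the old unlock/add/relock dance. In sketch form:]

    mutex_lock(&conn->chan_lock);
    l2cap_chan_lock(chan);

    __l2cap_chan_add(conn, chan);   /* chan_lock already held */

    l2cap_chan_unlock(chan);
    mutex_unlock(&conn->chan_lock);
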
@@ -7239,19 +7156,99 @@ int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
return exact ? lm1 : lm2;
}
+/* Find the next fixed channel in BT_LISTEN state. Iteration continues
+ * from an existing channel in the list, or from the beginning of the
+ * global list when NULL is passed as the first parameter.
+ */
+static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
+ bdaddr_t *src, u8 link_type)
+{
+ read_lock(&chan_list_lock);
+
+ if (c)
+ c = list_next_entry(c, global_l);
+ else
+ c = list_entry(chan_list.next, typeof(*c), global_l);
+
+ list_for_each_entry_from(c, &chan_list, global_l) {
+ if (c->chan_type != L2CAP_CHAN_FIXED)
+ continue;
+ if (c->state != BT_LISTEN)
+ continue;
+ if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
+ continue;
+ if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
+ continue;
+ if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
+ continue;
+
+ l2cap_chan_hold(c);
+ read_unlock(&chan_list_lock);
+ return c;
+ }
+
+ read_unlock(&chan_list_lock);
+
+ return NULL;
+}
+
void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
+ struct hci_dev *hdev = hcon->hdev;
struct l2cap_conn *conn;
+ struct l2cap_chan *pchan;
+ u8 dst_type;
BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
- if (!status) {
- conn = l2cap_conn_add(hcon);
- if (conn)
- l2cap_conn_ready(conn);
- } else {
+ if (status) {
l2cap_conn_del(hcon, bt_to_errno(status));
+ return;
}
+
+ conn = l2cap_conn_add(hcon);
+ if (!conn)
+ return;
+
+ dst_type = bdaddr_type(hcon, hcon->dst_type);
+
+ /* If device is blocked, do not create channels for it */
+ if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
+ return;
+
+ /* Find fixed channels and notify them of the new connection. We
+ * use multiple individual lookups, continuing each time where
+ * we left off, because the list lock would prevent calling the
+ * potentially sleeping l2cap_chan_lock() function.
+ */
+ pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
+ while (pchan) {
+ struct l2cap_chan *chan, *next;
+
+ /* Client fixed channels should override server ones */
+ if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
+ goto next;
+
+ l2cap_chan_lock(pchan);
+ chan = pchan->ops->new_connection(pchan);
+ if (chan) {
+ bacpy(&chan->src, &hcon->src);
+ bacpy(&chan->dst, &hcon->dst);
+ chan->src_type = bdaddr_type(hcon, hcon->src_type);
+ chan->dst_type = dst_type;
+
+ __l2cap_chan_add(conn, chan);
+ }
+
+ l2cap_chan_unlock(pchan);
+next:
+ next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
+ hcon->type);
+ l2cap_chan_put(pchan);
+ pchan = next;
+ }
+
+ l2cap_conn_ready(conn);
}
int l2cap_disconn_ind(struct hci_conn *hcon)
@@ -7299,12 +7296,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
- if (hcon->type == LE_LINK) {
- if (!status && encrypt)
- smp_distribute_keys(conn);
- cancel_delayed_work(&conn->security_timer);
- }
-
mutex_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
@@ -7318,15 +7309,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
continue;
}
- if (chan->scid == L2CAP_CID_ATT) {
- if (!status && encrypt) {
- chan->sec_level = hcon->sec_level;
- l2cap_chan_ready(chan);
- }
-
- l2cap_chan_unlock(chan);
- continue;
- }
+ if (!status && encrypt)
+ chan->sec_level = hcon->sec_level;
if (!__l2cap_no_conn_pending(chan)) {
l2cap_chan_unlock(chan);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 1884f72083c2..31f106e61ca2 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -99,15 +99,6 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
return -EINVAL;
- if (la.l2_cid) {
- /* When the socket gets created it defaults to
- * CHAN_CONN_ORIENTED, so we need to overwrite the
- * default here.
- */
- chan->chan_type = L2CAP_CHAN_FIXED;
- chan->omtu = L2CAP_DEFAULT_MTU;
- }
-
if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
/* We only allow ATT user space socket */
if (la.l2_cid &&
@@ -155,6 +146,14 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
case L2CAP_CHAN_RAW:
chan->sec_level = BT_SECURITY_SDP;
break;
+ case L2CAP_CHAN_FIXED:
+ /* By default the L2CAP core does not hold a hci_conn
+ * reference for fixed channels. For fixed channels that map
+ * to L2CAP sockets we do want one, so set the flag that
+ * requests it.
+ */
+ set_bit(FLAG_HOLD_HCI_CONN, &chan->flags);
+ break;
}
bacpy(&chan->src, &la.l2_bdaddr);
@@ -790,6 +789,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
if (chan->scid == L2CAP_CID_ATT) {
if (smp_conn_security(conn->hcon, sec.level))
break;
+ set_bit(FLAG_PENDING_SECURITY, &chan->flags);
sk->sk_state = BT_CONFIG;
chan->state = BT_CONFIG;
@@ -1359,6 +1359,11 @@ static void l2cap_sock_resume_cb(struct l2cap_chan *chan)
{
struct sock *sk = chan->data;
+ if (test_and_clear_bit(FLAG_PENDING_SECURITY, &chan->flags)) {
+ sk->sk_state = BT_CONNECTED;
+ chan->state = BT_CONNECTED;
+ }
+
clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
sk->sk_state_change(sk);
}
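
[Editor's note: the two l2cap_sock.c hunks above form a small state machine for security requested while the connection is up. setsockopt() parks the socket in BT_CONFIG with FLAG_PENDING_SECURITY set; the resume callback, which runs once encryption is in place, moves it back to BT_CONNECTED. Condensed sketch using only names from this patch:]

    /* setsockopt(BT_SECURITY) path */
    if (smp_conn_security(conn->hcon, sec.level))
        break;                                   /* security not initiated */
    set_bit(FLAG_PENDING_SECURITY, &chan->flags);
    sk->sk_state = BT_CONFIG;

    /* resume callback path, after encryption completes */
    if (test_and_clear_bit(FLAG_PENDING_SECURITY, &chan->flags))
        sk->sk_state = BT_CONNECTED;
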
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 941ad7530eda..b36bc0415854 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -135,40 +135,34 @@ int bt_to_errno(__u16 code)
}
EXPORT_SYMBOL(bt_to_errno);
-int bt_info(const char *format, ...)
+void bt_info(const char *format, ...)
{
struct va_format vaf;
va_list args;
- int r;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
- r = pr_info("%pV", &vaf);
+ pr_info("%pV", &vaf);
va_end(args);
-
- return r;
}
EXPORT_SYMBOL(bt_info);
-int bt_err(const char *format, ...)
+void bt_err(const char *format, ...)
{
struct va_format vaf;
va_list args;
- int r;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
- r = pr_err("%pV", &vaf);
+ pr_err("%pV", &vaf);
va_end(args);
-
- return r;
}
EXPORT_SYMBOL(bt_err);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index b8554d429d88..efb71b022ab6 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -129,9 +129,6 @@ static const u16 mgmt_events[] = {
#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
-#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
- !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
-
struct pending_cmd {
struct list_head list;
u16 opcode;
@@ -1536,9 +1533,11 @@ static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
/* When the discoverable mode gets changed, make sure
* that the class of device has the limited discoverable
- * bit correctly set.
+ * bit correctly set. Also update page scan based on whitelist
+ * entries.
*/
hci_req_init(&req, hdev);
+ hci_update_page_scan(hdev, &req);
update_class(&req);
hci_req_run(&req, NULL);
@@ -1785,6 +1784,7 @@ static void set_connectable_complete(struct hci_dev *hdev, u8 status)
if (conn_changed || discov_changed) {
new_settings(hdev, cmd->sk);
+ hci_update_page_scan(hdev, NULL);
if (discov_changed)
mgmt_update_adv_data(hdev);
hci_update_background_scan(hdev);
@@ -1818,6 +1818,7 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
return err;
if (changed) {
+ hci_update_page_scan(hdev, NULL);
hci_update_background_scan(hdev);
return new_settings(hdev, sk);
}
@@ -2787,7 +2788,6 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_cp_disconnect *cp = data;
struct mgmt_rp_disconnect rp;
- struct hci_cp_disconnect dc;
struct pending_cmd *cmd;
struct hci_conn *conn;
int err;
@@ -2835,10 +2835,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- dc.handle = cpu_to_le16(conn->handle);
- dc.reason = HCI_ERROR_REMOTE_USER_TERM;
-
- err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
+ err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
if (err < 0)
mgmt_pending_remove(cmd);
@@ -3062,6 +3059,7 @@ static void pairing_complete(struct pending_cmd *cmd, u8 status)
conn->disconn_cfm_cb = NULL;
hci_conn_drop(conn);
+ hci_conn_put(conn);
mgmt_pending_remove(cmd);
}
@@ -3211,7 +3209,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
}
conn->io_capability = cp->io_cap;
- cmd->user_data = conn;
+ cmd->user_data = hci_conn_get(conn);
if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
hci_conn_security(conn, sec_level, auth_type, true))
@@ -4381,27 +4379,6 @@ unlock:
return err;
}
-static void set_bredr_scan(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- u8 scan = 0;
-
- /* Ensure that fast connectable is disabled. This function will
- * not do anything if the page scan parameters are already what
- * they should be.
- */
- write_fast_connectable(req, false);
-
- if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
- !list_empty(&hdev->whitelist))
- scan |= SCAN_PAGE;
- if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
- scan |= SCAN_INQUIRY;
-
- if (scan)
- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
-}
-
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
struct pending_cmd *cmd;
@@ -4507,9 +4484,8 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
hci_req_init(&req, hdev);
- if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
- !list_empty(&hdev->whitelist))
- set_bredr_scan(&req);
+ write_fast_connectable(&req, false);
+ hci_update_page_scan(hdev, &req);
/* Since only the advertising data flags will change, there
* is no need to update the scan response data.
@@ -4935,6 +4911,7 @@ static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
match->mgmt_status, &rp, sizeof(rp));
hci_conn_drop(conn);
+ hci_conn_put(conn);
mgmt_pending_remove(cmd);
}
@@ -5091,7 +5068,7 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
}
hci_conn_hold(conn);
- cmd->user_data = conn;
+ cmd->user_data = hci_conn_get(conn);
conn->conn_info_timestamp = jiffies;
} else {
@@ -5155,8 +5132,10 @@ send_rsp:
cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
&rp, sizeof(rp));
mgmt_pending_remove(cmd);
- if (conn)
+ if (conn) {
hci_conn_drop(conn);
+ hci_conn_put(conn);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -5219,7 +5198,7 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
if (conn) {
hci_conn_hold(conn);
- cmd->user_data = conn;
+ cmd->user_data = hci_conn_get(conn);
hci_cp.handle = cpu_to_le16(conn->handle);
hci_cp.which = 0x01; /* Piconet clock */
@@ -5235,27 +5214,6 @@ unlock:
return err;
}
-/* Helper for Add/Remove Device commands */
-static void update_page_scan(struct hci_dev *hdev, u8 scan)
-{
- if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
- return;
-
- if (!hdev_is_powered(hdev))
- return;
-
- /* If HCI_CONNECTABLE is set then Add/Remove Device should not
- * make any changes to page scanning.
- */
- if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
- return;
-
- if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
- scan |= SCAN_INQUIRY;
-
- hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
-}
-
static void device_added(struct sock *sk, struct hci_dev *hdev,
bdaddr_t *bdaddr, u8 type, u8 action)
{
@@ -5291,8 +5249,6 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
hci_dev_lock(hdev);
if (cp->addr.type == BDADDR_BREDR) {
- bool update_scan;
-
/* Only incoming connections action is supported for now */
if (cp->action != 0x01) {
err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
@@ -5301,15 +5257,12 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- update_scan = list_empty(&hdev->whitelist);
-
err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
cp->addr.type);
if (err)
goto unlock;
- if (update_scan)
- update_page_scan(hdev, SCAN_PAGE);
+ hci_update_page_scan(hdev, NULL);
goto added;
}
@@ -5392,8 +5345,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- if (list_empty(&hdev->whitelist))
- update_page_scan(hdev, SCAN_DISABLED);
+ hci_update_page_scan(hdev, NULL);
device_removed(sk, hdev, &cp->addr.bdaddr,
cp->addr.type);
@@ -5444,7 +5396,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
kfree(b);
}
- update_page_scan(hdev, SCAN_DISABLED);
+ hci_update_page_scan(hdev, NULL);
list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
@@ -5969,8 +5921,8 @@ static int powered_update_hci(struct hci_dev *hdev)
sizeof(link_sec), &link_sec);
if (lmp_bredr_capable(hdev)) {
- if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
- set_bredr_scan(&req);
+ write_fast_connectable(&req, false);
+ hci_update_page_scan(hdev, &req);
update_class(&req);
update_name(&req);
update_eir(&req);
@@ -6281,25 +6233,35 @@ static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
mgmt_pending_remove(cmd);
}
+bool mgmt_powering_down(struct hci_dev *hdev)
+{
+ struct pending_cmd *cmd;
+ struct mgmt_mode *cp;
+
+ cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
+ if (!cmd)
+ return false;
+
+ cp = cmd->param;
+ if (!cp->val)
+ return true;
+
+ return false;
+}
+
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 reason,
bool mgmt_connected)
{
struct mgmt_ev_device_disconnected ev;
- struct pending_cmd *power_off;
struct sock *sk = NULL;
- power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
- if (power_off) {
- struct mgmt_mode *cp = power_off->param;
-
- /* The connection is still in hci_conn_hash so test for 1
- * instead of 0 to know if this is the last one.
- */
- if (!cp->val && hci_conn_count(hdev) == 1) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- }
+ /* The connection is still in hci_conn_hash so test for 1
+ * instead of 0 to know if this is the last one.
+ */
+ if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
+ cancel_delayed_work(&hdev->power_off);
+ queue_work(hdev->req_workqueue, &hdev->power_off.work);
}
if (!mgmt_connected)
@@ -6359,19 +6321,13 @@ void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 status)
{
struct mgmt_ev_connect_failed ev;
- struct pending_cmd *power_off;
-
- power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
- if (power_off) {
- struct mgmt_mode *cp = power_off->param;
- /* The connection is still in hci_conn_hash so test for 1
- * instead of 0 to know if this is the last one.
- */
- if (!cp->val && hci_conn_count(hdev) == 1) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- }
+ /* The connection is still in hci_conn_hash so test for 1
+ * instead of 0 to know if this is the last one.
+ */
+ if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
+ cancel_delayed_work(&hdev->power_off);
+ queue_work(hdev->req_workqueue, &hdev->power_off.work);
}
bacpy(&ev.addr.bdaddr, bdaddr);
@@ -6529,16 +6485,23 @@ int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
-void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
- u8 addr_type, u8 status)
+void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
struct mgmt_ev_auth_failed ev;
+ struct pending_cmd *cmd;
+ u8 status = mgmt_status(hci_status);
- bacpy(&ev.addr.bdaddr, bdaddr);
- ev.addr.type = link_to_bdaddr(link_type, addr_type);
- ev.status = mgmt_status(status);
+ bacpy(&ev.addr.bdaddr, &conn->dst);
+ ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
+ ev.status = status;
- mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
+ cmd = find_pairing(conn);
+
+ mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
+ cmd ? cmd->sk : NULL);
+
+ if (cmd)
+ pairing_complete(cmd, status);
}
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
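
[Editor's note: a pattern worth calling out in the mgmt.c hunks: wherever a pending command stashes a connection in cmd->user_data, the plain pointer assignment becomes hci_conn_get(), and the completion path gains a matching hci_conn_put() next to the existing hci_conn_drop(). As I read it, hold/drop drive the connection's idle/disconnect handling while get/put pin the struct itself, so both pairs are needed:]

    hci_conn_hold(conn);                  /* keep the link from timing out */
    cmd->user_data = hci_conn_get(conn);  /* keep the struct alive */

    /* ... on completion ... */
    hci_conn_drop(conn);
    hci_conn_put(conn);
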
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index fd3294300803..f09b6b65cf6b 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -31,9 +31,12 @@
#include "smp.h"
+#define SMP_ALLOW_CMD(smp, code) set_bit(code, &smp->allow_cmd)
+
#define SMP_TIMEOUT msecs_to_jiffies(30000)
#define AUTH_REQ_MASK 0x07
+#define KEY_DIST_MASK 0x07
enum {
SMP_FLAG_TK_VALID,
@@ -44,7 +47,10 @@ enum {
};
struct smp_chan {
- struct l2cap_conn *conn;
+ struct l2cap_conn *conn;
+ struct delayed_work security_timer;
+ unsigned long allow_cmd; /* Bitmask of allowed commands */
+
u8 preq[7]; /* SMP Pairing Request */
u8 prsp[7]; /* SMP Pairing Response */
u8 prnd[16]; /* SMP Pairing Random (local) */
@@ -139,12 +145,18 @@ static int smp_ah(struct crypto_blkcipher *tfm, u8 irk[16], u8 r[3], u8 res[3])
return 0;
}
-bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16],
- bdaddr_t *bdaddr)
+bool smp_irk_matches(struct hci_dev *hdev, u8 irk[16], bdaddr_t *bdaddr)
{
+ struct l2cap_chan *chan = hdev->smp_data;
+ struct crypto_blkcipher *tfm;
u8 hash[3];
int err;
+ if (!chan || !chan->data)
+ return false;
+
+ tfm = chan->data;
+
BT_DBG("RPA %pMR IRK %*phN", bdaddr, 16, irk);
err = smp_ah(tfm, irk, &bdaddr->b[3], hash);
@@ -154,10 +166,17 @@ bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16],
return !memcmp(bdaddr->b, hash, 3);
}
-int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa)
+int smp_generate_rpa(struct hci_dev *hdev, u8 irk[16], bdaddr_t *rpa)
{
+ struct l2cap_chan *chan = hdev->smp_data;
+ struct crypto_blkcipher *tfm;
int err;
+ if (!chan || !chan->data)
+ return -EOPNOTSUPP;
+
+ tfm = chan->data;
+
get_random_bytes(&rpa->b[3], 3);
rpa->b[5] &= 0x3f; /* Clear two most significant bits */
@@ -235,47 +254,38 @@ static int smp_s1(struct smp_chan *smp, u8 k[16], u8 r1[16], u8 r2[16],
return err;
}
-static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
- u16 dlen, void *data)
+static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
{
- struct sk_buff *skb;
- struct l2cap_hdr *lh;
- int len;
-
- len = L2CAP_HDR_SIZE + sizeof(code) + dlen;
-
- if (len > conn->mtu)
- return NULL;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp;
+ struct kvec iv[2];
+ struct msghdr msg;
- skb = bt_skb_alloc(len, GFP_ATOMIC);
- if (!skb)
- return NULL;
+ if (!chan)
+ return;
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->len = cpu_to_le16(sizeof(code) + dlen);
- lh->cid = cpu_to_le16(L2CAP_CID_SMP);
+ BT_DBG("code 0x%2.2x", code);
- memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code));
+ iv[0].iov_base = &code;
+ iv[0].iov_len = 1;
- memcpy(skb_put(skb, dlen), data, dlen);
+ iv[1].iov_base = data;
+ iv[1].iov_len = len;
- return skb;
-}
+ memset(&msg, 0, sizeof(msg));
-static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
-{
- struct sk_buff *skb = smp_build_cmd(conn, code, len, data);
+ msg.msg_iov = (struct iovec *) &iv;
+ msg.msg_iovlen = 2;
- BT_DBG("code 0x%2.2x", code);
+ l2cap_chan_send(chan, &msg, 1 + len);
- if (!skb)
+ if (!chan->data)
return;
- skb->priority = HCI_PRIO_MAX;
- hci_send_acl(conn->hchan, skb, 0);
+ smp = chan->data;
- cancel_delayed_work_sync(&conn->security_timer);
- schedule_delayed_work(&conn->security_timer, SMP_TIMEOUT);
+ cancel_delayed_work_sync(&smp->security_timer);
+ schedule_delayed_work(&smp->security_timer, SMP_TIMEOUT);
}
static __u8 authreq_to_seclevel(__u8 authreq)
@@ -302,7 +312,8 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
struct smp_cmd_pairing *req,
struct smp_cmd_pairing *rsp, __u8 authreq)
{
- struct smp_chan *smp = conn->smp_chan;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
struct hci_conn *hcon = conn->hcon;
struct hci_dev *hdev = hcon->hdev;
u8 local_dist = 0, remote_dist = 0;
@@ -345,7 +356,8 @@ static void build_pairing_cmd(struct l2cap_conn *conn,
static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
{
- struct smp_chan *smp = conn->smp_chan;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
(max_key_size < SMP_MIN_ENC_KEY_SIZE))
@@ -356,21 +368,60 @@ static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
return 0;
}
+static void smp_chan_destroy(struct l2cap_conn *conn)
+{
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
+ bool complete;
+
+ BUG_ON(!smp);
+
+ cancel_delayed_work_sync(&smp->security_timer);
+
+ complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags);
+ mgmt_smp_complete(conn->hcon, complete);
+
+ kfree(smp->csrk);
+ kfree(smp->slave_csrk);
+
+ crypto_free_blkcipher(smp->tfm_aes);
+
+ /* If pairing failed, clean up any keys we might have */
+ if (!complete) {
+ if (smp->ltk) {
+ list_del(&smp->ltk->list);
+ kfree(smp->ltk);
+ }
+
+ if (smp->slave_ltk) {
+ list_del(&smp->slave_ltk->list);
+ kfree(smp->slave_ltk);
+ }
+
+ if (smp->remote_irk) {
+ list_del(&smp->remote_irk->list);
+ kfree(smp->remote_irk);
+ }
+ }
+
+ chan->data = NULL;
+ kfree(smp);
+ hci_conn_drop(conn->hcon);
+}
+
static void smp_failure(struct l2cap_conn *conn, u8 reason)
{
struct hci_conn *hcon = conn->hcon;
+ struct l2cap_chan *chan = conn->smp;
if (reason)
smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
&reason);
clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags);
- mgmt_auth_failed(hcon->hdev, &hcon->dst, hcon->type, hcon->dst_type,
- HCI_ERROR_AUTH_FAILURE);
-
- cancel_delayed_work_sync(&conn->security_timer);
+ mgmt_auth_failed(hcon, HCI_ERROR_AUTH_FAILURE);
- if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
+ if (chan->data)
smp_chan_destroy(conn);
}
@@ -405,7 +456,8 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
u8 local_io, u8 remote_io)
{
struct hci_conn *hcon = conn->hcon;
- struct smp_chan *smp = conn->smp_chan;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
u8 method;
u32 passkey = 0;
int ret = 0;
@@ -442,8 +494,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
}
/* Not Just Works/Confirm results in MITM Authentication */
- if (method != JUST_CFM)
+ if (method != JUST_CFM) {
set_bit(SMP_FLAG_MITM_AUTH, &smp->flags);
+ if (hcon->pending_sec_level < BT_SECURITY_HIGH)
+ hcon->pending_sec_level = BT_SECURITY_HIGH;
+ }
/* If both devices have Keyboard-Display I/O, the master
* Confirms and the slave Enters the passkey.
@@ -503,6 +558,11 @@ static u8 smp_confirm(struct smp_chan *smp)
smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
+ if (conn->hcon->out)
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
+ else
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+
return 0;
}
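
[Editor's note: the SMP_ALLOW_CMD() calls above are one half of the new opcode gate. The sending side arms exactly the PDU it expects next; the receive path (see smp_sig_channel() later in this patch) consumes the bit or drops the frame. A minimal sketch — safe because SMP_CMD_MAX (0x0b) keeps every opcode well below BITS_PER_LONG:]

    /* sender: arm the single opcode we are willing to accept next */
    SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);

    /* receiver: consume the bit, or treat the PDU as unexpected */
    if (smp && !test_and_clear_bit(code, &smp->allow_cmd))
        goto drop;
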
@@ -574,82 +634,262 @@ static u8 smp_random(struct smp_chan *smp)
return 0;
}
-static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
+static void smp_notify_keys(struct l2cap_conn *conn)
{
- struct smp_chan *smp;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
+ struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+ struct smp_cmd_pairing *req = (void *) &smp->preq[1];
+ struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1];
+ bool persistent;
- smp = kzalloc(sizeof(*smp), GFP_ATOMIC);
- if (!smp) {
- clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
- return NULL;
+ if (smp->remote_irk) {
+ mgmt_new_irk(hdev, smp->remote_irk);
+ /* Now that user space can be considered to know the
+ * identity address, track the connection based on it
+ * from now on.
+ */
+ bacpy(&hcon->dst, &smp->remote_irk->bdaddr);
+ hcon->dst_type = smp->remote_irk->addr_type;
+ queue_work(hdev->workqueue, &conn->id_addr_update_work);
+
+ /* When receiving an identity resolving key for
+ * a remote device that does not use a resolvable
+ * private address, just remove the key so that
+ * it is possible to use the controller white
+ * list for scanning.
+ *
+ * Userspace will have been told not to store this
+ * key at this point, so it is safe to just
+ * remove it.
+ */
+ if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
+ list_del(&smp->remote_irk->list);
+ kfree(smp->remote_irk);
+ smp->remote_irk = NULL;
+ }
}
- smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(smp->tfm_aes)) {
- BT_ERR("Unable to create ECB crypto context");
- kfree(smp);
- clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags);
- return NULL;
+ /* The LTKs and CSRKs should be persistent only if both sides
+ * had the bonding bit set in their authentication requests.
+ */
+ persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING);
+
+ if (smp->csrk) {
+ smp->csrk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->csrk->bdaddr, &hcon->dst);
+ mgmt_new_csrk(hdev, smp->csrk, persistent);
}
- smp->conn = conn;
- conn->smp_chan = smp;
+ if (smp->slave_csrk) {
+ smp->slave_csrk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->slave_csrk->bdaddr, &hcon->dst);
+ mgmt_new_csrk(hdev, smp->slave_csrk, persistent);
+ }
- hci_conn_hold(conn->hcon);
+ if (smp->ltk) {
+ smp->ltk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->ltk->bdaddr, &hcon->dst);
+ mgmt_new_ltk(hdev, smp->ltk, persistent);
+ }
- return smp;
+ if (smp->slave_ltk) {
+ smp->slave_ltk->bdaddr_type = hcon->dst_type;
+ bacpy(&smp->slave_ltk->bdaddr, &hcon->dst);
+ mgmt_new_ltk(hdev, smp->slave_ltk, persistent);
+ }
}
-void smp_chan_destroy(struct l2cap_conn *conn)
+static void smp_allow_key_dist(struct smp_chan *smp)
{
- struct smp_chan *smp = conn->smp_chan;
- bool complete;
+ /* Allow the first expected phase 3 PDU. The rest of the PDUs
+ * will be allowed in each PDU handler to ensure we receive
+ * them in the correct order.
+ */
+ if (smp->remote_key_dist & SMP_DIST_ENC_KEY)
+ SMP_ALLOW_CMD(smp, SMP_CMD_ENCRYPT_INFO);
+ else if (smp->remote_key_dist & SMP_DIST_ID_KEY)
+ SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_INFO);
+ else if (smp->remote_key_dist & SMP_DIST_SIGN)
+ SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO);
+}
- BUG_ON(!smp);
+static void smp_distribute_keys(struct smp_chan *smp)
+{
+ struct smp_cmd_pairing *req, *rsp;
+ struct l2cap_conn *conn = smp->conn;
+ struct hci_conn *hcon = conn->hcon;
+ struct hci_dev *hdev = hcon->hdev;
+ __u8 *keydist;
- complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags);
- mgmt_smp_complete(conn->hcon, complete);
+ BT_DBG("conn %p", conn);
- kfree(smp->csrk);
- kfree(smp->slave_csrk);
+ rsp = (void *) &smp->prsp[1];
- crypto_free_blkcipher(smp->tfm_aes);
+ /* The responder sends its keys first */
+ if (hcon->out && (smp->remote_key_dist & KEY_DIST_MASK)) {
+ smp_allow_key_dist(smp);
+ return;
+ }
- /* If pairing failed clean up any keys we might have */
- if (!complete) {
- if (smp->ltk) {
- list_del(&smp->ltk->list);
- kfree(smp->ltk);
- }
+ req = (void *) &smp->preq[1];
- if (smp->slave_ltk) {
- list_del(&smp->slave_ltk->list);
- kfree(smp->slave_ltk);
- }
+ if (hcon->out) {
+ keydist = &rsp->init_key_dist;
+ *keydist &= req->init_key_dist;
+ } else {
+ keydist = &rsp->resp_key_dist;
+ *keydist &= req->resp_key_dist;
+ }
- if (smp->remote_irk) {
- list_del(&smp->remote_irk->list);
- kfree(smp->remote_irk);
+ BT_DBG("keydist 0x%x", *keydist);
+
+ if (*keydist & SMP_DIST_ENC_KEY) {
+ struct smp_cmd_encrypt_info enc;
+ struct smp_cmd_master_ident ident;
+ struct smp_ltk *ltk;
+ u8 authenticated;
+ __le16 ediv;
+ __le64 rand;
+
+ get_random_bytes(enc.ltk, sizeof(enc.ltk));
+ get_random_bytes(&ediv, sizeof(ediv));
+ get_random_bytes(&rand, sizeof(rand));
+
+ smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
+
+ authenticated = hcon->sec_level == BT_SECURITY_HIGH;
+ ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type,
+ SMP_LTK_SLAVE, authenticated, enc.ltk,
+ smp->enc_key_size, ediv, rand);
+ smp->slave_ltk = ltk;
+
+ ident.ediv = ediv;
+ ident.rand = rand;
+
+ smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
+
+ *keydist &= ~SMP_DIST_ENC_KEY;
+ }
+
+ if (*keydist & SMP_DIST_ID_KEY) {
+ struct smp_cmd_ident_addr_info addrinfo;
+ struct smp_cmd_ident_info idinfo;
+
+ memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk));
+
+ smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo);
+
+ /* The hci_conn contains the local identity address
+ * after the connection has been established.
+ *
+ * This is true even when the connection has been
+ * established using a resolvable random address.
+ */
+ bacpy(&addrinfo.bdaddr, &hcon->src);
+ addrinfo.addr_type = hcon->src_type;
+
+ smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
+ &addrinfo);
+
+ *keydist &= ~SMP_DIST_ID_KEY;
+ }
+
+ if (*keydist & SMP_DIST_SIGN) {
+ struct smp_cmd_sign_info sign;
+ struct smp_csrk *csrk;
+
+ /* Generate a new random key */
+ get_random_bytes(sign.csrk, sizeof(sign.csrk));
+
+ csrk = kzalloc(sizeof(*csrk), GFP_KERNEL);
+ if (csrk) {
+ csrk->master = 0x00;
+ memcpy(csrk->val, sign.csrk, sizeof(csrk->val));
}
+ smp->slave_csrk = csrk;
+
+ smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);
+
+ *keydist &= ~SMP_DIST_SIGN;
}
- kfree(smp);
- conn->smp_chan = NULL;
- hci_conn_drop(conn->hcon);
+ /* If there are still keys to be received wait for them */
+ if (smp->remote_key_dist & KEY_DIST_MASK) {
+ smp_allow_key_dist(smp);
+ return;
+ }
+
+ set_bit(SMP_FLAG_COMPLETE, &smp->flags);
+ smp_notify_keys(conn);
+
+ smp_chan_destroy(conn);
+}
+
+static void smp_timeout(struct work_struct *work)
+{
+ struct smp_chan *smp = container_of(work, struct smp_chan,
+ security_timer.work);
+ struct l2cap_conn *conn = smp->conn;
+
+ BT_DBG("conn %p", conn);
+
+ hci_disconnect(conn->hcon, HCI_ERROR_REMOTE_USER_TERM);
+}
+
+static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
+{
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp;
+
+ smp = kzalloc(sizeof(*smp), GFP_ATOMIC);
+ if (!smp)
+ return NULL;
+
+ smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(smp->tfm_aes)) {
+ BT_ERR("Unable to create ECB crypto context");
+ kfree(smp);
+ return NULL;
+ }
+
+ smp->conn = conn;
+ chan->data = smp;
+
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_FAIL);
+
+ INIT_DELAYED_WORK(&smp->security_timer, smp_timeout);
+
+ hci_conn_hold(conn->hcon);
+
+ return smp;
}
int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
{
struct l2cap_conn *conn = hcon->l2cap_data;
+ struct l2cap_chan *chan;
struct smp_chan *smp;
u32 value;
+ int err;
BT_DBG("");
- if (!conn || !test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
+ if (!conn)
+ return -ENOTCONN;
+
+ chan = conn->smp;
+ if (!chan)
return -ENOTCONN;
- smp = conn->smp_chan;
+ l2cap_chan_lock(chan);
+ if (!chan->data) {
+ err = -ENOTCONN;
+ goto unlock;
+ }
+
+ smp = chan->data;
switch (mgmt_op) {
case MGMT_OP_USER_PASSKEY_REPLY:
@@ -664,12 +904,16 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
case MGMT_OP_USER_PASSKEY_NEG_REPLY:
case MGMT_OP_USER_CONFIRM_NEG_REPLY:
smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED);
- return 0;
+ err = 0;
+ goto unlock;
default:
smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED);
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto unlock;
}
+ err = 0;
+
/* If it is our turn to send Pairing Confirm, do so now */
if (test_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) {
u8 rsp = smp_confirm(smp);
@@ -677,12 +921,15 @@ int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey)
smp_failure(conn, rsp);
}
- return 0;
+unlock:
+ l2cap_chan_unlock(chan);
+ return err;
}
static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing rsp, *req = (void *) skb->data;
+ struct l2cap_chan *chan = conn->smp;
struct hci_dev *hdev = conn->hcon->hdev;
struct smp_chan *smp;
u8 key_size, auth, sec_level;
@@ -696,26 +943,30 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (conn->hcon->role != HCI_ROLE_SLAVE)
return SMP_CMD_NOTSUPP;
- if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags))
+ if (!chan->data)
smp = smp_chan_create(conn);
else
- smp = conn->smp_chan;
+ smp = chan->data;
if (!smp)
return SMP_UNSPECIFIED;
+ /* We didn't start the pairing, so match remote */
+ auth = req->auth_req & AUTH_REQ_MASK;
+
if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
- (req->auth_req & SMP_AUTH_BONDING))
+ (auth & SMP_AUTH_BONDING))
return SMP_PAIRING_NOTSUPP;
smp->preq[0] = SMP_CMD_PAIRING_REQ;
memcpy(&smp->preq[1], req, sizeof(*req));
skb_pull(skb, sizeof(*req));
- /* We didn't start the pairing, so match remote */
- auth = req->auth_req;
+ if (conn->hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
+ sec_level = BT_SECURITY_MEDIUM;
+ else
+ sec_level = authreq_to_seclevel(auth);
- sec_level = authreq_to_seclevel(auth);
if (sec_level > conn->hcon->pending_sec_level)
conn->hcon->pending_sec_level = sec_level;
@@ -741,6 +992,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
memcpy(&smp->prsp[1], &rsp, sizeof(rsp));
smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM);
/* Request setup of TK */
ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability);
@@ -753,8 +1005,9 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
- struct smp_chan *smp = conn->smp_chan;
- u8 key_size, auth = SMP_AUTH_NONE;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
+ u8 key_size, auth;
int ret;
BT_DBG("conn %p", conn);
@@ -773,6 +1026,8 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
if (check_enc_key_size(conn, key_size))
return SMP_ENC_KEY_SIZE;
+ auth = rsp->auth_req & AUTH_REQ_MASK;
+
/* If we need MITM, check that it can be achieved */
if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
u8 method;
@@ -793,11 +1048,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
*/
smp->remote_key_dist &= rsp->resp_key_dist;
- if ((req->auth_req & SMP_AUTH_BONDING) &&
- (rsp->auth_req & SMP_AUTH_BONDING))
- auth = SMP_AUTH_BONDING;
-
- auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;
+ auth |= req->auth_req;
ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
if (ret)
@@ -814,7 +1065,8 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct smp_chan *smp = conn->smp_chan;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
@@ -824,10 +1076,14 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf));
skb_pull(skb, sizeof(smp->pcnf));
- if (conn->hcon->out)
+ if (conn->hcon->out) {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
smp->prnd);
- else if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM);
+ return 0;
+ }
+
+ if (test_bit(SMP_FLAG_TK_VALID, &smp->flags))
return smp_confirm(smp);
else
set_bit(SMP_FLAG_CFM_PENDING, &smp->flags);
@@ -837,7 +1093,8 @@ static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
{
- struct smp_chan *smp = conn->smp_chan;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
BT_DBG("conn %p", conn);
@@ -860,7 +1117,7 @@ static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
if (!key)
return false;
- if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
+ if (smp_ltk_sec_level(key) < sec_level)
return false;
if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
@@ -903,7 +1160,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
struct smp_cmd_pairing cp;
struct hci_conn *hcon = conn->hcon;
struct smp_chan *smp;
- u8 sec_level;
+ u8 sec_level, auth;
BT_DBG("conn %p", conn);
@@ -913,7 +1170,13 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (hcon->role != HCI_ROLE_MASTER)
return SMP_CMD_NOTSUPP;
- sec_level = authreq_to_seclevel(rp->auth_req);
+ auth = rp->auth_req & AUTH_REQ_MASK;
+
+ if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT)
+ sec_level = BT_SECURITY_MEDIUM;
+ else
+ sec_level = authreq_to_seclevel(auth);
+
if (smp_sufficient_security(hcon, sec_level))
return 0;
@@ -923,26 +1186,24 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
return 0;
- if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
- return 0;
-
smp = smp_chan_create(conn);
if (!smp)
return SMP_UNSPECIFIED;
if (!test_bit(HCI_BONDABLE, &hcon->hdev->dev_flags) &&
- (rp->auth_req & SMP_AUTH_BONDING))
+ (auth & SMP_AUTH_BONDING))
return SMP_PAIRING_NOTSUPP;
skb_pull(skb, sizeof(*rp));
memset(&cp, 0, sizeof(cp));
- build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
+ build_pairing_cmd(conn, &cp, NULL, auth);
smp->preq[0] = SMP_CMD_PAIRING_REQ;
memcpy(&smp->preq[1], &cp, sizeof(cp));
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
return 0;
}
@@ -950,8 +1211,10 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
{
struct l2cap_conn *conn = hcon->l2cap_data;
+ struct l2cap_chan *chan;
struct smp_chan *smp;
__u8 authreq;
+ int ret;
BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
@@ -959,6 +1222,8 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
if (!conn)
return 1;
+ chan = conn->smp;
+
if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags))
return 1;
@@ -972,12 +1237,19 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
return 0;
- if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
- return 0;
+ l2cap_chan_lock(chan);
+
+ /* If SMP is already in progress, ignore this request */
+ if (chan->data) {
+ ret = 0;
+ goto unlock;
+ }
smp = smp_chan_create(conn);
- if (!smp)
- return 1;
+ if (!smp) {
+ ret = 1;
+ goto unlock;
+ }
authreq = seclevel_to_authreq(sec_level);
@@ -996,30 +1268,34 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
memcpy(&smp->preq[1], &cp, sizeof(cp));
smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP);
} else {
struct smp_cmd_security_req cp;
cp.auth_req = authreq;
smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
+ SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ);
}
set_bit(SMP_FLAG_INITIATOR, &smp->flags);
+ ret = 0;
- return 0;
+unlock:
+ l2cap_chan_unlock(chan);
+ return ret;
}
static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_encrypt_info *rp = (void *) skb->data;
- struct smp_chan *smp = conn->smp_chan;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
BT_DBG("conn %p", conn);
if (skb->len < sizeof(*rp))
return SMP_INVALID_PARAMS;
- /* Ignore this PDU if it wasn't requested */
- if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
- return 0;
+ SMP_ALLOW_CMD(smp, SMP_CMD_MASTER_IDENT);
skb_pull(skb, sizeof(*rp));
@@ -1031,7 +1307,8 @@ static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_master_ident *rp = (void *) skb->data;
- struct smp_chan *smp = conn->smp_chan;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
struct hci_dev *hdev = conn->hcon->hdev;
struct hci_conn *hcon = conn->hcon;
struct smp_ltk *ltk;
@@ -1042,13 +1319,14 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
if (skb->len < sizeof(*rp))
return SMP_INVALID_PARAMS;
- /* Ignore this PDU if it wasn't requested */
- if (!(smp->remote_key_dist & SMP_DIST_ENC_KEY))
- return 0;
-
/* Mark the information as received */
smp->remote_key_dist &= ~SMP_DIST_ENC_KEY;
+ if (smp->remote_key_dist & SMP_DIST_ID_KEY)
+ SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_INFO);
+ else if (smp->remote_key_dist & SMP_DIST_SIGN)
+ SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO);
+
skb_pull(skb, sizeof(*rp));
hci_dev_lock(hdev);
@@ -1057,8 +1335,8 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
authenticated, smp->tk, smp->enc_key_size,
rp->ediv, rp->rand);
smp->ltk = ltk;
- if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
- smp_distribute_keys(conn);
+ if (!(smp->remote_key_dist & KEY_DIST_MASK))
+ smp_distribute_keys(smp);
hci_dev_unlock(hdev);
return 0;
@@ -1067,16 +1345,15 @@ static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_ident_info *info = (void *) skb->data;
- struct smp_chan *smp = conn->smp_chan;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
BT_DBG("");
if (skb->len < sizeof(*info))
return SMP_INVALID_PARAMS;
- /* Ignore this PDU if it wasn't requested */
- if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
- return 0;
+ SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_ADDR_INFO);
skb_pull(skb, sizeof(*info));
@@ -1089,7 +1366,8 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
struct sk_buff *skb)
{
struct smp_cmd_ident_addr_info *info = (void *) skb->data;
- struct smp_chan *smp = conn->smp_chan;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
struct hci_conn *hcon = conn->hcon;
bdaddr_t rpa;
@@ -1098,13 +1376,12 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
if (skb->len < sizeof(*info))
return SMP_INVALID_PARAMS;
- /* Ignore this PDU if it wasn't requested */
- if (!(smp->remote_key_dist & SMP_DIST_ID_KEY))
- return 0;
-
/* Mark the information as received */
smp->remote_key_dist &= ~SMP_DIST_ID_KEY;
+ if (smp->remote_key_dist & SMP_DIST_SIGN)
+ SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO);
+
skb_pull(skb, sizeof(*info));
hci_dev_lock(hcon->hdev);
@@ -1133,7 +1410,8 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
smp->id_addr_type, smp->irk, &rpa);
distribute:
- smp_distribute_keys(conn);
+ if (!(smp->remote_key_dist & KEY_DIST_MASK))
+ smp_distribute_keys(smp);
hci_dev_unlock(hcon->hdev);
@@ -1143,7 +1421,8 @@ distribute:
static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_sign_info *rp = (void *) skb->data;
- struct smp_chan *smp = conn->smp_chan;
+ struct l2cap_chan *chan = conn->smp;
+ struct smp_chan *smp = chan->data;
struct hci_dev *hdev = conn->hcon->hdev;
struct smp_csrk *csrk;
@@ -1152,10 +1431,6 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
if (skb->len < sizeof(*rp))
return SMP_INVALID_PARAMS;
- /* Ignore this PDU if it wasn't requested */
- if (!(smp->remote_key_dist & SMP_DIST_SIGN))
- return 0;
-
/* Mark the information as received */
smp->remote_key_dist &= ~SMP_DIST_SIGN;
@@ -1168,16 +1443,17 @@ static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb)
memcpy(csrk->val, rp->csrk, sizeof(csrk->val));
}
smp->csrk = csrk;
- if (!(smp->remote_key_dist & SMP_DIST_SIGN))
- smp_distribute_keys(conn);
+ smp_distribute_keys(smp);
hci_dev_unlock(hdev);
return 0;
}
-int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
+static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb)
{
+ struct l2cap_conn *conn = chan->conn;
struct hci_conn *hcon = conn->hcon;
+ struct smp_chan *smp;
__u8 code, reason;
int err = 0;
@@ -1186,13 +1462,10 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
return 0;
}
- if (skb->len < 1) {
- kfree_skb(skb);
+ if (skb->len < 1)
return -EILSEQ;
- }
if (!test_bit(HCI_LE_ENABLED, &hcon->hdev->dev_flags)) {
- err = -EOPNOTSUPP;
reason = SMP_PAIRING_NOTSUPP;
goto done;
}
@@ -1200,18 +1473,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
code = skb->data[0];
skb_pull(skb, sizeof(code));
- /*
- * The SMP context must be initialized for all other PDUs except
- * pairing and security requests. If we get any other PDU when
- * not initialized simply disconnect (done if this function
- * returns an error).
+ smp = chan->data;
+
+ if (code > SMP_CMD_MAX)
+ goto drop;
+
+ if (smp && !test_and_clear_bit(code, &smp->allow_cmd))
+ goto drop;
+
+ /* If we don't have a context, the only allowed commands are
+ * pairing request and security request.
*/
- if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
- !conn->smp_chan) {
- BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
- kfree_skb(skb);
- return -EOPNOTSUPP;
- }
+ if (!smp && code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ)
+ goto drop;
switch (code) {
case SMP_CMD_PAIRING_REQ:
@@ -1220,7 +1494,6 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
case SMP_CMD_PAIRING_FAIL:
smp_failure(conn, 0);
- reason = 0;
err = -EPERM;
break;
@@ -1262,197 +1535,217 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
default:
BT_DBG("Unknown command code 0x%2.2x", code);
-
reason = SMP_CMD_NOTSUPP;
- err = -EOPNOTSUPP;
goto done;
}
done:
- if (reason)
- smp_failure(conn, reason);
+ if (!err) {
+ if (reason)
+ smp_failure(conn, reason);
+ kfree_skb(skb);
+ }
- kfree_skb(skb);
return err;
+
+drop:
+ BT_ERR("%s unexpected SMP command 0x%02x from %pMR", hcon->hdev->name,
+ code, &hcon->dst);
+ kfree_skb(skb);
+ return 0;
}
-static void smp_notify_keys(struct l2cap_conn *conn)
+static void smp_teardown_cb(struct l2cap_chan *chan, int err)
{
- struct smp_chan *smp = conn->smp_chan;
- struct hci_conn *hcon = conn->hcon;
- struct hci_dev *hdev = hcon->hdev;
- struct smp_cmd_pairing *req = (void *) &smp->preq[1];
- struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1];
- bool persistent;
+ struct l2cap_conn *conn = chan->conn;
- if (smp->remote_irk) {
- mgmt_new_irk(hdev, smp->remote_irk);
- /* Now that user space can be considered to know the
- * identity address track the connection based on it
- * from now on.
- */
- bacpy(&hcon->dst, &smp->remote_irk->bdaddr);
- hcon->dst_type = smp->remote_irk->addr_type;
- l2cap_conn_update_id_addr(hcon);
+ BT_DBG("chan %p", chan);
- /* When receiving an indentity resolving key for
- * a remote device that does not use a resolvable
- * private address, just remove the key so that
- * it is possible to use the controller white
- * list for scanning.
- *
- * Userspace will have been told to not store
- * this key at this point. So it is safe to
- * just remove it.
- */
- if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
- list_del(&smp->remote_irk->list);
- kfree(smp->remote_irk);
- smp->remote_irk = NULL;
- }
- }
+ if (chan->data)
+ smp_chan_destroy(conn);
- /* The LTKs and CSRKs should be persistent only if both sides
- * had the bonding bit set in their authentication requests.
- */
- persistent = !!((req->auth_req & rsp->auth_req) & SMP_AUTH_BONDING);
+ conn->smp = NULL;
+ l2cap_chan_put(chan);
+}
- if (smp->csrk) {
- smp->csrk->bdaddr_type = hcon->dst_type;
- bacpy(&smp->csrk->bdaddr, &hcon->dst);
- mgmt_new_csrk(hdev, smp->csrk, persistent);
- }
+static void smp_resume_cb(struct l2cap_chan *chan)
+{
+ struct smp_chan *smp = chan->data;
+ struct l2cap_conn *conn = chan->conn;
+ struct hci_conn *hcon = conn->hcon;
- if (smp->slave_csrk) {
- smp->slave_csrk->bdaddr_type = hcon->dst_type;
- bacpy(&smp->slave_csrk->bdaddr, &hcon->dst);
- mgmt_new_csrk(hdev, smp->slave_csrk, persistent);
- }
+ BT_DBG("chan %p", chan);
- if (smp->ltk) {
- smp->ltk->bdaddr_type = hcon->dst_type;
- bacpy(&smp->ltk->bdaddr, &hcon->dst);
- mgmt_new_ltk(hdev, smp->ltk, persistent);
- }
+ if (!smp)
+ return;
- if (smp->slave_ltk) {
- smp->slave_ltk->bdaddr_type = hcon->dst_type;
- bacpy(&smp->slave_ltk->bdaddr, &hcon->dst);
- mgmt_new_ltk(hdev, smp->slave_ltk, persistent);
- }
+ if (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags))
+ return;
+
+ cancel_delayed_work(&smp->security_timer);
+
+ smp_distribute_keys(smp);
}
-int smp_distribute_keys(struct l2cap_conn *conn)
+static void smp_ready_cb(struct l2cap_chan *chan)
{
- struct smp_cmd_pairing *req, *rsp;
- struct smp_chan *smp = conn->smp_chan;
- struct hci_conn *hcon = conn->hcon;
- struct hci_dev *hdev = hcon->hdev;
- __u8 *keydist;
+ struct l2cap_conn *conn = chan->conn;
- BT_DBG("conn %p", conn);
+ BT_DBG("chan %p", chan);
- if (!test_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
- return 0;
+ conn->smp = chan;
+ l2cap_chan_hold(chan);
+}
- rsp = (void *) &smp->prsp[1];
+static int smp_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ int err;
- /* The responder sends its keys first */
- if (hcon->out && (smp->remote_key_dist & 0x07))
- return 0;
+ BT_DBG("chan %p", chan);
- req = (void *) &smp->preq[1];
+ err = smp_sig_channel(chan, skb);
+ if (err) {
+ struct smp_chan *smp = chan->data;
- if (hcon->out) {
- keydist = &rsp->init_key_dist;
- *keydist &= req->init_key_dist;
- } else {
- keydist = &rsp->resp_key_dist;
- *keydist &= req->resp_key_dist;
+ if (smp)
+ cancel_delayed_work_sync(&smp->security_timer);
+
+ hci_disconnect(chan->conn->hcon, HCI_ERROR_AUTH_FAILURE);
}
- BT_DBG("keydist 0x%x", *keydist);
+ return err;
+}
- if (*keydist & SMP_DIST_ENC_KEY) {
- struct smp_cmd_encrypt_info enc;
- struct smp_cmd_master_ident ident;
- struct smp_ltk *ltk;
- u8 authenticated;
- __le16 ediv;
- __le64 rand;
+static struct sk_buff *smp_alloc_skb_cb(struct l2cap_chan *chan,
+ unsigned long hdr_len,
+ unsigned long len, int nb)
+{
+ struct sk_buff *skb;
- get_random_bytes(enc.ltk, sizeof(enc.ltk));
- get_random_bytes(&ediv, sizeof(ediv));
- get_random_bytes(&rand, sizeof(rand));
+ skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
- smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
+ skb->priority = HCI_PRIO_MAX;
+ bt_cb(skb)->chan = chan;
- authenticated = hcon->sec_level == BT_SECURITY_HIGH;
- ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type,
- SMP_LTK_SLAVE, authenticated, enc.ltk,
- smp->enc_key_size, ediv, rand);
- smp->slave_ltk = ltk;
+ return skb;
+}
- ident.ediv = ediv;
- ident.rand = rand;
+static const struct l2cap_ops smp_chan_ops = {
+ .name = "Security Manager",
+ .ready = smp_ready_cb,
+ .recv = smp_recv_cb,
+ .alloc_skb = smp_alloc_skb_cb,
+ .teardown = smp_teardown_cb,
+ .resume = smp_resume_cb,
+
+ .new_connection = l2cap_chan_no_new_connection,
+ .state_change = l2cap_chan_no_state_change,
+ .close = l2cap_chan_no_close,
+ .defer = l2cap_chan_no_defer,
+ .suspend = l2cap_chan_no_suspend,
+ .set_shutdown = l2cap_chan_no_set_shutdown,
+ .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
+ .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
+};
- smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
+static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan)
+{
+ struct l2cap_chan *chan;
- *keydist &= ~SMP_DIST_ENC_KEY;
- }
+ BT_DBG("pchan %p", pchan);
- if (*keydist & SMP_DIST_ID_KEY) {
- struct smp_cmd_ident_addr_info addrinfo;
- struct smp_cmd_ident_info idinfo;
+ chan = l2cap_chan_create();
+ if (!chan)
+ return NULL;
- memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk));
+ chan->chan_type = pchan->chan_type;
+ chan->ops = &smp_chan_ops;
+ chan->scid = pchan->scid;
+ chan->dcid = chan->scid;
+ chan->imtu = pchan->imtu;
+ chan->omtu = pchan->omtu;
+ chan->mode = pchan->mode;
- smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo);
+ BT_DBG("created chan %p", chan);
- /* The hci_conn contains the local identity address
- * after the connection has been established.
- *
- * This is true even when the connection has been
- * established using a resolvable random address.
- */
- bacpy(&addrinfo.bdaddr, &hcon->src);
- addrinfo.addr_type = hcon->src_type;
+ return chan;
+}
- smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
- &addrinfo);
+static const struct l2cap_ops smp_root_chan_ops = {
+ .name = "Security Manager Root",
+ .new_connection = smp_new_conn_cb,
+
+ /* None of these are implemented for the root channel */
+ .close = l2cap_chan_no_close,
+ .alloc_skb = l2cap_chan_no_alloc_skb,
+ .recv = l2cap_chan_no_recv,
+ .state_change = l2cap_chan_no_state_change,
+ .teardown = l2cap_chan_no_teardown,
+ .ready = l2cap_chan_no_ready,
+ .defer = l2cap_chan_no_defer,
+ .suspend = l2cap_chan_no_suspend,
+ .resume = l2cap_chan_no_resume,
+ .set_shutdown = l2cap_chan_no_set_shutdown,
+ .get_sndtimeo = l2cap_chan_no_get_sndtimeo,
+ .memcpy_fromiovec = l2cap_chan_no_memcpy_fromiovec,
+};
- *keydist &= ~SMP_DIST_ID_KEY;
- }
+int smp_register(struct hci_dev *hdev)
+{
+ struct l2cap_chan *chan;
+ struct crypto_blkcipher *tfm_aes;
- if (*keydist & SMP_DIST_SIGN) {
- struct smp_cmd_sign_info sign;
- struct smp_csrk *csrk;
+ BT_DBG("%s", hdev->name);
- /* Generate a new random key */
- get_random_bytes(sign.csrk, sizeof(sign.csrk));
+ tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm_aes)) {
+ int err = PTR_ERR(tfm_aes);
+ BT_ERR("Unable to create crypto context");
+ return err;
+ }
- csrk = kzalloc(sizeof(*csrk), GFP_KERNEL);
- if (csrk) {
- csrk->master = 0x00;
- memcpy(csrk->val, sign.csrk, sizeof(csrk->val));
- }
- smp->slave_csrk = csrk;
+ chan = l2cap_chan_create();
+ if (!chan) {
+ crypto_free_blkcipher(tfm_aes);
+ return -ENOMEM;
+ }
- smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);
+ chan->data = tfm_aes;
- *keydist &= ~SMP_DIST_SIGN;
- }
+ l2cap_add_scid(chan, L2CAP_CID_SMP);
- /* If there are still keys to be received wait for them */
- if ((smp->remote_key_dist & 0x07))
- return 0;
+ l2cap_chan_set_defaults(chan);
- clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags);
- cancel_delayed_work_sync(&conn->security_timer);
- set_bit(SMP_FLAG_COMPLETE, &smp->flags);
- smp_notify_keys(conn);
+ bacpy(&chan->src, &hdev->bdaddr);
+ chan->src_type = BDADDR_LE_PUBLIC;
+ chan->state = BT_LISTEN;
+ chan->mode = L2CAP_MODE_BASIC;
+ chan->imtu = L2CAP_DEFAULT_MTU;
+ chan->ops = &smp_root_chan_ops;
- smp_chan_destroy(conn);
+ hdev->smp_data = chan;
return 0;
}
+
+void smp_unregister(struct hci_dev *hdev)
+{
+ struct l2cap_chan *chan = hdev->smp_data;
+ struct crypto_blkcipher *tfm_aes;
+
+ if (!chan)
+ return;
+
+ BT_DBG("%s chan %p", hdev->name, chan);
+
+ tfm_aes = chan->data;
+ if (tfm_aes) {
+ chan->data = NULL;
+ crypto_free_blkcipher(tfm_aes);
+ }
+
+ hdev->smp_data = NULL;
+ l2cap_chan_put(chan);
+}
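
The rewrite above moves SMP from an ad-hoc signalling handler onto a fixed L2CAP channel: smp_register() creates a listening root channel on L2CAP_CID_SMP whose only meaningful callback is new_connection, and each per-connection channel cloned from it carries the full smp_chan_ops table, with the shared l2cap_chan_no_*() stubs filling every slot neither side implements. Below is a minimal userspace sketch of that ops-table pattern, not kernel code; the type and function names are illustrative stand-ins for struct l2cap_ops and the l2cap_chan_no_*() helpers.

#include <stdio.h>
#include <stdlib.h>

struct chan;

struct chan_ops {
	const char *name;
	struct chan *(*new_connection)(struct chan *root);
	int (*recv)(struct chan *c, const char *data);
	void (*close)(struct chan *c);
};

struct chan {
	const struct chan_ops *ops;
};

/* Shared no-op stubs, analogous to the l2cap_chan_no_*() helpers,
 * so callers never need NULL checks before invoking a callback.
 */
static int chan_no_recv(struct chan *c, const char *data) { return -1; }
static void chan_no_close(struct chan *c) { }

static int smp_recv(struct chan *c, const char *data)
{
	printf("recv: %s\n", data);
	return 0;
}

static const struct chan_ops smp_ops = {
	.name = "Security Manager",
	.new_connection = NULL,	/* never called on a child channel */
	.recv = smp_recv,
	.close = chan_no_close,
};

static struct chan *smp_new_conn(struct chan *root)
{
	struct chan *c = malloc(sizeof(*c));

	if (c)
		c->ops = &smp_ops;	/* child gets the real ops table */
	return c;
}

static const struct chan_ops root_ops = {
	.name = "Security Manager Root",
	.new_connection = smp_new_conn,
	.recv = chan_no_recv,	/* root never receives data itself */
	.close = chan_no_close,
};

int main(void)
{
	struct chan root = { .ops = &root_ops };
	struct chan *c = root.ops->new_connection(&root);

	if (c) {
		c->ops->recv(c, "pairing request");
		c->ops->close(c);
		free(c);
	}
	return 0;
}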
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 796f4f45f92f..86a683a8b491 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -102,6 +102,8 @@ struct smp_cmd_security_req {
__u8 auth_req;
} __packed;
+#define SMP_CMD_MAX 0x0b
+
#define SMP_PASSKEY_ENTRY_FAILED 0x01
#define SMP_OOB_NOT_AVAIL 0x02
#define SMP_AUTH_REQUIREMENTS 0x03
@@ -123,17 +125,23 @@ enum {
SMP_LTK_SLAVE,
};
+static inline u8 smp_ltk_sec_level(struct smp_ltk *key)
+{
+ if (key->authenticated)
+ return BT_SECURITY_HIGH;
+
+ return BT_SECURITY_MEDIUM;
+}
+
/* SMP Commands */
bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level);
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
-int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
-int smp_distribute_keys(struct l2cap_conn *conn);
int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey);
-void smp_chan_destroy(struct l2cap_conn *conn);
+bool smp_irk_matches(struct hci_dev *hdev, u8 irk[16], bdaddr_t *bdaddr);
+int smp_generate_rpa(struct hci_dev *hdev, u8 irk[16], bdaddr_t *rpa);
-bool smp_irk_matches(struct crypto_blkcipher *tfm, u8 irk[16],
- bdaddr_t *bdaddr);
-int smp_generate_rpa(struct crypto_blkcipher *tfm, u8 irk[16], bdaddr_t *rpa);
+int smp_register(struct hci_dev *hdev);
+void smp_unregister(struct hci_dev *hdev);
#endif /* __SMP_H */
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index 8590b942bffa..fd7ee03c59b3 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -10,7 +10,9 @@ bridge-y := br.o br_device.o br_fdb.o br_forward.o br_if.o br_input.o \
bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
-bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
+bridge-$(subst m,y,$(CONFIG_BRIDGE_NETFILTER)) += br_nf_core.o
+
+obj-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o br_mdb.o
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 1a755a1e5410..44425aff7cba 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -161,7 +161,7 @@ static int __init br_init(void)
if (err)
goto err_out1;
- err = br_netfilter_init();
+ err = br_nf_core_init();
if (err)
goto err_out2;
@@ -179,11 +179,16 @@ static int __init br_init(void)
br_fdb_test_addr_hook = br_fdb_test_addr;
#endif
+ pr_info("bridge: automatic filtering via arp/ip/ip6tables has been "
+ "deprecated. Update your scripts to load br_netfilter if you "
+ "need this.\n");
+
return 0;
+
err_out4:
unregister_netdevice_notifier(&br_device_notifier);
err_out3:
- br_netfilter_fini();
+ br_nf_core_fini();
err_out2:
unregister_pernet_subsys(&br_net_ops);
err_out1:
@@ -196,20 +201,17 @@ err_out:
static void __exit br_deinit(void)
{
stp_proto_unregister(&br_stp_proto);
-
br_netlink_fini();
unregister_netdevice_notifier(&br_device_notifier);
brioctl_set(NULL);
-
unregister_pernet_subsys(&br_net_ops);
rcu_barrier(); /* Wait for completion of call_rcu()'s */
- br_netfilter_fini();
+ br_nf_core_fini();
#if IS_ENABLED(CONFIG_ATM_LANE)
br_fdb_test_addr_hook = NULL;
#endif
-
br_fdb_fini();
}
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 568cccd39a3d..ffd379db5938 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -36,7 +36,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
u16 vid = 0;
rcu_read_lock();
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
br_nf_pre_routing_finish_bridge_slow(skb);
rcu_read_unlock();
@@ -88,12 +88,17 @@ out:
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
+ int err;
br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!br->stats)
return -ENOMEM;
- return 0;
+ err = br_vlan_init(br);
+ if (err)
+ free_percpu(br->stats);
+
+ return err;
}
static int br_dev_open(struct net_device *dev)
@@ -167,7 +172,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* remember the MTU in the rtable for PMTU */
dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif
@@ -389,5 +394,4 @@ void br_dev_setup(struct net_device *dev)
br_netfilter_rtable_init(br);
br_stp_timer_init(br);
br_multicast_init(br);
- br_vlan_init(br);
}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 056b67b0e277..992ec49a96aa 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -49,6 +49,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
return 0;
}
+EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
int br_forward_finish(struct sk_buff *skb)
{
@@ -56,6 +57,7 @@ int br_forward_finish(struct sk_buff *skb)
br_dev_queue_push_xmit);
}
+EXPORT_SYMBOL_GPL(br_forward_finish);
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 078d336a1f37..ed307db7a12b 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -252,12 +252,12 @@ static void del_nbp(struct net_bridge_port *p)
br_fdb_delete_by_port(br, p, 1);
nbp_update_port_count(br);
+ netdev_upper_dev_unlink(dev, br->dev);
+
dev->priv_flags &= ~IFF_BRIDGE_PORT;
netdev_rx_handler_unregister(dev);
- netdev_upper_dev_unlink(dev, br->dev);
-
br_multicast_del_port(p);
kobject_uevent(&p->kobj, KOBJ_REMOVE);
@@ -332,7 +332,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
p->port_no = index;
p->flags = BR_LEARNING | BR_FLOOD;
br_init_port(p);
- p->state = BR_STATE_DISABLED;
+ br_set_state(p, BR_STATE_DISABLED);
br_stp_port_timer_init(p);
br_multicast_add_port(p);
@@ -476,16 +476,16 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (err)
goto err3;
- err = netdev_master_upper_dev_link(dev, br->dev);
+ err = netdev_rx_handler_register(dev, br_handle_frame, p);
if (err)
goto err4;
- err = netdev_rx_handler_register(dev, br_handle_frame, p);
+ dev->priv_flags |= IFF_BRIDGE_PORT;
+
+ err = netdev_master_upper_dev_link(dev, br->dev);
if (err)
goto err5;
- dev->priv_flags |= IFF_BRIDGE_PORT;
-
dev_disable_lro(dev);
list_add_rcu(&p->list, &br->port_list);
@@ -500,6 +500,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (br_fdb_insert(br, p, dev->dev_addr, 0))
netdev_err(dev, "failed insert local address bridge forwarding table\n");
+ if (nbp_vlan_init(p))
+ netdev_err(dev, "failed to initialize vlan filtering on this port\n");
+
spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);
@@ -520,7 +523,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
return 0;
err5:
- netdev_upper_dev_unlink(dev, br->dev);
+ dev->priv_flags &= ~IFF_BRIDGE_PORT;
+ netdev_rx_handler_unregister(dev);
err4:
br_netpoll_disable(p);
err3:
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 366c43649079..6fd5522df696 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -140,6 +140,7 @@ drop:
kfree_skb(skb);
goto out;
}
+EXPORT_SYMBOL_GPL(br_handle_frame_finish);
/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct sk_buff *skb)
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 7751c92c8c57..648d79ccf462 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1822,7 +1822,7 @@ static void br_multicast_query_expired(struct net_bridge *br,
if (query->startup_sent < br->multicast_startup_query_count)
query->startup_sent++;
- rcu_assign_pointer(querier, NULL);
+ RCU_INIT_POINTER(querier, NULL);
br_multicast_send_query(br, NULL, query);
spin_unlock(&br->multicast_lock);
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 4063898cf8aa..1bada53bb195 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -111,66 +111,6 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
pppoe_proto(skb) == htons(PPP_IPV6) && \
brnf_filter_pppoe_tagged)
-static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu)
-{
-}
-
-static void fake_redirect(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb)
-{
-}
-
-static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
-{
- return NULL;
-}
-
-static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst,
- struct sk_buff *skb,
- const void *daddr)
-{
- return NULL;
-}
-
-static unsigned int fake_mtu(const struct dst_entry *dst)
-{
- return dst->dev->mtu;
-}
-
-static struct dst_ops fake_dst_ops = {
- .family = AF_INET,
- .protocol = cpu_to_be16(ETH_P_IP),
- .update_pmtu = fake_update_pmtu,
- .redirect = fake_redirect,
- .cow_metrics = fake_cow_metrics,
- .neigh_lookup = fake_neigh_lookup,
- .mtu = fake_mtu,
-};
-
-/*
- * Initialize bogus route table used to keep netfilter happy.
- * Currently, we fill in the PMTU entry because netfilter
- * refragmentation needs it, and the rt_flags entry because
- * ipt_REJECT needs it. Future netfilter modules might
- * require us to fill additional fields.
- */
-static const u32 br_dst_default_metrics[RTAX_MAX] = {
- [RTAX_MTU - 1] = 1500,
-};
-
-void br_netfilter_rtable_init(struct net_bridge *br)
-{
- struct rtable *rt = &br->fake_rtable;
-
- atomic_set(&rt->dst.__refcnt, 1);
- rt->dst.dev = br->dev;
- rt->dst.path = &rt->dst;
- dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
- rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE;
- rt->dst.ops = &fake_dst_ops;
-}
-
static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
struct net_bridge_port *port;
@@ -245,14 +185,6 @@ static inline void nf_bridge_save_header(struct sk_buff *skb)
skb->nf_bridge->data, header_size);
}
-static inline void nf_bridge_update_protocol(struct sk_buff *skb)
-{
- if (skb->nf_bridge->mask & BRNF_8021Q)
- skb->protocol = htons(ETH_P_8021Q);
- else if (skb->nf_bridge->mask & BRNF_PPPoE)
- skb->protocol = htons(ETH_P_PPP_SES);
-}
-
/* When handing a packet over to the IP layer
* check whether we have a skb that is in the
* expected format
@@ -320,26 +252,6 @@ drop:
return -1;
}
-/* Fill in the header for fragmented IP packets handled by
- * the IPv4 connection tracking code.
- */
-int nf_bridge_copy_header(struct sk_buff *skb)
-{
- int err;
- unsigned int header_size;
-
- nf_bridge_update_protocol(skb);
- header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
- err = skb_cow_head(skb, header_size);
- if (err)
- return err;
-
- skb_copy_to_linear_data_offset(skb, -header_size,
- skb->nf_bridge->data, header_size);
- __skb_push(skb, nf_bridge_encap_header_len(skb));
- return 0;
-}
-
/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
* bridge PRE_ROUTING hook. */
@@ -955,6 +867,11 @@ static unsigned int ip_sabotage_in(const struct nf_hook_ops *ops,
return NF_ACCEPT;
}
+void br_netfilter_enable(void)
+{
+}
+EXPORT_SYMBOL_GPL(br_netfilter_enable);
+
/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
* br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
@@ -1070,38 +987,42 @@ static struct ctl_table brnf_table[] = {
};
#endif
-int __init br_netfilter_init(void)
+static int __init br_netfilter_init(void)
{
int ret;
- ret = dst_entries_init(&fake_dst_ops);
+ ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
if (ret < 0)
return ret;
- ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
- if (ret < 0) {
- dst_entries_destroy(&fake_dst_ops);
- return ret;
- }
#ifdef CONFIG_SYSCTL
brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
if (brnf_sysctl_header == NULL) {
printk(KERN_WARNING
"br_netfilter: can't register to sysctl.\n");
- nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
- dst_entries_destroy(&fake_dst_ops);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err1;
}
#endif
printk(KERN_NOTICE "Bridge firewalling registered\n");
return 0;
+err1:
+ nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
+ return ret;
}
-void br_netfilter_fini(void)
+static void __exit br_netfilter_fini(void)
{
nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
unregister_net_sysctl_table(brnf_sysctl_header);
#endif
- dst_entries_destroy(&fake_dst_ops);
}
+
+module_init(br_netfilter_init);
+module_exit(br_netfilter_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@gnu.org>");
+MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
+MODULE_DESCRIPTION("Linux ethernet netfilter firewall bridge");
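
br_netfilter_init() now unwinds through a goto label instead of repeating the cleanup calls in each error branch, which keeps the teardown in one place as the function grows. A minimal userspace sketch of that unwind pattern, with register_a() and register_b() as hypothetical stand-ins for nf_register_hooks() and register_net_sysctl():

#include <stdio.h>

static int register_a(void) { puts("a registered"); return 0; }
static void unregister_a(void) { puts("a unregistered"); }
static int register_b(void) { return -1; /* simulate failure */ }

static int demo_init(void)
{
	int ret;

	ret = register_a();
	if (ret < 0)
		return ret;

	ret = register_b();
	if (ret < 0)
		goto err1;	/* unwind step a before failing */

	return 0;

err1:
	unregister_a();
	return ret;
}

int main(void)
{
	printf("init: %d\n", demo_init());
	return 0;
}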
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index cb5fcf62f663..2ff9706647f2 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -257,9 +257,6 @@ static int br_afspec(struct net_bridge *br,
} else
err = br_vlan_add(br, vinfo->vid, vinfo->flags);
- if (err)
- break;
-
break;
case RTM_DELLINK:
@@ -276,7 +273,7 @@ static int br_afspec(struct net_bridge *br,
return err;
}
-static const struct nla_policy ifla_brport_policy[IFLA_BRPORT_MAX + 1] = {
+static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_STATE] = { .type = NLA_U8 },
[IFLA_BRPORT_COST] = { .type = NLA_U32 },
[IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 },
@@ -304,7 +301,7 @@ static int br_set_port_state(struct net_bridge_port *p, u8 state)
(!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
return -ENETDOWN;
- p->state = state;
+ br_set_state(p, state);
br_log_state(p);
br_port_state_selection(p->br);
return 0;
@@ -382,7 +379,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
if (p && protinfo) {
if (protinfo->nla_type & NLA_F_NESTED) {
err = nla_parse_nested(tb, IFLA_BRPORT_MAX,
- protinfo, ifla_brport_policy);
+ protinfo, br_port_policy);
if (err)
return err;
@@ -461,6 +458,88 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
return register_netdevice(dev);
}
+static int br_port_slave_changelink(struct net_device *brdev,
+ struct net_device *dev,
+ struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ if (!data)
+ return 0;
+ return br_setport(br_port_get_rtnl(dev), data);
+}
+
+static int br_port_fill_slave_info(struct sk_buff *skb,
+ const struct net_device *brdev,
+ const struct net_device *dev)
+{
+ return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
+}
+
+static size_t br_port_get_slave_size(const struct net_device *brdev,
+ const struct net_device *dev)
+{
+ return br_port_info_size();
+}
+
+static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
+ [IFLA_BR_FORWARD_DELAY] = { .type = NLA_U32 },
+ [IFLA_BR_HELLO_TIME] = { .type = NLA_U32 },
+ [IFLA_BR_MAX_AGE] = { .type = NLA_U32 },
+};
+
+static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct net_bridge *br = netdev_priv(brdev);
+ int err;
+
+ if (!data)
+ return 0;
+
+ if (data[IFLA_BR_FORWARD_DELAY]) {
+ err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BR_HELLO_TIME]) {
+ err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
+ if (err)
+ return err;
+ }
+
+ if (data[IFLA_BR_MAX_AGE]) {
+ err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static size_t br_get_size(const struct net_device *brdev)
+{
+ return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
+ nla_total_size(sizeof(u32)) + /* IFLA_BR_HELLO_TIME */
+ nla_total_size(sizeof(u32)) + /* IFLA_BR_MAX_AGE */
+ 0;
+}
+
+static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
+{
+ struct net_bridge *br = netdev_priv(brdev);
+ u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
+ u32 hello_time = jiffies_to_clock_t(br->hello_time);
+ u32 age_time = jiffies_to_clock_t(br->max_age);
+
+ if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
+ nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
+ nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
static size_t br_get_link_af_size(const struct net_device *dev)
{
struct net_port_vlans *pv;
@@ -485,12 +564,23 @@ static struct rtnl_af_ops br_af_ops = {
};
struct rtnl_link_ops br_link_ops __read_mostly = {
- .kind = "bridge",
- .priv_size = sizeof(struct net_bridge),
- .setup = br_dev_setup,
- .validate = br_validate,
- .newlink = br_dev_newlink,
- .dellink = br_dev_delete,
+ .kind = "bridge",
+ .priv_size = sizeof(struct net_bridge),
+ .setup = br_dev_setup,
+ .maxtype = IFLA_BRPORT_MAX,
+ .policy = br_policy,
+ .validate = br_validate,
+ .newlink = br_dev_newlink,
+ .changelink = br_changelink,
+ .dellink = br_dev_delete,
+ .get_size = br_get_size,
+ .fill_info = br_fill_info,
+
+ .slave_maxtype = IFLA_BRPORT_MAX,
+ .slave_policy = br_port_policy,
+ .slave_changelink = br_port_slave_changelink,
+ .get_slave_size = br_port_get_slave_size,
+ .fill_slave_info = br_port_fill_slave_info,
};
int __init br_netlink_init(void)
@@ -512,7 +602,7 @@ out_af:
return err;
}
-void __exit br_netlink_fini(void)
+void br_netlink_fini(void)
{
br_mdb_uninit();
rtnl_af_unregister(&br_af_ops);
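
The new changelink/fill_info callbacks come paired with br_get_size(), which must reserve space for every attribute br_fill_info() may emit so the netlink skb allocation never comes up short. The sketch below shows that size-then-fill contract in plain userspace C with a simplified 4-byte TLV header rather than the real netlink attribute layout; all names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct tlv { uint16_t type; uint16_t len; };

enum { ATTR_FORWARD_DELAY = 1, ATTR_HELLO_TIME, ATTR_MAX_AGE };

static size_t attr_total_size(size_t payload)
{
	return sizeof(struct tlv) + payload;
}

/* Analogous to br_get_size(): account for every attribute up front */
static size_t get_size(void)
{
	return attr_total_size(sizeof(uint32_t)) +	/* forward_delay */
	       attr_total_size(sizeof(uint32_t)) +	/* hello_time */
	       attr_total_size(sizeof(uint32_t));	/* max_age */
}

static size_t put_u32(char *p, uint16_t type, uint32_t val)
{
	struct tlv h = { type, sizeof(val) };

	memcpy(p, &h, sizeof(h));
	memcpy(p + sizeof(h), &val, sizeof(val));
	return attr_total_size(sizeof(val));
}

int main(void)
{
	size_t sz = get_size(), off = 0;
	char *msg = malloc(sz);

	/* Analogous to br_fill_info(): fill exactly what was sized */
	off += put_u32(msg + off, ATTR_FORWARD_DELAY, 1500);
	off += put_u32(msg + off, ATTR_HELLO_TIME, 200);
	off += put_u32(msg + off, ATTR_MAX_AGE, 2000);

	printf("sized %zu, filled %zu\n", sz, off);
	free(msg);
	return 0;
}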
diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c
new file mode 100644
index 000000000000..387cb3bd017c
--- /dev/null
+++ b/net/bridge/br_nf_core.c
@@ -0,0 +1,96 @@
+/*
+ * Handle firewalling core
+ * Linux ethernet bridge
+ *
+ * Authors:
+ * Lennert Buytenhek <buytenh@gnu.org>
+ * Bart De Schuymer <bdschuym@pandora.be>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Lennert dedicates this file to Kerstin Wurdinger.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/in_route.h>
+#include <linux/inetdevice.h>
+#include <net/route.h>
+
+#include "br_private.h"
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+#endif
+
+static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
+{
+}
+
+static void fake_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
+{
+}
+
+static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
+{
+ return NULL;
+}
+
+static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
+{
+ return NULL;
+}
+
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+ return dst->dev->mtu;
+}
+
+static struct dst_ops fake_dst_ops = {
+ .family = AF_INET,
+ .protocol = cpu_to_be16(ETH_P_IP),
+ .update_pmtu = fake_update_pmtu,
+ .redirect = fake_redirect,
+ .cow_metrics = fake_cow_metrics,
+ .neigh_lookup = fake_neigh_lookup,
+ .mtu = fake_mtu,
+};
+
+/*
+ * Initialize bogus route table used to keep netfilter happy.
+ * Currently, we fill in the PMTU entry because netfilter
+ * refragmentation needs it, and the rt_flags entry because
+ * ipt_REJECT needs it. Future netfilter modules might
+ * require us to fill additional fields.
+ */
+static const u32 br_dst_default_metrics[RTAX_MAX] = {
+ [RTAX_MTU - 1] = 1500,
+};
+
+void br_netfilter_rtable_init(struct net_bridge *br)
+{
+ struct rtable *rt = &br->fake_rtable;
+
+ atomic_set(&rt->dst.__refcnt, 1);
+ rt->dst.dev = br->dev;
+ rt->dst.path = &rt->dst;
+ dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
+ rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE;
+ rt->dst.ops = &fake_dst_ops;
+}
+
+int __init br_nf_core_init(void)
+{
+ return dst_entries_init(&fake_dst_ops);
+}
+
+void br_nf_core_fini(void)
+{
+ dst_entries_destroy(&fake_dst_ops);
+}
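
The br_dst_default_metrics initializer above relies on the kernel convention that metric identifiers are 1-based while the backing array is 0-based, hence [RTAX_MTU - 1] = 1500. A small userspace sketch of that indexing, with local stand-ins for the RTAX_* constants rather than the kernel's definitions:

#include <stdio.h>
#include <stdint.h>

enum { RTAX_UNSPEC, RTAX_LOCK, RTAX_MTU, RTAX_MAX };

static const uint32_t dst_default_metrics[RTAX_MAX] = {
	[RTAX_MTU - 1] = 1500,	/* only the PMTU slot is populated */
};

static uint32_t dst_metric(const uint32_t *metrics, int id)
{
	return metrics[id - 1];	/* translate 1-based id to 0-based slot */
}

int main(void)
{
	printf("mtu metric: %u\n", dst_metric(dst_default_metrics, RTAX_MTU));
	return 0;
}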
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2398369c6dda..4d783d071305 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -221,7 +221,7 @@ struct net_bridge
struct pcpu_sw_netstats __percpu *stats;
spinlock_t hash_lock;
struct hlist_head hash[BR_HASH_SIZE];
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct rtable fake_rtable;
bool nf_call_iptables;
bool nf_call_ip6tables;
@@ -299,6 +299,7 @@ struct net_bridge
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
u8 vlan_enabled;
__be16 vlan_proto;
+ u16 default_pvid;
struct net_port_vlans __rcu *vlan_info;
#endif
};
@@ -608,11 +609,13 @@ bool br_vlan_find(struct net_bridge *br, u16 vid);
void br_recalculate_fwd_mask(struct net_bridge *br);
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int br_vlan_set_proto(struct net_bridge *br, unsigned long val);
-void br_vlan_init(struct net_bridge *br);
+int br_vlan_init(struct net_bridge *br);
+int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val);
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
void nbp_vlan_flush(struct net_bridge_port *port);
bool nbp_vlan_find(struct net_bridge_port *port, u16 vid);
+int nbp_vlan_init(struct net_bridge_port *port);
static inline struct net_port_vlans *br_get_vlan_info(
const struct net_bridge *br)
@@ -645,11 +648,11 @@ static inline int br_vlan_get_tag(const struct sk_buff *skb, u16 *vid)
static inline u16 br_get_pvid(const struct net_port_vlans *v)
{
- /* Return just the VID if it is set, or VLAN_N_VID (invalid vid) if
- * vid wasn't set
- */
+ if (!v)
+ return 0;
+
smp_rmb();
- return v->pvid ?: VLAN_N_VID;
+ return v->pvid;
}
static inline int br_vlan_enabled(struct net_bridge *br)
@@ -708,8 +711,9 @@ static inline void br_recalculate_fwd_mask(struct net_bridge *br)
{
}
-static inline void br_vlan_init(struct net_bridge *br)
+static inline int br_vlan_init(struct net_bridge *br)
{
+ return 0;
}
static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
@@ -742,13 +746,18 @@ static inline bool nbp_vlan_find(struct net_bridge_port *port, u16 vid)
return false;
}
+static inline int nbp_vlan_init(struct net_bridge_port *port)
+{
+ return 0;
+}
+
static inline u16 br_vlan_get_tag(const struct sk_buff *skb, u16 *tag)
{
return 0;
}
static inline u16 br_get_pvid(const struct net_port_vlans *v)
{
- return VLAN_N_VID; /* Returns invalid vid */
+ return 0;
}
static inline int br_vlan_enabled(struct net_bridge *br)
@@ -758,18 +767,19 @@ static inline int br_vlan_enabled(struct net_bridge *br)
#endif
/* br_netfilter.c */
-#ifdef CONFIG_BRIDGE_NETFILTER
-int br_netfilter_init(void);
-void br_netfilter_fini(void);
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+int br_nf_core_init(void);
+void br_nf_core_fini(void);
void br_netfilter_rtable_init(struct net_bridge *);
#else
-#define br_netfilter_init() (0)
-#define br_netfilter_fini() do { } while (0)
+static inline int br_nf_core_init(void) { return 0; }
+static inline void br_nf_core_fini(void) {}
#define br_netfilter_rtable_init(x)
#endif
/* br_stp.c */
void br_log_state(const struct net_bridge_port *p);
+void br_set_state(struct net_bridge_port *p, unsigned int state);
struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no);
void br_init_port(struct net_bridge_port *p);
void br_become_designated_port(struct net_bridge_port *p);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 3c86f0538cbb..2b047bcf42a4 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -36,6 +36,11 @@ void br_log_state(const struct net_bridge_port *p)
br_port_state_names[p->state]);
}
+void br_set_state(struct net_bridge_port *p, unsigned int state)
+{
+ p->state = state;
+}
+
/* called under bridge lock */
struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no)
{
@@ -107,7 +112,7 @@ static void br_root_port_block(const struct net_bridge *br,
br_notice(br, "port %u(%s) tried to become root port (blocked)",
(unsigned int) p->port_no, p->dev->name);
- p->state = BR_STATE_LISTENING;
+ br_set_state(p, BR_STATE_LISTENING);
br_log_state(p);
br_ifinfo_notify(RTM_NEWLINK, p);
@@ -387,7 +392,7 @@ static void br_make_blocking(struct net_bridge_port *p)
p->state == BR_STATE_LEARNING)
br_topology_change_detection(p->br);
- p->state = BR_STATE_BLOCKING;
+ br_set_state(p, BR_STATE_BLOCKING);
br_log_state(p);
br_ifinfo_notify(RTM_NEWLINK, p);
@@ -404,13 +409,13 @@ static void br_make_forwarding(struct net_bridge_port *p)
return;
if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) {
- p->state = BR_STATE_FORWARDING;
+ br_set_state(p, BR_STATE_FORWARDING);
br_topology_change_detection(br);
del_timer(&p->forward_delay_timer);
} else if (br->stp_enabled == BR_KERNEL_STP)
- p->state = BR_STATE_LISTENING;
+ br_set_state(p, BR_STATE_LISTENING);
else
- p->state = BR_STATE_LEARNING;
+ br_set_state(p, BR_STATE_LEARNING);
br_multicast_enable_port(p);
br_log_state(p);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 189ba1e7d851..41146872c1b4 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -37,7 +37,7 @@ void br_init_port(struct net_bridge_port *p)
{
p->port_id = br_make_port_id(p->priority, p->port_no);
br_become_designated_port(p);
- p->state = BR_STATE_BLOCKING;
+ br_set_state(p, BR_STATE_BLOCKING);
p->topology_change_ack = 0;
p->config_pending = 0;
}
@@ -100,7 +100,7 @@ void br_stp_disable_port(struct net_bridge_port *p)
wasroot = br_is_root_bridge(br);
br_become_designated_port(p);
- p->state = BR_STATE_DISABLED;
+ br_set_state(p, BR_STATE_DISABLED);
p->topology_change_ack = 0;
p->config_pending = 0;
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 558c46d19e05..4fcaa67750fd 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -87,11 +87,11 @@ static void br_forward_delay_timer_expired(unsigned long arg)
(unsigned int) p->port_no, p->dev->name);
spin_lock(&br->lock);
if (p->state == BR_STATE_LISTENING) {
- p->state = BR_STATE_LEARNING;
+ br_set_state(p, BR_STATE_LEARNING);
mod_timer(&p->forward_delay_timer,
jiffies + br->forward_delay);
} else if (p->state == BR_STATE_LEARNING) {
- p->state = BR_STATE_FORWARDING;
+ br_set_state(p, BR_STATE_FORWARDING);
if (br_is_designated_for_some_port(br))
br_topology_change_detection(br);
netif_carrier_on(br->dev);
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c9e2572b15f4..4c97fc50fb70 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -629,7 +629,7 @@ static ssize_t multicast_startup_query_interval_store(
}
static DEVICE_ATTR_RW(multicast_startup_query_interval);
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static ssize_t nf_call_iptables_show(
struct device *d, struct device_attribute *attr, char *buf)
{
@@ -725,6 +725,22 @@ static ssize_t vlan_protocol_store(struct device *d,
return store_bridge_parm(d, buf, len, br_vlan_set_proto);
}
static DEVICE_ATTR_RW(vlan_protocol);
+
+static ssize_t default_pvid_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct net_bridge *br = to_bridge(d);
+ return sprintf(buf, "%d\n", br->default_pvid);
+}
+
+static ssize_t default_pvid_store(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_bridge_parm(d, buf, len, br_vlan_set_default_pvid);
+}
+static DEVICE_ATTR_RW(default_pvid);
#endif
static struct attribute *bridge_attrs[] = {
@@ -763,7 +779,7 @@ static struct attribute *bridge_attrs[] = {
&dev_attr_multicast_query_response_interval.attr,
&dev_attr_multicast_startup_query_interval.attr,
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
&dev_attr_nf_call_iptables.attr,
&dev_attr_nf_call_ip6tables.attr,
&dev_attr_nf_call_arptables.attr,
@@ -771,6 +787,7 @@ static struct attribute *bridge_attrs[] = {
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
&dev_attr_vlan_filtering.attr,
&dev_attr_vlan_protocol.attr,
+ &dev_attr_default_pvid.attr,
#endif
NULL
};
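
With the patch applied, the new attribute sits at /sys/class/net/<bridge>/bridge/default_pvid next to vlan_filtering and vlan_protocol. A short userspace example of reading it, assuming a bridge named br0 exists; writing "0" to the same file disables the default PVID, which maps onto br_vlan_disable_default_pvid() above.

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/net/br0/bridge/default_pvid";
	char buf[16];
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("default_pvid: %s", buf);
	fclose(f);

	/* Writing "0" here would disable the default PVID entirely,
	 * mirroring br_vlan_disable_default_pvid().
	 */
	return 0;
}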
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 3ba57fcdcd13..150048fb99b0 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -223,7 +223,7 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
* See if pvid is set on this port. That tells us which
* vlan untagged or priority-tagged traffic belongs to.
*/
- if (pvid == VLAN_N_VID)
+ if (!pvid)
goto drop;
/* PVID is set on this port. Any untagged or priority-tagged
@@ -292,7 +292,7 @@ bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
if (!*vid) {
*vid = br_get_pvid(v);
- if (*vid == VLAN_N_VID)
+ if (!*vid)
return false;
return true;
@@ -499,9 +499,141 @@ err_filt:
goto unlock;
}
-void br_vlan_init(struct net_bridge *br)
+static bool vlan_default_pvid(struct net_port_vlans *pv, u16 vid)
+{
+ return pv && vid == pv->pvid && test_bit(vid, pv->untagged_bitmap);
+}
+
+static void br_vlan_disable_default_pvid(struct net_bridge *br)
+{
+ struct net_bridge_port *p;
+ u16 pvid = br->default_pvid;
+
+ /* Disable default_pvid on all ports where it is still
+ * configured.
+ */
+ if (vlan_default_pvid(br_get_vlan_info(br), pvid))
+ br_vlan_delete(br, pvid);
+
+ list_for_each_entry(p, &br->port_list, list) {
+ if (vlan_default_pvid(nbp_get_vlan_info(p), pvid))
+ nbp_vlan_delete(p, pvid);
+ }
+
+ br->default_pvid = 0;
+}
+
+static int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
+{
+ struct net_bridge_port *p;
+ u16 old_pvid;
+ int err = 0;
+ unsigned long *changed;
+
+ changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!changed)
+ return -ENOMEM;
+
+ old_pvid = br->default_pvid;
+
+ /* Update default_pvid config only if we do not conflict with
+ * user configuration.
+ */
+ if ((!old_pvid || vlan_default_pvid(br_get_vlan_info(br), old_pvid)) &&
+ !br_vlan_find(br, pvid)) {
+ err = br_vlan_add(br, pvid,
+ BRIDGE_VLAN_INFO_PVID |
+ BRIDGE_VLAN_INFO_UNTAGGED);
+ if (err)
+ goto out;
+ br_vlan_delete(br, old_pvid);
+ set_bit(0, changed);
+ }
+
+ list_for_each_entry(p, &br->port_list, list) {
+ /* Update default_pvid config only if we do not conflict with
+ * user configuration.
+ */
+ if ((old_pvid &&
+ !vlan_default_pvid(nbp_get_vlan_info(p), old_pvid)) ||
+ nbp_vlan_find(p, pvid))
+ continue;
+
+ err = nbp_vlan_add(p, pvid,
+ BRIDGE_VLAN_INFO_PVID |
+ BRIDGE_VLAN_INFO_UNTAGGED);
+ if (err)
+ goto err_port;
+ nbp_vlan_delete(p, old_pvid);
+ set_bit(p->port_no, changed);
+ }
+
+ br->default_pvid = pvid;
+
+out:
+ kfree(changed);
+ return err;
+
+err_port:
+ list_for_each_entry_continue_reverse(p, &br->port_list, list) {
+ if (!test_bit(p->port_no, changed))
+ continue;
+
+ if (old_pvid)
+ nbp_vlan_add(p, old_pvid,
+ BRIDGE_VLAN_INFO_PVID |
+ BRIDGE_VLAN_INFO_UNTAGGED);
+ nbp_vlan_delete(p, pvid);
+ }
+
+ if (test_bit(0, changed)) {
+ if (old_pvid)
+ br_vlan_add(br, old_pvid,
+ BRIDGE_VLAN_INFO_PVID |
+ BRIDGE_VLAN_INFO_UNTAGGED);
+ br_vlan_delete(br, pvid);
+ }
+ goto out;
+}
+
+int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
+{
+ u16 pvid = val;
+ int err = 0;
+
+ if (val >= VLAN_VID_MASK)
+ return -EINVAL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (pvid == br->default_pvid)
+ goto unlock;
+
+ /* Only allow default pvid change when filtering is disabled */
+ if (br->vlan_enabled) {
+ pr_info_once("Please disable vlan filtering to change default_pvid\n");
+ err = -EPERM;
+ goto unlock;
+ }
+
+ if (!pvid)
+ br_vlan_disable_default_pvid(br);
+ else
+ err = __br_vlan_set_default_pvid(br, pvid);
+
+unlock:
+ rtnl_unlock();
+ return err;
+}
+
+int br_vlan_init(struct net_bridge *br)
{
br->vlan_proto = htons(ETH_P_8021Q);
+ br->default_pvid = 1;
+ return br_vlan_add(br, 1,
+ BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED);
}
/* Must be protected by RTNL.
@@ -593,3 +725,12 @@ out:
rcu_read_unlock();
return found;
}
+
+int nbp_vlan_init(struct net_bridge_port *p)
+{
+ return p->br->default_pvid ?
+ nbp_vlan_add(p, p->br->default_pvid,
+ BRIDGE_VLAN_INFO_PVID |
+ BRIDGE_VLAN_INFO_UNTAGGED) :
+ 0;
+}
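
__br_vlan_set_default_pvid() applies the new pvid port by port, records each successful change in the changed bitmap, and on failure walks back with list_for_each_entry_continue_reverse(), restoring the old pvid only on ports it actually touched. A userspace sketch of that record-then-rollback pattern; the port numbering and the apply/revert helpers are illustrative only:

#include <stdio.h>
#include <string.h>

#define MAX_PORTS 8

static int apply(int port, int pvid)
{
	if (port == 2)
		return -1;	/* simulate failure on port 2 */
	printf("port %d -> pvid %d\n", port, pvid);
	return 0;
}

static void revert(int port, int old_pvid)
{
	printf("port %d reverted to pvid %d\n", port, old_pvid);
}

static int set_default_pvid(int old_pvid, int new_pvid)
{
	unsigned char changed[MAX_PORTS] = { 0 };
	int port, err = 0;

	for (port = 0; port < MAX_PORTS; port++) {
		err = apply(port, new_pvid);
		if (err)
			goto err_port;
		changed[port] = 1;	/* remember what we touched */
	}
	return 0;

err_port:
	/* Walk back over ports that were actually changed */
	for (port--; port >= 0; port--) {
		if (!changed[port])
			continue;
		revert(port, old_pvid);
	}
	return err;
}

int main(void)
{
	printf("result: %d\n", set_default_pvid(1, 5));
	return 0;
}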
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 6d69631b9f4d..d9a8c05d995d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -26,6 +26,7 @@
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
+#include <linux/audit.h>
#include <net/sock.h>
/* needed for logical [in,out]-dev filtering */
#include "../br_private.h"
@@ -1058,6 +1059,20 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
vfree(table);
vfree(counterstmp);
+
+#ifdef CONFIG_AUDIT
+ if (audit_enabled) {
+ struct audit_buffer *ab;
+
+ ab = audit_log_start(current->audit_context, GFP_KERNEL,
+ AUDIT_NETFILTER_CFG);
+ if (ab) {
+ audit_log_format(ab, "table=%s family=%u entries=%u",
+ repl->name, AF_BRIDGE, repl->nentries);
+ audit_log_end(ab);
+ }
+ }
+#endif
return ret;
free_unlock:
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index 5bcc0d8b31f2..da17a5eab8b4 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -34,9 +34,11 @@ static struct nft_af_info nft_af_bridge __read_mostly = {
.owner = THIS_MODULE,
.nops = 1,
.hooks = {
+ [NF_BR_PRE_ROUTING] = nft_do_chain_bridge,
[NF_BR_LOCAL_IN] = nft_do_chain_bridge,
[NF_BR_FORWARD] = nft_do_chain_bridge,
[NF_BR_LOCAL_OUT] = nft_do_chain_bridge,
+ [NF_BR_POST_ROUTING] = nft_do_chain_bridge,
},
};
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index ee3ffe93e14e..a76479535df2 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -14,21 +14,106 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
+#include <net/netfilter/ipv4/nf_reject.h>
+#include <net/netfilter/ipv6/nf_reject.h>
static void nft_reject_bridge_eval(const struct nft_expr *expr,
struct nft_data data[NFT_REG_MAX + 1],
const struct nft_pktinfo *pkt)
{
+ struct nft_reject *priv = nft_expr_priv(expr);
+ struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
+
switch (eth_hdr(pkt->skb)->h_proto) {
case htons(ETH_P_IP):
- return nft_reject_ipv4_eval(expr, data, pkt);
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nf_send_unreach(pkt->skb, priv->icmp_code);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nf_send_reset(pkt->skb, pkt->ops->hooknum);
+ break;
+ case NFT_REJECT_ICMPX_UNREACH:
+ nf_send_unreach(pkt->skb,
+ nft_reject_icmp_code(priv->icmp_code));
+ break;
+ }
+ break;
case htons(ETH_P_IPV6):
- return nft_reject_ipv6_eval(expr, data, pkt);
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nf_send_unreach6(net, pkt->skb, priv->icmp_code,
+ pkt->ops->hooknum);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+ break;
+ case NFT_REJECT_ICMPX_UNREACH:
+ nf_send_unreach6(net, pkt->skb,
+ nft_reject_icmpv6_code(priv->icmp_code),
+ pkt->ops->hooknum);
+ break;
+ }
+ break;
default:
/* No explicit way to reject this protocol, drop it. */
- data[NFT_REG_VERDICT].verdict = NF_DROP;
break;
}
+ data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+
+static int nft_reject_bridge_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_reject *priv = nft_expr_priv(expr);
+ int icmp_code;
+
+ if (tb[NFTA_REJECT_TYPE] == NULL)
+ return -EINVAL;
+
+ priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ case NFT_REJECT_ICMPX_UNREACH:
+ if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
+ return -EINVAL;
+
+ icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
+ if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
+ icmp_code > NFT_REJECT_ICMPX_MAX)
+ return -EINVAL;
+
+ priv->icmp_code = icmp_code;
+ break;
+ case NFT_REJECT_TCP_RST:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int nft_reject_bridge_dump(struct sk_buff *skb,
+ const struct nft_expr *expr)
+{
+ const struct nft_reject *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
+ goto nla_put_failure;
+
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ case NFT_REJECT_ICMPX_UNREACH:
+ if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
+ goto nla_put_failure;
+ break;
+ }
+
+ return 0;
+
+nla_put_failure:
+ return -1;
}
static struct nft_expr_type nft_reject_bridge_type;
@@ -36,8 +121,8 @@ static const struct nft_expr_ops nft_reject_bridge_ops = {
.type = &nft_reject_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_bridge_eval,
- .init = nft_reject_init,
- .dump = nft_reject_dump,
+ .init = nft_reject_bridge_init,
+ .dump = nft_reject_bridge_dump,
};
static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
diff --git a/net/core/dev.c b/net/core/dev.c
index cf8a95f48cff..3c5bdaa44486 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -897,23 +897,25 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
- * dev_get_by_flags_rcu - find any device with given flags
+ * __dev_get_by_flags - find any device with given flags
* @net: the applicable net namespace
* @if_flags: IFF_* values
* @mask: bitmask of bits in if_flags to check
*
* Search for any interface with the given flags. Returns NULL if a device
* is not found or a pointer to the device. Must be called inside
- * rcu_read_lock(), and result refcount is unchanged.
+ * rtnl_lock(), and result refcount is unchanged.
*/
-struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
- unsigned short mask)
+struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
+ unsigned short mask)
{
struct net_device *dev, *ret;
+ ASSERT_RTNL();
+
ret = NULL;
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
if (((dev->flags ^ if_flags) & mask) == 0) {
ret = dev;
break;
@@ -921,7 +923,7 @@ struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags
}
return ret;
}
-EXPORT_SYMBOL(dev_get_by_flags_rcu);
+EXPORT_SYMBOL(__dev_get_by_flags);
/**
* dev_valid_name - check if name is okay for network device
@@ -2177,6 +2179,53 @@ static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
return (struct dev_kfree_skb_cb *)skb->cb;
}
+void netif_schedule_queue(struct netdev_queue *txq)
+{
+ rcu_read_lock();
+ if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
+ struct Qdisc *q = rcu_dereference(txq->qdisc);
+
+ __netif_schedule(q);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(netif_schedule_queue);
+
+/**
+ * netif_wake_subqueue - allow sending packets on subqueue
+ * @dev: network device
+ * @queue_index: sub queue index
+ *
+ * Resume individual transmit queue of a device with multiple transmit queues.
+ */
+void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+
+ if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
+ struct Qdisc *q;
+
+ rcu_read_lock();
+ q = rcu_dereference(txq->qdisc);
+ __netif_schedule(q);
+ rcu_read_unlock();
+ }
+}
+EXPORT_SYMBOL(netif_wake_subqueue);
+
+void netif_tx_wake_queue(struct netdev_queue *dev_queue)
+{
+ if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
+ struct Qdisc *q;
+
+ rcu_read_lock();
+ q = rcu_dereference(dev_queue->qdisc);
+ __netif_schedule(q);
+ rcu_read_unlock();
+ }
+}
+EXPORT_SYMBOL(netif_tx_wake_queue);
+
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
unsigned long flags;
@@ -2373,16 +2422,6 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
rcu_read_lock();
list_for_each_entry_rcu(ptype, &offload_base, list) {
if (ptype->type == type && ptype->callbacks.gso_segment) {
- if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
- int err;
-
- err = ptype->callbacks.gso_send_check(skb);
- segs = ERR_PTR(err);
- if (err || skb_gso_ok(skb, features))
- break;
- __skb_push(skb, (skb->data -
- skb_network_header(skb)));
- }
segs = ptype->callbacks.gso_segment(skb, features);
break;
}
@@ -2485,52 +2524,6 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
return 0;
}
-struct dev_gso_cb {
- void (*destructor)(struct sk_buff *skb);
-};
-
-#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
-
-static void dev_gso_skb_destructor(struct sk_buff *skb)
-{
- struct dev_gso_cb *cb;
-
- kfree_skb_list(skb->next);
- skb->next = NULL;
-
- cb = DEV_GSO_CB(skb);
- if (cb->destructor)
- cb->destructor(skb);
-}
-
-/**
- * dev_gso_segment - Perform emulated hardware segmentation on skb.
- * @skb: buffer to segment
- * @features: device features as applicable to this skb
- *
- * This function segments the given skb and stores the list of segments
- * in skb->next.
- */
-static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
-{
- struct sk_buff *segs;
-
- segs = skb_gso_segment(skb, features);
-
- /* Verifying header integrity only. */
- if (!segs)
- return 0;
-
- if (IS_ERR(segs))
- return PTR_ERR(segs);
-
- skb->next = segs;
- DEV_GSO_CB(skb)->destructor = skb->destructor;
- skb->destructor = dev_gso_skb_destructor;
-
- return 0;
-}
-
/* If MPLS offload request, verify we are testing hardware MPLS features
* instead of standard features for the netdev.
*/
@@ -2574,10 +2567,12 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
+ const struct net_device *dev = skb->dev;
+ netdev_features_t features = dev->features;
+ u16 gso_segs = skb_shinfo(skb)->gso_segs;
__be16 protocol = skb->protocol;
- netdev_features_t features = skb->dev->features;
- if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+ if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
features &= ~NETIF_F_GSO_MASK;
if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
@@ -2588,7 +2583,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
}
features = netdev_intersect_features(features,
- skb->dev->vlan_features |
+ dev->vlan_features |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
@@ -2605,119 +2600,149 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_skb_features);
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
- struct netdev_queue *txq)
+static int xmit_one(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq, bool more)
{
- const struct net_device_ops *ops = dev->netdev_ops;
- int rc = NETDEV_TX_OK;
- unsigned int skb_len;
+ unsigned int len;
+ int rc;
- if (likely(!skb->next)) {
- netdev_features_t features;
+ if (!list_empty(&ptype_all))
+ dev_queue_xmit_nit(skb, dev);
- /*
- * If device doesn't need skb->dst, release it right now while
- * its hot in this cpu cache
- */
- if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
- skb_dst_drop(skb);
+ len = skb->len;
+ trace_net_dev_start_xmit(skb, dev);
+ rc = netdev_start_xmit(skb, dev, txq, more);
+ trace_net_dev_xmit(skb, rc, dev, len);
- features = netif_skb_features(skb);
-
- if (vlan_tx_tag_present(skb) &&
- !vlan_hw_offload_capable(features, skb->vlan_proto)) {
- skb = __vlan_put_tag(skb, skb->vlan_proto,
- vlan_tx_tag_get(skb));
- if (unlikely(!skb))
- goto out;
+ return rc;
+}
- skb->vlan_tci = 0;
- }
+struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
+ struct netdev_queue *txq, int *ret)
+{
+ struct sk_buff *skb = first;
+ int rc = NETDEV_TX_OK;
- /* If encapsulation offload request, verify we are testing
- * hardware encapsulation features instead of standard
- * features for the netdev
- */
- if (skb->encapsulation)
- features &= dev->hw_enc_features;
+ while (skb) {
+ struct sk_buff *next = skb->next;
- if (netif_needs_gso(skb, features)) {
- if (unlikely(dev_gso_segment(skb, features)))
- goto out_kfree_skb;
- if (skb->next)
- goto gso;
- } else {
- if (skb_needs_linearize(skb, features) &&
- __skb_linearize(skb))
- goto out_kfree_skb;
+ skb->next = NULL;
+ rc = xmit_one(skb, dev, txq, next != NULL);
+ if (unlikely(!dev_xmit_complete(rc))) {
+ skb->next = next;
+ goto out;
+ }
- /* If packet is not checksummed and device does not
- * support checksumming for this protocol, complete
- * checksumming here.
- */
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (skb->encapsulation)
- skb_set_inner_transport_header(skb,
- skb_checksum_start_offset(skb));
- else
- skb_set_transport_header(skb,
- skb_checksum_start_offset(skb));
- if (!(features & NETIF_F_ALL_CSUM) &&
- skb_checksum_help(skb))
- goto out_kfree_skb;
- }
+ skb = next;
+ if (netif_xmit_stopped(txq) && skb) {
+ rc = NETDEV_TX_BUSY;
+ break;
}
+ }
- if (!list_empty(&ptype_all))
- dev_queue_xmit_nit(skb, dev);
+out:
+ *ret = rc;
+ return skb;
+}
- skb_len = skb->len;
- trace_net_dev_start_xmit(skb, dev);
- rc = ops->ndo_start_xmit(skb, dev);
- trace_net_dev_xmit(skb, rc, dev, skb_len);
- if (rc == NETDEV_TX_OK)
- txq_trans_update(txq);
- return rc;
+static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ if (vlan_tx_tag_present(skb) &&
+ !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+ skb = __vlan_put_tag(skb, skb->vlan_proto,
+ vlan_tx_tag_get(skb));
+ if (skb)
+ skb->vlan_tci = 0;
}
+ return skb;
+}
-gso:
- do {
- struct sk_buff *nskb = skb->next;
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+{
+ netdev_features_t features;
- skb->next = nskb->next;
- nskb->next = NULL;
+ if (skb->next)
+ return skb;
- if (!list_empty(&ptype_all))
- dev_queue_xmit_nit(nskb, dev);
-
- skb_len = nskb->len;
- trace_net_dev_start_xmit(nskb, dev);
- rc = ops->ndo_start_xmit(nskb, dev);
- trace_net_dev_xmit(nskb, rc, dev, skb_len);
- if (unlikely(rc != NETDEV_TX_OK)) {
- if (rc & ~NETDEV_TX_MASK)
- goto out_kfree_gso_skb;
- nskb->next = skb->next;
- skb->next = nskb;
- return rc;
+ features = netif_skb_features(skb);
+ skb = validate_xmit_vlan(skb, features);
+ if (unlikely(!skb))
+ goto out_null;
+
+ /* If encapsulation offload request, verify we are testing
+ * hardware encapsulation features instead of standard
+ * features for the netdev
+ */
+ if (skb->encapsulation)
+ features &= dev->hw_enc_features;
+
+ if (netif_needs_gso(skb, features)) {
+ struct sk_buff *segs;
+
+ segs = skb_gso_segment(skb, features);
+ if (IS_ERR(segs)) {
+ segs = NULL;
+ } else if (segs) {
+ consume_skb(skb);
+ skb = segs;
}
- txq_trans_update(txq);
- if (unlikely(netif_xmit_stopped(txq) && skb->next))
- return NETDEV_TX_BUSY;
- } while (skb->next);
+ } else {
+ if (skb_needs_linearize(skb, features) &&
+ __skb_linearize(skb))
+ goto out_kfree_skb;
-out_kfree_gso_skb:
- if (likely(skb->next == NULL)) {
- skb->destructor = DEV_GSO_CB(skb)->destructor;
- consume_skb(skb);
- return rc;
+ /* If packet is not checksummed and device does not
+ * support checksumming for this protocol, complete
+ * checksumming here.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->encapsulation)
+ skb_set_inner_transport_header(skb,
+ skb_checksum_start_offset(skb));
+ else
+ skb_set_transport_header(skb,
+ skb_checksum_start_offset(skb));
+ if (!(features & NETIF_F_ALL_CSUM) &&
+ skb_checksum_help(skb))
+ goto out_kfree_skb;
+ }
}
+
+ return skb;
+
out_kfree_skb:
kfree_skb(skb);
-out:
- return rc;
+out_null:
+ return NULL;
+}
+
+struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
+{
+ struct sk_buff *next, *head = NULL, *tail;
+
+ for (; skb != NULL; skb = next) {
+ next = skb->next;
+ skb->next = NULL;
+
+	/* in case skb won't be segmented, point to itself */
+ skb->prev = skb;
+
+ skb = validate_xmit_skb(skb, dev);
+ if (!skb)
+ continue;
+
+ if (!head)
+ head = skb;
+ else
+ tail->next = skb;
+ /* If skb was segmented, skb->prev points to
+ * the last segment. If not, it still contains skb.
+ */
+ tail = skb->prev;
+ }
+ return head;
}
-EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
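
validate_xmit_skb_list() leans on a small trick: each skb's prev pointer is first made to point at itself, and if GSO segmentation expands the skb into a chain, prev ends up pointing at the last segment, so the tail pointer can be advanced in O(1) without walking the chain. A userspace sketch of the same list splicing, with expand() as a hypothetical stand-in for validate_xmit_skb():

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
	struct node *prev;	/* last element of this node's chain */
};

/* May turn one node into a two-node chain, leaving prev pointing
 * at the final element, like GSO segmentation does for an skb.
 */
static struct node *expand(struct node *n)
{
	n->prev = n;		/* default: chain of one */
	if (n->id % 2 == 0) {	/* "segment" even ids into two nodes */
		struct node *seg = malloc(sizeof(*seg));

		seg->id = n->id;
		seg->next = NULL;
		n->next = seg;
		n->prev = seg;	/* prev now marks the chain's tail */
	}
	return n;
}

int main(void)
{
	struct node in[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
	struct node *head = NULL, *tail = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = expand(&in[i]);

		if (!head)
			head = n;
		else
			tail->next = n;
		tail = n->prev;	/* jump straight to the chain's end */
	}

	for (; head; head = head->next)
		printf("%d ", head->id);
	printf("\n");
	return 0;
}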
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
@@ -2780,12 +2805,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
* waiting to be sent out; and the qdisc is not running -
* xmit the skb directly.
*/
- if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
- skb_dst_force(skb);
qdisc_bstats_update(q, skb);
- if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+ if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
if (unlikely(contended)) {
spin_unlock(&q->busylock);
contended = false;
@@ -2796,7 +2819,6 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
rc = NET_XMIT_SUCCESS;
} else {
- skb_dst_force(skb);
rc = q->enqueue(skb, q) & NET_XMIT_MASK;
if (qdisc_run_begin(q)) {
if (unlikely(contended)) {
@@ -2893,6 +2915,14 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
skb_update_prio(skb);
+ /* If device/qdisc don't need skb->dst, release it right now while
+	 * it's hot in this cpu cache.
+ */
+ if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+ skb_dst_drop(skb);
+ else
+ skb_dst_force(skb);
+
txq = netdev_pick_tx(dev, skb, accel_priv);
q = rcu_dereference_bh(txq->qdisc);
@@ -2925,11 +2955,15 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
goto recursion_alert;
+ skb = validate_xmit_skb(skb, dev);
+ if (!skb)
+ goto drop;
+
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
__this_cpu_inc(xmit_recursion);
- rc = dev_hard_start_xmit(skb, dev, txq);
+ skb = dev_hard_start_xmit(skb, dev, txq, &rc);
__this_cpu_dec(xmit_recursion);
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
@@ -2950,10 +2984,11 @@ recursion_alert:
}
rc = -ENETDOWN;
+drop:
rcu_read_unlock_bh();
atomic_long_inc(&dev->tx_dropped);
- kfree_skb(skb);
+ kfree_skb_list(skb);
return rc;
out:
rcu_read_unlock_bh();
@@ -3130,8 +3165,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
}
if (map) {
- tcpu = map->cpus[((u64) hash * map->len) >> 32];
-
+ tcpu = map->cpus[reciprocal_scale(hash, map->len)];
if (cpu_online(tcpu)) {
cpu = tcpu;
goto done;
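
reciprocal_scale() is the named helper for the open-coded ((u64) hash * map->len) >> 32 it replaces here: it maps a uniformly distributed 32-bit value into [0, len) with one multiply and a shift instead of a modulo. A self-contained sketch of the arithmetic:

#include <stdio.h>
#include <stdint.h>

static inline uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t hash = 0x9e3779b9;	/* arbitrary hash value */
	uint32_t ncpus = 4;

	/* Always in [0, ncpus), without the cost of hash % ncpus */
	printf("cpu index: %u\n", reciprocal_scale(hash, ncpus));
	return 0;
}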
@@ -3467,7 +3501,7 @@ static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
- q = rxq->qdisc;
+ q = rcu_dereference(rxq->qdisc);
if (q != &noop_qdisc) {
spin_lock(qdisc_lock(q));
if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
@@ -3484,7 +3518,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
{
struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
- if (!rxq || rxq->qdisc == &noop_qdisc)
+ if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
goto out;
if (*pt_prev) {
@@ -3965,11 +3999,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
if (!(skb->dev->features & NETIF_F_GRO))
goto normal;
- if (skb_is_gso(skb) || skb_has_frag_list(skb))
+ if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
goto normal;
gro_list_prepare(napi, skb);
- NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
@@ -3983,6 +4016,22 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
NAPI_GRO_CB(skb)->free = 0;
NAPI_GRO_CB(skb)->udp_mark = 0;
+ /* Setup for GRO checksum validation */
+ switch (skb->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ NAPI_GRO_CB(skb)->csum = skb->csum;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+ NAPI_GRO_CB(skb)->csum_cnt = 0;
+ break;
+ case CHECKSUM_UNNECESSARY:
+ NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
+ NAPI_GRO_CB(skb)->csum_valid = 0;
+ break;
+ default:
+ NAPI_GRO_CB(skb)->csum_cnt = 0;
+ NAPI_GRO_CB(skb)->csum_valid = 0;
+ }
+
pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
break;
}
@@ -4212,6 +4261,31 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
}
EXPORT_SYMBOL(napi_gro_frags);
+/* Compute the checksum from gro_offset and return the folded value
+ * after adding in any pseudo checksum.
+ */
+__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
+{
+ __wsum wsum;
+ __sum16 sum;
+
+ wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
+
+ /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
+ sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
+ if (likely(!sum)) {
+ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !skb->csum_complete_sw)
+ netdev_rx_csum_fault(skb->dev);
+ }
+
+ NAPI_GRO_CB(skb)->csum = wsum;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+
+ return sum;
+}
+EXPORT_SYMBOL(__skb_gro_checksum_complete);
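
__skb_gro_checksum_complete() above is standard one's-complement arithmetic: accumulate with end-around carry (csum_add), then fold to 16 bits and invert (csum_fold); a packet whose pseudo-header and payload sums are complementary folds to zero. A userspace sketch of that arithmetic, using hypothetical input values:

#include <stdio.h>
#include <stdint.h>

static uint32_t csum_add(uint32_t csum, uint32_t addend)
{
	uint32_t res = csum + addend;

	return res + (res < addend);	/* wrap the end-around carry */
}

static uint16_t csum_fold(uint32_t csum)
{
	uint32_t sum = (csum & 0xffff) + (csum >> 16);

	sum = (sum & 0xffff) + (sum >> 16);	/* fold any new carry */
	return (uint16_t)~sum;
}

int main(void)
{
	/* Hypothetical values: complementary pseudo-header and payload
	 * sums, as produced by a packet with a correct checksum.
	 */
	uint32_t pseudo = 0x0001fffe;	/* pseudo-header contribution */
	uint32_t payload = 0xfffe0001;	/* data sum of a valid packet */

	printf("folded: 0x%04x\n", csum_fold(csum_add(pseudo, payload)));
	return 0;
}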
+
/*
* net_rps_action_and_irq_enable sends any pending IPI's for rps.
* Note: called with local irq disabled, but exits with local irq enabled.
@@ -6589,6 +6663,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->gso_max_size = GSO_MAX_SIZE;
dev->gso_max_segs = GSO_MAX_SEGS;
+ dev->gso_min_segs = 0;
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
@@ -6598,7 +6673,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
INIT_LIST_HEAD(&dev->adj_list.lower);
INIT_LIST_HEAD(&dev->all_adj_list.upper);
INIT_LIST_HEAD(&dev->all_adj_list.lower);
- dev->priv_flags = IFF_XMIT_DST_RELEASE;
+ dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
setup(dev);
dev->num_tx_queues = txqs;
@@ -7020,53 +7095,45 @@ const char *netdev_drivername(const struct net_device *dev)
return empty;
}
-static int __netdev_printk(const char *level, const struct net_device *dev,
- struct va_format *vaf)
+static void __netdev_printk(const char *level, const struct net_device *dev,
+ struct va_format *vaf)
{
- int r;
-
if (dev && dev->dev.parent) {
- r = dev_printk_emit(level[1] - '0',
- dev->dev.parent,
- "%s %s %s%s: %pV",
- dev_driver_string(dev->dev.parent),
- dev_name(dev->dev.parent),
- netdev_name(dev), netdev_reg_state(dev),
- vaf);
+ dev_printk_emit(level[1] - '0',
+ dev->dev.parent,
+ "%s %s %s%s: %pV",
+ dev_driver_string(dev->dev.parent),
+ dev_name(dev->dev.parent),
+ netdev_name(dev), netdev_reg_state(dev),
+ vaf);
} else if (dev) {
- r = printk("%s%s%s: %pV", level, netdev_name(dev),
- netdev_reg_state(dev), vaf);
+ printk("%s%s%s: %pV",
+ level, netdev_name(dev), netdev_reg_state(dev), vaf);
} else {
- r = printk("%s(NULL net_device): %pV", level, vaf);
+ printk("%s(NULL net_device): %pV", level, vaf);
}
-
- return r;
}
-int netdev_printk(const char *level, const struct net_device *dev,
- const char *format, ...)
+void netdev_printk(const char *level, const struct net_device *dev,
+ const char *format, ...)
{
struct va_format vaf;
va_list args;
- int r;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
- r = __netdev_printk(level, dev, &vaf);
+ __netdev_printk(level, dev, &vaf);
va_end(args);
-
- return r;
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level) \
-int func(const struct net_device *dev, const char *fmt, ...) \
+void func(const struct net_device *dev, const char *fmt, ...) \
{ \
- int r; \
struct va_format vaf; \
va_list args; \
\
@@ -7075,11 +7142,9 @@ int func(const struct net_device *dev, const char *fmt, ...) \
vaf.fmt = fmt; \
vaf.va = &args; \
\
- r = __netdev_printk(level, dev, &vaf); \
+ __netdev_printk(level, dev, &vaf); \
\
va_end(args); \
- \
- return r; \
} \
EXPORT_SYMBOL(func);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index cf999e09bcd2..72e899a3efda 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -365,11 +365,8 @@ void dev_load(struct net *net, const char *name)
no_module = !dev;
if (no_module && capable(CAP_NET_ADMIN))
no_module = request_module("netdev-%s", name);
- if (no_module && capable(CAP_SYS_MODULE)) {
- if (!request_module("%s", name))
- pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
- name);
- }
+ if (no_module && capable(CAP_SYS_MODULE))
+ request_module("%s", name);
}
EXPORT_SYMBOL(dev_load);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 17cb912793fa..1600aa24d36b 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1621,6 +1621,81 @@ static int ethtool_get_module_eeprom(struct net_device *dev,
modinfo.eeprom_len);
}
+static int ethtool_tunable_valid(const struct ethtool_tunable *tuna)
+{
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ case ETHTOOL_TX_COPYBREAK:
+ if (tuna->len != sizeof(u32) ||
+ tuna->type_id != ETHTOOL_TUNABLE_U32)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
+{
+ int ret;
+ struct ethtool_tunable tuna;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ void *data;
+
+ if (!ops->get_tunable)
+ return -EOPNOTSUPP;
+ if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
+ return -EFAULT;
+ ret = ethtool_tunable_valid(&tuna);
+ if (ret)
+ return ret;
+ data = kmalloc(tuna.len, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+ ret = ops->get_tunable(dev, &tuna, data);
+ if (ret)
+ goto out;
+ useraddr += sizeof(tuna);
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, data, tuna.len))
+ goto out;
+ ret = 0;
+
+out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_set_tunable(struct net_device *dev, void __user *useraddr)
+{
+ int ret;
+ struct ethtool_tunable tuna;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ void *data;
+
+ if (!ops->set_tunable)
+ return -EOPNOTSUPP;
+ if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
+ return -EFAULT;
+ ret = ethtool_tunable_valid(&tuna);
+ if (ret)
+ return ret;
+ data = kmalloc(tuna.len, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+ useraddr += sizeof(tuna);
+ ret = -EFAULT;
+ if (copy_from_user(data, useraddr, tuna.len))
+ goto out;
+ ret = ops->set_tunable(dev, &tuna, data);
+
+out:
+ kfree(data);
+ return ret;
+}
+
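For context, a minimal userspace sketch of driving the new commands through the SIOCETHTOOL ioctl; the interface name "eth0" is an assumption:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct {
		struct ethtool_tunable hdr;
		uint32_t value;		/* payload of the u32 tunable */
	} req;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.hdr.cmd = ETHTOOL_GTUNABLE;
	req.hdr.id = ETHTOOL_RX_COPYBREAK;
	req.hdr.type_id = ETHTOOL_TUNABLE_U32;
	req.hdr.len = sizeof(uint32_t);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed name */
	ifr.ifr_data = (void *)&req;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx-copybreak: %u bytes\n", req.value);

	close(fd);
	return 0;
}

The kernel side copies the tunable payload from just past the header, which is why ethtool_get_tunable() advances useraddr by sizeof(tuna) before the copy_to_user().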
/* The main entry point in this file. Called from net/core/dev_ioctl.c */
int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1670,6 +1745,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GCHANNELS:
case ETHTOOL_GET_TS_INFO:
case ETHTOOL_GEEE:
+ case ETHTOOL_GTUNABLE:
break;
default:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -1857,6 +1933,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GMODULEEEPROM:
rc = ethtool_get_module_eeprom(dev, useraddr);
break;
+ case ETHTOOL_GTUNABLE:
+ rc = ethtool_get_tunable(dev, useraddr);
+ break;
+ case ETHTOOL_STUNABLE:
+ rc = ethtool_set_tunable(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index d814b8a89d0f..fcd3f6742a6a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -87,33 +87,9 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(sk_filter);
-/* Helper to find the offset of pkt_type in sk_buff structure. We want
- * to make sure its still a 3bit field starting at a byte boundary;
- * taken from arch/x86/net/bpf_jit_comp.c.
- */
-#ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_TYPE_MAX (7 << 5)
-#else
-#define PKT_TYPE_MAX 7
-#endif
-static unsigned int pkt_type_offset(void)
-{
- struct sk_buff skb_probe = { .pkt_type = ~0, };
- u8 *ct = (u8 *) &skb_probe;
- unsigned int off;
-
- for (off = 0; off < sizeof(struct sk_buff); off++) {
- if (ct[off] == PKT_TYPE_MAX)
- return off;
- }
-
- pr_err_once("Please fix %s, as pkt_type couldn't be found!\n", __func__);
- return -1;
-}
-
static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
{
- return __skb_get_poff((struct sk_buff *)(unsigned long) ctx);
+ return skb_get_poff((struct sk_buff *)(unsigned long) ctx);
}
static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
@@ -190,11 +166,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
break;
case SKF_AD_OFF + SKF_AD_PKTTYPE:
- *insn = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
- pkt_type_offset());
- if (insn->off < 0)
- return false;
- insn++;
+ *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_A, BPF_REG_CTX,
+ PKT_TYPE_OFFSET());
*insn = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
insn++;
@@ -933,7 +906,7 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
/* Expand fp for appending the new filter representation. */
old_fp = fp;
- fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL);
+ fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
if (!fp) {
/* The old_fp is still around in case we couldn't
* allocate new memory, so uncharge on that one.
@@ -972,7 +945,7 @@ static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
int err;
fp->bpf_func = NULL;
- fp->jited = 0;
+ fp->jited = false;
err = bpf_check_classic(fp->insns, fp->len);
if (err) {
@@ -1013,7 +986,7 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
if (fprog->filter == NULL)
return -EINVAL;
- fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL);
+ fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
if (!fp)
return -ENOMEM;
@@ -1069,12 +1042,12 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
if (fprog->filter == NULL)
return -EINVAL;
- prog = kmalloc(bpf_fsize, GFP_KERNEL);
+ prog = bpf_prog_alloc(bpf_fsize, 0);
if (!prog)
return -ENOMEM;
if (copy_from_user(prog->insns, fprog->filter, fsize)) {
- kfree(prog);
+ __bpf_prog_free(prog);
return -EFAULT;
}
@@ -1082,7 +1055,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
err = bpf_prog_store_orig_filter(prog, fprog);
if (err) {
- kfree(prog);
+ __bpf_prog_free(prog);
return -ENOMEM;
}
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 5f362c1d0332..8560dea58803 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -13,6 +13,7 @@
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>
+#include <scsi/fc/fc_fcoe.h>
/* copy saddr & daddr, possibly using 64bit load/store
* Equivalent to : flow->src = iph->saddr;
@@ -26,36 +27,61 @@ static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *i
}
/**
- * skb_flow_get_ports - extract the upper layer ports and return them
- * @skb: buffer to extract the ports from
+ * __skb_flow_get_ports - extract the upper layer ports and return them
+ * @skb: sk_buff to extract the ports from
* @thoff: transport header offset
* @ip_proto: protocol for which to get port offset
+ * @data: raw buffer pointer to the packet, if NULL use skb->data
+ * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
*
* The function will try to retrieve the ports at offset thoff + poff where poff
* is the protocol port offset returned from proto_ports_offset
*/
-__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
+__be32 __skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto,
+ void *data, int hlen)
{
int poff = proto_ports_offset(ip_proto);
+ if (!data) {
+ data = skb->data;
+ hlen = skb_headlen(skb);
+ }
+
if (poff >= 0) {
__be32 *ports, _ports;
- ports = skb_header_pointer(skb, thoff + poff,
- sizeof(_ports), &_ports);
+ ports = __skb_header_pointer(skb, thoff + poff,
+ sizeof(_ports), data, hlen, &_ports);
if (ports)
return *ports;
}
return 0;
}
-EXPORT_SYMBOL(skb_flow_get_ports);
+EXPORT_SYMBOL(__skb_flow_get_ports);
-bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
+/**
+ * __skb_flow_dissect - extract the flow_keys struct and return it
+ * @skb: sk_buff to extract the flow from; may be NULL if the remaining arguments are supplied
+ * @data: raw buffer pointer to the packet, if NULL use skb->data
+ * @proto: protocol for which to get the flow, if @data is NULL use skb->protocol
+ * @nhoff: network header offset, if @data is NULL use skb_network_offset(skb)
+ * @hlen: packet header length, if @data is NULL use skb_headlen(skb)
+ *
+ * The function will try to retrieve the struct flow_keys from either the skbuff
+ * or a raw buffer specified by the remaining parameters
+ */
+bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow,
+ void *data, __be16 proto, int nhoff, int hlen)
{
- int nhoff = skb_network_offset(skb);
u8 ip_proto;
- __be16 proto = skb->protocol;
+
+ if (!data) {
+ data = skb->data;
+ proto = skb->protocol;
+ nhoff = skb_network_offset(skb);
+ hlen = skb_headlen(skb);
+ }
memset(flow, 0, sizeof(*flow));
@@ -65,7 +91,7 @@ again:
const struct iphdr *iph;
struct iphdr _iph;
ip:
- iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
if (!iph || iph->ihl < 5)
return false;
nhoff += iph->ihl * 4;
@@ -83,7 +109,7 @@ ip:
__be32 flow_label;
ipv6:
- iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
+ iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
if (!iph)
return false;
@@ -92,6 +118,13 @@ ipv6:
flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
nhoff += sizeof(struct ipv6hdr);
+ /* Skip the flow label processing if skb is NULL. The
+ * assumption here is that when there is no skb we are
+ * interested in the length rather than the flow info.
+ */
+ if (!skb)
+ break;
+
flow_label = ip6_flowlabel(iph);
if (flow_label) {
/* Awesome, IPv6 packet has a flow label so we can
@@ -113,7 +146,7 @@ ipv6:
const struct vlan_hdr *vlan;
struct vlan_hdr _vlan;
- vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
+ vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
if (!vlan)
return false;
@@ -126,7 +159,7 @@ ipv6:
struct pppoe_hdr hdr;
__be16 proto;
} *hdr, _hdr;
- hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+ hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
return false;
proto = hdr->proto;
@@ -140,6 +173,9 @@ ipv6:
return false;
}
}
+ case htons(ETH_P_FCOE):
+ flow->thoff = (u16)(nhoff + FCOE_HEADER_LEN);
+ /* fall through */
default:
return false;
}
@@ -151,7 +187,7 @@ ipv6:
__be16 proto;
} *hdr, _hdr;
- hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
+ hdr = __skb_header_pointer(skb, nhoff, sizeof(_hdr), data, hlen, &_hdr);
if (!hdr)
return false;
/*
@@ -171,8 +207,9 @@ ipv6:
const struct ethhdr *eth;
struct ethhdr _eth;
- eth = skb_header_pointer(skb, nhoff,
- sizeof(_eth), &_eth);
+ eth = __skb_header_pointer(skb, nhoff,
+ sizeof(_eth),
+ data, hlen, &_eth);
if (!eth)
return false;
proto = eth->h_proto;
@@ -194,12 +231,12 @@ ipv6:
flow->n_proto = proto;
flow->ip_proto = ip_proto;
- flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
+ flow->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen);
flow->thoff = (u16) nhoff;
return true;
}
-EXPORT_SYMBOL(skb_flow_dissect);
+EXPORT_SYMBOL(__skb_flow_dissect);
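The new raw-buffer interface makes the dissector usable without an skb. A minimal sketch, assuming a linear IPv4 packet sitting in a hypothetical buf/len pair:

static bool dissect_raw_ipv4(void *buf, int len, struct flow_keys *keys)
{
	/* With skb == NULL, data, proto, nhoff and hlen must all be
	 * supplied by the caller; flow-label handling is skipped.
	 */
	return __skb_flow_dissect(NULL, keys, buf,
				  htons(ETH_P_IP), 0, len);
}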
static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
@@ -286,30 +323,22 @@ u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
qcount = dev->tc_to_txq[tc].count;
}
- return (u16) (((u64)skb_get_hash(skb) * qcount) >> 32) + qoffset;
+ return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
-/* __skb_get_poff() returns the offset to the payload as far as it could
- * be dissected. The main user is currently BPF, so that we can dynamically
- * truncate packets without needing to push actual payload to the user
- * space and can analyze headers only, instead.
- */
-u32 __skb_get_poff(const struct sk_buff *skb)
+u32 __skb_get_poff(const struct sk_buff *skb, void *data,
+ const struct flow_keys *keys, int hlen)
{
- struct flow_keys keys;
- u32 poff = 0;
-
- if (!skb_flow_dissect(skb, &keys))
- return 0;
+ u32 poff = keys->thoff;
- poff += keys.thoff;
- switch (keys.ip_proto) {
+ switch (keys->ip_proto) {
case IPPROTO_TCP: {
const struct tcphdr *tcph;
struct tcphdr _tcph;
- tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
+ tcph = __skb_header_pointer(skb, poff, sizeof(_tcph),
+ data, hlen, &_tcph);
if (!tcph)
return poff;
@@ -343,6 +372,21 @@ u32 __skb_get_poff(const struct sk_buff *skb)
return poff;
}
+/* skb_get_poff() returns the offset to the payload as far as it could
+ * be dissected. The main user is currently BPF, so that we can dynamically
+ * truncate packets without needing to push actual payload to the user
+ * space and can analyze headers only, instead.
+ */
+u32 skb_get_poff(const struct sk_buff *skb)
+{
+ struct flow_keys keys;
+
+ if (!skb_flow_dissect(skb, &keys))
+ return 0;
+
+ return __skb_get_poff(skb, skb->data, &keys, skb_headlen(skb));
+}
+
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
@@ -359,9 +403,8 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
if (map->len == 1)
queue_index = map->queues[0];
else
- queue_index = map->queues[
- ((u64)skb_get_hash(skb) * map->len) >> 32];
-
+ queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
+ map->len)];
if (unlikely(queue_index >= dev->real_num_tx_queues))
queue_index = -1;
}
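Both conversions above replace the same open-coded multiply-shift trick: mapping a uniformly distributed 32-bit hash into [0, ep_ro) without a modulo. reciprocal_scale() in <linux/kernel.h> encapsulates it; its definition, quoted for reference, is simply:

static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
	return (u32)(((u64)val * ep_ro) >> 32);
}

The behaviour is identical to the removed expressions; the change is purely a readability cleanup.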
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9d33dfffca19..9dfb88a933e7 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -91,6 +91,8 @@ struct gen_estimator
u32 avpps;
struct rcu_head e_rcu;
struct rb_node node;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct rcu_head head;
};
struct gen_estimator_head
@@ -115,9 +117,8 @@ static void est_timer(unsigned long arg)
rcu_read_lock();
list_for_each_entry_rcu(e, &elist[idx].list, list) {
- u64 nbytes;
+ struct gnet_stats_basic_packed b = {0};
u64 brate;
- u32 npackets;
u32 rate;
spin_lock(e->stats_lock);
@@ -125,15 +126,15 @@ static void est_timer(unsigned long arg)
if (e->bstats == NULL)
goto skip;
- nbytes = e->bstats->bytes;
- npackets = e->bstats->packets;
- brate = (nbytes - e->last_bytes)<<(7 - idx);
- e->last_bytes = nbytes;
+ __gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats);
+
+ brate = (b.bytes - e->last_bytes)<<(7 - idx);
+ e->last_bytes = b.bytes;
e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
e->rate_est->bps = (e->avbps+0xF)>>5;
- rate = (npackets - e->last_packets)<<(12 - idx);
- e->last_packets = npackets;
+ rate = (b.packets - e->last_packets)<<(12 - idx);
+ e->last_packets = b.packets;
e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
e->rate_est->pps = (e->avpps+0x1FF)>>10;
skip:
@@ -203,12 +204,14 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats
*
*/
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock,
struct nlattr *opt)
{
struct gen_estimator *est;
struct gnet_estimator *parm = nla_data(opt);
+ struct gnet_stats_basic_packed b = {0};
int idx;
if (nla_len(opt) < sizeof(*parm))
@@ -221,15 +224,18 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
if (est == NULL)
return -ENOBUFS;
+ __gnet_stats_copy_basic(&b, cpu_bstats, bstats);
+
idx = parm->interval + 2;
est->bstats = bstats;
est->rate_est = rate_est;
est->stats_lock = stats_lock;
est->ewma_log = parm->ewma_log;
- est->last_bytes = bstats->bytes;
+ est->last_bytes = b.bytes;
est->avbps = rate_est->bps<<5;
- est->last_packets = bstats->packets;
+ est->last_packets = b.packets;
est->avpps = rate_est->pps<<10;
+ est->cpu_bstats = cpu_bstats;
spin_lock_bh(&est_tree_lock);
if (!elist[idx].timer.function) {
@@ -290,11 +296,12 @@ EXPORT_SYMBOL(gen_kill_estimator);
* Returns 0 on success or a negative error code.
*/
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
spinlock_t *stats_lock, struct nlattr *opt)
{
gen_kill_estimator(bstats, rate_est);
- return gen_new_estimator(bstats, rate_est, stats_lock, opt);
+ return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);
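A note on the fixed-point arithmetic the estimator hunks preserve: the timer for interval index idx fires every (HZ/4) << idx jiffies, i.e. with a period of 2^idx / 4 seconds, so schematically

	bytes/sec = delta_bytes / period        = delta_bytes << (2 - idx)
	brate     = delta_bytes << (7 - idx)    /* bps, pre-scaled by 2^5  */
	rate      = delta_packets << (12 - idx) /* pps, pre-scaled by 2^10 */

avbps and avpps then track EWMAs of these scaled samples (avg += (sample >> ewma_log) - (avg >> ewma_log)), and the scaling is shifted back out with rounding when reporting: bps = (avbps + 0xF) >> 5 and pps = (avpps + 0x1FF) >> 10. The patch only changes where the byte/packet deltas come from (per-CPU snapshot vs. direct read), not this arithmetic.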
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 2ddbce4cce14..0c08062d1796 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -97,6 +97,43 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
}
EXPORT_SYMBOL(gnet_stats_start_copy);
+static void
+__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
+ unsigned int start;
+ u64 bytes;
+ u32 packets;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&bcpu->syncp);
+ bytes = bcpu->bstats.bytes;
+ packets = bcpu->bstats.packets;
+ } while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));
+
+ bstats->bytes += bytes;
+ bstats->packets += packets;
+ }
+}
+
+void
+__gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b)
+{
+ if (cpu) {
+ __gnet_stats_copy_basic_cpu(bstats, cpu);
+ } else {
+ bstats->bytes = b->bytes;
+ bstats->packets = b->packets;
+ }
+}
+EXPORT_SYMBOL(__gnet_stats_copy_basic);
+
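The readers above pair with writers that bump the per-CPU counters inside a u64_stats sequence. A hedged sketch of that side (the helper name is hypothetical):

static void qdisc_bump_cpu_bstats(struct gnet_stats_basic_cpu *bcpu,
				  const struct sk_buff *skb)
{
	u64_stats_update_begin(&bcpu->syncp);
	bcpu->bstats.bytes += qdisc_pkt_len(skb);
	bcpu->bstats.packets++;
	u64_stats_update_end(&bcpu->syncp);
}

The fetch_begin/fetch_retry loop in __gnet_stats_copy_basic_cpu() then guarantees a consistent 64-bit snapshot on 32-bit hosts without taking a lock.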
/**
* gnet_stats_copy_basic - copy basic statistics into statistic TLV
* @d: dumping handle
@@ -109,19 +146,25 @@ EXPORT_SYMBOL(gnet_stats_start_copy);
* if the room in the socket buffer was not sufficient.
*/
int
-gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
+gnet_stats_copy_basic(struct gnet_dump *d,
+ struct gnet_stats_basic_cpu __percpu *cpu,
+ struct gnet_stats_basic_packed *b)
{
+ struct gnet_stats_basic_packed bstats = {0};
+
+ __gnet_stats_copy_basic(&bstats, cpu, b);
+
if (d->compat_tc_stats) {
- d->tc_stats.bytes = b->bytes;
- d->tc_stats.packets = b->packets;
+ d->tc_stats.bytes = bstats.bytes;
+ d->tc_stats.packets = bstats.packets;
}
if (d->tail) {
struct gnet_stats_basic sb;
memset(&sb, 0, sizeof(sb));
- sb.bytes = b->bytes;
- sb.packets = b->packets;
+ sb.bytes = bstats.bytes;
+ sb.packets = bstats.packets;
return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
}
return 0;
@@ -172,29 +215,74 @@ gnet_stats_copy_rate_est(struct gnet_dump *d,
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
+static void
+__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *q)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
+
+ qstats->qlen = 0;
+ qstats->backlog += qcpu->backlog;
+ qstats->drops += qcpu->drops;
+ qstats->requeues += qcpu->requeues;
+ qstats->overlimits += qcpu->overlimits;
+ }
+}
+
+static void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+ const struct gnet_stats_queue __percpu *cpu,
+ const struct gnet_stats_queue *q,
+ __u32 qlen)
+{
+ if (cpu) {
+ __gnet_stats_copy_queue_cpu(qstats, cpu);
+ } else {
+ qstats->qlen = q->qlen;
+ qstats->backlog = q->backlog;
+ qstats->drops = q->drops;
+ qstats->requeues = q->requeues;
+ qstats->overlimits = q->overlimits;
+ }
+
+ qstats->qlen = qlen;
+}
+
/**
* gnet_stats_copy_queue - copy queue statistics into statistics TLV
* @d: dumping handle
+ * @cpu_q: per cpu queue statistics
* @q: queue statistics
+ * @qlen: queue length statistics
*
* Appends the queue statistics to the top level TLV created by
- * gnet_stats_start_copy().
+ * gnet_stats_start_copy(). Per-CPU queue statistics are used if
+ * they are available.
*
* Returns 0 on success or -1 with the statistic lock released
* if the room in the socket buffer was not sufficient.
*/
int
-gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q)
+gnet_stats_copy_queue(struct gnet_dump *d,
+ struct gnet_stats_queue __percpu *cpu_q,
+ struct gnet_stats_queue *q, __u32 qlen)
{
+ struct gnet_stats_queue qstats = {0};
+
+ __gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);
+
if (d->compat_tc_stats) {
- d->tc_stats.drops = q->drops;
- d->tc_stats.qlen = q->qlen;
- d->tc_stats.backlog = q->backlog;
- d->tc_stats.overlimits = q->overlimits;
+ d->tc_stats.drops = qstats.drops;
+ d->tc_stats.qlen = qstats.qlen;
+ d->tc_stats.backlog = qstats.backlog;
+ d->tc_stats.overlimits = qstats.overlimits;
}
if (d->tail)
- return gnet_stats_copy(d, TCA_STATS_QUEUE, q, sizeof(*q));
+ return gnet_stats_copy(d, TCA_STATS_QUEUE,
+ &qstats, sizeof(qstats));
return 0;
}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 7c6b51a58968..7f155175bba8 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -224,7 +224,7 @@ static void net_free(struct net *net)
return;
}
#endif
- kfree(net->gen);
+ kfree(rcu_access_pointer(net->gen));
kmem_cache_free(net_cachep, net);
}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 907fb5e36c02..e6645b4f330a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -72,7 +72,6 @@ module_param(carrier_timeout, uint, 0644);
static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
{
- const struct net_device_ops *ops = dev->netdev_ops;
int status = NETDEV_TX_OK;
netdev_features_t features;
@@ -92,9 +91,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
skb->vlan_tci = 0;
}
- status = ops->ndo_start_xmit(skb, dev);
- if (status == NETDEV_TX_OK)
- txq_trans_update(txq);
+ status = netdev_start_xmit(skb, dev, txq, false);
out:
return status;
@@ -116,7 +113,7 @@ static void queue_process(struct work_struct *work)
continue;
}
- txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ txq = skb_get_tx_queue(dev, skb);
local_irq_save(flags);
HARD_TX_LOCK(dev, txq, smp_processor_id());
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 8b849ddfef2e..443256bdcddc 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -202,6 +202,7 @@
#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
#define F_NODE (1<<15) /* Node memory alloc*/
#define F_UDPCSUM (1<<16) /* Include UDP checksum */
+#define F_NO_TIMESTAMP (1<<17) /* Don't timestamp packets (default TS) */
/* Thread control flag bits */
#define T_STOP (1<<0) /* Stop run */
@@ -386,6 +387,7 @@ struct pktgen_dev {
u16 queue_map_min;
u16 queue_map_max;
__u32 skb_priority; /* skb priority field */
+ unsigned int burst; /* number of duplicated packets to burst */
int node; /* Memory node */
#ifdef CONFIG_XFRM
@@ -505,7 +507,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf,
pktgen_reset_all_threads(pn);
else
- pr_warning("Unknown command: %s\n", data);
+ pr_warn("Unknown command: %s\n", data);
return count;
}
@@ -612,6 +614,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->traffic_class)
seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class);
+ if (pkt_dev->burst > 1)
+ seq_printf(seq, " burst: %d\n", pkt_dev->burst);
+
if (pkt_dev->node >= 0)
seq_printf(seq, " node: %d\n", pkt_dev->node);
@@ -638,6 +643,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->flags & F_UDPCSUM)
seq_puts(seq, "UDPCSUM ");
+ if (pkt_dev->flags & F_NO_TIMESTAMP)
+ seq_puts(seq, "NO_TIMESTAMP ");
+
if (pkt_dev->flags & F_MPLS_RND)
seq_puts(seq, "MPLS_RND ");
@@ -857,14 +865,14 @@ static ssize_t pktgen_if_write(struct file *file,
pg_result = &(pkt_dev->result[0]);
if (count < 1) {
- pr_warning("wrong command format\n");
+ pr_warn("wrong command format\n");
return -EINVAL;
}
max = count;
tmp = count_trail_chars(user_buffer, max);
if (tmp < 0) {
- pr_warning("illegal format\n");
+ pr_warn("illegal format\n");
return tmp;
}
i = tmp;
@@ -1120,6 +1128,16 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->dst_mac_count);
return count;
}
+ if (!strcmp(name, "burst")) {
+ len = num_arg(&user_buffer[i], 10, &value);
+ if (len < 0)
+ return len;
+
+ i += len;
+ pkt_dev->burst = value < 1 ? 1 : value;
+ sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
+ return count;
+ }
if (!strcmp(name, "node")) {
len = num_arg(&user_buffer[i], 10, &value);
if (len < 0)
@@ -1243,6 +1261,9 @@ static ssize_t pktgen_if_write(struct file *file,
else if (strcmp(f, "!UDPCSUM") == 0)
pkt_dev->flags &= ~F_UDPCSUM;
+ else if (strcmp(f, "NO_TIMESTAMP") == 0)
+ pkt_dev->flags |= F_NO_TIMESTAMP;
+
else {
sprintf(pg_result,
"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
@@ -1251,6 +1272,7 @@ static ssize_t pktgen_if_write(struct file *file,
"MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, "
"MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, "
"QUEUE_MAP_RND, QUEUE_MAP_CPU, UDPCSUM, "
+ "NO_TIMESTAMP, "
#ifdef CONFIG_XFRM
"IPSEC, "
#endif
@@ -2048,15 +2070,15 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
ntxq = pkt_dev->odev->real_num_tx_queues;
if (ntxq <= pkt_dev->queue_map_min) {
- pr_warning("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
- pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
- pkt_dev->odevname);
+ pr_warn("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
+ pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
+ pkt_dev->odevname);
pkt_dev->queue_map_min = (ntxq ?: 1) - 1;
}
if (pkt_dev->queue_map_max >= ntxq) {
- pr_warning("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
- pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
- pkt_dev->odevname);
+ pr_warn("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
+ pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
+ pkt_dev->odevname);
pkt_dev->queue_map_max = (ntxq ?: 1) - 1;
}
@@ -2685,9 +2707,14 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
pgh->pgh_magic = htonl(PKTGEN_MAGIC);
pgh->seq_num = htonl(pkt_dev->seq_num);
- do_gettimeofday(&timestamp);
- pgh->tv_sec = htonl(timestamp.tv_sec);
- pgh->tv_usec = htonl(timestamp.tv_usec);
+ if (pkt_dev->flags & F_NO_TIMESTAMP) {
+ pgh->tv_sec = 0;
+ pgh->tv_usec = 0;
+ } else {
+ do_gettimeofday(&timestamp);
+ pgh->tv_sec = htonl(timestamp.tv_sec);
+ pgh->tv_usec = htonl(timestamp.tv_usec);
+ }
}
static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
@@ -3160,8 +3187,8 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;
if (!pkt_dev->running) {
- pr_warning("interface: %s is already stopped\n",
- pkt_dev->odevname);
+ pr_warn("interface: %s is already stopped\n",
+ pkt_dev->odevname);
return -EINVAL;
}
@@ -3284,11 +3311,9 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
+ unsigned int burst = ACCESS_ONCE(pkt_dev->burst);
struct net_device *odev = pkt_dev->odev;
- netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *)
- = odev->netdev_ops->ndo_start_xmit;
struct netdev_queue *txq;
- u16 queue_map;
int ret;
/* If device is offline, then don't send */
@@ -3326,8 +3351,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
if (pkt_dev->delay && pkt_dev->last_ok)
spin(pkt_dev, pkt_dev->next_tx);
- queue_map = skb_get_queue_mapping(pkt_dev->skb);
- txq = netdev_get_tx_queue(odev, queue_map);
+ txq = skb_get_tx_queue(odev, pkt_dev->skb);
local_bh_disable();
@@ -3338,16 +3362,19 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->last_ok = 0;
goto unlock;
}
- atomic_inc(&(pkt_dev->skb->users));
- ret = (*xmit)(pkt_dev->skb, odev);
+ atomic_add(burst, &pkt_dev->skb->users);
+
+xmit_more:
+ ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
switch (ret) {
case NETDEV_TX_OK:
- txq_trans_update(txq);
pkt_dev->last_ok = 1;
pkt_dev->sofar++;
pkt_dev->seq_num++;
pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
+ if (burst > 0 && !netif_xmit_frozen_or_drv_stopped(txq))
+ goto xmit_more;
break;
case NET_XMIT_DROP:
case NET_XMIT_CN:
@@ -3366,6 +3393,8 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
atomic_dec(&(pkt_dev->skb->users));
pkt_dev->last_ok = 0;
}
+ if (unlikely(burst))
+ atomic_sub(burst, &pkt_dev->skb->users);
unlock:
HARD_TX_UNLOCK(odev, txq);
@@ -3564,6 +3593,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->svlan_p = 0;
pkt_dev->svlan_cfi = 0;
pkt_dev->svlan_id = 0xffff;
+ pkt_dev->burst = 1;
pkt_dev->node = -1;
err = pktgen_setup_dev(t->net, pkt_dev, ifname);
@@ -3684,7 +3714,7 @@ static int pktgen_remove_device(struct pktgen_thread *t,
pr_debug("remove_device pkt_dev=%p\n", pkt_dev);
if (pkt_dev->running) {
- pr_warning("WARNING: trying to remove a running interface, stopping it now\n");
+ pr_warn("WARNING: trying to remove a running interface, stopping it now\n");
pktgen_stop_device(pkt_dev);
}
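The burst path above leans on the new xmit_more hint: netdev_start_xmit(skb, dev, txq, more) sets skb->xmit_more before calling the driver, telling it another frame follows immediately. A hedged sketch of how a driver typically honours the hint (the foo_* helpers are hypothetical):

static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct netdev_queue *txq = skb_get_tx_queue(dev, skb);

	foo_post_descriptor(dev, skb);		/* hypothetical */

	/* Defer the (expensive) doorbell write while more frames are
	 * queued behind this one, unless the queue stalled meanwhile.
	 */
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		foo_ring_doorbell(dev);		/* hypothetical */

	return NETDEV_TX_OK;
}

pktgen correspondingly stops bursting once netif_xmit_frozen_or_drv_stopped() reports a stalled queue, and drops the surplus skb->users references if the loop exits early.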
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f0493e3b7471..a6882686ca3a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1481,9 +1481,12 @@ static int do_set_master(struct net_device *dev, int ifindex)
return 0;
}
+#define DO_SETLINK_MODIFIED 0x01
+/* notify flag means notify + modified. */
+#define DO_SETLINK_NOTIFY 0x03
static int do_setlink(const struct sk_buff *skb,
struct net_device *dev, struct ifinfomsg *ifm,
- struct nlattr **tb, char *ifname, int modified)
+ struct nlattr **tb, char *ifname, int status)
{
const struct net_device_ops *ops = dev->netdev_ops;
int err;
@@ -1502,7 +1505,7 @@ static int do_setlink(const struct sk_buff *skb,
put_net(net);
if (err)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_MODIFIED;
}
if (tb[IFLA_MAP]) {
@@ -1531,7 +1534,7 @@ static int do_setlink(const struct sk_buff *skb,
if (err < 0)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_NOTIFY;
}
if (tb[IFLA_ADDRESS]) {
@@ -1551,19 +1554,19 @@ static int do_setlink(const struct sk_buff *skb,
kfree(sa);
if (err)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_MODIFIED;
}
if (tb[IFLA_MTU]) {
err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
if (err < 0)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_MODIFIED;
}
if (tb[IFLA_GROUP]) {
dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
- modified = 1;
+ status |= DO_SETLINK_NOTIFY;
}
/*
@@ -1575,7 +1578,7 @@ static int do_setlink(const struct sk_buff *skb,
err = dev_change_name(dev, ifname);
if (err < 0)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_MODIFIED;
}
if (tb[IFLA_IFALIAS]) {
@@ -1583,7 +1586,7 @@ static int do_setlink(const struct sk_buff *skb,
nla_len(tb[IFLA_IFALIAS]));
if (err < 0)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_NOTIFY;
}
if (tb[IFLA_BROADCAST]) {
@@ -1601,25 +1604,35 @@ static int do_setlink(const struct sk_buff *skb,
err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
if (err)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_MODIFIED;
}
if (tb[IFLA_CARRIER]) {
err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
if (err)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_MODIFIED;
}
- if (tb[IFLA_TXQLEN])
- dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
+ if (tb[IFLA_TXQLEN]) {
+ unsigned long value = nla_get_u32(tb[IFLA_TXQLEN]);
+
+ if (dev->tx_queue_len ^ value)
+ status |= DO_SETLINK_NOTIFY;
+
+ dev->tx_queue_len = value;
+ }
if (tb[IFLA_OPERSTATE])
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
if (tb[IFLA_LINKMODE]) {
+ unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
+
write_lock_bh(&dev_base_lock);
- dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+ if (dev->link_mode ^ value)
+ status |= DO_SETLINK_NOTIFY;
+ dev->link_mode = value;
write_unlock_bh(&dev_base_lock);
}
@@ -1634,7 +1647,7 @@ static int do_setlink(const struct sk_buff *skb,
err = do_setvfinfo(dev, attr);
if (err < 0)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_NOTIFY;
}
}
err = 0;
@@ -1664,7 +1677,7 @@ static int do_setlink(const struct sk_buff *skb,
err = ops->ndo_set_vf_port(dev, vf, port);
if (err < 0)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_NOTIFY;
}
}
err = 0;
@@ -1682,7 +1695,7 @@ static int do_setlink(const struct sk_buff *skb,
err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
if (err < 0)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_NOTIFY;
}
if (tb[IFLA_AF_SPEC]) {
@@ -1699,15 +1712,20 @@ static int do_setlink(const struct sk_buff *skb,
if (err < 0)
goto errout;
- modified = 1;
+ status |= DO_SETLINK_NOTIFY;
}
}
err = 0;
errout:
- if (err < 0 && modified)
- net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
- dev->name);
+ if (status & DO_SETLINK_MODIFIED) {
+ if (status & DO_SETLINK_NOTIFY)
+ netdev_state_change(dev);
+
+ if (err < 0)
+ net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
+ dev->name);
+ }
return err;
}
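The split between the two status bits looks deliberate: attributes whose setters already emit their own notification only mark MODIFIED, while attributes that change silently also mark NOTIFY so do_setlink() fires netdev_state_change() exactly once at the end. In outline:

/* Sketch of the convention used above:
 *
 *   status |= DO_SETLINK_MODIFIED;  - something changed, but the helper
 *                                     (dev_set_mtu(), dev_change_name(),
 *                                     ...) already notified userspace
 *   status |= DO_SETLINK_NOTIFY;    - something changed silently, so
 *                                     do_setlink() still owes a single
 *                                     netdev_state_change(dev) at the end
 */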
@@ -1989,7 +2007,7 @@ replay:
}
if (dev) {
- int modified = 0;
+ int status = 0;
if (nlh->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
@@ -2004,7 +2022,7 @@ replay:
err = ops->changelink(dev, tb, data);
if (err < 0)
return err;
- modified = 1;
+ status |= DO_SETLINK_NOTIFY;
}
if (linkinfo[IFLA_INFO_SLAVE_DATA]) {
@@ -2015,10 +2033,10 @@ replay:
tb, slave_data);
if (err < 0)
return err;
- modified = 1;
+ status |= DO_SETLINK_NOTIFY;
}
- return do_setlink(skb, dev, ifm, tb, ifname, modified);
+ return do_setlink(skb, dev, ifm, tb, ifname, status);
}
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index ba71212f0251..51dd3193a33e 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -35,7 +35,7 @@ static u32 seq_scale(u32 seq)
* overlaps less than one time per MSL (2 minutes).
* Choosing a clock of 64 ns period is OK. (period of 274 s)
*/
- return seq + (ktime_to_ns(ktime_get_real()) >> 6);
+ return seq + (ktime_get_real_ns() >> 6);
}
#endif
@@ -135,7 +135,7 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
md5_transform(hash, net_secret);
seq = hash[0] | (((u64)hash[1]) << 32);
- seq += ktime_to_ns(ktime_get_real());
+ seq += ktime_get_real_ns();
seq &= (1ull << 48) - 1;
return seq;
@@ -163,7 +163,7 @@ u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
md5_transform(hash, secret);
seq = hash[0] | (((u64)hash[1]) << 32);
- seq += ktime_to_ns(ktime_get_real());
+ seq += ktime_get_real_ns();
seq &= (1ull << 48) - 1;
return seq;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8d289697cc7a..7b3df0d518ab 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -257,16 +257,16 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
kmemcheck_annotate_variable(shinfo->destructor_arg);
if (flags & SKB_ALLOC_FCLONE) {
- struct sk_buff *child = skb + 1;
- atomic_t *fclone_ref = (atomic_t *) (child + 1);
+ struct sk_buff_fclones *fclones;
- kmemcheck_annotate_bitfield(child, flags1);
- kmemcheck_annotate_bitfield(child, flags2);
+ fclones = container_of(skb, struct sk_buff_fclones, skb1);
+
+ kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
skb->fclone = SKB_FCLONE_ORIG;
- atomic_set(fclone_ref, 1);
+ atomic_set(&fclones->fclone_ref, 1);
- child->fclone = SKB_FCLONE_UNAVAILABLE;
- child->pfmemalloc = pfmemalloc;
+ fclones->skb2.fclone = SKB_FCLONE_FREE;
+ fclones->skb2.pfmemalloc = pfmemalloc;
}
out:
return skb;
@@ -491,32 +491,33 @@ static void skb_free_head(struct sk_buff *skb)
static void skb_release_data(struct sk_buff *skb)
{
- if (!skb->cloned ||
- !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
- &skb_shinfo(skb)->dataref)) {
- if (skb_shinfo(skb)->nr_frags) {
- int i;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- skb_frag_unref(skb, i);
- }
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ int i;
- /*
- * If skb buf is from userspace, we need to notify the caller
- * the lower device DMA has done;
- */
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- struct ubuf_info *uarg;
+ if (skb->cloned &&
+ atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
+ &shinfo->dataref))
+ return;
- uarg = skb_shinfo(skb)->destructor_arg;
- if (uarg->callback)
- uarg->callback(uarg, true);
- }
+ for (i = 0; i < shinfo->nr_frags; i++)
+ __skb_frag_unref(&shinfo->frags[i]);
- if (skb_has_frag_list(skb))
- skb_drop_fraglist(skb);
+ /*
+ * If skb buf is from userspace, we need to notify the caller
+ * that the lower device DMA has finished;
+ */
+ if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
+ struct ubuf_info *uarg;
- skb_free_head(skb);
+ uarg = shinfo->destructor_arg;
+ if (uarg->callback)
+ uarg->callback(uarg, true);
}
+
+ if (shinfo->frag_list)
+ kfree_skb_list(shinfo->frag_list);
+
+ skb_free_head(skb);
}
/*
@@ -524,8 +525,7 @@ static void skb_release_data(struct sk_buff *skb)
*/
static void kfree_skbmem(struct sk_buff *skb)
{
- struct sk_buff *other;
- atomic_t *fclone_ref;
+ struct sk_buff_fclones *fclones;
switch (skb->fclone) {
case SKB_FCLONE_UNAVAILABLE:
@@ -533,22 +533,28 @@ static void kfree_skbmem(struct sk_buff *skb)
break;
case SKB_FCLONE_ORIG:
- fclone_ref = (atomic_t *) (skb + 2);
- if (atomic_dec_and_test(fclone_ref))
- kmem_cache_free(skbuff_fclone_cache, skb);
+ fclones = container_of(skb, struct sk_buff_fclones, skb1);
+ if (atomic_dec_and_test(&fclones->fclone_ref))
+ kmem_cache_free(skbuff_fclone_cache, fclones);
break;
case SKB_FCLONE_CLONE:
- fclone_ref = (atomic_t *) (skb + 1);
- other = skb - 1;
+ fclones = container_of(skb, struct sk_buff_fclones, skb2);
- /* The clone portion is available for
- * fast-cloning again.
+ /* Warning : We must perform the atomic_dec_and_test() before
+ * setting skb->fclone back to SKB_FCLONE_FREE, otherwise
+ * skb_clone() could set clone_ref to 2 before our decrement.
+ * Anyway, if we are going to free the structure, no need to
+ * rewrite skb->fclone.
*/
- skb->fclone = SKB_FCLONE_UNAVAILABLE;
-
- if (atomic_dec_and_test(fclone_ref))
- kmem_cache_free(skbuff_fclone_cache, other);
+ if (atomic_dec_and_test(&fclones->fclone_ref)) {
+ kmem_cache_free(skbuff_fclone_cache, fclones);
+ } else {
+ /* The clone portion is available for
+ * fast-cloning again.
+ */
+ skb->fclone = SKB_FCLONE_FREE;
+ }
break;
}
}
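All of the container_of() arithmetic above resolves against the helper structure this series adds to <linux/skbuff.h>; quoted from the companion header change for reference (field comments paraphrased):

struct sk_buff_fclones {
	struct sk_buff	skb1;		/* the original skb */
	struct sk_buff	skb2;		/* its pre-allocated fast clone */
	atomic_t	fclone_ref;	/* refcount shared by both skbs */
};

container_of(skb, struct sk_buff_fclones, skb1) (or skb2) recovers the triplet from either buffer, replacing the fragile 'skb + 1' / 'skb + 2' pointer arithmetic the old code relied on.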
@@ -566,7 +572,7 @@ static void skb_release_head_state(struct sk_buff *skb)
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
nf_conntrack_put(skb->nfct);
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
@@ -674,57 +680,61 @@ void consume_skb(struct sk_buff *skb)
}
EXPORT_SYMBOL(consume_skb);
+/* Make sure a field is enclosed inside headers_start/headers_end section */
+#define CHECK_SKB_FIELD(field) \
+ BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
+ offsetof(struct sk_buff, headers_start)); \
+ BUILD_BUG_ON(offsetof(struct sk_buff, field) > \
+ offsetof(struct sk_buff, headers_end)); \
+
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
new->tstamp = old->tstamp;
+ /* We do not copy old->sk */
new->dev = old->dev;
- new->transport_header = old->transport_header;
- new->network_header = old->network_header;
- new->mac_header = old->mac_header;
- new->inner_protocol = old->inner_protocol;
- new->inner_transport_header = old->inner_transport_header;
- new->inner_network_header = old->inner_network_header;
- new->inner_mac_header = old->inner_mac_header;
+ memcpy(new->cb, old->cb, sizeof(old->cb));
skb_dst_copy(new, old);
- skb_copy_hash(new, old);
- new->ooo_okay = old->ooo_okay;
- new->no_fcs = old->no_fcs;
- new->encapsulation = old->encapsulation;
- new->encap_hdr_csum = old->encap_hdr_csum;
- new->csum_valid = old->csum_valid;
- new->csum_complete_sw = old->csum_complete_sw;
#ifdef CONFIG_XFRM
new->sp = secpath_get(old->sp);
#endif
- memcpy(new->cb, old->cb, sizeof(old->cb));
- new->csum = old->csum;
- new->ignore_df = old->ignore_df;
- new->pkt_type = old->pkt_type;
- new->ip_summed = old->ip_summed;
- skb_copy_queue_mapping(new, old);
- new->priority = old->priority;
-#if IS_ENABLED(CONFIG_IP_VS)
- new->ipvs_property = old->ipvs_property;
+ __nf_copy(new, old, false);
+
+ /* Note: this field could be in the headers_start/headers_end section.
+ * It is not there yet because we do not want to have a 16 bit hole.
+ */
+ new->queue_mapping = old->queue_mapping;
+
+ memcpy(&new->headers_start, &old->headers_start,
+ offsetof(struct sk_buff, headers_end) -
+ offsetof(struct sk_buff, headers_start));
+ CHECK_SKB_FIELD(protocol);
+ CHECK_SKB_FIELD(csum);
+ CHECK_SKB_FIELD(hash);
+ CHECK_SKB_FIELD(priority);
+ CHECK_SKB_FIELD(skb_iif);
+ CHECK_SKB_FIELD(vlan_proto);
+ CHECK_SKB_FIELD(vlan_tci);
+ CHECK_SKB_FIELD(transport_header);
+ CHECK_SKB_FIELD(network_header);
+ CHECK_SKB_FIELD(mac_header);
+ CHECK_SKB_FIELD(inner_protocol);
+ CHECK_SKB_FIELD(inner_transport_header);
+ CHECK_SKB_FIELD(inner_network_header);
+ CHECK_SKB_FIELD(inner_mac_header);
+ CHECK_SKB_FIELD(mark);
+#ifdef CONFIG_NETWORK_SECMARK
+ CHECK_SKB_FIELD(secmark);
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ CHECK_SKB_FIELD(napi_id);
#endif
- new->pfmemalloc = old->pfmemalloc;
- new->protocol = old->protocol;
- new->mark = old->mark;
- new->skb_iif = old->skb_iif;
- __nf_copy(new, old);
#ifdef CONFIG_NET_SCHED
- new->tc_index = old->tc_index;
+ CHECK_SKB_FIELD(tc_index);
#ifdef CONFIG_NET_CLS_ACT
- new->tc_verd = old->tc_verd;
+ CHECK_SKB_FIELD(tc_verd);
#endif
#endif
- new->vlan_proto = old->vlan_proto;
- new->vlan_tci = old->vlan_tci;
- skb_copy_secmark(new, old);
-
-#ifdef CONFIG_NET_RX_BUSY_POLL
- new->napi_id = old->napi_id;
-#endif
}
/*
@@ -855,17 +865,22 @@ EXPORT_SYMBOL_GPL(skb_copy_ubufs);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
- struct sk_buff *n;
+ struct sk_buff_fclones *fclones = container_of(skb,
+ struct sk_buff_fclones,
+ skb1);
+ struct sk_buff *n = &fclones->skb2;
if (skb_orphan_frags(skb, gfp_mask))
return NULL;
- n = skb + 1;
if (skb->fclone == SKB_FCLONE_ORIG &&
- n->fclone == SKB_FCLONE_UNAVAILABLE) {
- atomic_t *fclone_ref = (atomic_t *) (n + 1);
+ n->fclone == SKB_FCLONE_FREE) {
n->fclone = SKB_FCLONE_CLONE;
- atomic_inc(fclone_ref);
+ /* As our fastclone was free, clone_ref must be 1 at this point.
+ * We could use atomic_inc() here, but it is faster
+ * to set the final value.
+ */
+ atomic_set(&fclones->fclone_ref, 2);
} else {
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
@@ -875,7 +890,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
return NULL;
kmemcheck_annotate_bitfield(n, flags1);
- kmemcheck_annotate_bitfield(n, flags2);
n->fclone = SKB_FCLONE_UNAVAILABLE;
}
@@ -3069,6 +3083,11 @@ perform_csum_check:
}
} while ((offset += len) < head_skb->len);
+ /* Some callers want to get the end of the list.
+ * Put it in segs->prev to avoid walking the list.
+ * (see validate_xmit_skb_list() for example)
+ */
+ segs->prev = tail;
return segs;
err:
@@ -3182,7 +3201,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
skb_shinfo(nskb)->frag_list = p;
skb_shinfo(nskb)->gso_size = pinfo->gso_size;
pinfo->gso_size = 0;
- skb_header_release(p);
+ __skb_header_release(p);
NAPI_GRO_CB(nskb)->last = p;
nskb->data_len += p->len;
@@ -3214,7 +3233,7 @@ merge:
else
NAPI_GRO_CB(p)->last->next = skb;
NAPI_GRO_CB(p)->last = skb;
- skb_header_release(skb);
+ __skb_header_release(skb);
lp = p;
done:
@@ -3230,7 +3249,6 @@ done:
NAPI_GRO_CB(skb)->same_flow = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(skb_gro_receive);
void __init skb_init(void)
{
@@ -3240,8 +3258,7 @@ void __init skb_init(void)
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
- (2*sizeof(struct sk_buff)) +
- sizeof(atomic_t),
+ sizeof(struct sk_buff_fclones),
0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
@@ -3494,32 +3511,66 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_queue_err_skb);
-void __skb_tstamp_tx(struct sk_buff *orig_skb,
- struct skb_shared_hwtstamps *hwtstamps,
- struct sock *sk, int tstype)
+struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
{
- struct sock_exterr_skb *serr;
- struct sk_buff *skb;
- int err;
+ struct sk_buff_head *q = &sk->sk_error_queue;
+ struct sk_buff *skb, *skb_next;
+ int err = 0;
- if (!sk)
- return;
+ spin_lock_bh(&q->lock);
+ skb = __skb_dequeue(q);
+ if (skb && (skb_next = skb_peek(q)))
+ err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
+ spin_unlock_bh(&q->lock);
- if (hwtstamps) {
- *skb_hwtstamps(orig_skb) =
- *hwtstamps;
- } else {
- /*
- * no hardware time stamps available,
- * so keep the shared tx_flags and only
- * store software time stamp
- */
- orig_skb->tstamp = ktime_get_real();
+ sk->sk_err = err;
+ if (err)
+ sk->sk_error_report(sk);
+
+ return skb;
+}
+EXPORT_SYMBOL(sock_dequeue_err_skb);
+
+/**
+ * skb_clone_sk - create clone of skb, and take reference to socket
+ * @skb: the skb to clone
+ *
+ * This function creates a clone of a buffer that holds a reference on
+ * sk_refcnt. Buffers created via this function are meant to be
+ * returned using sock_queue_err_skb, or freed via kfree_skb.
+ *
+ * When passing buffers allocated with this function to sock_queue_err_skb
+ * it is necessary to wrap the call with sock_hold/sock_put in order to
+ * prevent the socket from being released prior to being enqueued on
+ * the sk_error_queue.
+ */
+struct sk_buff *skb_clone_sk(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct sk_buff *clone;
+
+ if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
+ return NULL;
+
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (!clone) {
+ sock_put(sk);
+ return NULL;
}
- skb = skb_clone(orig_skb, GFP_ATOMIC);
- if (!skb)
- return;
+ clone->sk = sk;
+ clone->destructor = sock_efree;
+
+ return clone;
+}
+EXPORT_SYMBOL(skb_clone_sk);
+
+static void __skb_complete_tx_timestamp(struct sk_buff *skb,
+ struct sock *sk,
+ int tstype)
+{
+ struct sock_exterr_skb *serr;
+ int err;
serr = SKB_EXT_ERR(skb);
memset(serr, 0, sizeof(*serr));
@@ -3537,6 +3588,42 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
if (err)
kfree_skb(skb);
}
+
+void skb_complete_tx_timestamp(struct sk_buff *skb,
+ struct skb_shared_hwtstamps *hwtstamps)
+{
+ struct sock *sk = skb->sk;
+
+ /* take a reference to prevent skb_orphan() from freeing the socket */
+ sock_hold(sk);
+
+ *skb_hwtstamps(skb) = *hwtstamps;
+ __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+
+ sock_put(sk);
+}
+EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
+
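A hedged sketch of the driver flow these helpers are designed for (the foo_* names and the single-slot bookkeeping are hypothetical):

struct foo_priv {
	struct sk_buff *tstamp_skb;	/* awaiting a hardware stamp */
};

static void foo_tx(struct foo_priv *priv, struct sk_buff *skb)
{
	/* Clone while holding a socket reference so the socket cannot
	 * go away before the hardware reports the transmit time.
	 */
	priv->tstamp_skb = skb_clone_sk(skb);	/* may be NULL */
}

static void foo_tx_complete_irq(struct foo_priv *priv, u64 ns)
{
	struct skb_shared_hwtstamps hwts = {};

	if (!priv->tstamp_skb)
		return;

	hwts.hwtstamp = ns_to_ktime(ns);
	/* Queues the clone on the socket error queue and releases the
	 * socket reference taken by skb_clone_sk().
	 */
	skb_complete_tx_timestamp(priv->tstamp_skb, &hwts);
	priv->tstamp_skb = NULL;
}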
+void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ struct skb_shared_hwtstamps *hwtstamps,
+ struct sock *sk, int tstype)
+{
+ struct sk_buff *skb;
+
+ if (!sk)
+ return;
+
+ if (hwtstamps)
+ *skb_hwtstamps(orig_skb) = *hwtstamps;
+ else
+ orig_skb->tstamp = ktime_get_real();
+
+ skb = skb_clone(orig_skb, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ __skb_complete_tx_timestamp(skb, sk, tstype);
+}
EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
void skb_tstamp_tx(struct sk_buff *orig_skb,
@@ -3561,9 +3648,14 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
serr->ee.ee_errno = ENOMSG;
serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
+ /* take a reference to prevent skb_orphan() from freeing the socket */
+ sock_hold(sk);
+
err = sock_queue_err_skb(sk, skb);
if (err)
kfree_skb(skb);
+
+ sock_put(sk);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
@@ -3864,7 +3956,8 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
return false;
if (len <= skb_tailroom(to)) {
- BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
+ if (len)
+ BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
*delta_truesize = 0;
return true;
}
@@ -4029,3 +4122,81 @@ err_free:
return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);
+
+/**
+ * alloc_skb_with_frags - allocate skb with page frags
+ *
+ * @header_len: size of linear part
+ * @data_len: needed length in frags
+ * @max_page_order: max page order desired.
+ * @errcode: pointer to error code if any
+ * @gfp_mask: allocation mask
+ *
+ * This can be used to allocate a paged skb, given a maximal order for frags.
+ */
+struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
+ unsigned long data_len,
+ int max_page_order,
+ int *errcode,
+ gfp_t gfp_mask)
+{
+ int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ unsigned long chunk;
+ struct sk_buff *skb;
+ struct page *page;
+ gfp_t gfp_head;
+ int i;
+
+ *errcode = -EMSGSIZE;
+ /* Note: this test could be relaxed if we succeed in allocating
+ * high order pages...
+ */
+ if (npages > MAX_SKB_FRAGS)
+ return NULL;
+
+ gfp_head = gfp_mask;
+ if (gfp_head & __GFP_WAIT)
+ gfp_head |= __GFP_REPEAT;
+
+ *errcode = -ENOBUFS;
+ skb = alloc_skb(header_len, gfp_head);
+ if (!skb)
+ return NULL;
+
+ skb->truesize += npages << PAGE_SHIFT;
+
+ for (i = 0; npages > 0; i++) {
+ int order = max_page_order;
+
+ while (order) {
+ if (npages >= 1 << order) {
+ page = alloc_pages(gfp_mask |
+ __GFP_COMP |
+ __GFP_NOWARN |
+ __GFP_NORETRY,
+ order);
+ if (page)
+ goto fill_page;
+ /* Do not retry other high order allocations */
+ order = 1;
+ max_page_order = 0;
+ }
+ order--;
+ }
+ page = alloc_page(gfp_mask);
+ if (!page)
+ goto failure;
+fill_page:
+ chunk = min_t(unsigned long, data_len,
+ PAGE_SIZE << order);
+ skb_fill_page_desc(skb, i, page, 0, chunk);
+ data_len -= chunk;
+ npages -= 1 << order;
+ }
+ return skb;
+
+failure:
+ kfree_skb(skb);
+ return NULL;
+}
+EXPORT_SYMBOL(alloc_skb_with_frags);
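A minimal usage sketch, assuming process context and GFP_KERNEL (the sizes are illustrative):

static struct sk_buff *build_big_skb(void)
{
	int err;
	struct sk_buff *skb;

	skb = alloc_skb_with_frags(128,		/* linear part	  */
				   64 * 1024,	/* in page frags  */
				   PAGE_ALLOC_COSTLY_ORDER,
				   &err, GFP_KERNEL);
	return skb ? skb : ERR_PTR(err);
}

sock_alloc_send_pskb() below becomes the first user, shrinking to the sndbuf wait loop plus a single call to this helper.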
diff --git a/net/core/sock.c b/net/core/sock.c
index 9c3f823e76a9..e5ad7d31c3c2 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -437,7 +437,6 @@ static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err;
- int skb_len;
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
@@ -459,13 +458,6 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
skb->dev = NULL;
skb_set_owner_r(skb, sk);
- /* Cache the SKB length before we tack it onto the receive
- * queue. Once it is added it no longer belongs to us and
- * may be freed by other threads of control pulling packets
- * from the queue.
- */
- skb_len = skb->len;
-
/* we escape from rcu protected region, make sure we dont leak
* a norefcounted dst
*/
@@ -1645,18 +1637,24 @@ void sock_rfree(struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_rfree);
+void sock_efree(struct sk_buff *skb)
+{
+ sock_put(skb->sk);
+}
+EXPORT_SYMBOL(sock_efree);
+
+#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
-#ifdef CONFIG_INET
if (sk->sk_state == TCP_TIME_WAIT)
inet_twsk_put(inet_twsk(sk));
else
-#endif
sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);
+#endif
kuid_t sock_i_uid(struct sock *sk)
{
@@ -1764,21 +1762,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
unsigned long data_len, int noblock,
int *errcode, int max_page_order)
{
- struct sk_buff *skb = NULL;
- unsigned long chunk;
- gfp_t gfp_mask;
+ struct sk_buff *skb;
long timeo;
int err;
- int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- struct page *page;
- int i;
-
- err = -EMSGSIZE;
- if (npages > MAX_SKB_FRAGS)
- goto failure;
timeo = sock_sndtimeo(sk, noblock);
- while (!skb) {
+ for (;;) {
err = sock_error(sk);
if (err != 0)
goto failure;
@@ -1787,66 +1776,27 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto failure;
- if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- err = -EAGAIN;
- if (!timeo)
- goto failure;
- if (signal_pending(current))
- goto interrupted;
- timeo = sock_wait_for_wmem(sk, timeo);
- continue;
- }
-
- err = -ENOBUFS;
- gfp_mask = sk->sk_allocation;
- if (gfp_mask & __GFP_WAIT)
- gfp_mask |= __GFP_REPEAT;
+ if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+ break;
- skb = alloc_skb(header_len, gfp_mask);
- if (!skb)
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ err = -EAGAIN;
+ if (!timeo)
goto failure;
-
- skb->truesize += data_len;
-
- for (i = 0; npages > 0; i++) {
- int order = max_page_order;
-
- while (order) {
- if (npages >= 1 << order) {
- page = alloc_pages(sk->sk_allocation |
- __GFP_COMP |
- __GFP_NOWARN |
- __GFP_NORETRY,
- order);
- if (page)
- goto fill_page;
- /* Do not retry other high order allocations */
- order = 1;
- max_page_order = 0;
- }
- order--;
- }
- page = alloc_page(sk->sk_allocation);
- if (!page)
- goto failure;
-fill_page:
- chunk = min_t(unsigned long, data_len,
- PAGE_SIZE << order);
- skb_fill_page_desc(skb, i, page, 0, chunk);
- data_len -= chunk;
- npages -= 1 << order;
- }
+ if (signal_pending(current))
+ goto interrupted;
+ timeo = sock_wait_for_wmem(sk, timeo);
}
-
- skb_set_owner_w(skb, sk);
+ skb = alloc_skb_with_frags(header_len, data_len, max_page_order,
+ errcode, sk->sk_allocation);
+ if (skb)
+ skb_set_owner_w(skb, sk);
return skb;
interrupted:
err = sock_intr_errno(timeo);
failure:
- kfree_skb(skb);
*errcode = err;
return NULL;
}
@@ -2498,11 +2448,11 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
int level, int type)
{
struct sock_exterr_skb *serr;
- struct sk_buff *skb, *skb2;
+ struct sk_buff *skb;
int copied, err;
err = -EAGAIN;
- skb = skb_dequeue(&sk->sk_error_queue);
+ skb = sock_dequeue_err_skb(sk);
if (skb == NULL)
goto out;
@@ -2523,16 +2473,6 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
msg->msg_flags |= MSG_ERRQUEUE;
err = copied;
- /* Reset and regenerate socket error */
- spin_lock_bh(&sk->sk_error_queue.lock);
- sk->sk_err = 0;
- if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
- sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_bh(&sk->sk_error_queue.lock);
- sk->sk_error_report(sk);
- } else
- spin_unlock_bh(&sk->sk_error_queue.lock);
-
out_free_skb:
kfree_skb(skb);
out:
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index a8770391ea5b..43d3dd62fcc8 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -36,10 +36,9 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
{
struct phy_device *phydev;
struct sk_buff *clone;
- struct sock *sk = skb->sk;
unsigned int type;
- if (!sk)
+ if (!skb->sk)
return;
type = classify(skb);
@@ -48,50 +47,14 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
phydev = skb->dev->phydev;
if (likely(phydev->drv->txtstamp)) {
- if (!atomic_inc_not_zero(&sk->sk_refcnt))
+ clone = skb_clone_sk(skb);
+ if (!clone)
return;
-
- clone = skb_clone(skb, GFP_ATOMIC);
- if (!clone) {
- sock_put(sk);
- return;
- }
-
- clone->sk = sk;
phydev->drv->txtstamp(phydev, clone, type);
}
}
EXPORT_SYMBOL_GPL(skb_clone_tx_timestamp);
-void skb_complete_tx_timestamp(struct sk_buff *skb,
- struct skb_shared_hwtstamps *hwtstamps)
-{
- struct sock *sk = skb->sk;
- struct sock_exterr_skb *serr;
- int err;
-
- if (!hwtstamps) {
- sock_put(sk);
- kfree_skb(skb);
- return;
- }
-
- *skb_hwtstamps(skb) = *hwtstamps;
-
- serr = SKB_EXT_ERR(skb);
- memset(serr, 0, sizeof(*serr));
- serr->ee.ee_errno = ENOMSG;
- serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
- skb->sk = NULL;
-
- err = sock_queue_err_skb(sk, skb);
-
- sock_put(sk);
- if (err)
- kfree_skb(skb);
-}
-EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
-
bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
struct phy_device *phydev;
diff --git a/net/core/utils.c b/net/core/utils.c
index eed34338736c..efc76dd9dcd1 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -306,16 +306,14 @@ EXPORT_SYMBOL(in6_pton);
void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
__be32 from, __be32 to, int pseudohdr)
{
- __be32 diff[] = { ~from, to };
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- *sum = csum_fold(csum_partial(diff, sizeof(diff),
- ~csum_unfold(*sum)));
+ *sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), from),
+ to));
if (skb->ip_summed == CHECKSUM_COMPLETE && pseudohdr)
- skb->csum = ~csum_partial(diff, sizeof(diff),
- ~skb->csum);
+ skb->csum = ~csum_add(csum_sub(~(skb->csum), from), to);
} else if (pseudohdr)
- *sum = ~csum_fold(csum_partial(diff, sizeof(diff),
- csum_unfold(*sum)));
+ *sum = ~csum_fold(csum_add(csum_sub(csum_unfold(*sum), from),
+ to));
}
EXPORT_SYMBOL(inet_proto_csum_replace4);
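The rewrite applies the ones'-complement identities directly instead of feeding a stack array through csum_partial(). With S the old folded checksum, the updated value is

	sum' = ~fold(~S - from + to)

where subtraction is csum_sub(a, b) == csum_add(a, ~b), all in end-around-carry arithmetic. Dropping the 8-byte diff[] array and the csum_partial() call makes this fixed-size update cheaper; the CHECKSUM_COMPLETE branch applies the same delta to skb->csum.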
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 597557254ddb..83498975165f 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -99,7 +99,7 @@ static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
kmem_cache_destroy(slab);
}
-static int ccid_activate(struct ccid_operations *ccid_ops)
+static int __init ccid_activate(struct ccid_operations *ccid_ops)
{
int err = -ENOBUFS;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 04cb17d4b0ce..ad2acfe1ca61 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -404,7 +404,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
- if (ipv6_opt_accepted(sk, skb) ||
+ if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
atomic_inc(&skb->users);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index de2c1e719305..1ad150ed57cf 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1082,7 +1082,7 @@ void dccp_shutdown(struct sock *sk, int how)
EXPORT_SYMBOL_GPL(dccp_shutdown);
-static inline int dccp_mib_init(void)
+static inline int __init dccp_mib_init(void)
{
dccp_statistics = alloc_percpu(struct dccp_mib);
if (!dccp_statistics)
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index ae011b46c071..25733d538147 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -127,6 +127,7 @@ Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
+#include <linux/jiffies.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
@@ -598,7 +599,7 @@ int dn_destroy_timer(struct sock *sk)
if (sk->sk_socket)
return 0;
- if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) {
+ if (time_after_eq(jiffies, scp->stamp + HZ * decnet_time_wait)) {
dn_unhash_sock(sk);
sock_put(sk);
return 1;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 3b726f31c64c..4400da7739da 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -41,6 +41,7 @@
#include <linux/sysctl.h>
#include <linux/notifier.h>
#include <linux/slab.h>
+#include <linux/jiffies.h>
#include <asm/uaccess.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
@@ -875,7 +876,7 @@ static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa)
{
/* First check time since device went up */
- if ((jiffies - dn_db->uptime) < DRDELAY)
+ if (time_before(jiffies, dn_db->uptime + DRDELAY))
return 0;
/* If there is no router, then yes... */
diff --git a/net/decnet/dn_timer.c b/net/decnet/dn_timer.c
index d9c150cc59a9..1d330fd43dc7 100644
--- a/net/decnet/dn_timer.c
+++ b/net/decnet/dn_timer.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <net/sock.h>
#include <linux/atomic.h>
+#include <linux/jiffies.h>
#include <net/flow.h>
#include <net/dn.h>
@@ -91,7 +92,7 @@ static void dn_slow_timer(unsigned long arg)
* since the last successful transmission.
*/
if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) {
- if ((jiffies - scp->stamp) >= scp->keepalive)
+ if (time_after_eq(jiffies, scp->stamp + scp->keepalive))
scp->keepalive_fxn(sk);
}
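All three conversions in the dccp/decnet hunks swap open-coded jiffies arithmetic for the comparison helpers from <linux/jiffies.h>. Paraphrasing those macros: the typecheck() lines reject non-unsigned-long operands at build time, and the subtract-then-signed-compare form stays correct across a jiffies wraparound for any interval below 2^31 ticks.

#define time_after(a, b)			\
	(typecheck(unsigned long, a) &&		\
	 typecheck(unsigned long, b) &&		\
	 ((long)((b) - (a)) < 0))

#define time_before(a, b)	time_after(b, a)

#define time_after_eq(a, b)			\
	(typecheck(unsigned long, a) &&		\
	 typecheck(unsigned long, b) &&		\
	 ((long)((a) - (b)) >= 0))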
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index f5eede1d6cb8..a585fd6352eb 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -12,6 +12,9 @@ config NET_DSA
if NET_DSA
# tagging formats
+config NET_DSA_TAG_BRCM
+ bool
+
config NET_DSA_TAG_DSA
bool
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 7b9fcbbeda5d..da06ed1df620 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_NET_DSA) += dsa_core.o
dsa_core-y += dsa.o slave.o
# tagging formats
+dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
dsa_core-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
dsa_core-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 0a49632fac47..22f34cf4cb27 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -10,7 +10,6 @@
*/
#include <linux/list.h>
-#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -44,7 +43,7 @@ void unregister_switch_driver(struct dsa_switch_driver *drv)
EXPORT_SYMBOL_GPL(unregister_switch_driver);
static struct dsa_switch_driver *
-dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name)
+dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name)
{
struct dsa_switch_driver *ret;
struct list_head *list;
@@ -59,7 +58,7 @@ dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name)
drv = list_entry(list, struct dsa_switch_driver, list);
- name = drv->probe(bus, sw_addr);
+ name = drv->probe(host_dev, sw_addr);
if (name != NULL) {
ret = drv;
break;
@@ -76,7 +75,7 @@ dsa_switch_probe(struct mii_bus *bus, int sw_addr, char **_name)
/* basic switch operations **************************************************/
static struct dsa_switch *
dsa_switch_setup(struct dsa_switch_tree *dst, int index,
- struct device *parent, struct mii_bus *bus)
+ struct device *parent, struct device *host_dev)
{
struct dsa_chip_data *pd = dst->pd->chip + index;
struct dsa_switch_driver *drv;
@@ -89,7 +88,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
/*
* Probe for switch model.
*/
- drv = dsa_switch_probe(bus, pd->sw_addr, &name);
+ drv = dsa_switch_probe(host_dev, pd->sw_addr, &name);
if (drv == NULL) {
printk(KERN_ERR "%s[%d]: could not detect attached switch\n",
dst->master_netdev->name, index);
@@ -110,8 +109,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
ds->index = index;
ds->pd = dst->pd->chip + index;
ds->drv = drv;
- ds->master_mii_bus = bus;
-
+ ds->master_dev = host_dev;
/*
* Validate supplied switch configuration.
@@ -144,14 +142,44 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
goto out;
}
+	/* Make the built-in MII bus mask match the number of ports;
+ * switch drivers can override this later
+ */
+ ds->phys_mii_mask = ds->phys_port_mask;
+
/*
* If the CPU connects to this switch, set the switch tree
* tagging protocol to the preferred tagging format of this
* switch.
*/
- if (ds->dst->cpu_switch == index)
- ds->dst->tag_protocol = drv->tag_protocol;
+ if (dst->cpu_switch == index) {
+ switch (drv->tag_protocol) {
+#ifdef CONFIG_NET_DSA_TAG_DSA
+ case DSA_TAG_PROTO_DSA:
+ dst->rcv = dsa_netdev_ops.rcv;
+ break;
+#endif
+#ifdef CONFIG_NET_DSA_TAG_EDSA
+ case DSA_TAG_PROTO_EDSA:
+ dst->rcv = edsa_netdev_ops.rcv;
+ break;
+#endif
+#ifdef CONFIG_NET_DSA_TAG_TRAILER
+ case DSA_TAG_PROTO_TRAILER:
+ dst->rcv = trailer_netdev_ops.rcv;
+ break;
+#endif
+#ifdef CONFIG_NET_DSA_TAG_BRCM
+ case DSA_TAG_PROTO_BRCM:
+ dst->rcv = brcm_netdev_ops.rcv;
+ break;
+#endif
+ default:
+ break;
+ }
+ dst->tag_protocol = drv->tag_protocol;
+ }
/*
* Do basic register setup.
@@ -210,6 +238,51 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
{
}
+#ifdef CONFIG_PM_SLEEP
+static int dsa_switch_suspend(struct dsa_switch *ds)
+{
+ int i, ret = 0;
+
+ /* Suspend slave network devices */
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ if (!(ds->phys_port_mask & (1 << i)))
+ continue;
+
+ ret = dsa_slave_suspend(ds->ports[i]);
+ if (ret)
+ return ret;
+ }
+
+ if (ds->drv->suspend)
+ ret = ds->drv->suspend(ds);
+
+ return ret;
+}
+
+static int dsa_switch_resume(struct dsa_switch *ds)
+{
+ int i, ret = 0;
+
+ if (ds->drv->resume)
+ ret = ds->drv->resume(ds);
+
+ if (ret)
+ return ret;
+
+ /* Resume slave network devices */
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ if (!(ds->phys_port_mask & (1 << i)))
+ continue;
+
+ ret = dsa_slave_resume(ds->ports[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
/* link polling *************************************************************/
static void dsa_link_poll_work(struct work_struct *ugly)
@@ -256,7 +329,7 @@ static struct device *dev_find_class(struct device *parent, char *class)
return device_find_child(parent, class, dev_is_class);
}
-static struct mii_bus *dev_to_mii_bus(struct device *dev)
+struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev)
{
struct device *d;
@@ -272,6 +345,7 @@ static struct mii_bus *dev_to_mii_bus(struct device *dev)
return NULL;
}
+EXPORT_SYMBOL_GPL(dsa_host_dev_to_mii_bus);
static struct net_device *dev_to_net_device(struct device *dev)
{
@@ -410,7 +484,8 @@ static int dsa_of_probe(struct platform_device *pdev)
chip_index++;
cd = &pd->chip[chip_index];
- cd->mii_bus = &mdio_bus->dev;
+ cd->of_node = child;
+ cd->host_dev = &mdio_bus->dev;
sw_addr = of_get_property(child, "reg", NULL);
if (!sw_addr)
@@ -431,6 +506,8 @@ static int dsa_of_probe(struct platform_device *pdev)
if (!port_name)
continue;
+ cd->port_dn[port_index] = port;
+
cd->port_names[port_index] = kstrdup(port_name,
GFP_KERNEL);
if (!cd->port_names[port_index]) {
@@ -534,17 +611,9 @@ static int dsa_probe(struct platform_device *pdev)
dst->cpu_port = -1;
for (i = 0; i < pd->nr_chips; i++) {
- struct mii_bus *bus;
struct dsa_switch *ds;
- bus = dev_to_mii_bus(pd->chip[i].mii_bus);
- if (bus == NULL) {
- printk(KERN_ERR "%s[%d]: no mii bus found for "
- "dsa switch\n", dev->name, i);
- continue;
- }
-
- ds = dsa_switch_setup(dst, i, &pdev->dev, bus);
+ ds = dsa_switch_setup(dst, i, &pdev->dev, pd->chip[i].host_dev);
if (IS_ERR(ds)) {
printk(KERN_ERR "%s[%d]: couldn't create dsa switch "
"instance (error %ld)\n", dev->name, i,
@@ -608,7 +677,62 @@ static void dsa_shutdown(struct platform_device *pdev)
{
}
+static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct dsa_switch_tree *dst = dev->dsa_ptr;
+
+ if (unlikely(dst == NULL)) {
+ kfree_skb(skb);
+ return 0;
+ }
+
+ return dst->rcv(skb, dev, pt, orig_dev);
+}
+
+static struct packet_type dsa_pack_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_XDSA),
+ .func = dsa_switch_rcv,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int dsa_suspend(struct device *d)
+{
+ struct platform_device *pdev = to_platform_device(d);
+ struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
+ int i, ret = 0;
+
+ for (i = 0; i < dst->pd->nr_chips; i++) {
+ struct dsa_switch *ds = dst->ds[i];
+
+ if (ds != NULL)
+ ret = dsa_switch_suspend(ds);
+ }
+
+ return ret;
+}
+
+static int dsa_resume(struct device *d)
+{
+ struct platform_device *pdev = to_platform_device(d);
+ struct dsa_switch_tree *dst = platform_get_drvdata(pdev);
+ int i, ret = 0;
+
+ for (i = 0; i < dst->pd->nr_chips; i++) {
+ struct dsa_switch *ds = dst->ds[i];
+
+ if (ds != NULL)
+ ret = dsa_switch_resume(ds);
+ }
+
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume);
+
static const struct of_device_id dsa_of_match_table[] = {
+ { .compatible = "brcm,bcm7445-switch-v4.0" },
{ .compatible = "marvell,dsa", },
{}
};
@@ -622,6 +746,7 @@ static struct platform_driver dsa_driver = {
.name = "dsa",
.owner = THIS_MODULE,
.of_match_table = dsa_of_match_table,
+ .pm = &dsa_pm_ops,
},
};
@@ -633,30 +758,15 @@ static int __init dsa_init_module(void)
if (rc)
return rc;
-#ifdef CONFIG_NET_DSA_TAG_DSA
- dev_add_pack(&dsa_packet_type);
-#endif
-#ifdef CONFIG_NET_DSA_TAG_EDSA
- dev_add_pack(&edsa_packet_type);
-#endif
-#ifdef CONFIG_NET_DSA_TAG_TRAILER
- dev_add_pack(&trailer_packet_type);
-#endif
+ dev_add_pack(&dsa_pack_type);
+
return 0;
}
module_init(dsa_init_module);
static void __exit dsa_cleanup_module(void)
{
-#ifdef CONFIG_NET_DSA_TAG_TRAILER
- dev_remove_pack(&trailer_packet_type);
-#endif
-#ifdef CONFIG_NET_DSA_TAG_EDSA
- dev_remove_pack(&edsa_packet_type);
-#endif
-#ifdef CONFIG_NET_DSA_TAG_DSA
- dev_remove_pack(&dsa_packet_type);
-#endif
+ dev_remove_pack(&dsa_pack_type);
platform_driver_unregister(&dsa_driver);
}
module_exit(dsa_cleanup_module);
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index d4cf5cc747e3..dc9756d3154c 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -12,7 +12,13 @@
#define __DSA_PRIV_H
#include <linux/phy.h>
-#include <net/dsa.h>
+#include <linux/netdevice.h>
+
+struct dsa_device_ops {
+ netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev);
+ int (*rcv)(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
+};
struct dsa_slave_priv {
/*
@@ -20,6 +26,8 @@ struct dsa_slave_priv {
* switch port.
*/
struct net_device *dev;
+ netdev_tx_t (*xmit)(struct sk_buff *skb,
+ struct net_device *dev);
/*
* Which switch this port is a part of, and the port index
@@ -33,28 +41,35 @@ struct dsa_slave_priv {
* to this port.
*/
struct phy_device *phy;
+ phy_interface_t phy_interface;
+ int old_link;
+ int old_pause;
+ int old_duplex;
};
/* dsa.c */
extern char dsa_driver_version[];
/* slave.c */
+extern const struct dsa_device_ops notag_netdev_ops;
void dsa_slave_mii_bus_init(struct dsa_switch *ds);
struct net_device *dsa_slave_create(struct dsa_switch *ds,
struct device *parent,
int port, char *name);
+int dsa_slave_suspend(struct net_device *slave_dev);
+int dsa_slave_resume(struct net_device *slave_dev);
/* tag_dsa.c */
-netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev);
-extern struct packet_type dsa_packet_type;
+extern const struct dsa_device_ops dsa_netdev_ops;
/* tag_edsa.c */
-netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev);
-extern struct packet_type edsa_packet_type;
+extern const struct dsa_device_ops edsa_netdev_ops;
/* tag_trailer.c */
-netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev);
-extern struct packet_type trailer_packet_type;
+extern const struct dsa_device_ops trailer_netdev_ops;
+
+/* tag_brcm.c */
+extern const struct dsa_device_ops brcm_netdev_ops;
#endif
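Under the new dsa_device_ops abstraction a tagging protocol is just an xmit/rcv pair. A hypothetical minimal tagger, mirroring the no-tag fallback that slave.c implements as dsa_slave_notag_xmit():

static netdev_tx_t notag_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);

	/* Hand the unmodified frame to the master interface. */
	skb->dev = p->parent->dst->master_netdev;
	dev_queue_xmit(skb);

	return NETDEV_TX_OK;
}

const struct dsa_device_ops notag_netdev_ops = {
	.xmit	= notag_xmit,
	/* no .rcv: untagged frames are never classified as ETH_P_XDSA */
};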
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 45a1e34c89e0..8030489d9cbe 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -9,9 +9,10 @@
*/
#include <linux/list.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
#include "dsa_priv.h"
/* slave mii_bus handling ***************************************************/
@@ -19,7 +20,7 @@ static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
struct dsa_switch *ds = bus->priv;
- if (ds->phys_port_mask & (1 << addr))
+ if (ds->phys_mii_mask & (1 << addr))
return ds->drv->phy_read(ds, addr, reg);
return 0xffff;
@@ -29,7 +30,7 @@ static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val)
{
struct dsa_switch *ds = bus->priv;
- if (ds->phys_port_mask & (1 << addr))
+ if (ds->phys_mii_mask & (1 << addr))
return ds->drv->phy_write(ds, addr, reg, val);
return 0;
@@ -43,7 +44,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
ds->slave_mii_bus->write = dsa_slave_phy_write;
snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
ds->index, ds->pd->sw_addr);
- ds->slave_mii_bus->parent = &ds->master_mii_bus->dev;
+ ds->slave_mii_bus->parent = ds->master_dev;
}
@@ -61,6 +62,7 @@ static int dsa_slave_open(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct net_device *master = p->parent->dst->master_netdev;
+ struct dsa_switch *ds = p->parent;
int err;
if (!(master->flags & IFF_UP))
@@ -83,8 +85,20 @@ static int dsa_slave_open(struct net_device *dev)
goto clear_allmulti;
}
+ if (ds->drv->port_enable) {
+ err = ds->drv->port_enable(ds, p->port, p->phy);
+ if (err)
+ goto clear_promisc;
+ }
+
+ if (p->phy)
+ phy_start(p->phy);
+
return 0;
+clear_promisc:
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(master, 0);
clear_allmulti:
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(master, -1);
@@ -99,6 +113,10 @@ static int dsa_slave_close(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct net_device *master = p->parent->dst->master_netdev;
+ struct dsa_switch *ds = p->parent;
+
+ if (p->phy)
+ phy_stop(p->phy);
dev_mc_unsync(master, dev);
dev_uc_unsync(master, dev);
@@ -110,6 +128,9 @@ static int dsa_slave_close(struct net_device *dev)
if (!ether_addr_equal(dev->dev_addr, master->dev_addr))
dev_uc_del(master, dev->dev_addr);
+ if (ds->drv->port_disable)
+ ds->drv->port_disable(ds, p->port, p->phy);
+
return 0;
}
@@ -171,6 +192,24 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
+static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+
+ return p->xmit(skb, dev);
+}
+
+static netdev_tx_t dsa_slave_notag_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+
+ skb->dev = p->parent->dst->master_netdev;
+ dev_queue_xmit(skb);
+
+ return NETDEV_TX_OK;
+}
+
/* ethtool operations *******************************************************/
static int
@@ -282,6 +321,65 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
return -EOPNOTSUPP;
}
+static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ if (ds->drv->get_wol)
+ ds->drv->get_wol(ds, p->port, w);
+}
+
+static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ int ret = -EOPNOTSUPP;
+
+ if (ds->drv->set_wol)
+ ret = ds->drv->set_wol(ds, p->port, w);
+
+ return ret;
+}
+
+static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ int ret;
+
+ if (!ds->drv->set_eee)
+ return -EOPNOTSUPP;
+
+ ret = ds->drv->set_eee(ds, p->port, p->phy, e);
+ if (ret)
+ return ret;
+
+ if (p->phy)
+ ret = phy_ethtool_set_eee(p->phy, e);
+
+ return ret;
+}
+
+static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ int ret;
+
+ if (!ds->drv->get_eee)
+ return -EOPNOTSUPP;
+
+ ret = ds->drv->get_eee(ds, p->port, e);
+ if (ret)
+ return ret;
+
+ if (p->phy)
+ ret = phy_ethtool_get_eee(p->phy, e);
+
+ return ret;
+}
+
static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_settings = dsa_slave_get_settings,
.set_settings = dsa_slave_set_settings,
@@ -291,46 +389,143 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_strings = dsa_slave_get_strings,
.get_ethtool_stats = dsa_slave_get_ethtool_stats,
.get_sset_count = dsa_slave_get_sset_count,
+ .set_wol = dsa_slave_set_wol,
+ .get_wol = dsa_slave_get_wol,
+ .set_eee = dsa_slave_set_eee,
+ .get_eee = dsa_slave_get_eee,
};
-#ifdef CONFIG_NET_DSA_TAG_DSA
-static const struct net_device_ops dsa_netdev_ops = {
+static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_init = dsa_slave_init,
.ndo_open = dsa_slave_open,
.ndo_stop = dsa_slave_close,
- .ndo_start_xmit = dsa_xmit,
+ .ndo_start_xmit = dsa_slave_xmit,
.ndo_change_rx_flags = dsa_slave_change_rx_flags,
.ndo_set_rx_mode = dsa_slave_set_rx_mode,
.ndo_set_mac_address = dsa_slave_set_mac_address,
.ndo_do_ioctl = dsa_slave_ioctl,
};
-#endif
-#ifdef CONFIG_NET_DSA_TAG_EDSA
-static const struct net_device_ops edsa_netdev_ops = {
- .ndo_init = dsa_slave_init,
- .ndo_open = dsa_slave_open,
- .ndo_stop = dsa_slave_close,
- .ndo_start_xmit = edsa_xmit,
- .ndo_change_rx_flags = dsa_slave_change_rx_flags,
- .ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_mac_address = dsa_slave_set_mac_address,
- .ndo_do_ioctl = dsa_slave_ioctl,
-};
-#endif
-#ifdef CONFIG_NET_DSA_TAG_TRAILER
-static const struct net_device_ops trailer_netdev_ops = {
- .ndo_init = dsa_slave_init,
- .ndo_open = dsa_slave_open,
- .ndo_stop = dsa_slave_close,
- .ndo_start_xmit = trailer_xmit,
- .ndo_change_rx_flags = dsa_slave_change_rx_flags,
- .ndo_set_rx_mode = dsa_slave_set_rx_mode,
- .ndo_set_mac_address = dsa_slave_set_mac_address,
- .ndo_do_ioctl = dsa_slave_ioctl,
-};
-#endif
+
+static void dsa_slave_adjust_link(struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+ unsigned int status_changed = 0;
+
+ if (p->old_link != p->phy->link) {
+ status_changed = 1;
+ p->old_link = p->phy->link;
+ }
+
+ if (p->old_duplex != p->phy->duplex) {
+ status_changed = 1;
+ p->old_duplex = p->phy->duplex;
+ }
+
+ if (p->old_pause != p->phy->pause) {
+ status_changed = 1;
+ p->old_pause = p->phy->pause;
+ }
+
+ if (ds->drv->adjust_link && status_changed)
+ ds->drv->adjust_link(ds, p->port, p->phy);
+
+ if (status_changed)
+ phy_print_status(p->phy);
+}
+
+static int dsa_slave_fixed_link_update(struct net_device *dev,
+ struct fixed_phy_status *status)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->parent;
+
+ if (ds->drv->fixed_link_update)
+ ds->drv->fixed_link_update(ds, p->port, status);
+
+ return 0;
+}
/* slave device setup *******************************************************/
+static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
+ struct net_device *slave_dev)
+{
+ struct dsa_switch *ds = p->parent;
+ struct dsa_chip_data *cd = ds->pd;
+ struct device_node *phy_dn, *port_dn;
+ bool phy_is_fixed = false;
+ u32 phy_flags = 0;
+ int ret;
+
+ port_dn = cd->port_dn[p->port];
+ p->phy_interface = of_get_phy_mode(port_dn);
+
+ phy_dn = of_parse_phandle(port_dn, "phy-handle", 0);
+ if (of_phy_is_fixed_link(port_dn)) {
+		/* In the case of a fixed PHY, the DT node associated
+		 * with the fixed PHY is the port DT node
+ */
+ ret = of_phy_register_fixed_link(port_dn);
+ if (ret) {
+ pr_err("failed to register fixed PHY\n");
+ return;
+ }
+ phy_is_fixed = true;
+ phy_dn = port_dn;
+ }
+
+ if (ds->drv->get_phy_flags)
+ phy_flags = ds->drv->get_phy_flags(ds, p->port);
+
+ if (phy_dn)
+ p->phy = of_phy_connect(slave_dev, phy_dn,
+ dsa_slave_adjust_link, phy_flags,
+ p->phy_interface);
+
+ if (p->phy && phy_is_fixed)
+ fixed_phy_set_link_update(p->phy, dsa_slave_fixed_link_update);
+
+ /* We could not connect to a designated PHY, so use the switch internal
+ * MDIO bus instead
+ */
+ if (!p->phy)
+ p->phy = ds->slave_mii_bus->phy_map[p->port];
+ else
+ pr_info("attached PHY at address %d [%s]\n",
+ p->phy->addr, p->phy->drv->name);
+}
+
+int dsa_slave_suspend(struct net_device *slave_dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(slave_dev);
+
+ netif_device_detach(slave_dev);
+
+ if (p->phy) {
+ phy_stop(p->phy);
+ p->old_pause = -1;
+ p->old_link = -1;
+ p->old_duplex = -1;
+ phy_suspend(p->phy);
+ }
+
+ return 0;
+}
+
+int dsa_slave_resume(struct net_device *slave_dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(slave_dev);
+
+ netif_device_attach(slave_dev);
+
+ if (p->phy) {
+ phy_resume(p->phy);
+ phy_start(p->phy);
+ }
+
+ return 0;
+}
+
struct net_device *
dsa_slave_create(struct dsa_switch *ds, struct device *parent,
int port, char *name)
@@ -349,35 +544,48 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
eth_hw_addr_inherit(slave_dev, master);
slave_dev->tx_queue_len = 0;
+ slave_dev->netdev_ops = &dsa_slave_netdev_ops;
+
+ SET_NETDEV_DEV(slave_dev, parent);
+ slave_dev->dev.of_node = ds->pd->port_dn[port];
+ slave_dev->vlan_features = master->vlan_features;
+
+ p = netdev_priv(slave_dev);
+ p->dev = slave_dev;
+ p->parent = ds;
+ p->port = port;
switch (ds->dst->tag_protocol) {
#ifdef CONFIG_NET_DSA_TAG_DSA
- case htons(ETH_P_DSA):
- slave_dev->netdev_ops = &dsa_netdev_ops;
+ case DSA_TAG_PROTO_DSA:
+ p->xmit = dsa_netdev_ops.xmit;
break;
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
- case htons(ETH_P_EDSA):
- slave_dev->netdev_ops = &edsa_netdev_ops;
+ case DSA_TAG_PROTO_EDSA:
+ p->xmit = edsa_netdev_ops.xmit;
break;
#endif
#ifdef CONFIG_NET_DSA_TAG_TRAILER
- case htons(ETH_P_TRAILER):
- slave_dev->netdev_ops = &trailer_netdev_ops;
+ case DSA_TAG_PROTO_TRAILER:
+ p->xmit = trailer_netdev_ops.xmit;
+ break;
+#endif
+#ifdef CONFIG_NET_DSA_TAG_BRCM
+ case DSA_TAG_PROTO_BRCM:
+ p->xmit = brcm_netdev_ops.xmit;
break;
#endif
default:
- BUG();
+ p->xmit = dsa_slave_notag_xmit;
+ break;
}
- SET_NETDEV_DEV(slave_dev, parent);
- slave_dev->vlan_features = master->vlan_features;
+ p->old_pause = -1;
+ p->old_link = -1;
+ p->old_duplex = -1;
- p = netdev_priv(slave_dev);
- p->dev = slave_dev;
- p->parent = ds;
- p->port = port;
- p->phy = ds->slave_mii_bus->phy_map[port];
+ dsa_slave_phy_setup(p, slave_dev);
ret = register_netdev(slave_dev);
if (ret) {
@@ -390,6 +598,9 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
netif_carrier_off(slave_dev);
if (p->phy != NULL) {
+ if (ds->drv->get_phy_flags(ds, port))
+ p->phy->dev_flags |= ds->drv->get_phy_flags(ds, port);
+
phy_attach(slave_dev, dev_name(&p->phy->dev),
PHY_INTERFACE_MODE_GMII);
@@ -397,7 +608,6 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
p->phy->speed = 0;
p->phy->duplex = 0;
p->phy->advertising = p->phy->supported | ADVERTISED_Autoneg;
- phy_start_aneg(p->phy);
}
return slave_dev;
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
new file mode 100644
index 000000000000..83d3572cdb20
--- /dev/null
+++ b/net/dsa/tag_brcm.c
@@ -0,0 +1,171 @@
+/*
+ * Broadcom tag support
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "dsa_priv.h"
+
+/* This tag is 4 bytes long; older tags were 6 bytes and are not
+ * handled here
+ */
+#define BRCM_TAG_LEN 4
+
+/* The tag is constructed and deconstructed byte by byte because it is
+ * placed after the MAC source address, which leaves it unaligned to a
+ * 4-byte boundary, so word-sized accesses would be unaligned on most
+ * systems where this is used.
+ */
+
+/* Ingress and egress opcodes */
+#define BRCM_OPCODE_SHIFT 5
+#define BRCM_OPCODE_MASK 0x7
+
+/* Ingress fields */
+/* 1st byte in the tag */
+#define BRCM_IG_TC_SHIFT 2
+#define BRCM_IG_TC_MASK 0x7
+/* 2nd byte in the tag */
+#define BRCM_IG_TE_MASK 0x3
+#define BRCM_IG_TS_SHIFT 7
+/* 3rd byte in the tag */
+#define BRCM_IG_DSTMAP2_MASK 1
+#define BRCM_IG_DSTMAP1_MASK 0xff
+
+/* Egress fields */
+
+/* 2nd byte in the tag */
+#define BRCM_EG_CID_MASK 0xff
+
+/* 3rd byte in the tag */
+#define BRCM_EG_RC_MASK 0xff
+#define BRCM_EG_RC_RSVD (3 << 6)
+#define BRCM_EG_RC_EXCEPTION (1 << 5)
+#define BRCM_EG_RC_PROT_SNOOP (1 << 4)
+#define BRCM_EG_RC_PROT_TERM (1 << 3)
+#define BRCM_EG_RC_SWITCH (1 << 2)
+#define BRCM_EG_RC_MAC_LEARN (1 << 1)
+#define BRCM_EG_RC_MIRROR (1 << 0)
+#define BRCM_EG_TC_SHIFT 5
+#define BRCM_EG_TC_MASK 0x7
+#define BRCM_EG_PID_MASK 0x1f
+
+static netdev_tx_t brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ u8 *brcm_tag;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ if (skb_cow_head(skb, BRCM_TAG_LEN) < 0)
+ goto out_free;
+
+ skb_push(skb, BRCM_TAG_LEN);
+
+ memmove(skb->data, skb->data + BRCM_TAG_LEN, 2 * ETH_ALEN);
+
+ /* Build the tag after the MAC Source Address */
+ brcm_tag = skb->data + 2 * ETH_ALEN;
+
+	/* Set the ingress opcode, traffic class; tag enforcement is
+ * deprecated
+ */
+ brcm_tag[0] = (1 << BRCM_OPCODE_SHIFT) |
+ ((skb->priority << BRCM_IG_TC_SHIFT) & BRCM_IG_TC_MASK);
+ brcm_tag[1] = 0;
+ brcm_tag[2] = 0;
+ if (p->port == 8)
+ brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
+ brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK;
+
+ /* Queue the SKB for transmission on the parent interface, but
+ * do not modify its EtherType
+ */
+ skb->dev = p->parent->dst->master_netdev;
+ dev_queue_xmit(skb);
+
+ return NETDEV_TX_OK;
+
+out_free:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct dsa_switch_tree *dst = dev->dsa_ptr;
+ struct dsa_switch *ds;
+ int source_port;
+ u8 *brcm_tag;
+
+ if (unlikely(dst == NULL))
+ goto out_drop;
+
+ ds = dst->ds[0];
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (skb == NULL)
+ goto out;
+
+ if (unlikely(!pskb_may_pull(skb, BRCM_TAG_LEN)))
+ goto out_drop;
+
+ /* skb->data points to the EtherType, the tag is right before it */
+ brcm_tag = skb->data - 2;
+
+	/* The opcode should never be different from 0b000 */
+ if (unlikely((brcm_tag[0] >> BRCM_OPCODE_SHIFT) & BRCM_OPCODE_MASK))
+ goto out_drop;
+
+ /* We should never see a reserved reason code without knowing how to
+ * handle it
+ */
+ WARN_ON(brcm_tag[2] & BRCM_EG_RC_RSVD);
+
+ /* Locate which port this is coming from */
+ source_port = brcm_tag[3] & BRCM_EG_PID_MASK;
+
+	/* Validate port against switch setup: the tagged port may be out
+	 * of range or have no slave device attached
+	 */
+ if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
+ goto out_drop;
+
+ /* Remove Broadcom tag and update checksum */
+ skb_pull_rcsum(skb, BRCM_TAG_LEN);
+
+ /* Move the Ethernet DA and SA */
+ memmove(skb->data - ETH_HLEN,
+ skb->data - ETH_HLEN - BRCM_TAG_LEN,
+ 2 * ETH_ALEN);
+
+ skb_push(skb, ETH_HLEN);
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = ds->ports[source_port];
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ skb->dev->stats.rx_packets++;
+ skb->dev->stats.rx_bytes += skb->len;
+
+ netif_receive_skb(skb);
+
+ return 0;
+
+out_drop:
+ kfree_skb(skb);
+out:
+ return 0;
+}
+
+const struct dsa_device_ops brcm_netdev_ops = {
+ .xmit = brcm_tag_xmit,
+ .rcv = brcm_tag_rcv,
+};
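A worked example of the egress path above, with hypothetical values (frame bound for switch port 5, skb->priority 0):

/*
 *   brcm_tag[0] = (1 << BRCM_OPCODE_SHIFT)        = 0x20   opcode 0b001
 *   brcm_tag[1] = 0
 *   brcm_tag[2] = 0                                        (port != 8)
 *   brcm_tag[3] = (1 << 5) & BRCM_IG_DSTMAP1_MASK = 0x20   dstmap bit 5
 *
 * The four bytes 20 00 00 20 are inserted between the MAC source
 * address and the EtherType; on the return path brcm_tag_rcv()
 * recovers the port from brcm_tag[3] & BRCM_EG_PID_MASK.
 */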
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index cacce1e22f9c..ce90c8bdc658 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -10,13 +10,12 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
-#include <linux/netdevice.h>
#include <linux/slab.h>
#include "dsa_priv.h"
#define DSA_HLEN 4
-netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
u8 *dsa_header;
@@ -186,7 +185,7 @@ out:
return 0;
}
-struct packet_type dsa_packet_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_DSA),
- .func = dsa_rcv,
+const struct dsa_device_ops dsa_netdev_ops = {
+ .xmit = dsa_xmit,
+ .rcv = dsa_rcv,
};
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index e70c43c25e64..94fcce778679 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -10,14 +10,13 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
-#include <linux/netdevice.h>
#include <linux/slab.h>
#include "dsa_priv.h"
#define DSA_HLEN 4
#define EDSA_HLEN 8
-netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
u8 *edsa_header;
@@ -205,7 +204,7 @@ out:
return 0;
}
-struct packet_type edsa_packet_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_EDSA),
- .func = edsa_rcv,
+const struct dsa_device_ops edsa_netdev_ops = {
+ .xmit = edsa_xmit,
+ .rcv = edsa_rcv,
};
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 94bc260d015d..115fdca34077 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -10,11 +10,10 @@
#include <linux/etherdevice.h>
#include <linux/list.h>
-#include <linux/netdevice.h>
#include <linux/slab.h>
#include "dsa_priv.h"
-netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct sk_buff *nskb;
@@ -114,7 +113,7 @@ out:
return 0;
}
-struct packet_type trailer_packet_type __read_mostly = {
- .type = cpu_to_be16(ETH_P_TRAILER),
- .func = trailer_rcv,
+const struct dsa_device_ops trailer_netdev_ops = {
+ .xmit = trailer_xmit,
+ .rcv = trailer_rcv,
};
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f405e0592407..33a140e15834 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -146,6 +146,33 @@ int eth_rebuild_header(struct sk_buff *skb)
EXPORT_SYMBOL(eth_rebuild_header);
/**
+ * eth_get_headlen - determine the length of the header for an ethernet frame
+ * @data: pointer to start of frame
+ * @len: total length of frame
+ *
+ * Make a best effort attempt to pull the length for all of the headers for
+ * a given frame in a linear buffer.
+ */
+u32 eth_get_headlen(void *data, unsigned int len)
+{
+ const struct ethhdr *eth = (const struct ethhdr *)data;
+ struct flow_keys keys;
+
+ /* this should never happen, but better safe than sorry */
+ if (len < sizeof(*eth))
+ return len;
+
+ /* parse any remaining L2/L3 headers, check for L4 */
+ if (!__skb_flow_dissect(NULL, &keys, data,
+ eth->h_proto, sizeof(*eth), len))
+ return max_t(u32, keys.thoff, sizeof(*eth));
+
+ /* parse for any L4 headers */
+ return min_t(u32, __skb_get_poff(NULL, data, &keys, len), len);
+}
+EXPORT_SYMBOL(eth_get_headlen);
+
+/**
* eth_type_trans - determine the packet's protocol ID.
* @skb: received socket data
* @dev: receiving network device
@@ -181,11 +208,8 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
* variants has been configured on the receiving interface,
* and if so, set skb->protocol without looking at the packet.
*/
- if (unlikely(netdev_uses_dsa_tags(dev)))
- return htons(ETH_P_DSA);
-
- if (unlikely(netdev_uses_trailer_tags(dev)))
- return htons(ETH_P_TRAILER);
+ if (unlikely(netdev_uses_dsa(dev)))
+ return htons(ETH_P_XDSA);
if (likely(ntohs(eth->h_proto) >= ETH_P_802_3_MIN))
return eth->h_proto;
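eth_get_headlen() exists for drivers doing header-split receive: it bounds how many bytes of a page fragment are protocol headers worth copying into the skb's linear area. A hedged usage sketch (the surrounding names are hypothetical):

static void my_rx_pull_headers(struct sk_buff *skb, void *frag_va,
			       unsigned int frag_len)
{
	/* Copy only the L2..L4 headers into the linear area. */
	unsigned int pull_len = eth_get_headlen(frag_va, frag_len);

	memcpy(__skb_put(skb, pull_len), frag_va, pull_len);
	/* The remaining frag_len - pull_len payload bytes stay in the
	 * page and can be attached with skb_add_rx_frag(). */
}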
diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c
index 6591d27e53a4..44136297b673 100644
--- a/net/ieee802154/6lowpan_rtnl.c
+++ b/net/ieee802154/6lowpan_rtnl.c
@@ -71,18 +71,33 @@ struct lowpan_dev_record {
struct list_head list;
};
+/* don't save pan id, it's intra pan */
+struct lowpan_addr {
+ u8 mode;
+ union {
+ /* IPv6 needs big endian here */
+ __be64 extended_addr;
+ __be16 short_addr;
+ } u;
+};
+
+struct lowpan_addr_info {
+ struct lowpan_addr daddr;
+ struct lowpan_addr saddr;
+};
+
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
return netdev_priv(dev);
}
-static inline void lowpan_address_flip(u8 *src, u8 *dest)
+static inline struct
+lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
{
- int i;
-
- for (i = 0; i < IEEE802154_ADDR_LEN; i++)
- (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
+ WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
+ return (struct lowpan_addr_info *)(skb->data -
+ sizeof(struct lowpan_addr_info));
}
static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
@@ -91,8 +106,7 @@ static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
{
const u8 *saddr = _saddr;
const u8 *daddr = _daddr;
- struct ieee802154_addr sa, da;
- struct ieee802154_mac_cb *cb = mac_cb_init(skb);
+ struct lowpan_addr_info *info;
/* TODO:
* if this package isn't ipv6 one, where should it be routed?
@@ -106,41 +120,17 @@ static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
- lowpan_header_compress(skb, dev, type, daddr, saddr, len);
-
- /* NOTE1: I'm still unsure about the fact that compression and WPAN
- * header are created here and not later in the xmit. So wait for
- * an opinion of net maintainers.
- */
- /* NOTE2: to be absolutely correct, we must derive PANid information
- * from MAC subif of the 'dev' and 'real_dev' network devices, but
- * this isn't implemented in mainline yet, so currently we assign 0xff
- */
- cb->type = IEEE802154_FC_TYPE_DATA;
+ info = lowpan_skb_priv(skb);
- /* prepare wpan address data */
- sa.mode = IEEE802154_ADDR_LONG;
- sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
- sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
+ /* TODO: Currently we only support extended_addr */
+ info->daddr.mode = IEEE802154_ADDR_LONG;
+ memcpy(&info->daddr.u.extended_addr, daddr,
+ sizeof(info->daddr.u.extended_addr));
+ info->saddr.mode = IEEE802154_ADDR_LONG;
+ memcpy(&info->saddr.u.extended_addr, saddr,
+ sizeof(info->daddr.u.extended_addr));
- /* intra-PAN communications */
- da.pan_id = sa.pan_id;
-
- /* if the destination address is the broadcast address, use the
- * corresponding short address
- */
- if (lowpan_is_addr_broadcast(daddr)) {
- da.mode = IEEE802154_ADDR_SHORT;
- da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
- } else {
- da.mode = IEEE802154_ADDR_LONG;
- da.extended_addr = ieee802154_devaddr_from_raw(daddr);
- }
-
- cb->ackreq = !lowpan_is_addr_broadcast(daddr);
-
- return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
- type, (void *)&da, (void *)&sa, 0);
+ return 0;
}
static int lowpan_give_skb_to_devices(struct sk_buff *skb,
@@ -338,13 +328,68 @@ err:
return rc;
}
+static int lowpan_header(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ieee802154_addr sa, da;
+ struct ieee802154_mac_cb *cb = mac_cb_init(skb);
+ struct lowpan_addr_info info;
+ void *daddr, *saddr;
+
+ memcpy(&info, lowpan_skb_priv(skb), sizeof(info));
+
+ /* TODO: Currently we only support extended_addr */
+ daddr = &info.daddr.u.extended_addr;
+ saddr = &info.saddr.u.extended_addr;
+
+ lowpan_header_compress(skb, dev, ETH_P_IPV6, daddr, saddr, skb->len);
+
+ cb->type = IEEE802154_FC_TYPE_DATA;
+
+ /* prepare wpan address data */
+ sa.mode = IEEE802154_ADDR_LONG;
+ sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
+ sa.extended_addr = ieee802154_devaddr_from_raw(saddr);
+
+ /* intra-PAN communications */
+ da.pan_id = sa.pan_id;
+
+ /* if the destination address is the broadcast address, use the
+ * corresponding short address
+ */
+ if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
+ da.mode = IEEE802154_ADDR_SHORT;
+ da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
+ cb->ackreq = false;
+ } else {
+ da.mode = IEEE802154_ADDR_LONG;
+ da.extended_addr = ieee802154_devaddr_from_raw(daddr);
+ cb->ackreq = true;
+ }
+
+ return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
+ ETH_P_IPV6, (void *)&da, (void *)&sa, 0);
+}
+
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ieee802154_hdr wpan_hdr;
- int max_single;
+ int max_single, ret;
pr_debug("package xmit\n");
+ /* We must take a copy of the skb before we modify/replace the ipv6
+ * header as the header could be used elsewhere
+ */
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_XMIT_DROP;
+
+ ret = lowpan_header(skb, dev);
+ if (ret < 0) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
kfree_skb(skb);
return NET_XMIT_DROP;
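The restructuring above splits 6lowpan header construction in two: lowpan_header_create() now only stashes the link-layer addresses in the skb headroom, and lowpan_xmit() builds the real 802.15.4 header after skb_unshare() has made the buffer private. The stash/retrieve pattern in miniature (hypothetical stand-alone sketch):

struct meta {
	u8 daddr[8];
	u8 saddr[8];
};

/* header_ops->create time: the bytes just below skb->data are free
 * headroom, so metadata can be parked there without growing the skb. */
static void stash(struct sk_buff *skb, const struct meta *m)
{
	memcpy(skb->data - sizeof(*m), m, sizeof(*m));
}

/* ndo_start_xmit time, after skb_unshare(): read the metadata back
 * and build the real header. */
static void retrieve(const struct sk_buff *skb, struct meta *m)
{
	memcpy(m, skb->data - sizeof(*m), sizeof(*m));
}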
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
index 32755cb7e64e..7cfcd6885225 100644
--- a/net/ieee802154/reassembly.c
+++ b/net/ieee802154/reassembly.c
@@ -485,7 +485,7 @@ static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
static struct ctl_table_header *lowpan_ctl_header;
-static int lowpan_frags_sysctl_register(void)
+static int __init lowpan_frags_sysctl_register(void)
{
lowpan_ctl_header = register_net_sysctl(&init_net,
"net/ieee802154/6lowpan",
@@ -507,7 +507,7 @@ static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}
-static inline int lowpan_frags_sysctl_register(void)
+static inline int __init lowpan_frags_sysctl_register(void)
{
return 0;
}
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index dbc10d84161f..e682b48e0709 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -309,8 +309,33 @@ config NET_IPVTI
config NET_UDP_TUNNEL
tristate
+ select NET_IP_TUNNEL
default n
+config NET_FOU
+ tristate "IP: Foo (IP protocols) over UDP"
+ select XFRM
+ select NET_UDP_TUNNEL
+ ---help---
+ Foo over UDP allows any IP protocol to be directly encapsulated
+	  over UDP, including tunnels (IPIP, GRE, SIT). By encapsulating
+	  in UDP, network mechanisms and optimizations for UDP (such as ECMP
+ and RSS) can be leveraged to provide better service.
+
+config GENEVE
+ tristate "Generic Network Virtualization Encapsulation (Geneve)"
+ depends on INET
+ select NET_UDP_TUNNEL
+ ---help---
+ This allows one to create Geneve virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. Geneve is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ For more information see:
+ http://tools.ietf.org/html/draft-gross-geneve-01
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called geneve.
+
config INET_AH
tristate "IP: AH transformation"
select XFRM_ALGO
@@ -560,6 +585,27 @@ config TCP_CONG_ILLINOIS
For further details see:
http://www.ews.uiuc.edu/~shaoliu/tcpillinois/index.html
+config TCP_CONG_DCTCP
+ tristate "DataCenter TCP (DCTCP)"
+ default n
+ ---help---
+ DCTCP leverages Explicit Congestion Notification (ECN) in the network to
+ provide multi-bit feedback to the end hosts. It is designed to provide:
+
+ - High burst tolerance (incast due to partition/aggregate),
+ - Low latency (short flows, queries),
+ - High throughput (continuous data updates, large file transfers) with
+ commodity, shallow-buffered switches.
+
+ All switches in the data center network running DCTCP must support
+ ECN marking and be configured for marking when reaching defined switch
+ buffer thresholds. The default ECN marking threshold heuristic for
+ DCTCP on switches is 20 packets (30KB) at 1Gbps, and 65 packets
+ (~100KB) at 10Gbps, but might need further careful tweaking.
+
+ For further details see:
+ http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf
+
choice
prompt "Default TCP congestion control"
default DEFAULT_CUBIC
@@ -588,9 +634,11 @@ choice
config DEFAULT_WESTWOOD
bool "Westwood" if TCP_CONG_WESTWOOD=y
+ config DEFAULT_DCTCP
+ bool "DCTCP" if TCP_CONG_DCTCP=y
+
config DEFAULT_RENO
bool "Reno"
-
endchoice
endif
@@ -610,6 +658,7 @@ config DEFAULT_TCP_CONG
default "westwood" if DEFAULT_WESTWOOD
default "veno" if DEFAULT_VENO
default "reno" if DEFAULT_RENO
+ default "dctcp" if DEFAULT_DCTCP
default "cubic"
config TCP_MD5SIG
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 8ee1cd4053ee..518c04ed666e 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
obj-$(CONFIG_IP_MROUTE) += ipmr.o
obj-$(CONFIG_NET_IPIP) += ipip.o
gre-y := gre_demux.o
+obj-$(CONFIG_NET_FOU) += fou.o
obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
obj-$(CONFIG_NET_IPGRE) += ip_gre.o
obj-$(CONFIG_NET_UDP_TUNNEL) += udp_tunnel.o
@@ -42,6 +43,7 @@ obj-$(CONFIG_INET_UDP_DIAG) += udp_diag.o
obj-$(CONFIG_NET_TCPPROBE) += tcp_probe.o
obj-$(CONFIG_TCP_CONG_BIC) += tcp_bic.o
obj-$(CONFIG_TCP_CONG_CUBIC) += tcp_cubic.o
+obj-$(CONFIG_TCP_CONG_DCTCP) += tcp_dctcp.o
obj-$(CONFIG_TCP_CONG_WESTWOOD) += tcp_westwood.o
obj-$(CONFIG_TCP_CONG_HSTCP) += tcp_highspeed.o
obj-$(CONFIG_TCP_CONG_HYBLA) += tcp_hybla.o
@@ -54,6 +56,7 @@ obj-$(CONFIG_TCP_CONG_YEAH) += tcp_yeah.o
obj-$(CONFIG_TCP_CONG_ILLINOIS) += tcp_illinois.o
obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
+obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
xfrm4_output.o xfrm4_protocol.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index d156b3c5f363..92db7a69f2b9 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -418,10 +418,6 @@ int inet_release(struct socket *sock)
}
EXPORT_SYMBOL(inet_release);
-/* It is off by default, see below. */
-int sysctl_ip_nonlocal_bind __read_mostly;
-EXPORT_SYMBOL(sysctl_ip_nonlocal_bind);
-
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
@@ -461,7 +457,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
* is temporarily down)
*/
err = -EADDRNOTAVAIL;
- if (!sysctl_ip_nonlocal_bind &&
+ if (!net->ipv4.sysctl_ip_nonlocal_bind &&
!(inet->freebind || inet->transparent) &&
addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
chk_addr_ret != RTN_LOCAL &&
@@ -1201,40 +1197,6 @@ int inet_sk_rebuild_header(struct sock *sk)
}
EXPORT_SYMBOL(inet_sk_rebuild_header);
-static int inet_gso_send_check(struct sk_buff *skb)
-{
- const struct net_offload *ops;
- const struct iphdr *iph;
- int proto;
- int ihl;
- int err = -EINVAL;
-
- if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
- goto out;
-
- iph = ip_hdr(skb);
- ihl = iph->ihl * 4;
- if (ihl < sizeof(*iph))
- goto out;
-
- proto = iph->protocol;
-
- /* Warning: after this point, iph might be no longer valid */
- if (unlikely(!pskb_may_pull(skb, ihl)))
- goto out;
- __skb_pull(skb, ihl);
-
- skb_reset_transport_header(skb);
- err = -EPROTONOSUPPORT;
-
- ops = rcu_dereference(inet_offloads[proto]);
- if (likely(ops && ops->callbacks.gso_send_check))
- err = ops->callbacks.gso_send_check(skb);
-
-out:
- return err;
-}
-
static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -1407,6 +1369,9 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
* immediately following this IP hdr.
*/
+ /* Note : No need to call skb_gro_postpull_rcsum() here,
+	 * as we already checked that the checksum over the ipv4 header is 0
+ */
skb_gro_pull(skb, sizeof(*iph));
skb_set_transport_header(skb, skb_gro_offset(skb));
@@ -1659,7 +1624,6 @@ static int ipv4_proc_init(void);
static struct packet_offload ip_packet_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_IP),
.callbacks = {
- .gso_send_check = inet_gso_send_check,
.gso_segment = inet_gso_segment,
.gro_receive = inet_gro_receive,
.gro_complete = inet_gro_complete,
@@ -1668,8 +1632,9 @@ static struct packet_offload ip_packet_offload __read_mostly = {
static const struct net_offload ipip_offload = {
.callbacks = {
- .gso_send_check = inet_gso_send_check,
.gso_segment = inet_gso_segment,
+ .gro_receive = inet_gro_receive,
+ .gro_complete = inet_gro_complete,
},
};
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index a2afa89513a0..ac9a32ec3ee4 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -505,8 +505,6 @@ static int ah_init_state(struct xfrm_state *x)
ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
- BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
-
if (x->props.flags & XFRM_STATE_ALIGN4)
x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
ahp->icv_trunc_len);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 1a9b99e04465..16acb59d665e 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -953,10 +953,11 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
{
const struct arphdr *arp;
+ /* do not tweak dropwatch on an ARP we will ignore */
if (dev->flags & IFF_NOARP ||
skb->pkt_type == PACKET_OTHERHOST ||
skb->pkt_type == PACKET_LOOPBACK)
- goto freeskb;
+ goto consumeskb;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
@@ -974,6 +975,9 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, skb, dev, NULL, arp_process);
+consumeskb:
+ consume_skb(skb);
+ return 0;
freeskb:
kfree_skb(skb);
out_of_mem:
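The functional difference in this hunk is which tracepoint fires, not how the memory is freed:

/* kfree_skb() and consume_skb() free the skb identically, but
 * kfree_skb() fires the kfree_skb tracepoint, which tools such as
 * dropwatch count as a packet drop.  Ignoring an ARP on a NOARP or
 * loopback device is policy rather than an error, so it is now
 * reported through consume_skb() instead.
 */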
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 05b708bbdb0d..4715f25dfe03 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -246,7 +246,7 @@ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len)
* success, negative values on error.
*
*/
-static int cipso_v4_cache_init(void)
+static int __init cipso_v4_cache_init(void)
{
u32 iter;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 255aa9946fe7..23104a3f2924 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -243,7 +243,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
u8 tos, int oif, struct net_device *dev,
int rpf, struct in_device *idev, u32 *itag)
{
- int ret, no_addr, accept_local;
+ int ret, no_addr;
struct fib_result res;
struct flowi4 fl4;
struct net *net;
@@ -258,16 +258,17 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
no_addr = idev->ifa_list == NULL;
- accept_local = IN_DEV_ACCEPT_LOCAL(idev);
fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
net = dev_net(dev);
if (fib_lookup(net, &fl4, &res))
goto last_resort;
- if (res.type != RTN_UNICAST) {
- if (res.type != RTN_LOCAL || !accept_local)
- goto e_inval;
- }
+ if (res.type != RTN_UNICAST &&
+ (res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
+ goto e_inval;
+ if (!rpf && !fib_num_tclassid_users(dev_net(dev)) &&
+ (dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev)))
+ goto last_resort;
fib_combine_itag(itag, &res);
dev_match = false;
@@ -321,6 +322,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
if (!r && !fib_num_tclassid_users(dev_net(dev)) &&
+ IN_DEV_ACCEPT_LOCAL(idev) &&
(dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) {
*itag = 0;
return 0;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index b10cd43a4722..5b6efb3d2308 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -157,9 +157,12 @@ static void rt_fibinfo_free(struct rtable __rcu **rtp)
static void free_nh_exceptions(struct fib_nh *nh)
{
- struct fnhe_hash_bucket *hash = nh->nh_exceptions;
+ struct fnhe_hash_bucket *hash;
int i;
+ hash = rcu_dereference_protected(nh->nh_exceptions, 1);
+ if (!hash)
+ return;
for (i = 0; i < FNHE_HASH_SIZE; i++) {
struct fib_nh_exception *fnhe;
@@ -205,8 +208,7 @@ static void free_fib_info_rcu(struct rcu_head *head)
change_nexthops(fi) {
if (nexthop_nh->nh_dev)
dev_put(nexthop_nh->nh_dev);
- if (nexthop_nh->nh_exceptions)
- free_nh_exceptions(nexthop_nh);
+ free_nh_exceptions(nexthop_nh);
rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
rt_fibinfo_free(&nexthop_nh->nh_rth_input);
} endfor_nexthops(fi);
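With nh_exceptions now annotated __rcu, plain loads need an accessor even on the teardown path:

/* rcu_dereference_protected(p, c) reads an __rcu pointer without a
 * read-side barrier, asserting condition c (verified by lockdep when
 * c is a real lock expression) that the caller already holds
 * update-side protection.  The constant 1 used above documents that
 * no concurrent readers can exist: free_nh_exceptions() is reached
 * from the free_fib_info_rcu() callback, i.e. after a grace period.
 */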
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
new file mode 100644
index 000000000000..efa70ad44906
--- /dev/null
+++ b/net/ipv4/fou.c
@@ -0,0 +1,514 @@
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <net/genetlink.h>
+#include <net/gue.h>
+#include <net/ip.h>
+#include <net/protocol.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/xfrm.h>
+#include <uapi/linux/fou.h>
+#include <uapi/linux/genetlink.h>
+
+static DEFINE_SPINLOCK(fou_lock);
+static LIST_HEAD(fou_list);
+
+struct fou {
+ struct socket *sock;
+ u8 protocol;
+ u16 port;
+ struct udp_offload udp_offloads;
+ struct list_head list;
+};
+
+struct fou_cfg {
+ u16 type;
+ u8 protocol;
+ struct udp_port_cfg udp_config;
+};
+
+static inline struct fou *fou_from_sock(struct sock *sk)
+{
+ return sk->sk_user_data;
+}
+
+static int fou_udp_encap_recv_deliver(struct sk_buff *skb,
+ u8 protocol, size_t len)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ /* Remove 'len' bytes from the packet (UDP header and
+ * FOU header if present), modify the protocol to the one
+ * we found, and then call rcv_encap.
+ */
+ iph->tot_len = htons(ntohs(iph->tot_len) - len);
+ __skb_pull(skb, len);
+ skb_postpull_rcsum(skb, udp_hdr(skb), len);
+ skb_reset_transport_header(skb);
+
+ return -protocol;
+}
+
+static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct fou *fou = fou_from_sock(sk);
+
+ if (!fou)
+ return 1;
+
+ return fou_udp_encap_recv_deliver(skb, fou->protocol,
+ sizeof(struct udphdr));
+}
+
+static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct fou *fou = fou_from_sock(sk);
+ size_t len;
+ struct guehdr *guehdr;
+ struct udphdr *uh;
+
+ if (!fou)
+ return 1;
+
+ len = sizeof(struct udphdr) + sizeof(struct guehdr);
+ if (!pskb_may_pull(skb, len))
+ goto drop;
+
+ uh = udp_hdr(skb);
+ guehdr = (struct guehdr *)&uh[1];
+
+ len += guehdr->hlen << 2;
+ if (!pskb_may_pull(skb, len))
+ goto drop;
+
+ if (guehdr->version != 0)
+ goto drop;
+
+ if (guehdr->flags) {
+ /* No support yet */
+ goto drop;
+ }
+
+ return fou_udp_encap_recv_deliver(skb, guehdr->next_hdr, len);
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static struct sk_buff **fou_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ const struct net_offload *ops;
+ struct sk_buff **pp = NULL;
+ u8 proto = NAPI_GRO_CB(skb)->proto;
+ const struct net_offload **offloads;
+
+ rcu_read_lock();
+ offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ ops = rcu_dereference(offloads[proto]);
+ if (!ops || !ops->callbacks.gro_receive)
+ goto out_unlock;
+
+ pp = ops->callbacks.gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+
+ return pp;
+}
+
+static int fou_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ const struct net_offload *ops;
+ u8 proto = NAPI_GRO_CB(skb)->proto;
+ int err = -ENOSYS;
+ const struct net_offload **offloads;
+
+ rcu_read_lock();
+ offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ ops = rcu_dereference(offloads[proto]);
+ if (WARN_ON(!ops || !ops->callbacks.gro_complete))
+ goto out_unlock;
+
+ err = ops->callbacks.gro_complete(skb, nhoff);
+
+out_unlock:
+ rcu_read_unlock();
+
+ return err;
+}
+
+static struct sk_buff **gue_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ const struct net_offload **offloads;
+ const struct net_offload *ops;
+ struct sk_buff **pp = NULL;
+ struct sk_buff *p;
+ u8 proto;
+ struct guehdr *guehdr;
+ unsigned int hlen, guehlen;
+ unsigned int off;
+ int flush = 1;
+
+ off = skb_gro_offset(skb);
+ hlen = off + sizeof(*guehdr);
+ guehdr = skb_gro_header_fast(skb, off);
+ if (skb_gro_header_hard(skb, hlen)) {
+ guehdr = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!guehdr))
+ goto out;
+ }
+
+ proto = guehdr->next_hdr;
+
+ rcu_read_lock();
+ offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ ops = rcu_dereference(offloads[proto]);
+ if (WARN_ON(!ops || !ops->callbacks.gro_receive))
+ goto out_unlock;
+
+ guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
+
+ hlen = off + guehlen;
+ if (skb_gro_header_hard(skb, hlen)) {
+ guehdr = skb_gro_header_slow(skb, hlen, off);
+ if (unlikely(!guehdr))
+ goto out_unlock;
+ }
+
+ flush = 0;
+
+ for (p = *head; p; p = p->next) {
+ const struct guehdr *guehdr2;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ guehdr2 = (struct guehdr *)(p->data + off);
+
+		/* Check that the base GUE headers are equal (this covers
+		 * hlen, version, next_hdr, and flags).
+ */
+ if (guehdr->word != guehdr2->word) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+		/* Check that the optional fields are the same. */
+ if (guehdr->hlen && memcmp(&guehdr[1], &guehdr2[1],
+ guehdr->hlen << 2)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+ }
+
+ skb_gro_pull(skb, guehlen);
+
+	/* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
+ skb_gro_postpull_rcsum(skb, guehdr, guehlen);
+
+ pp = ops->callbacks.gro_receive(head, skb);
+
+out_unlock:
+ rcu_read_unlock();
+out:
+ NAPI_GRO_CB(skb)->flush |= flush;
+
+ return pp;
+}
+
+static int gue_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ const struct net_offload **offloads;
+ struct guehdr *guehdr = (struct guehdr *)(skb->data + nhoff);
+ const struct net_offload *ops;
+ unsigned int guehlen;
+ u8 proto;
+ int err = -ENOENT;
+
+ proto = guehdr->next_hdr;
+
+ guehlen = sizeof(*guehdr) + (guehdr->hlen << 2);
+
+ rcu_read_lock();
+ offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ ops = rcu_dereference(offloads[proto]);
+ if (WARN_ON(!ops || !ops->callbacks.gro_complete))
+ goto out_unlock;
+
+ err = ops->callbacks.gro_complete(skb, nhoff + guehlen);
+
+out_unlock:
+ rcu_read_unlock();
+ return err;
+}
+
+static int fou_add_to_port_list(struct fou *fou)
+{
+ struct fou *fout;
+
+ spin_lock(&fou_lock);
+ list_for_each_entry(fout, &fou_list, list) {
+ if (fou->port == fout->port) {
+ spin_unlock(&fou_lock);
+ return -EALREADY;
+ }
+ }
+
+ list_add(&fou->list, &fou_list);
+ spin_unlock(&fou_lock);
+
+ return 0;
+}
+
+static void fou_release(struct fou *fou)
+{
+ struct socket *sock = fou->sock;
+ struct sock *sk = sock->sk;
+
+ udp_del_offload(&fou->udp_offloads);
+
+ list_del(&fou->list);
+
+ /* Remove hooks into tunnel socket */
+ sk->sk_user_data = NULL;
+
+ sock_release(sock);
+
+ kfree(fou);
+}
+
+static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
+{
+ udp_sk(sk)->encap_rcv = fou_udp_recv;
+ fou->protocol = cfg->protocol;
+ fou->udp_offloads.callbacks.gro_receive = fou_gro_receive;
+ fou->udp_offloads.callbacks.gro_complete = fou_gro_complete;
+ fou->udp_offloads.port = cfg->udp_config.local_udp_port;
+ fou->udp_offloads.ipproto = cfg->protocol;
+
+ return 0;
+}
+
+static int gue_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
+{
+ udp_sk(sk)->encap_rcv = gue_udp_recv;
+ fou->udp_offloads.callbacks.gro_receive = gue_gro_receive;
+ fou->udp_offloads.callbacks.gro_complete = gue_gro_complete;
+ fou->udp_offloads.port = cfg->udp_config.local_udp_port;
+
+ return 0;
+}
+
+static int fou_create(struct net *net, struct fou_cfg *cfg,
+ struct socket **sockp)
+{
+ struct fou *fou = NULL;
+ int err;
+ struct socket *sock = NULL;
+ struct sock *sk;
+
+ /* Open UDP socket */
+ err = udp_sock_create(net, &cfg->udp_config, &sock);
+ if (err < 0)
+ goto error;
+
+ /* Allocate FOU port structure */
+ fou = kzalloc(sizeof(*fou), GFP_KERNEL);
+ if (!fou) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ sk = sock->sk;
+
+ fou->port = cfg->udp_config.local_udp_port;
+
+ /* Initialize based on the fou encapsulation type */
+ switch (cfg->type) {
+ case FOU_ENCAP_DIRECT:
+ err = fou_encap_init(sk, fou, cfg);
+ if (err)
+ goto error;
+ break;
+ case FOU_ENCAP_GUE:
+ err = gue_encap_init(sk, fou, cfg);
+ if (err)
+ goto error;
+ break;
+ default:
+ err = -EINVAL;
+ goto error;
+ }
+
+ udp_sk(sk)->encap_type = 1;
+ udp_encap_enable();
+
+ sk->sk_user_data = fou;
+ fou->sock = sock;
+
+ udp_set_convert_csum(sk, true);
+
+ sk->sk_allocation = GFP_ATOMIC;
+
+ if (cfg->udp_config.family == AF_INET) {
+ err = udp_add_offload(&fou->udp_offloads);
+ if (err)
+ goto error;
+ }
+
+ err = fou_add_to_port_list(fou);
+ if (err)
+ goto error;
+
+ if (sockp)
+ *sockp = sock;
+
+ return 0;
+
+error:
+ kfree(fou);
+ if (sock)
+ sock_release(sock);
+
+ return err;
+}
+
+static int fou_destroy(struct net *net, struct fou_cfg *cfg)
+{
+ struct fou *fou;
+ u16 port = cfg->udp_config.local_udp_port;
+ int err = -EINVAL;
+
+ spin_lock(&fou_lock);
+ list_for_each_entry(fou, &fou_list, list) {
+ if (fou->port == port) {
+ udp_del_offload(&fou->udp_offloads);
+ fou_release(fou);
+ err = 0;
+ break;
+ }
+ }
+ spin_unlock(&fou_lock);
+
+ return err;
+}
+
+static struct genl_family fou_nl_family = {
+ .id = GENL_ID_GENERATE,
+ .hdrsize = 0,
+ .name = FOU_GENL_NAME,
+ .version = FOU_GENL_VERSION,
+ .maxattr = FOU_ATTR_MAX,
+ .netnsok = true,
+};
+
+static struct nla_policy fou_nl_policy[FOU_ATTR_MAX + 1] = {
+ [FOU_ATTR_PORT] = { .type = NLA_U16, },
+ [FOU_ATTR_AF] = { .type = NLA_U8, },
+ [FOU_ATTR_IPPROTO] = { .type = NLA_U8, },
+ [FOU_ATTR_TYPE] = { .type = NLA_U8, },
+};
+
+static int parse_nl_config(struct genl_info *info,
+ struct fou_cfg *cfg)
+{
+ memset(cfg, 0, sizeof(*cfg));
+
+ cfg->udp_config.family = AF_INET;
+
+ if (info->attrs[FOU_ATTR_AF]) {
+ u8 family = nla_get_u8(info->attrs[FOU_ATTR_AF]);
+
+ if (family != AF_INET && family != AF_INET6)
+ return -EINVAL;
+
+ cfg->udp_config.family = family;
+ }
+
+ if (info->attrs[FOU_ATTR_PORT]) {
+ u16 port = nla_get_u16(info->attrs[FOU_ATTR_PORT]);
+
+ cfg->udp_config.local_udp_port = port;
+ }
+
+ if (info->attrs[FOU_ATTR_IPPROTO])
+ cfg->protocol = nla_get_u8(info->attrs[FOU_ATTR_IPPROTO]);
+
+ if (info->attrs[FOU_ATTR_TYPE])
+ cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]);
+
+ return 0;
+}
+
+static int fou_nl_cmd_add_port(struct sk_buff *skb, struct genl_info *info)
+{
+ struct fou_cfg cfg;
+ int err;
+
+ err = parse_nl_config(info, &cfg);
+ if (err)
+ return err;
+
+ return fou_create(&init_net, &cfg, NULL);
+}
+
+static int fou_nl_cmd_rm_port(struct sk_buff *skb, struct genl_info *info)
+{
+ struct fou_cfg cfg;
+ int err;
+
+ err = parse_nl_config(info, &cfg);
+ if (err)
+ return err;
+
+ return fou_destroy(&init_net, &cfg);
+}
+
+static const struct genl_ops fou_nl_ops[] = {
+ {
+ .cmd = FOU_CMD_ADD,
+ .doit = fou_nl_cmd_add_port,
+ .policy = fou_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = FOU_CMD_DEL,
+ .doit = fou_nl_cmd_rm_port,
+ .policy = fou_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
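
A hedged configuration sketch: the fou_cfg that parse_nl_config() would build
for a request such as "ip fou add port 7777 ipproto 47" (the iproute2 syntax
and the network-byte-order port attribute are assumptions here):

struct fou_cfg cfg = {
	.type	  = FOU_ENCAP_DIRECT,
	.protocol = IPPROTO_GRE,			/* 47 */
	.udp_config.family	   = AF_INET,		/* default when FOU_ATTR_AF is absent */
	.udp_config.local_udp_port = htons(7777),
};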
+
+static int __init fou_init(void)
+{
+ int ret;
+
+ ret = genl_register_family_with_ops(&fou_nl_family,
+ fou_nl_ops);
+
+ return ret;
+}
+
+static void __exit fou_fini(void)
+{
+ struct fou *fou, *next;
+
+ genl_unregister_family(&fou_nl_family);
+
+ /* Close all the FOU sockets */
+
+ spin_lock(&fou_lock);
+ list_for_each_entry_safe(fou, next, &fou_list, list)
+ fou_release(fou);
+ spin_unlock(&fou_lock);
+}
+
+module_init(fou_init);
+module_exit(fou_fini);
+MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
new file mode 100644
index 000000000000..065cd94c640c
--- /dev/null
+++ b/net/ipv4/geneve.c
@@ -0,0 +1,373 @@
+/*
+ * Geneve: Generic Network Virtualization Encapsulation
+ *
+ * Copyright (c) 2014 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/rculist.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/igmp.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/hash.h>
+#include <linux/ethtool.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
+#include <net/ip.h>
+#include <net/ip_tunnels.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/geneve.h>
+#include <net/protocol.h>
+#include <net/udp_tunnel.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <net/ip6_tunnel.h>
+#include <net/ip6_checksum.h>
+#endif
+
+#define PORT_HASH_BITS 8
+#define PORT_HASH_SIZE (1<<PORT_HASH_BITS)
+
+/* per-network namespace private data for this module */
+struct geneve_net {
+ struct hlist_head sock_list[PORT_HASH_SIZE];
+ spinlock_t sock_lock; /* Protects sock_list */
+};
+
+static int geneve_net_id;
+
+static struct workqueue_struct *geneve_wq;
+
+static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
+{
+ return (struct genevehdr *)(udp_hdr(skb) + 1);
+}
+
+static struct hlist_head *gs_head(struct net *net, __be16 port)
+{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+
+ return &gn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
+}
+
+/* Find geneve socket based on network namespace and UDP port */
+static struct geneve_sock *geneve_find_sock(struct net *net, __be16 port)
+{
+ struct geneve_sock *gs;
+
+ hlist_for_each_entry_rcu(gs, gs_head(net, port), hlist) {
+ if (inet_sk(gs->sock->sk)->inet_sport == port)
+ return gs;
+ }
+
+ return NULL;
+}
+
+static void geneve_build_header(struct genevehdr *geneveh,
+ __be16 tun_flags, u8 vni[3],
+ u8 options_len, u8 *options)
+{
+ geneveh->ver = GENEVE_VER;
+ geneveh->opt_len = options_len / 4;
+ geneveh->oam = !!(tun_flags & TUNNEL_OAM);
+ geneveh->critical = !!(tun_flags & TUNNEL_CRIT_OPT);
+ geneveh->rsvd1 = 0;
+ memcpy(geneveh->vni, vni, 3);
+ geneveh->proto_type = htons(ETH_P_TEB);
+ geneveh->rsvd2 = 0;
+
+ memcpy(geneveh->options, options, options_len);
+}
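
For orientation, the wire layout geneve_build_header() fills in, following the
Geneve draft (field order matches struct genevehdr; sketch, not normative):

 |Ver|  Opt Len  |O|C|   Rsvd.   |        Protocol Type (TEB)    |
 |      Virtual Network Identifier (VNI)        |   Reserved    |
 |                 Variable Length Options ...                  |

opt_len counts 4-byte words, hence the options_len / 4 conversion above.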
+
+/* Transmit a fully formatted Geneve frame.
+ *
+ * When calling this function, skb->data should point
+ * to the fully formed Geneve header.
+ *
+ * This function will add other UDP tunnel headers.
+ */
+int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
+ struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos,
+ __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port,
+ __be16 tun_flags, u8 vni[3], u8 opt_len, u8 *opt,
+ bool xnet)
+{
+ struct genevehdr *gnvh;
+ int min_headroom;
+ int err;
+
+ skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+ + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err))
+ return err;
+
+ if (vlan_tx_tag_present(skb)) {
+ if (unlikely(!__vlan_put_tag(skb,
+ skb->vlan_proto,
+ vlan_tx_tag_get(skb)))) {
+ err = -ENOMEM;
+ return err;
+ }
+ skb->vlan_tci = 0;
+ }
+
+ gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
+ geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
+
+ return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
+ tos, ttl, df, src_port, dst_port, xnet);
+}
+EXPORT_SYMBOL_GPL(geneve_xmit_skb);
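
A hypothetical caller sketch; gs, rt, skb, fl4, tos, ttl, df, src_port, vni
and xnet are assumed to have been prepared by the caller, and 6081 is the
IANA-assigned Geneve port:

	err = geneve_xmit_skb(gs, rt, skb, fl4.saddr, fl4.daddr, tos, ttl, df,
			      src_port, htons(6081), 0 /* tun_flags */, vni,
			      0 /* opt_len */, NULL /* opt */, xnet);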
+
+static void geneve_notify_add_rx_port(struct geneve_sock *gs)
+{
+ struct sock *sk = gs->sock->sk;
+ sa_family_t sa_family = sk->sk_family;
+ int err;
+
+ if (sa_family == AF_INET) {
+ err = udp_add_offload(&gs->udp_offloads);
+ if (err)
+ pr_warn("geneve: udp_add_offload failed with status %d\n",
+ err);
+ }
+}
+
+/* Callback from net/ipv4/udp.c to receive packets */
+static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct genevehdr *geneveh;
+ struct geneve_sock *gs;
+ int opts_len;
+
+ /* Need Geneve and inner Ethernet header to be present */
+ if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN)))
+ goto error;
+
+ /* Hand packets with an unexpected version or protocol back to UDP */
+ geneveh = geneve_hdr(skb);
+
+ if (unlikely(geneveh->ver != GENEVE_VER))
+ goto error;
+
+ if (unlikely(geneveh->proto_type != htons(ETH_P_TEB)))
+ goto error;
+
+ opts_len = geneveh->opt_len * 4;
+ if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len,
+ htons(ETH_P_TEB)))
+ goto drop;
+
+ gs = rcu_dereference_sk_user_data(sk);
+ if (!gs)
+ goto drop;
+
+ gs->rcv(gs, skb);
+ return 0;
+
+drop:
+ /* Consume bad packet */
+ kfree_skb(skb);
+ return 0;
+
+error:
+ /* Let the UDP layer deal with the skb */
+ return 1;
+}
+
+static void geneve_del_work(struct work_struct *work)
+{
+ struct geneve_sock *gs = container_of(work, struct geneve_sock,
+ del_work);
+
+ udp_tunnel_sock_release(gs->sock);
+ kfree_rcu(gs, rcu);
+}
+
+static struct socket *geneve_create_sock(struct net *net, bool ipv6,
+ __be16 port)
+{
+ struct socket *sock;
+ struct udp_port_cfg udp_conf;
+ int err;
+
+ memset(&udp_conf, 0, sizeof(udp_conf));
+
+ if (ipv6) {
+ udp_conf.family = AF_INET6;
+ } else {
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ }
+
+ udp_conf.local_udp_port = port;
+
+ /* Open UDP socket */
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return sock;
+}
+
+/* Create a new listen socket (sharing is handled by geneve_sock_add) */
+static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
+ geneve_rcv_t *rcv, void *data,
+ bool ipv6)
+{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+ struct geneve_sock *gs;
+ struct socket *sock;
+ struct udp_tunnel_sock_cfg tunnel_cfg;
+
+ gs = kzalloc(sizeof(*gs), GFP_KERNEL);
+ if (!gs)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&gs->del_work, geneve_del_work);
+
+ sock = geneve_create_sock(net, ipv6, port);
+ if (IS_ERR(sock)) {
+ kfree(gs);
+ return ERR_CAST(sock);
+ }
+
+ gs->sock = sock;
+ atomic_set(&gs->refcnt, 1);
+ gs->rcv = rcv;
+ gs->rcv_data = data;
+
+ /* Initialize the geneve udp offloads structure */
+ gs->udp_offloads.port = port;
+ gs->udp_offloads.callbacks.gro_receive = NULL;
+ gs->udp_offloads.callbacks.gro_complete = NULL;
+
+ spin_lock(&gn->sock_lock);
+ hlist_add_head_rcu(&gs->hlist, gs_head(net, port));
+ geneve_notify_add_rx_port(gs);
+ spin_unlock(&gn->sock_lock);
+
+ /* Mark socket as an encapsulation socket */
+ tunnel_cfg.sk_user_data = gs;
+ tunnel_cfg.encap_type = 1;
+ tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
+ tunnel_cfg.encap_destroy = NULL;
+ setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
+
+ return gs;
+}
+
+struct geneve_sock *geneve_sock_add(struct net *net, __be16 port,
+ geneve_rcv_t *rcv, void *data,
+ bool no_share, bool ipv6)
+{
+ struct geneve_sock *gs;
+
+ gs = geneve_socket_create(net, port, rcv, data, ipv6);
+ if (!IS_ERR(gs))
+ return gs;
+
+ if (no_share) /* Return error if sharing is not allowed. */
+ return ERR_PTR(-EINVAL);
+
+ gs = geneve_find_sock(net, port);
+ if (gs) {
+ if (gs->rcv == rcv)
+ atomic_inc(&gs->refcnt);
+ else
+ gs = ERR_PTR(-EBUSY);
+ } else {
+ gs = ERR_PTR(-EINVAL);
+ }
+
+ return gs;
+}
+EXPORT_SYMBOL_GPL(geneve_sock_add);
+
+void geneve_sock_release(struct geneve_sock *gs)
+{
+ if (!atomic_dec_and_test(&gs->refcnt))
+ return;
+
+ queue_work(geneve_wq, &gs->del_work);
+}
+EXPORT_SYMBOL_GPL(geneve_sock_release);
+
+static __net_init int geneve_init_net(struct net *net)
+{
+ struct geneve_net *gn = net_generic(net, geneve_net_id);
+ unsigned int h;
+
+ spin_lock_init(&gn->sock_lock);
+
+ for (h = 0; h < PORT_HASH_SIZE; ++h)
+ INIT_HLIST_HEAD(&gn->sock_list[h]);
+
+ return 0;
+}
+
+static struct pernet_operations geneve_net_ops = {
+ .init = geneve_init_net,
+ .exit = NULL,
+ .id = &geneve_net_id,
+ .size = sizeof(struct geneve_net),
+};
+
+static int __init geneve_init_module(void)
+{
+ int rc;
+
+ geneve_wq = alloc_workqueue("geneve", 0, 0);
+ if (!geneve_wq)
+ return -ENOMEM;
+
+ rc = register_pernet_subsys(&geneve_net_ops);
+ if (rc) {
+ destroy_workqueue(geneve_wq);
+ return rc;
+ }
+
+ pr_info("Geneve driver\n");
+
+ return 0;
+}
+late_initcall(geneve_init_module);
+
+static void __exit geneve_cleanup_module(void)
+{
+ unregister_pernet_subsys(&geneve_net_ops);
+ destroy_workqueue(geneve_wq);
+}
+module_exit(geneve_cleanup_module);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jesse Gross <jesse@nicira.com>");
+MODULE_DESCRIPTION("Driver for GENEVE encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("geneve");
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 0485bf7f8f03..4a7b5b2a1ce3 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -98,7 +98,6 @@ EXPORT_SYMBOL_GPL(gre_build_header);
static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err)
{
- unsigned int ip_hlen = ip_hdrlen(skb);
const struct gre_base_hdr *greh;
__be32 *options;
int hdr_len;
@@ -106,7 +105,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
return -EINVAL;
- greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen);
+ greh = (struct gre_base_hdr *)skb_transport_header(skb);
if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
return -EINVAL;
@@ -116,7 +115,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
if (!pskb_may_pull(skb, hdr_len))
return -EINVAL;
- greh = (struct gre_base_hdr *)(skb_network_header(skb) + ip_hlen);
+ greh = (struct gre_base_hdr *)skb_transport_header(skb);
tpi->proto = greh->protocol;
options = (__be32 *)(greh + 1);
@@ -125,6 +124,10 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
*csum_err = true;
return -EINVAL;
}
+
+ skb_checksum_try_convert(skb, IPPROTO_GRE, 0,
+ null_compute_pseudo);
+
options++;
}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 6556263c8fa5..a77729503071 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -15,13 +15,6 @@
#include <net/protocol.h>
#include <net/gre.h>
-static int gre_gso_send_check(struct sk_buff *skb)
-{
- if (!skb->encapsulation)
- return -EINVAL;
- return 0;
-}
-
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -46,6 +39,9 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
SKB_GSO_IPIP)))
goto out;
+ if (!skb->encapsulation)
+ goto out;
+
if (unlikely(!pskb_may_pull(skb, sizeof(*greh))))
goto out;
@@ -119,28 +115,6 @@ out:
return segs;
}
-/* Compute the whole skb csum in s/w and store it, then verify GRO csum
- * starting from gro_offset.
- */
-static __sum16 gro_skb_checksum(struct sk_buff *skb)
-{
- __sum16 sum;
-
- skb->csum = skb_checksum(skb, 0, skb->len, 0);
- NAPI_GRO_CB(skb)->csum = csum_sub(skb->csum,
- csum_partial(skb->data, skb_gro_offset(skb), 0));
- sum = csum_fold(NAPI_GRO_CB(skb)->csum);
- if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) {
- if (unlikely(!sum) && !skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev);
- } else {
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum_complete_sw = 1;
- }
-
- return sum;
-}
-
static struct sk_buff **gre_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
@@ -192,22 +166,16 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
if (unlikely(!greh))
goto out_unlock;
}
- if (greh->flags & GRE_CSUM) { /* Need to verify GRE csum first */
- __sum16 csum = 0;
-
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- csum = csum_fold(NAPI_GRO_CB(skb)->csum);
- /* Don't trust csum error calculated/reported by h/w */
- if (skb->ip_summed == CHECKSUM_NONE || csum != 0)
- csum = gro_skb_checksum(skb);
-
- /* GRE CSUM is the 1's complement of the 1's complement sum
- * of the GRE hdr plus payload so it should add up to 0xffff
- * (and 0 after csum_fold()) just like the IPv4 hdr csum.
- */
- if (csum)
+
+ /* Don't bother verifying checksum if we're going to flush anyway. */
+ if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
+ if (skb_gro_checksum_simple_validate(skb))
goto out_unlock;
+
+ skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0,
+ null_compute_pseudo);
}
+
flush = 0;
for (p = *head; p; p = p->next) {
@@ -284,7 +252,6 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
static const struct net_offload gre_offload = {
.callbacks = {
- .gso_send_check = gre_gso_send_check,
.gso_segment = gre_gso_segment,
.gro_receive = gre_gro_receive,
.gro_complete = gre_gro_complete,
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ea7d4afe8205..5882f584910e 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -231,12 +231,62 @@ static inline void icmp_xmit_unlock(struct sock *sk)
spin_unlock_bh(&sk->sk_lock.slock);
}
+int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
+int sysctl_icmp_msgs_burst __read_mostly = 50;
+
+static struct {
+ spinlock_t lock;
+ u32 credit;
+ u32 stamp;
+} icmp_global = {
+ .lock = __SPIN_LOCK_UNLOCKED(icmp_global.lock),
+};
+
+/**
+ * icmp_global_allow - Are we allowed to send one more ICMP message?
+ *
+ * Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
+ * Returns false if we reached the limit and cannot send another packet.
+ * Note: called with BH disabled.
+ */
+bool icmp_global_allow(void)
+{
+ u32 credit, delta, incr = 0, now = (u32)jiffies;
+ bool rc = false;
+
+ /* Check if token bucket is empty and cannot be refilled
+ * without taking the spinlock.
+ */
+ if (!icmp_global.credit) {
+ delta = min_t(u32, now - icmp_global.stamp, HZ);
+ if (delta < HZ / 50)
+ return false;
+ }
+
+ spin_lock(&icmp_global.lock);
+ delta = min_t(u32, now - icmp_global.stamp, HZ);
+ if (delta >= HZ / 50) {
+ incr = sysctl_icmp_msgs_per_sec * delta / HZ;
+ if (incr)
+ icmp_global.stamp = now;
+ }
+ credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
+ if (credit) {
+ credit--;
+ rc = true;
+ }
+ icmp_global.credit = credit;
+ spin_unlock(&icmp_global.lock);
+ return rc;
+}
+EXPORT_SYMBOL(icmp_global_allow);
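
A standalone, single-threaded sketch of the same token-bucket arithmetic,
with jiffies replaced by a millisecond clock (the HZ/50 refill floor becomes
20 ms) and the two sysctls fixed at their defaults of 1000 and 50:

#include <stdbool.h>
#include <stdint.h>

static bool toy_icmp_allow(uint32_t now_ms)
{
	static uint32_t credit, stamp_ms;
	uint32_t delta = now_ms - stamp_ms;

	if (delta > 1000)
		delta = 1000;		/* cap the refill window at one second */
	if (delta >= 20) {		/* refill at most every 20 ms */
		credit += delta;	/* 1000 msgs/sec == one credit per ms */
		if (credit > 50)
			credit = 50;	/* clamp to the burst limit */
		stamp_ms = now_ms;
	}
	if (!credit)
		return false;
	credit--;
	return true;
}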
+
/*
* Send an ICMP frame.
*/
-static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
- struct flowi4 *fl4, int type, int code)
+static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
+ struct flowi4 *fl4, int type, int code)
{
struct dst_entry *dst = &rt->dst;
bool rc = true;
@@ -253,8 +303,14 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
goto out;
/* Limit if icmp type is enabled in ratemask. */
- if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
- struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
+ if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask))
+ goto out;
+
+ rc = false;
+ if (icmp_global_allow()) {
+ struct inet_peer *peer;
+
+ peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
rc = inet_peer_xrlim_allow(peer,
net->ipv4.sysctl_icmp_ratelimit);
if (peer)
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index f10eab462282..fb70e3ecc3e4 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -117,7 +117,7 @@
#define IGMP_V2_Unsolicited_Report_Interval (10*HZ)
#define IGMP_V3_Unsolicited_Report_Interval (1*HZ)
#define IGMP_Query_Response_Interval (10*HZ)
-#define IGMP_Unsolicited_Report_Count 2
+#define IGMP_Query_Robustness_Variable 2
#define IGMP_Initial_Report_Delay (1)
@@ -756,8 +756,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
{
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
return;
- in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv :
- IGMP_Unsolicited_Report_Count;
+ in_dev->mr_ifc_count = in_dev->mr_qrv ?: sysctl_igmp_qrv;
igmp_ifc_start_timer(in_dev, 1);
}
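
The "?:" used here and in the hunks below is the GNU C conditional with an
omitted middle operand: a ?: b evaluates a once and yields it when nonzero,
else b, equivalent to the replaced a ? a : b without evaluating a twice:

	in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv : sysctl_igmp_qrv;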
@@ -932,7 +931,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
in_dev->mr_qrv = ih3->qrv;
if (!group) { /* general query */
if (ih3->nsrcs)
- return false; /* no sources allowed */
+ return true; /* no sources allowed */
igmp_gq_start_timer(in_dev);
return false;
}
@@ -1086,8 +1085,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
pmc->interface = im->interface;
in_dev_hold(in_dev);
pmc->multiaddr = im->multiaddr;
- pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
- IGMP_Unsolicited_Report_Count;
+ pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
pmc->sfmode = im->sfmode;
if (pmc->sfmode == MCAST_INCLUDE) {
struct ip_sf_list *psf;
@@ -1226,8 +1224,7 @@ static void igmp_group_added(struct ip_mc_list *im)
}
/* else, v3 */
- im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
- IGMP_Unsolicited_Report_Count;
+ im->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
igmp_ifc_event(in_dev);
#endif
}
@@ -1322,7 +1319,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im);
- im->unsolicit_count = IGMP_Unsolicited_Report_Count;
+ im->unsolicit_count = sysctl_igmp_qrv;
#endif
im->next_rcu = in_dev->mc_list;
@@ -1460,7 +1457,7 @@ void ip_mc_init_dev(struct in_device *in_dev)
(unsigned long)in_dev);
setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
(unsigned long)in_dev);
- in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
+ in_dev->mr_qrv = sysctl_igmp_qrv;
#endif
spin_lock_init(&in_dev->mc_tomb_lock);
@@ -1474,6 +1471,9 @@ void ip_mc_up(struct in_device *in_dev)
ASSERT_RTNL();
+#ifdef CONFIG_IP_MULTICAST
+ in_dev->mr_qrv = sysctl_igmp_qrv;
+#endif
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
for_each_pmc_rtnl(in_dev, pmc)
@@ -1540,7 +1540,9 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
*/
int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS;
int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;
-
+#ifdef CONFIG_IP_MULTICAST
+int sysctl_igmp_qrv __read_mostly = IGMP_Query_Robustness_Variable;
+#endif
static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
__be32 *psfsrc)
@@ -1575,8 +1577,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
#ifdef CONFIG_IP_MULTICAST
if (psf->sf_oldin &&
!IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
- psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
- IGMP_Unsolicited_Report_Count;
+ psf->sf_crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
psf->sf_next = pmc->tomb;
pmc->tomb = psf;
rv = 1;
@@ -1639,8 +1640,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
/* filter mode change */
pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
- pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
- IGMP_Unsolicited_Report_Count;
+ pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
in_dev->mr_ifc_count = pmc->crcount;
for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
@@ -1818,8 +1818,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
#ifdef CONFIG_IP_MULTICAST
/* else no filters; keep old mode for reports */
- pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
- IGMP_Unsolicited_Report_Count;
+ pmc->crcount = in_dev->mr_qrv ?: sysctl_igmp_qrv;
in_dev->mr_ifc_count = pmc->crcount;
for (psf = pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
@@ -2539,7 +2538,7 @@ static int igmp_mc_seq_show(struct seq_file *seq, void *v)
querier = "NONE";
#endif
- if (rcu_dereference(state->in_dev->mc_list) == im) {
+ if (rcu_access_pointer(state->in_dev->mc_list) == im) {
seq_printf(seq, "%d\t%-10s: %5d %7s\n",
state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
}
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 43116e8c8e13..9111a4e22155 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -229,7 +229,7 @@ begin:
}
} else if (score == hiscore && reuseport) {
matches++;
- if (((u64)phash * matches) >> 32 == 0)
+ if (reciprocal_scale(phash, matches) == 0)
result = sk;
phash = next_pseudo_random32(phash);
}
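
reciprocal_scale() maps a 32-bit hash uniformly onto [0, ep_ro) with a
multiply-and-shift, matching the open-coded expression it replaces; its
definition (assumed from include/linux/kernel.h of this era) is simply:

static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
	return (u32)(((u64)val * ep_ro) >> 32);
}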
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index bd5f5928167d..241afd743d2c 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -72,29 +72,10 @@ void inet_peer_base_init(struct inet_peer_base *bp)
{
bp->root = peer_avl_empty_rcu;
seqlock_init(&bp->lock);
- bp->flush_seq = ~0U;
bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);
-static atomic_t v4_seq = ATOMIC_INIT(0);
-static atomic_t v6_seq = ATOMIC_INIT(0);
-
-static atomic_t *inetpeer_seq_ptr(int family)
-{
- return (family == AF_INET ? &v4_seq : &v6_seq);
-}
-
-static inline void flush_check(struct inet_peer_base *base, int family)
-{
- atomic_t *fp = inetpeer_seq_ptr(family);
-
- if (unlikely(base->flush_seq != atomic_read(fp))) {
- inetpeer_invalidate_tree(base);
- base->flush_seq = atomic_read(fp);
- }
-}
-
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
/* Exported for sysctl_net_ipv4. */
@@ -444,8 +425,6 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
unsigned int sequence;
int invalidated, gccnt = 0;
- flush_check(base, daddr->family);
-
/* Attempt a lockless lookup first.
* Because of a concurrent writer, we might not find an existing entry.
*/
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 15f0e2bad7ad..2811cc18701a 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -790,7 +790,7 @@ static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
kfree(table);
}
-static void ip4_frags_ctl_register(void)
+static void __init ip4_frags_ctl_register(void)
{
register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
@@ -804,7 +804,7 @@ static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}
-static inline void ip4_frags_ctl_register(void)
+static inline void __init ip4_frags_ctl_register(void)
{
}
#endif
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 9b842544aea3..12055fdbe716 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -239,7 +239,9 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
tpi.seq = htonl(tunnel->o_seqno);
/* Push GRE header. */
- gre_build_header(skb, &tpi, tunnel->hlen);
+ gre_build_header(skb, &tpi, tunnel->tun_hlen);
+
+ skb_set_inner_protocol(skb, tpi.proto);
ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}
@@ -310,7 +312,7 @@ out:
static int ipgre_tunnel_ioctl(struct net_device *dev,
struct ifreq *ifr, int cmd)
{
- int err = 0;
+ int err;
struct ip_tunnel_parm p;
if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
@@ -470,13 +472,18 @@ static void ipgre_tunnel_setup(struct net_device *dev)
static void __gre_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel;
+ int t_hlen;
tunnel = netdev_priv(dev);
- tunnel->hlen = ip_gre_calc_hlen(tunnel->parms.o_flags);
+ tunnel->tun_hlen = ip_gre_calc_hlen(tunnel->parms.o_flags);
tunnel->parms.iph.protocol = IPPROTO_GRE;
- dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
- dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
+ tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
+
+ t_hlen = tunnel->hlen + sizeof(struct iphdr);
+
+ dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4;
+ dev->mtu = ETH_DATA_LEN - t_hlen - 4;
dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
@@ -503,7 +510,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
memcpy(dev->broadcast, &iph->daddr, 4);
dev->flags = IFF_NOARP;
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ netif_keep_dst(dev);
dev->addr_len = 4;
if (iph->daddr) {
@@ -628,6 +635,40 @@ static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
parms->iph.frag_off = htons(IP_DF);
}
+/* This function returns true when ENCAP attributes are present in the nl msg */
+static bool ipgre_netlink_encap_parms(struct nlattr *data[],
+ struct ip_tunnel_encap *ipencap)
+{
+ bool ret = false;
+
+ memset(ipencap, 0, sizeof(*ipencap));
+
+ if (!data)
+ return ret;
+
+ if (data[IFLA_GRE_ENCAP_TYPE]) {
+ ret = true;
+ ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
+ }
+
+ if (data[IFLA_GRE_ENCAP_FLAGS]) {
+ ret = true;
+ ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
+ }
+
+ if (data[IFLA_GRE_ENCAP_SPORT]) {
+ ret = true;
+ ipencap->sport = nla_get_u16(data[IFLA_GRE_ENCAP_SPORT]);
+ }
+
+ if (data[IFLA_GRE_ENCAP_DPORT]) {
+ ret = true;
+ ipencap->dport = nla_get_u16(data[IFLA_GRE_ENCAP_DPORT]);
+ }
+
+ return ret;
+}
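
A hedged example of the request these attributes encode: with iproute2 this
would be roughly "ip link add gre1 type gre remote 192.168.1.1 local
192.168.1.2 encap fou encap-sport auto encap-dport 7777" (syntax assumed),
yielding an encap configuration like:

	struct ip_tunnel_encap ipencap = {
		.type  = TUNNEL_ENCAP_FOU,
		.sport = 0,		/* 0 = derive source port from the flow hash */
		.dport = htons(7777),
	};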
+
static int gre_tap_init(struct net_device *dev)
{
__gre_tunnel_init(dev);
@@ -657,6 +698,15 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct ip_tunnel_parm p;
+ struct ip_tunnel_encap ipencap;
+
+ if (ipgre_netlink_encap_parms(data, &ipencap)) {
+ struct ip_tunnel *t = netdev_priv(dev);
+ int err = ip_tunnel_encap_setup(t, &ipencap);
+
+ if (err < 0)
+ return err;
+ }
ipgre_netlink_parms(data, tb, &p);
return ip_tunnel_newlink(dev, tb, &p);
@@ -666,6 +716,15 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
struct ip_tunnel_parm p;
+ struct ip_tunnel_encap ipencap;
+
+ if (ipgre_netlink_encap_parms(data, &ipencap)) {
+ struct ip_tunnel *t = netdev_priv(dev);
+ int err = ip_tunnel_encap_setup(t, &ipencap);
+
+ if (err < 0)
+ return err;
+ }
ipgre_netlink_parms(data, tb, &p);
return ip_tunnel_changelink(dev, tb, &p);
@@ -694,6 +753,14 @@ static size_t ipgre_get_size(const struct net_device *dev)
nla_total_size(1) +
/* IFLA_GRE_PMTUDISC */
nla_total_size(1) +
+ /* IFLA_GRE_ENCAP_TYPE */
+ nla_total_size(2) +
+ /* IFLA_GRE_ENCAP_FLAGS */
+ nla_total_size(2) +
+ /* IFLA_GRE_ENCAP_SPORT */
+ nla_total_size(2) +
+ /* IFLA_GRE_ENCAP_DPORT */
+ nla_total_size(2) +
0;
}
@@ -714,6 +781,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_GRE_PMTUDISC,
!!(p->iph.frag_off & htons(IP_DF))))
goto nla_put_failure;
+
+ if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
+ t->encap.type) ||
+ nla_put_u16(skb, IFLA_GRE_ENCAP_SPORT,
+ t->encap.sport) ||
+ nla_put_u16(skb, IFLA_GRE_ENCAP_DPORT,
+ t->encap.dport) ||
+ nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
+ t->encap.flags))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -731,6 +809,10 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
[IFLA_GRE_TTL] = { .type = NLA_U8 },
[IFLA_GRE_TOS] = { .type = NLA_U8 },
[IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
+ [IFLA_GRE_ENCAP_TYPE] = { .type = NLA_U16 },
+ [IFLA_GRE_ENCAP_FLAGS] = { .type = NLA_U16 },
+ [IFLA_GRE_ENCAP_SPORT] = { .type = NLA_U16 },
+ [IFLA_GRE_ENCAP_DPORT] = { .type = NLA_U16 },
};
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ad382499bace..5b3d91be2db0 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -87,17 +87,15 @@ void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
* NOTE: dopt cannot point to skb.
*/
-int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
+int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb,
+ const struct ip_options *sopt)
{
- const struct ip_options *sopt;
unsigned char *sptr, *dptr;
int soffset, doffset;
int optlen;
memset(dopt, 0, sizeof(struct ip_options));
- sopt = &(IPCB(skb)->opt);
-
if (sopt->optlen == 0)
return 0;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 215af2b155cb..e35b71289156 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -516,7 +516,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
hlen = iph->ihl * 4;
mtu = mtu - hlen; /* Size of data space */
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (skb->nf_bridge)
mtu -= nf_bridge_mtu_reduction(skb);
#endif
@@ -1522,8 +1522,10 @@ static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
.uc_ttl = -1,
};
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
- __be32 saddr, const struct ip_reply_arg *arg,
+void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+ const struct ip_options *sopt,
+ __be32 daddr, __be32 saddr,
+ const struct ip_reply_arg *arg,
unsigned int len)
{
struct ip_options_data replyopts;
@@ -1534,7 +1536,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
struct sock *sk;
struct inet_sock *inet;
- if (ip_options_echo(&replyopts.opt.opt, skb))
+ if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
return;
ipc.addr = daddr;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5cb830c78990..c373a9ad4555 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -303,7 +303,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
}
/* dont let ip_call_ra_chain() use sk again */
ra->sk = NULL;
- rcu_assign_pointer(*rap, ra->next);
+ RCU_INIT_POINTER(*rap, ra->next);
spin_unlock_bh(&ip_ra_lock);
if (ra->destructor)
@@ -325,7 +325,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
new_ra->sk = sk;
new_ra->destructor = destructor;
- new_ra->next = ra;
+ RCU_INIT_POINTER(new_ra->next, ra);
rcu_assign_pointer(*rap, new_ra);
sock_hold(sk);
spin_unlock_bh(&ip_ra_lock);
@@ -405,7 +405,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
struct sock_exterr_skb *serr;
- struct sk_buff *skb, *skb2;
+ struct sk_buff *skb;
DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
struct {
struct sock_extended_err ee;
@@ -415,7 +415,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
int copied;
err = -EAGAIN;
- skb = skb_dequeue(&sk->sk_error_queue);
+ skb = sock_dequeue_err_skb(sk);
if (skb == NULL)
goto out;
@@ -462,17 +462,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
msg->msg_flags |= MSG_ERRQUEUE;
err = copied;
- /* Reset and regenerate socket error */
- spin_lock_bh(&sk->sk_error_queue.lock);
- sk->sk_err = 0;
- skb2 = skb_peek(&sk->sk_error_queue);
- if (skb2 != NULL) {
- sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_bh(&sk->sk_error_queue.lock);
- sk->sk_error_report(sk);
- } else
- spin_unlock_bh(&sk->sk_error_queue.lock);
-
out_free_skb:
kfree_skb(skb);
out:
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index bda4bb8ae260..0bb8e141eacc 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -55,6 +55,8 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
+#include <net/udp.h>
+#include <net/gue.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
@@ -487,6 +489,103 @@ drop:
}
EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
+static int ip_encap_hlen(struct ip_tunnel_encap *e)
+{
+ switch (e->type) {
+ case TUNNEL_ENCAP_NONE:
+ return 0;
+ case TUNNEL_ENCAP_FOU:
+ return sizeof(struct udphdr);
+ case TUNNEL_ENCAP_GUE:
+ return sizeof(struct udphdr) + sizeof(struct guehdr);
+ default:
+ return -EINVAL;
+ }
+}
+
+int ip_tunnel_encap_setup(struct ip_tunnel *t,
+ struct ip_tunnel_encap *ipencap)
+{
+ int hlen;
+
+ memset(&t->encap, 0, sizeof(t->encap));
+
+ hlen = ip_encap_hlen(ipencap);
+ if (hlen < 0)
+ return hlen;
+
+ t->encap.type = ipencap->type;
+ t->encap.sport = ipencap->sport;
+ t->encap.dport = ipencap->dport;
+ t->encap.flags = ipencap->flags;
+
+ t->encap_hlen = hlen;
+ t->hlen = t->encap_hlen + t->tun_hlen;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup);
+
+static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
+ size_t hdr_len, u8 *protocol, struct flowi4 *fl4)
+{
+ struct udphdr *uh;
+ __be16 sport;
+ bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM);
+ int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+
+ skb = iptunnel_handle_offloads(skb, csum, type);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* Pick the flow-hash based source port before headers are pushed */
+
+ sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
+ skb, 0, 0, false);
+
+ skb_push(skb, hdr_len);
+
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+
+ if (e->type == TUNNEL_ENCAP_GUE) {
+ struct guehdr *guehdr = (struct guehdr *)&uh[1];
+
+ guehdr->version = 0;
+ guehdr->hlen = 0;
+ guehdr->flags = 0;
+ guehdr->next_hdr = *protocol;
+ }
+
+ uh->dest = e->dport;
+ uh->source = sport;
+ uh->len = htons(skb->len);
+ uh->check = 0;
+ udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb,
+ fl4->saddr, fl4->daddr, skb->len);
+
+ *protocol = IPPROTO_UDP;
+
+ return 0;
+}
+
+int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
+ u8 *protocol, struct flowi4 *fl4)
+{
+ switch (t->encap.type) {
+ case TUNNEL_ENCAP_NONE:
+ return 0;
+ case TUNNEL_ENCAP_FOU:
+ case TUNNEL_ENCAP_GUE:
+ return fou_build_header(skb, &t->encap, t->encap_hlen,
+ protocol, fl4);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(ip_tunnel_encap);
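
Resulting outer layouts, as sized by ip_encap_hlen() above (sketch):

	TUNNEL_ENCAP_FOU:  outer IP | UDP | inner packet
	TUNNEL_ENCAP_GUE:  outer IP | UDP | GUE | inner packet

fou_build_header() rewrites *protocol to IPPROTO_UDP so ip_tunnel_xmit()
emits a UDP datagram; for GUE the original protocol travels in
guehdr->next_hdr, while for plain FOU it is implied by the port the
receiver configured.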
+
static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
struct rtable *rt, __be16 df)
{
@@ -536,7 +635,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
}
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
- const struct iphdr *tnl_params, const u8 protocol)
+ const struct iphdr *tnl_params, u8 protocol)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *inner_iph;
@@ -617,6 +716,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
+ if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
+ goto tx_error;
+
rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL;
if (!rt) {
@@ -670,7 +772,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
df |= (inner_iph->frag_off&htons(IP_DF));
max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
- + rt->dst.header_len;
+ + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
if (max_headroom > dev->needed_headroom)
dev->needed_headroom = max_headroom;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index e453cb724a95..3e861011e4a3 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -364,7 +364,7 @@ static int vti_tunnel_init(struct net_device *dev)
dev->iflink = 0;
dev->addr_len = 4;
dev->features |= NETIF_F_LLTX;
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ netif_keep_dst(dev);
return ip_tunnel_init(dev);
}
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 5bbef4fdcb43..648fa1490ea7 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -262,7 +262,8 @@ static int __init ic_open_devs(void)
/* wait for a carrier on at least one device */
start = jiffies;
next_msg = start + msecs_to_jiffies(CONF_CARRIER_TIMEOUT/12);
- while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
+ while (time_before(jiffies, start +
+ msecs_to_jiffies(CONF_CARRIER_TIMEOUT))) {
int wait, elapsed;
for_each_netdev(&init_net, dev)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 62eaa005e146..37096d64730e 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -224,6 +224,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (IS_ERR(skb))
goto out;
+ skb_set_inner_ipproto(skb, IPPROTO_IPIP);
+
ip_tunnel_xmit(skb, dev, tiph, tiph->protocol);
return NETDEV_TX_OK;
@@ -287,7 +289,7 @@ static void ipip_tunnel_setup(struct net_device *dev)
dev->iflink = 0;
dev->addr_len = 4;
dev->features |= NETIF_F_LLTX;
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ netif_keep_dst(dev);
dev->features |= IPIP_FEATURES;
dev->hw_features |= IPIP_FEATURES;
@@ -301,7 +303,8 @@ static int ipip_tunnel_init(struct net_device *dev)
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
- tunnel->hlen = 0;
+ tunnel->tun_hlen = 0;
+ tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
tunnel->parms.iph.protocol = IPPROTO_IPIP;
return ip_tunnel_init(dev);
}
@@ -340,10 +343,53 @@ static void ipip_netlink_parms(struct nlattr *data[],
parms->iph.frag_off = htons(IP_DF);
}
+/* This function returns true when ENCAP attributes are present in the nl msg */
+static bool ipip_netlink_encap_parms(struct nlattr *data[],
+ struct ip_tunnel_encap *ipencap)
+{
+ bool ret = false;
+
+ memset(ipencap, 0, sizeof(*ipencap));
+
+ if (!data)
+ return ret;
+
+ if (data[IFLA_IPTUN_ENCAP_TYPE]) {
+ ret = true;
+ ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
+ }
+
+ if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
+ ret = true;
+ ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
+ }
+
+ if (data[IFLA_IPTUN_ENCAP_SPORT]) {
+ ret = true;
+ ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]);
+ }
+
+ if (data[IFLA_IPTUN_ENCAP_DPORT]) {
+ ret = true;
+ ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]);
+ }
+
+ return ret;
+}
+
static int ipip_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct ip_tunnel_parm p;
+ struct ip_tunnel_encap ipencap;
+
+ if (ipip_netlink_encap_parms(data, &ipencap)) {
+ struct ip_tunnel *t = netdev_priv(dev);
+ int err = ip_tunnel_encap_setup(t, &ipencap);
+
+ if (err < 0)
+ return err;
+ }
ipip_netlink_parms(data, &p);
return ip_tunnel_newlink(dev, tb, &p);
@@ -353,6 +399,15 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
struct ip_tunnel_parm p;
+ struct ip_tunnel_encap ipencap;
+
+ if (ipip_netlink_encap_parms(data, &ipencap)) {
+ struct ip_tunnel *t = netdev_priv(dev);
+ int err = ip_tunnel_encap_setup(t, &ipencap);
+
+ if (err < 0)
+ return err;
+ }
ipip_netlink_parms(data, &p);
@@ -378,6 +433,14 @@ static size_t ipip_get_size(const struct net_device *dev)
nla_total_size(1) +
/* IFLA_IPTUN_PMTUDISC */
nla_total_size(1) +
+ /* IFLA_IPTUN_ENCAP_TYPE */
+ nla_total_size(2) +
+ /* IFLA_IPTUN_ENCAP_FLAGS */
+ nla_total_size(2) +
+ /* IFLA_IPTUN_ENCAP_SPORT */
+ nla_total_size(2) +
+ /* IFLA_IPTUN_ENCAP_DPORT */
+ nla_total_size(2) +
0;
}
@@ -394,6 +457,17 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_IPTUN_PMTUDISC,
!!(parm->iph.frag_off & htons(IP_DF))))
goto nla_put_failure;
+
+ if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
+ tunnel->encap.type) ||
+ nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT,
+ tunnel->encap.sport) ||
+ nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
+ tunnel->encap.dport) ||
+ nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
+ tunnel->encap.flags))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -407,6 +481,10 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_TTL] = { .type = NLA_U8 },
[IFLA_IPTUN_TOS] = { .type = NLA_U8 },
[IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
+ [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 },
+ [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
+ [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
+ [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
};
static struct rtnl_link_ops ipip_link_ops __read_mostly = {
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 7cbcaf4f0194..4c019d5c3f57 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -61,18 +61,13 @@ config NFT_CHAIN_ROUTE_IPV4
fields such as the source, destination, type of service and
the packet mark.
-config NFT_CHAIN_NAT_IPV4
- depends on NF_TABLES_IPV4
- depends on NF_NAT_IPV4 && NFT_NAT
- tristate "IPv4 nf_tables nat chain support"
- help
- This option enables the "nat" chain for IPv4 in nf_tables. This
- chain type is used to perform Network Address Translation (NAT)
- packet transformations such as the source, destination address and
- source and destination ports.
+config NF_REJECT_IPV4
+ tristate "IPv4 packet rejection"
+ default m if NETFILTER_ADVANCED=n
config NFT_REJECT_IPV4
depends on NF_TABLES_IPV4
+ select NF_REJECT_IPV4
default NFT_REJECT
tristate
@@ -94,6 +89,30 @@ config NF_NAT_IPV4
if NF_NAT_IPV4
+config NFT_CHAIN_NAT_IPV4
+ depends on NF_TABLES_IPV4
+ tristate "IPv4 nf_tables nat chain support"
+ help
+ This option enables the "nat" chain for IPv4 in nf_tables. This
+ chain type is used to perform Network Address Translation (NAT)
+ packet transformations such as the source, destination address and
+ source and destination ports.
+
+config NF_NAT_MASQUERADE_IPV4
+ tristate "IPv4 masquerade support"
+ help
+ This is the kernel functionality to provide NAT in the masquerade
+ flavour (automatic source address selection).
+
+config NFT_MASQ_IPV4
+ tristate "IPv4 masquerading support for nf_tables"
+ depends on NF_TABLES_IPV4
+ depends on NFT_MASQ
+ select NF_NAT_MASQUERADE_IPV4
+ help
+ This is the expression that provides IPv4 masquerading support for
+ nf_tables.
+
config NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support"
depends on NF_CONNTRACK_SNMP
@@ -194,6 +213,7 @@ config IP_NF_FILTER
config IP_NF_TARGET_REJECT
tristate "REJECT target support"
depends on IP_NF_FILTER
+ select NF_REJECT_IPV4
default m if NETFILTER_ADVANCED=n
help
The REJECT target allows a filtering rule to specify that an ICMP
@@ -234,6 +254,7 @@ if IP_NF_NAT
config IP_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
+ select NF_NAT_MASQUERADE_IPV4
default m if NETFILTER_ADVANCED=n
help
Masquerading is a special case of NAT: all outgoing connections are
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index edf4af32e9f2..f4cef5af0969 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -23,10 +23,14 @@ obj-$(CONFIG_NF_DEFRAG_IPV4) += nf_defrag_ipv4.o
obj-$(CONFIG_NF_LOG_ARP) += nf_log_arp.o
obj-$(CONFIG_NF_LOG_IPV4) += nf_log_ipv4.o
+# reject
+obj-$(CONFIG_NF_REJECT_IPV4) += nf_reject_ipv4.o
+
# NAT helpers (nf_conntrack)
obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
+obj-$(CONFIG_NF_NAT_MASQUERADE_IPV4) += nf_nat_masquerade_ipv4.o
# NAT protocols (nf_nat)
obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
@@ -35,6 +39,7 @@ obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
+obj-$(CONFIG_NFT_MASQ_IPV4) += nft_masq_ipv4.o
obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
# generic IP tables
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 2510c02c2d21..e90f83a3415b 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -285,7 +285,7 @@ clusterip_hashfn(const struct sk_buff *skb,
}
/* node numbers are 1..n, not 0..n */
- return (((u64)hashval * config->num_total_nodes) >> 32) + 1;
+ return reciprocal_scale(hashval, config->num_total_nodes) + 1;
}
static inline int
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 00352ce0f0de..da7f02a0b868 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -22,6 +22,7 @@
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter/x_tables.h>
#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/ipv4/nf_nat_masquerade.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -46,103 +47,17 @@ static int masquerade_tg_check(const struct xt_tgchk_param *par)
static unsigned int
masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
- struct nf_conn *ct;
- struct nf_conn_nat *nat;
- enum ip_conntrack_info ctinfo;
- struct nf_nat_range newrange;
+ struct nf_nat_range range;
const struct nf_nat_ipv4_multi_range_compat *mr;
- const struct rtable *rt;
- __be32 newsrc, nh;
-
- NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING);
-
- ct = nf_ct_get(skb, &ctinfo);
- nat = nfct_nat(ct);
-
- NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
- ctinfo == IP_CT_RELATED_REPLY));
-
- /* Source address is 0.0.0.0 - locally generated packet that is
- * probably not supposed to be masqueraded.
- */
- if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
- return NF_ACCEPT;
mr = par->targinfo;
- rt = skb_rtable(skb);
- nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
- newsrc = inet_select_addr(par->out, nh, RT_SCOPE_UNIVERSE);
- if (!newsrc) {
- pr_info("%s ate my IP address\n", par->out->name);
- return NF_DROP;
- }
-
- nat->masq_index = par->out->ifindex;
-
- /* Transfer from original range. */
- memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
- memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
- newrange.flags = mr->range[0].flags | NF_NAT_RANGE_MAP_IPS;
- newrange.min_addr.ip = newsrc;
- newrange.max_addr.ip = newsrc;
- newrange.min_proto = mr->range[0].min;
- newrange.max_proto = mr->range[0].max;
+ range.flags = mr->range[0].flags;
+ range.min_proto = mr->range[0].min;
+ range.max_proto = mr->range[0].max;
- /* Hand modified range to generic setup. */
- return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+ return nf_nat_masquerade_ipv4(skb, par->hooknum, &range, par->out);
}
-static int
-device_cmp(struct nf_conn *i, void *ifindex)
-{
- const struct nf_conn_nat *nat = nfct_nat(i);
-
- if (!nat)
- return 0;
- if (nf_ct_l3num(i) != NFPROTO_IPV4)
- return 0;
- return nat->masq_index == (int)(long)ifindex;
-}
-
-static int masq_device_event(struct notifier_block *this,
- unsigned long event,
- void *ptr)
-{
- const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct net *net = dev_net(dev);
-
- if (event == NETDEV_DOWN) {
- /* Device was downed. Search entire table for
- conntracks which were associated with that device,
- and forget them. */
- NF_CT_ASSERT(dev->ifindex != 0);
-
- nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex, 0, 0);
- }
-
- return NOTIFY_DONE;
-}
-
-static int masq_inet_event(struct notifier_block *this,
- unsigned long event,
- void *ptr)
-{
- struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
- struct netdev_notifier_info info;
-
- netdev_notifier_info_init(&info, dev);
- return masq_device_event(this, event, &info);
-}
-
-static struct notifier_block masq_dev_notifier = {
- .notifier_call = masq_device_event,
-};
-
-static struct notifier_block masq_inet_notifier = {
- .notifier_call = masq_inet_event,
-};
-
static struct xt_target masquerade_tg_reg __read_mostly = {
.name = "MASQUERADE",
.family = NFPROTO_IPV4,
@@ -160,12 +75,8 @@ static int __init masquerade_tg_init(void)
ret = xt_register_target(&masquerade_tg_reg);
- if (ret == 0) {
- /* Register for device down reports */
- register_netdevice_notifier(&masq_dev_notifier);
- /* Register IP address change reports */
- register_inetaddr_notifier(&masq_inet_notifier);
- }
+ if (ret == 0)
+ nf_nat_masquerade_ipv4_register_notifier();
return ret;
}
@@ -173,8 +84,7 @@ static int __init masquerade_tg_init(void)
static void __exit masquerade_tg_exit(void)
{
xt_unregister_target(&masquerade_tg_reg);
- unregister_netdevice_notifier(&masq_dev_notifier);
- unregister_inetaddr_notifier(&masq_inet_notifier);
+ nf_nat_masquerade_ipv4_unregister_notifier();
}
module_init(masquerade_tg_init);
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 5b6e0df4ccff..8f48f5517e33 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -20,7 +20,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_REJECT.h>
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include <linux/netfilter_bridge.h>
#endif
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index f1787c04a4dd..6b67d7e9a75d 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -28,222 +28,57 @@ static const struct xt_table nf_nat_ipv4_table = {
.af = NFPROTO_IPV4,
};
-static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
-{
- /* Force range to this IP; let proto decide mapping for
- * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
- */
- struct nf_nat_range range;
-
- range.flags = 0;
- pr_debug("Allocating NULL binding for %p (%pI4)\n", ct,
- HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip :
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip);
-
- return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
-}
-
-static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
- const struct net_device *in,
- const struct net_device *out,
- struct nf_conn *ct)
+static unsigned int iptable_nat_do_chain(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
- unsigned int ret;
- ret = ipt_do_table(skb, hooknum, in, out, net->ipv4.nat_table);
- if (ret == NF_ACCEPT) {
- if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
- ret = alloc_null_binding(ct, hooknum);
- }
- return ret;
+ return ipt_do_table(skb, ops->hooknum, in, out, net->ipv4.nat_table);
}
-static unsigned int
-nf_nat_ipv4_fn(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int iptable_nat_ipv4_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- struct nf_conn_nat *nat;
- /* maniptype == SRC for postrouting. */
- enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
-
- /* We never see fragments: conntrack defrags on pre-routing
- * and local-out, and nf_nat_out protects post-routing.
- */
- NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
-
- ct = nf_ct_get(skb, &ctinfo);
- /* Can't track? It's not due to stress, or conntrack would
- * have dropped it. Hence it's the user's responsibilty to
- * packet filter it out, or implement conntrack/NAT for that
- * protocol. 8) --RR
- */
- if (!ct)
- return NF_ACCEPT;
-
- /* Don't try to NAT if this packet is not conntracked */
- if (nf_ct_is_untracked(ct))
- return NF_ACCEPT;
-
- nat = nf_ct_nat_ext_add(ct);
- if (nat == NULL)
- return NF_ACCEPT;
-
- switch (ctinfo) {
- case IP_CT_RELATED:
- case IP_CT_RELATED_REPLY:
- if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
- if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
- ops->hooknum))
- return NF_DROP;
- else
- return NF_ACCEPT;
- }
- /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
- case IP_CT_NEW:
- /* Seen it before? This can happen for loopback, retrans,
- * or local packets.
- */
- if (!nf_nat_initialized(ct, maniptype)) {
- unsigned int ret;
-
- ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct);
- if (ret != NF_ACCEPT)
- return ret;
- } else {
- pr_debug("Already setup manip %s for ct %p\n",
- maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
- ct);
- if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
- goto oif_changed;
- }
- break;
-
- default:
- /* ESTABLISHED */
- NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
- ctinfo == IP_CT_ESTABLISHED_REPLY);
- if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
- goto oif_changed;
- }
-
- return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
-
-oif_changed:
- nf_ct_kill_acct(ct, ctinfo, skb);
- return NF_DROP;
+ return nf_nat_ipv4_fn(ops, skb, in, out, iptable_nat_do_chain);
}
-static unsigned int
-nf_nat_ipv4_in(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int iptable_nat_ipv4_in(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- unsigned int ret;
- __be32 daddr = ip_hdr(skb)->daddr;
-
- ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- daddr != ip_hdr(skb)->daddr)
- skb_dst_drop(skb);
-
- return ret;
+ return nf_nat_ipv4_in(ops, skb, in, out, iptable_nat_do_chain);
}
-static unsigned int
-nf_nat_ipv4_out(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int iptable_nat_ipv4_out(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
-#ifdef CONFIG_XFRM
- const struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- int err;
-#endif
- unsigned int ret;
-
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr))
- return NF_ACCEPT;
-
- ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
-#ifdef CONFIG_XFRM
- if (ret != NF_DROP && ret != NF_STOLEN &&
- !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
- if ((ct->tuplehash[dir].tuple.src.u3.ip !=
- ct->tuplehash[!dir].tuple.dst.u3.ip) ||
- (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
- ct->tuplehash[dir].tuple.src.u.all !=
- ct->tuplehash[!dir].tuple.dst.u.all)) {
- err = nf_xfrm_me_harder(skb, AF_INET);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
- }
-#endif
- return ret;
+ return nf_nat_ipv4_out(ops, skb, in, out, iptable_nat_do_chain);
}
-static unsigned int
-nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int iptable_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- const struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- unsigned int ret;
- int err;
-
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct iphdr) ||
- ip_hdrlen(skb) < sizeof(struct iphdr))
- return NF_ACCEPT;
-
- ret = nf_nat_ipv4_fn(ops, skb, in, out, okfn);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
- if (ct->tuplehash[dir].tuple.dst.u3.ip !=
- ct->tuplehash[!dir].tuple.src.u3.ip) {
- err = ip_route_me_harder(skb, RTN_UNSPEC);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
-#ifdef CONFIG_XFRM
- else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
- ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
- ct->tuplehash[dir].tuple.dst.u.all !=
- ct->tuplehash[!dir].tuple.src.u.all) {
- err = nf_xfrm_me_harder(skb, AF_INET);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
-#endif
- }
- return ret;
+ return nf_nat_ipv4_local_fn(ops, skb, in, out, iptable_nat_do_chain);
}
static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
/* Before packet filtering, change destination */
{
- .hook = nf_nat_ipv4_in,
+ .hook = iptable_nat_ipv4_in,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
@@ -251,7 +86,7 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
},
/* After packet filtering, change source */
{
- .hook = nf_nat_ipv4_out,
+ .hook = iptable_nat_ipv4_out,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
@@ -259,7 +94,7 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
},
/* Before packet filtering, change destination */
{
- .hook = nf_nat_ipv4_local_fn,
+ .hook = iptable_nat_ipv4_local_fn,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
@@ -267,7 +102,7 @@ static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = {
},
/* After packet filtering, change source */
{
- .hook = nf_nat_ipv4_fn,
+ .hook = iptable_nat_ipv4_fn,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 76bd1aef257f..7e5ca6f2d0cd 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -50,7 +50,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
zone = nf_ct_zone((struct nf_conn *)skb->nfct);
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (skb->nf_bridge &&
skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
return IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
index 14f5ccd06337..fc37711e11f3 100644
--- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
@@ -254,6 +254,205 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);
+unsigned int
+nf_nat_ipv4_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct))
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn_nat *nat;
+ /* maniptype == SRC for postrouting. */
+ enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+
+ /* We never see fragments: conntrack defrags on pre-routing
+ * and local-out, and nf_nat_out protects post-routing.
+ */
+ NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
+
+ ct = nf_ct_get(skb, &ctinfo);
+ /* Can't track? It's not due to stress, or conntrack would
+ * have dropped it. Hence it's the user's responsibility to
+ * packet filter it out, or implement conntrack/NAT for that
+ * protocol. 8) --RR
+ */
+ if (!ct)
+ return NF_ACCEPT;
+
+ /* Don't try to NAT if this packet is not conntracked */
+ if (nf_ct_is_untracked(ct))
+ return NF_ACCEPT;
+
+ nat = nf_ct_nat_ext_add(ct);
+ if (nat == NULL)
+ return NF_ACCEPT;
+
+ switch (ctinfo) {
+ case IP_CT_RELATED:
+ case IP_CT_RELATED_REPLY:
+ if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
+ if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
+ ops->hooknum))
+ return NF_DROP;
+ else
+ return NF_ACCEPT;
+ }
+ /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
+ case IP_CT_NEW:
+ /* Seen it before? This can happen for loopback, retrans,
+ * or local packets.
+ */
+ if (!nf_nat_initialized(ct, maniptype)) {
+ unsigned int ret;
+
+ ret = do_chain(ops, skb, in, out, ct);
+ if (ret != NF_ACCEPT)
+ return ret;
+
+ if (nf_nat_initialized(ct, HOOK2MANIP(ops->hooknum)))
+ break;
+
+ ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+ if (ret != NF_ACCEPT)
+ return ret;
+ } else {
+ pr_debug("Already setup manip %s for ct %p\n",
+ maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
+ ct);
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+ goto oif_changed;
+ }
+ break;
+
+ default:
+ /* ESTABLISHED */
+ NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
+ ctinfo == IP_CT_ESTABLISHED_REPLY);
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+ goto oif_changed;
+ }
+
+ return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+
+oif_changed:
+ nf_ct_kill_acct(ct, ctinfo, skb);
+ return NF_DROP;
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv4_fn);
+
+unsigned int
+nf_nat_ipv4_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct))
+{
+ unsigned int ret;
+ __be32 daddr = ip_hdr(skb)->daddr;
+
+ ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ daddr != ip_hdr(skb)->daddr)
+ skb_dst_drop(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv4_in);
+
+unsigned int
+nf_nat_ipv4_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct))
+{
+#ifdef CONFIG_XFRM
+ const struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ int err;
+#endif
+ unsigned int ret;
+
+ /* root is playing with raw sockets. */
+ if (skb->len < sizeof(struct iphdr) ||
+ ip_hdrlen(skb) < sizeof(struct iphdr))
+ return NF_ACCEPT;
+
+ ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+#ifdef CONFIG_XFRM
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ !(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
+ (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if ((ct->tuplehash[dir].tuple.src.u3.ip !=
+ ct->tuplehash[!dir].tuple.dst.u3.ip) ||
+ (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
+ ct->tuplehash[dir].tuple.src.u.all !=
+ ct->tuplehash[!dir].tuple.dst.u.all)) {
+ err = nf_xfrm_me_harder(skb, AF_INET);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+ }
+#endif
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv4_out);
+
+unsigned int
+nf_nat_ipv4_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct))
+{
+ const struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ unsigned int ret;
+ int err;
+
+ /* root is playing with raw sockets. */
+ if (skb->len < sizeof(struct iphdr) ||
+ ip_hdrlen(skb) < sizeof(struct iphdr))
+ return NF_ACCEPT;
+
+ ret = nf_nat_ipv4_fn(ops, skb, in, out, do_chain);
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (ct->tuplehash[dir].tuple.dst.u3.ip !=
+ ct->tuplehash[!dir].tuple.src.u3.ip) {
+ err = ip_route_me_harder(skb, RTN_UNSPEC);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+#ifdef CONFIG_XFRM
+ else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
+ ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP &&
+ ct->tuplehash[dir].tuple.dst.u.all !=
+ ct->tuplehash[!dir].tuple.src.u.all) {
+ err = nf_xfrm_me_harder(skb, AF_INET);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+#endif
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv4_local_fn);
+
static int __init nf_nat_l3proto_ipv4_init(void)
{
int err;
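
The hunks above are the point of this refactor: the conntrack and NAT boilerplate that iptable_nat and the nft nat chain type used to duplicate now lives in nf_nat_ipv4_fn() and its three wrappers, parameterized by a do_chain callback that runs the backend-specific rules. A minimal sketch of how a third backend would plug in; the my_nat_* names are hypothetical, and only the exported nf_nat_ipv4_* helpers come from the file above:

static unsigned int my_nat_do_chain(const struct nf_hook_ops *ops,
				    struct sk_buff *skb,
				    const struct net_device *in,
				    const struct net_device *out,
				    struct nf_conn *ct)
{
	/* Backend-specific rule traversal goes here. Returning
	 * NF_ACCEPT without setting up a binding makes
	 * nf_nat_ipv4_fn() fall back to a null binding. */
	return NF_ACCEPT;
}

static unsigned int my_nat_ipv4_in(const struct nf_hook_ops *ops,
				   struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	return nf_nat_ipv4_in(ops, skb, in, out, my_nat_do_chain);
}

The POST_ROUTING, LOCAL_OUT and LOCAL_IN hooks get the same one-line treatment via nf_nat_ipv4_out/local_fn/fn, exactly as the iptable_nat wrappers above and the nft_chain_nat_ipv4 wrappers further down do.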
diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
new file mode 100644
index 000000000000..c6eb42100e9a
--- /dev/null
+++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
@@ -0,0 +1,153 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/timer.h>
+#include <linux/netfilter.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <net/route.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/ipv4/nf_nat_masquerade.h>
+
+unsigned int
+nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ const struct nf_nat_range *range,
+ const struct net_device *out)
+{
+ struct nf_conn *ct;
+ struct nf_conn_nat *nat;
+ enum ip_conntrack_info ctinfo;
+ struct nf_nat_range newrange;
+ const struct rtable *rt;
+ __be32 newsrc, nh;
+
+ NF_CT_ASSERT(hooknum == NF_INET_POST_ROUTING);
+
+ ct = nf_ct_get(skb, &ctinfo);
+ nat = nfct_nat(ct);
+
+ NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
+ ctinfo == IP_CT_RELATED_REPLY));
+
+ /* Source address is 0.0.0.0 - locally generated packet that is
+ * probably not supposed to be masqueraded.
+ */
+ if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip == 0)
+ return NF_ACCEPT;
+
+ rt = skb_rtable(skb);
+ nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
+ newsrc = inet_select_addr(out, nh, RT_SCOPE_UNIVERSE);
+ if (!newsrc) {
+ pr_info("%s ate my IP address\n", out->name);
+ return NF_DROP;
+ }
+
+ nat->masq_index = out->ifindex;
+
+ /* Transfer from original range. */
+ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+ memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+ newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
+ newrange.min_addr.ip = newsrc;
+ newrange.max_addr.ip = newsrc;
+ newrange.min_proto = range->min_proto;
+ newrange.max_proto = range->max_proto;
+
+ /* Hand modified range to generic setup. */
+ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+}
+EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
+
+static int device_cmp(struct nf_conn *i, void *ifindex)
+{
+ const struct nf_conn_nat *nat = nfct_nat(i);
+
+ if (!nat)
+ return 0;
+ if (nf_ct_l3num(i) != NFPROTO_IPV4)
+ return 0;
+ return nat->masq_index == (int)(long)ifindex;
+}
+
+static int masq_device_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net *net = dev_net(dev);
+
+ if (event == NETDEV_DOWN) {
+ /* Device was downed. Search entire table for
+ * conntracks which were associated with that device,
+ * and forget them.
+ */
+ NF_CT_ASSERT(dev->ifindex != 0);
+
+ nf_ct_iterate_cleanup(net, device_cmp,
+ (void *)(long)dev->ifindex, 0, 0);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int masq_inet_event(struct notifier_block *this,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
+ struct netdev_notifier_info info;
+
+ netdev_notifier_info_init(&info, dev);
+ return masq_device_event(this, event, &info);
+}
+
+static struct notifier_block masq_dev_notifier = {
+ .notifier_call = masq_device_event,
+};
+
+static struct notifier_block masq_inet_notifier = {
+ .notifier_call = masq_inet_event,
+};
+
+static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
+
+void nf_nat_masquerade_ipv4_register_notifier(void)
+{
+ /* check if the notifier was already set */
+ if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
+ return;
+
+ /* Register for device down reports */
+ register_netdevice_notifier(&masq_dev_notifier);
+ /* Register IP address change reports */
+ register_inetaddr_notifier(&masq_inet_notifier);
+}
+EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
+
+void nf_nat_masquerade_ipv4_unregister_notifier(void)
+{
+ /* check if the notifier still has clients */
+ if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
+ return;
+
+ unregister_netdevice_notifier(&masq_dev_notifier);
+ unregister_inetaddr_notifier(&masq_inet_notifier);
+}
+EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
new file mode 100644
index 000000000000..b023b4eb1a96
--- /dev/null
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -0,0 +1,127 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/route.h>
+#include <net/dst.h>
+#include <linux/netfilter_ipv4.h>
+
+/* Send RST reply */
+void nf_send_reset(struct sk_buff *oldskb, int hook)
+{
+ struct sk_buff *nskb;
+ const struct iphdr *oiph;
+ struct iphdr *niph;
+ const struct tcphdr *oth;
+ struct tcphdr _otcph, *tcph;
+
+ /* IP header checks: fragment. */
+ if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
+ return;
+
+ oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
+ sizeof(_otcph), &_otcph);
+ if (oth == NULL)
+ return;
+
+ /* No RST for RST. */
+ if (oth->rst)
+ return;
+
+ if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+ return;
+
+ /* Check checksum */
+ if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
+ return;
+ oiph = ip_hdr(oldskb);
+
+ nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
+ LL_MAX_HEADER, GFP_ATOMIC);
+ if (!nskb)
+ return;
+
+ skb_reserve(nskb, LL_MAX_HEADER);
+
+ skb_reset_network_header(nskb);
+ niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
+ niph->version = 4;
+ niph->ihl = sizeof(struct iphdr) / 4;
+ niph->tos = 0;
+ niph->id = 0;
+ niph->frag_off = htons(IP_DF);
+ niph->protocol = IPPROTO_TCP;
+ niph->check = 0;
+ niph->saddr = oiph->daddr;
+ niph->daddr = oiph->saddr;
+
+ skb_reset_transport_header(nskb);
+ tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
+ memset(tcph, 0, sizeof(*tcph));
+ tcph->source = oth->dest;
+ tcph->dest = oth->source;
+ tcph->doff = sizeof(struct tcphdr) / 4;
+
+ if (oth->ack)
+ tcph->seq = oth->ack_seq;
+ else {
+ tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
+ oldskb->len - ip_hdrlen(oldskb) -
+ (oth->doff << 2));
+ tcph->ack = 1;
+ }
+
+ tcph->rst = 1;
+ tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
+ niph->daddr, 0);
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+ nskb->csum_start = (unsigned char *)tcph - nskb->head;
+ nskb->csum_offset = offsetof(struct tcphdr, check);
+
+ /* ip_route_me_harder expects skb->dst to be set */
+ skb_dst_set_noref(nskb, skb_dst(oldskb));
+
+ nskb->protocol = htons(ETH_P_IP);
+ if (ip_route_me_harder(nskb, RTN_UNSPEC))
+ goto free_nskb;
+
+ niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
+
+ /* "Never happens" */
+ if (nskb->len > dst_mtu(skb_dst(nskb)))
+ goto free_nskb;
+
+ nf_ct_attach(nskb, oldskb);
+
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ /* If we use ip_local_out for bridged traffic, the MAC source on
+ * the RST will be ours, instead of the destination's. This confuses
+ * some routers/firewalls, and they drop the packet. So we need to
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+ if (oldskb->nf_bridge) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
+ nskb->dev = oldskb->nf_bridge->physindev;
+ niph->tot_len = htons(nskb->len);
+ ip_send_check(niph);
+ if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
+ oeth->h_source, oeth->h_dest, nskb->len) < 0)
+ goto free_nskb;
+ dev_queue_xmit(nskb);
+ } else
+#endif
+ ip_local_out(nskb);
+
+ return;
+
+ free_nskb:
+ kfree_skb(nskb);
+}
+EXPORT_SYMBOL_GPL(nf_send_reset);
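
nf_send_reset() now lives in its own module so reject implementations can share it. A sketch of a caller; my_reject_tg() is hypothetical:

static unsigned int my_reject_tg(struct sk_buff *skb, int hooknum)
{
	/* RST is only meaningful for TCP; a real target would
	 * answer other protocols with an ICMP error instead. */
	if (ip_hdr(skb)->protocol == IPPROTO_TCP)
		nf_send_reset(skb, hooknum);
	return NF_DROP;
}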
diff --git a/net/ipv4/netfilter/nft_chain_nat_ipv4.c b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
index 3964157d826c..df547bf50078 100644
--- a/net/ipv4/netfilter/nft_chain_nat_ipv4.c
+++ b/net/ipv4/netfilter/nft_chain_nat_ipv4.c
@@ -26,136 +26,53 @@
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/ip.h>
-/*
- * NAT chains
- */
-
-static unsigned int nf_nat_fn(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct)
{
- enum ip_conntrack_info ctinfo;
- struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_nat *nat;
- enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
struct nft_pktinfo pkt;
- unsigned int ret;
-
- if (ct == NULL || nf_ct_is_untracked(ct))
- return NF_ACCEPT;
-
- NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)));
-
- nat = nf_ct_nat_ext_add(ct);
- if (nat == NULL)
- return NF_ACCEPT;
-
- switch (ctinfo) {
- case IP_CT_RELATED:
- case IP_CT_RELATED + IP_CT_IS_REPLY:
- if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
- if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
- ops->hooknum))
- return NF_DROP;
- else
- return NF_ACCEPT;
- }
- /* Fall through */
- case IP_CT_NEW:
- if (nf_nat_initialized(ct, maniptype))
- break;
- nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
+ nft_set_pktinfo_ipv4(&pkt, ops, skb, in, out);
- ret = nft_do_chain(&pkt, ops);
- if (ret != NF_ACCEPT)
- return ret;
- if (!nf_nat_initialized(ct, maniptype)) {
- ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
- if (ret != NF_ACCEPT)
- return ret;
- }
- default:
- break;
- }
-
- return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+ return nft_do_chain(&pkt, ops);
}
-static unsigned int nf_nat_prerouting(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int nft_nat_ipv4_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- __be32 daddr = ip_hdr(skb)->daddr;
- unsigned int ret;
-
- ret = nf_nat_fn(ops, skb, in, out, okfn);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- ip_hdr(skb)->daddr != daddr) {
- skb_dst_drop(skb);
- }
- return ret;
+ return nf_nat_ipv4_fn(ops, skb, in, out, nft_nat_do_chain);
}
-static unsigned int nf_nat_postrouting(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int nft_nat_ipv4_in(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- enum ip_conntrack_info ctinfo __maybe_unused;
- const struct nf_conn *ct __maybe_unused;
- unsigned int ret;
-
- ret = nf_nat_fn(ops, skb, in, out, okfn);
-#ifdef CONFIG_XFRM
- if (ret != NF_DROP && ret != NF_STOLEN &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
- if (ct->tuplehash[dir].tuple.src.u3.ip !=
- ct->tuplehash[!dir].tuple.dst.u3.ip ||
- ct->tuplehash[dir].tuple.src.u.all !=
- ct->tuplehash[!dir].tuple.dst.u.all)
- return nf_xfrm_me_harder(skb, AF_INET) == 0 ?
- ret : NF_DROP;
- }
-#endif
- return ret;
+ return nf_nat_ipv4_in(ops, skb, in, out, nft_nat_do_chain);
}
-static unsigned int nf_nat_output(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int nft_nat_ipv4_out(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- enum ip_conntrack_info ctinfo;
- const struct nf_conn *ct;
- unsigned int ret;
-
- ret = nf_nat_fn(ops, skb, in, out, okfn);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ return nf_nat_ipv4_out(ops, skb, in, out, nft_nat_do_chain);
+}
- if (ct->tuplehash[dir].tuple.dst.u3.ip !=
- ct->tuplehash[!dir].tuple.src.u3.ip) {
- if (ip_route_me_harder(skb, RTN_UNSPEC))
- ret = NF_DROP;
- }
-#ifdef CONFIG_XFRM
- else if (ct->tuplehash[dir].tuple.dst.u.all !=
- ct->tuplehash[!dir].tuple.src.u.all)
- if (nf_xfrm_me_harder(skb, AF_INET))
- ret = NF_DROP;
-#endif
- }
- return ret;
+static unsigned int nft_nat_ipv4_local_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return nf_nat_ipv4_local_fn(ops, skb, in, out, nft_nat_do_chain);
}
static const struct nf_chain_type nft_chain_nat_ipv4 = {
@@ -168,10 +85,10 @@ static const struct nf_chain_type nft_chain_nat_ipv4 = {
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_LOCAL_IN),
.hooks = {
- [NF_INET_PRE_ROUTING] = nf_nat_prerouting,
- [NF_INET_POST_ROUTING] = nf_nat_postrouting,
- [NF_INET_LOCAL_OUT] = nf_nat_output,
- [NF_INET_LOCAL_IN] = nf_nat_fn,
+ [NF_INET_PRE_ROUTING] = nft_nat_ipv4_in,
+ [NF_INET_POST_ROUTING] = nft_nat_ipv4_out,
+ [NF_INET_LOCAL_OUT] = nft_nat_ipv4_local_fn,
+ [NF_INET_LOCAL_IN] = nft_nat_ipv4_fn,
},
};
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
new file mode 100644
index 000000000000..1c636d6b5b50
--- /dev/null
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_masq.h>
+#include <net/netfilter/ipv4/nf_nat_masquerade.h>
+
+static void nft_masq_ipv4_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_masq *priv = nft_expr_priv(expr);
+ struct nf_nat_range range;
+ unsigned int verdict;
+
+ range.flags = priv->flags;
+
+ verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum,
+ &range, pkt->out);
+
+ data[NFT_REG_VERDICT].verdict = verdict;
+}
+
+static struct nft_expr_type nft_masq_ipv4_type;
+static const struct nft_expr_ops nft_masq_ipv4_ops = {
+ .type = &nft_masq_ipv4_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
+ .eval = nft_masq_ipv4_eval,
+ .init = nft_masq_init,
+ .dump = nft_masq_dump,
+};
+
+static struct nft_expr_type nft_masq_ipv4_type __read_mostly = {
+ .family = NFPROTO_IPV4,
+ .name = "masq",
+ .ops = &nft_masq_ipv4_ops,
+ .policy = nft_masq_policy,
+ .maxattr = NFTA_MASQ_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_masq_ipv4_module_init(void)
+{
+ int ret;
+
+ ret = nft_register_expr(&nft_masq_ipv4_type);
+ if (ret < 0)
+ return ret;
+
+ nf_nat_masquerade_ipv4_register_notifier();
+
+ return ret;
+}
+
+static void __exit nft_masq_ipv4_module_exit(void)
+{
+ nft_unregister_expr(&nft_masq_ipv4_type);
+ nf_nat_masquerade_ipv4_unregister_notifier();
+}
+
+module_init(nft_masq_ipv4_module_init);
+module_exit(nft_masq_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "masq");
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index e79718a382f2..ed33299c56d1 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -16,7 +16,6 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
-#include <net/icmp.h>
#include <net/netfilter/ipv4/nf_reject.h>
#include <net/netfilter/nft_reject.h>
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index a3c59a077a5f..57f7c9804139 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -311,7 +311,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
chk_addr_ret = RTN_LOCAL;
- if ((sysctl_ip_nonlocal_bind == 0 &&
+ if ((net->ipv4.sysctl_ip_nonlocal_bind == 0 &&
isk->freebind == 0 && isk->transparent == 0 &&
chk_addr_ret != RTN_LOCAL) ||
chk_addr_ret == RTN_MULTICAST ||
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 46d6a1c923a8..4b7c0ec65251 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -30,6 +30,7 @@
const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS] __read_mostly;
+EXPORT_SYMBOL(inet_offloads);
int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
{
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cbadb942c332..793c0bb8c4fd 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -596,12 +596,12 @@ static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
static inline u32 fnhe_hashfun(__be32 daddr)
{
+ static u32 fnhe_hashrnd __read_mostly;
u32 hval;
- hval = (__force u32) daddr;
- hval ^= (hval >> 11) ^ (hval >> 22);
-
- return hval & (FNHE_HASH_SIZE - 1);
+ net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
+ hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
+ return hash_32(hval, FNHE_HASH_SHIFT);
}
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
@@ -628,12 +628,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
spin_lock_bh(&fnhe_lock);
- hash = nh->nh_exceptions;
+ hash = rcu_dereference(nh->nh_exceptions);
if (!hash) {
hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
if (!hash)
goto out_unlock;
- nh->nh_exceptions = hash;
+ rcu_assign_pointer(nh->nh_exceptions, hash);
}
hash += hval;
@@ -1242,7 +1242,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
- struct fnhe_hash_bucket *hash = nh->nh_exceptions;
+ struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
struct fib_nh_exception *fnhe;
u32 hval;
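
Two hardening changes meet in the exception cache: fnhe_hashfun() now mixes the destination through jhash with a boot-time random key, so off-path senders cannot aim chosen addresses at a single bucket, and nh_exceptions becomes an RCU-managed pointer (rcu_assign_pointer() on publish, rcu_dereference() on lookup). The keyed-hash pattern in isolation, as a sketch; FNHE_HASH_SHIFT is assumed to be the log2 table size used above:

static u32 keyed_bucket(__be32 daddr)
{
	static u32 seed __read_mostly;

	/* Filled once, lazily, with random bytes. */
	net_get_random_once(&seed, sizeof(seed));
	return hash_32(jhash_1word((__force u32)daddr, seed),
		       FNHE_HASH_SHIFT);
}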
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index c0c75688896e..0431a8f3c8f4 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -25,7 +25,7 @@
extern int sysctl_tcp_syncookies;
-static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
+static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly;
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 79a007c52558..8a25509c35b3 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -286,13 +286,6 @@ static struct ctl_table ipv4_table[] = {
.extra2 = &ip_ttl_max,
},
{
- .procname = "ip_nonlocal_bind",
- .data = &sysctl_ip_nonlocal_bind,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
.procname = "tcp_syn_retries",
.data = &sysctl_tcp_syn_retries,
.maxlen = sizeof(int),
@@ -450,6 +443,16 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+#ifdef CONFIG_IP_MULTICAST
+ {
+ .procname = "igmp_qrv",
+ .data = &sysctl_igmp_qrv,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one
+ },
+#endif
{
.procname = "inet_peer_threshold",
.data = &inet_peer_threshold,
@@ -728,6 +731,22 @@ static struct ctl_table ipv4_table[] = {
.extra2 = &one,
},
{
+ .procname = "icmp_msgs_per_sec",
+ .data = &sysctl_icmp_msgs_per_sec,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
+ {
+ .procname = "icmp_msgs_burst",
+ .data = &sysctl_icmp_msgs_burst,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ },
+ {
.procname = "udp_mem",
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
@@ -839,6 +858,13 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "ip_nonlocal_bind",
+ .data = &init_net.ipv4.sysctl_ip_nonlocal_bind,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "fwmark_reflect",
.data = &init_net.ipv4.sysctl_fwmark_reflect,
.maxlen = sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 541f26a67ba2..26a6f113f00c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -405,7 +405,7 @@ void tcp_init_sock(struct sock *sk)
tp->reordering = sysctl_tcp_reordering;
tcp_enable_early_retrans(tp);
- icsk->icsk_ca_ops = &tcp_init_congestion_ops;
+ tcp_assign_congestion_control(sk);
tp->tsoffset = 0;
@@ -609,7 +609,7 @@ static inline bool forced_push(const struct tcp_sock *tp)
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
-static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
+static void skb_entail(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
@@ -618,7 +618,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
tcb->seq = tcb->end_seq = tp->write_seq;
tcb->tcp_flags = TCPHDR_ACK;
tcb->sacked = 0;
- skb_header_release(skb);
+ __skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
sk->sk_wmem_queued += skb->truesize;
sk_mem_charge(sk, skb->truesize);
@@ -963,7 +963,7 @@ new_segment:
skb->ip_summed = CHECKSUM_PARTIAL;
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
- skb_shinfo(skb)->gso_segs = 0;
+ tcp_skb_pcount_set(skb, 0);
if (!copied)
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
@@ -1261,7 +1261,7 @@ new_segment:
tp->write_seq += copy;
TCP_SKB_CB(skb)->end_seq += copy;
- skb_shinfo(skb)->gso_segs = 0;
+ tcp_skb_pcount_set(skb, 0);
from += copy;
copied += copy;
@@ -1510,9 +1510,9 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
offset = seq - TCP_SKB_CB(skb)->seq;
- if (tcp_hdr(skb)->syn)
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
offset--;
- if (offset < skb->len || tcp_hdr(skb)->fin) {
+ if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
*off = offset;
return skb;
}
@@ -1585,7 +1585,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
if (offset + 1 != skb->len)
continue;
}
- if (tcp_hdr(skb)->fin) {
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
sk_eat_skb(sk, skb, false);
++seq;
break;
@@ -1722,11 +1722,11 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
break;
offset = *seq - TCP_SKB_CB(skb)->seq;
- if (tcp_hdr(skb)->syn)
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
offset--;
if (offset < skb->len)
goto found_ok_skb;
- if (tcp_hdr(skb)->fin)
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto found_fin_ok;
WARN(!(flags & MSG_PEEK),
"recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
@@ -1959,7 +1959,7 @@ skip_copy:
if (used + offset < skb->len)
continue;
- if (tcp_hdr(skb)->fin)
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto found_fin_ok;
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, copied_early);
@@ -2160,8 +2160,10 @@ void tcp_close(struct sock *sk, long timeout)
* reader process may not have drained the data yet!
*/
while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
- u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
- tcp_hdr(skb)->fin;
+ u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
+
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+ len--;
data_was_unread += len;
__kfree_skb(skb);
}
@@ -2691,7 +2693,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
break;
#endif
case TCP_USER_TIMEOUT:
- /* Cap the max timeout in ms TCP will retry/retrans
+ /* Cap the max time in ms TCP will retry or probe the window
* before giving up and aborting (ETIMEDOUT) a connection.
*/
if (val < 0)
@@ -3170,7 +3172,7 @@ static int __init set_thash_entries(char *str)
}
__setup("thash_entries=", set_thash_entries);
-static void tcp_init_mem(void)
+static void __init tcp_init_mem(void)
{
unsigned long limit = nr_free_buffer_pages() / 8;
limit = max(limit, 128UL);
@@ -3256,8 +3258,6 @@ void __init tcp_init(void)
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
tcp_metrics_init();
-
- tcp_register_congestion_control(&tcp_reno);
-
+ BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
tcp_tasklet_init();
}
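
A recurring hunk in tcp.c swaps reads of tcp_hdr(skb)->syn/fin for tests on TCP_SKB_CB(skb)->tcp_flags: the flags are cached in the skb control block at receive time, so they stay valid even where the TCP header is no longer convenient or safe to reparse. The pattern as a one-line helper; skb_carries_fin() is illustrative, not part of the patch:

/* True if this queued segment carries a FIN. */
static inline bool skb_carries_fin(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
}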
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index d5de69bc04f5..bb395d46a389 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <net/tcp.h>
-
#define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
* max_cwnd = snd_cwnd * beta
*/
@@ -46,11 +45,10 @@ MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(smooth_part, int, 0644);
MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wmax-B to Wmax");
-
/* BIC TCP Parameters */
struct bictcp {
u32 cnt; /* increase cwnd by 1 after ACKs */
- u32 last_max_cwnd; /* last maximum snd_cwnd */
+ u32 last_max_cwnd; /* last maximum snd_cwnd */
u32 loss_cwnd; /* congestion window at last loss */
u32 last_cwnd; /* the last snd_cwnd */
u32 last_time; /* time when updated last_cwnd */
@@ -103,7 +101,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
/* binary increase */
if (cwnd < ca->last_max_cwnd) {
- __u32 dist = (ca->last_max_cwnd - cwnd)
+ __u32 dist = (ca->last_max_cwnd - cwnd)
/ BICTCP_B;
if (dist > max_increment)
@@ -154,7 +152,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
bictcp_update(ca, tp->snd_cwnd);
tcp_cong_avoid_ai(tp, ca->cnt);
}
-
}
/*
@@ -177,7 +174,6 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk)
ca->loss_cwnd = tp->snd_cwnd;
-
if (tp->snd_cwnd <= low_window)
return max(tp->snd_cwnd >> 1U, 2U);
else
@@ -188,6 +184,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct bictcp *ca = inet_csk_ca(sk);
+
return max(tp->snd_cwnd, ca->loss_cwnd);
}
@@ -206,12 +203,12 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt)
if (icsk->icsk_ca_state == TCP_CA_Open) {
struct bictcp *ca = inet_csk_ca(sk);
+
cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
ca->delayed_ack += cnt;
}
}
-
static struct tcp_congestion_ops bictcp __read_mostly = {
.init = bictcp_init,
.ssthresh = bictcp_recalc_ssthresh,
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 7b09d8b49fa5..b1c5970d47a1 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -74,24 +74,34 @@ void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);
/* Assign choice of congestion control. */
-void tcp_init_congestion_control(struct sock *sk)
+void tcp_assign_congestion_control(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_congestion_ops *ca;
- /* if no choice made yet assign the current value set as default */
- if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
- rcu_read_lock();
- list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
- if (try_module_get(ca->owner)) {
- icsk->icsk_ca_ops = ca;
- break;
- }
-
- /* fallback to next available */
+ rcu_read_lock();
+ list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
+ if (likely(try_module_get(ca->owner))) {
+ icsk->icsk_ca_ops = ca;
+ goto out;
}
- rcu_read_unlock();
+ /* Fallback to next available. The last really
+ * guaranteed fallback is Reno from this list.
+ */
}
+out:
+ rcu_read_unlock();
+
+ /* Clear out private data before diag gets it and
+ * the ca has not been initialized.
+ */
+ if (ca->get_info)
+ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+}
+
+void tcp_init_congestion_control(struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ca_ops->init)
icsk->icsk_ca_ops->init(sk);
@@ -142,7 +152,6 @@ static int __init tcp_congestion_default(void)
}
late_initcall(tcp_congestion_default);
-
/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
@@ -154,7 +163,6 @@ void tcp_get_available_congestion_control(char *buf, size_t maxlen)
offs += snprintf(buf + offs, maxlen - offs,
"%s%s",
offs == 0 ? "" : " ", ca->name);
-
}
rcu_read_unlock();
}
@@ -186,7 +194,6 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
offs += snprintf(buf + offs, maxlen - offs,
"%s%s",
offs == 0 ? "" : " ", ca->name);
-
}
rcu_read_unlock();
}
@@ -230,7 +237,6 @@ out:
return ret;
}
-
/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
@@ -285,15 +291,13 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
* ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
* returns the leftover acks to adjust cwnd in congestion avoidance mode.
*/
-int tcp_slow_start(struct tcp_sock *tp, u32 acked)
+void tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
u32 cwnd = tp->snd_cwnd + acked;
if (cwnd > tp->snd_ssthresh)
cwnd = tp->snd_ssthresh + 1;
- acked -= cwnd - tp->snd_cwnd;
tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
- return acked;
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
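
tcp_slow_start() now returns void; the leftover-ACK bookkeeping moves to the callers, and the function only grows cwnd by the newly acked count, capped one above ssthresh. The arithmetic in isolation, as a sketch:

static u32 slow_start_cwnd(u32 cwnd, u32 ssthresh, u32 acked, u32 clamp)
{
	u32 next = cwnd + acked;

	if (next > ssthresh)
		next = ssthresh + 1;	/* hand off to congestion avoidance */
	return next < clamp ? next : clamp;
}

For cwnd 10, ssthresh 12 and 5 segments acked, next is 15, so the result is 13 (ssthresh + 1), subject to snd_cwnd_clamp.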
@@ -337,6 +341,7 @@ EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
u32 tcp_reno_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+
return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
@@ -348,15 +353,3 @@ struct tcp_congestion_ops tcp_reno = {
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_reno_cong_avoid,
};
-
-/* Initial congestion control used (until SYN)
- * really reno under another name so we can tell difference
- * during tcp_set_default_congestion_control
- */
-struct tcp_congestion_ops tcp_init_congestion_ops = {
- .name = "",
- .owner = THIS_MODULE,
- .ssthresh = tcp_reno_ssthresh,
- .cong_avoid = tcp_reno_cong_avoid,
-};
-EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);
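
With the placeholder tcp_init_congestion_ops removed, tcp_init_sock() picks a real algorithm immediately via tcp_assign_congestion_control(), and boot asserts that Reno, the guaranteed fallback at the tail of the list, registered. Out-of-tree algorithms keep the same entry point; a minimal sketch reusing the exported Reno helpers, with my_cc hypothetical:

static struct tcp_congestion_ops my_cc __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.owner		= THIS_MODULE,
	.name		= "my_cc",
};

static int __init my_cc_register(void)
{
	/* Registration rejects ops lacking ssthresh or cong_avoid. */
	return tcp_register_congestion_control(&my_cc);
}
module_init(my_cc_register);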
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9bd8a4828a9..20de0118c98e 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -82,12 +82,13 @@ MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (mse
/* BIC TCP Parameters */
struct bictcp {
u32 cnt; /* increase cwnd by 1 after ACKs */
- u32 last_max_cwnd; /* last maximum snd_cwnd */
+ u32 last_max_cwnd; /* last maximum snd_cwnd */
u32 loss_cwnd; /* congestion window at last loss */
u32 last_cwnd; /* the last snd_cwnd */
u32 last_time; /* time when updated last_cwnd */
u32 bic_origin_point;/* origin point of bic function */
- u32 bic_K; /* time to origin point from the beginning of the current epoch */
+ u32 bic_K; /* time to origin point
+ from the beginning of the current epoch */
u32 delay_min; /* min delay (msec << 3) */
u32 epoch_start; /* beginning of an epoch */
u32 ack_cnt; /* number of acks */
@@ -219,7 +220,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
ca->last_time = tcp_time_stamp;
if (ca->epoch_start == 0) {
- ca->epoch_start = tcp_time_stamp; /* record the beginning of an epoch */
+ ca->epoch_start = tcp_time_stamp; /* record beginning */
ca->ack_cnt = 1; /* start counting */
ca->tcp_cwnd = cwnd; /* syn with cubic */
@@ -263,9 +264,9 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
/* c/rtt * (t-K)^3 */
delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
- if (t < ca->bic_K) /* below origin*/
+ if (t < ca->bic_K) /* below origin*/
bic_target = ca->bic_origin_point - delta;
- else /* above origin*/
+ else /* above origin*/
bic_target = ca->bic_origin_point + delta;
/* cubic function - calc bictcp_cnt*/
@@ -285,13 +286,14 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
/* TCP Friendly */
if (tcp_friendliness) {
u32 scale = beta_scale;
+
delta = (cwnd * scale) >> 3;
while (ca->ack_cnt > delta) { /* update tcp cwnd */
ca->ack_cnt -= delta;
ca->tcp_cwnd++;
}
- if (ca->tcp_cwnd > cwnd){ /* if bic is slower than tcp */
+ if (ca->tcp_cwnd > cwnd) { /* if bic is slower than tcp */
delta = ca->tcp_cwnd - cwnd;
max_cnt = cwnd / delta;
if (ca->cnt > max_cnt)
@@ -320,7 +322,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
bictcp_update(ca, tp->snd_cwnd);
tcp_cong_avoid_ai(tp, ca->cnt);
}
-
}
static u32 bictcp_recalc_ssthresh(struct sock *sk)
@@ -452,7 +453,8 @@ static int __init cubictcp_register(void)
* based on SRTT of 100ms
*/
- beta_scale = 8*(BICTCP_BETA_SCALE+beta)/ 3 / (BICTCP_BETA_SCALE - beta);
+ beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
+ / (BICTCP_BETA_SCALE - beta);
cube_rtt_scale = (bic_scale * 10); /* 1024*c/rtt */
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
new file mode 100644
index 000000000000..b504371af742
--- /dev/null
+++ b/net/ipv4/tcp_dctcp.c
@@ -0,0 +1,344 @@
+/* DataCenter TCP (DCTCP) congestion control.
+ *
+ * http://simula.stanford.edu/~alizade/Site/DCTCP.html
+ *
+ * This is an implementation of DCTCP over Reno, an enhancement to the
+ * TCP congestion control algorithm designed for data centers. DCTCP
+ * leverages Explicit Congestion Notification (ECN) in the network to
+ * provide multi-bit feedback to the end hosts. DCTCP's goal is to meet
+ * the following three data center transport requirements:
+ *
+ * - High burst tolerance (incast due to partition/aggregate)
+ * - Low latency (short flows, queries)
+ * - High throughput (continuous data updates, large file transfers)
+ * with commodity shallow buffered switches
+ *
+ * The algorithm is described in detail in the following two papers:
+ *
+ * 1) Mohammad Alizadeh, Albert Greenberg, David A. Maltz, Jitendra Padhye,
+ * Parveen Patel, Balaji Prabhakar, Sudipta Sengupta, and Murari Sridharan:
+ * "Data Center TCP (DCTCP)", Data Center Networks session
+ * Proc. ACM SIGCOMM, New Delhi, 2010.
+ * http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp-final.pdf
+ *
+ * 2) Mohammad Alizadeh, Adel Javanmard, and Balaji Prabhakar:
+ * "Analysis of DCTCP: Stability, Convergence, and Fairness"
+ * Proc. ACM SIGMETRICS, San Jose, 2011.
+ * http://simula.stanford.edu/~alizade/Site/DCTCP_files/dctcp_analysis-full.pdf
+ *
+ * Initial prototype from Abdul Kabbani, Masato Yasuda and Mohammad Alizadeh.
+ *
+ * Authors:
+ *
+ * Daniel Borkmann <dborkman@redhat.com>
+ * Florian Westphal <fw@strlen.de>
+ * Glenn Judd <glenn.judd@morganstanley.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <net/tcp.h>
+#include <linux/inet_diag.h>
+
+#define DCTCP_MAX_ALPHA 1024U
+
+struct dctcp {
+ u32 acked_bytes_ecn;
+ u32 acked_bytes_total;
+ u32 prior_snd_una;
+ u32 prior_rcv_nxt;
+ u32 dctcp_alpha;
+ u32 next_seq;
+ u32 ce_state;
+ u32 delayed_ack_reserved;
+};
+
+static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */
+module_param(dctcp_shift_g, uint, 0644);
+MODULE_PARM_DESC(dctcp_shift_g, "parameter g for updating dctcp_alpha");
+
+static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
+module_param(dctcp_alpha_on_init, uint, 0644);
+MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
+
+static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
+module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
+MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
+ "parameter for clamping alpha on loss");
+
+static struct tcp_congestion_ops dctcp_reno;
+
+static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
+{
+ ca->next_seq = tp->snd_nxt;
+
+ ca->acked_bytes_ecn = 0;
+ ca->acked_bytes_total = 0;
+}
+
+static void dctcp_init(struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ if ((tp->ecn_flags & TCP_ECN_OK) ||
+ (sk->sk_state == TCP_LISTEN ||
+ sk->sk_state == TCP_CLOSE)) {
+ struct dctcp *ca = inet_csk_ca(sk);
+
+ ca->prior_snd_una = tp->snd_una;
+ ca->prior_rcv_nxt = tp->rcv_nxt;
+
+ ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
+
+ ca->delayed_ack_reserved = 0;
+ ca->ce_state = 0;
+
+ dctcp_reset(tp, ca);
+ return;
+ }
+
+ /* No ECN support? Fall back to Reno. Also need to clear
+ * ECT from sk since it is set during 3WHS for DCTCP.
+ */
+ inet_csk(sk)->icsk_ca_ops = &dctcp_reno;
+ INET_ECN_dontxmit(sk);
+}
+
+static u32 dctcp_ssthresh(struct sock *sk)
+{
+ const struct dctcp *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
+}
+
+/* Minimal DCTCP CE state machine:
+ *
+ * S: 0 <- last pkt was non-CE
+ * 1 <- last pkt was CE
+ */
+
+static void dctcp_ce_state_0_to_1(struct sock *sk)
+{
+ struct dctcp *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* State has changed from CE=0 to CE=1 and delayed
+ * ACK has not been sent yet.
+ */
+ if (!ca->ce_state && ca->delayed_ack_reserved) {
+ u32 tmp_rcv_nxt;
+
+ /* Save current rcv_nxt. */
+ tmp_rcv_nxt = tp->rcv_nxt;
+
+ /* Generate previous ack with CE=0. */
+ tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+ tp->rcv_nxt = ca->prior_rcv_nxt;
+
+ tcp_send_ack(sk);
+
+ /* Recover current rcv_nxt. */
+ tp->rcv_nxt = tmp_rcv_nxt;
+ }
+
+ ca->prior_rcv_nxt = tp->rcv_nxt;
+ ca->ce_state = 1;
+
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+}
+
+static void dctcp_ce_state_1_to_0(struct sock *sk)
+{
+ struct dctcp *ca = inet_csk_ca(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* State has changed from CE=1 to CE=0 and delayed
+ * ACK has not been sent yet.
+ */
+ if (ca->ce_state && ca->delayed_ack_reserved) {
+ u32 tmp_rcv_nxt;
+
+ /* Save current rcv_nxt. */
+ tmp_rcv_nxt = tp->rcv_nxt;
+
+ /* Generate previous ack with CE=1. */
+ tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
+ tp->rcv_nxt = ca->prior_rcv_nxt;
+
+ tcp_send_ack(sk);
+
+ /* Recover current rcv_nxt. */
+ tp->rcv_nxt = tmp_rcv_nxt;
+ }
+
+ ca->prior_rcv_nxt = tp->rcv_nxt;
+ ca->ce_state = 0;
+
+ tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
+}
+
+static void dctcp_update_alpha(struct sock *sk, u32 flags)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ struct dctcp *ca = inet_csk_ca(sk);
+ u32 acked_bytes = tp->snd_una - ca->prior_snd_una;
+
+ /* If ack did not advance snd_una, count dupack as MSS size.
+ * If ack did update window, do not count it at all.
+ */
+ if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE))
+ acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss;
+ if (acked_bytes) {
+ ca->acked_bytes_total += acked_bytes;
+ ca->prior_snd_una = tp->snd_una;
+
+ if (flags & CA_ACK_ECE)
+ ca->acked_bytes_ecn += acked_bytes;
+ }
+
+ /* Expired RTT */
+ if (!before(tp->snd_una, ca->next_seq)) {
+ /* Avoid a zero denominator. */
+ if (ca->acked_bytes_total == 0)
+ ca->acked_bytes_total = 1;
+
+ /* alpha = (1 - g) * alpha + g * F */
+ ca->dctcp_alpha = ca->dctcp_alpha -
+ (ca->dctcp_alpha >> dctcp_shift_g) +
+ (ca->acked_bytes_ecn << (10U - dctcp_shift_g)) /
+ ca->acked_bytes_total;
+
+ if (ca->dctcp_alpha > DCTCP_MAX_ALPHA)
+ /* Clamp dctcp_alpha to max. */
+ ca->dctcp_alpha = DCTCP_MAX_ALPHA;
+
+ dctcp_reset(tp, ca);
+ }
+}
+
+static void dctcp_state(struct sock *sk, u8 new_state)
+{
+ if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) {
+ struct dctcp *ca = inet_csk_ca(sk);
+
+ /* If this extension is enabled, we clamp dctcp_alpha to
+ * max on packet loss; the motivation is that dctcp_alpha
+ * is an indicator of the extent of congestion, and packet
+ * loss is an indicator of extreme congestion; setting
+ * this in practice turned out to be beneficial, and
+ * effectively assumes total congestion which reduces the
+ * window by half.
+ */
+ ca->dctcp_alpha = DCTCP_MAX_ALPHA;
+ }
+}
+
+static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev)
+{
+ struct dctcp *ca = inet_csk_ca(sk);
+
+ switch (ev) {
+ case CA_EVENT_DELAYED_ACK:
+ if (!ca->delayed_ack_reserved)
+ ca->delayed_ack_reserved = 1;
+ break;
+ case CA_EVENT_NON_DELAYED_ACK:
+ if (ca->delayed_ack_reserved)
+ ca->delayed_ack_reserved = 0;
+ break;
+ default:
+ /* Don't care for the rest. */
+ break;
+ }
+}
+
+static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+{
+ switch (ev) {
+ case CA_EVENT_ECN_IS_CE:
+ dctcp_ce_state_0_to_1(sk);
+ break;
+ case CA_EVENT_ECN_NO_CE:
+ dctcp_ce_state_1_to_0(sk);
+ break;
+ case CA_EVENT_DELAYED_ACK:
+ case CA_EVENT_NON_DELAYED_ACK:
+ dctcp_update_ack_reserved(sk, ev);
+ break;
+ default:
+ /* Don't care for the rest. */
+ break;
+ }
+}
+
+static void dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+{
+ const struct dctcp *ca = inet_csk_ca(sk);
+
+ /* Fill it also in case of VEGASINFO due to req struct limits.
+ * We can still correctly retrieve it later.
+ */
+ if (ext & (1 << (INET_DIAG_DCTCPINFO - 1)) ||
+ ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
+ struct tcp_dctcp_info info;
+
+ memset(&info, 0, sizeof(info));
+ if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) {
+ info.dctcp_enabled = 1;
+ info.dctcp_ce_state = (u16) ca->ce_state;
+ info.dctcp_alpha = ca->dctcp_alpha;
+ info.dctcp_ab_ecn = ca->acked_bytes_ecn;
+ info.dctcp_ab_tot = ca->acked_bytes_total;
+ }
+
+ nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info);
+ }
+}
+
+static struct tcp_congestion_ops dctcp __read_mostly = {
+ .init = dctcp_init,
+ .in_ack_event = dctcp_update_alpha,
+ .cwnd_event = dctcp_cwnd_event,
+ .ssthresh = dctcp_ssthresh,
+ .cong_avoid = tcp_reno_cong_avoid,
+ .set_state = dctcp_state,
+ .get_info = dctcp_get_info,
+ .flags = TCP_CONG_NEEDS_ECN,
+ .owner = THIS_MODULE,
+ .name = "dctcp",
+};
+
+static struct tcp_congestion_ops dctcp_reno __read_mostly = {
+ .ssthresh = tcp_reno_ssthresh,
+ .cong_avoid = tcp_reno_cong_avoid,
+ .get_info = dctcp_get_info,
+ .owner = THIS_MODULE,
+ .name = "dctcp-reno",
+};
+
+static int __init dctcp_register(void)
+{
+ BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE);
+ return tcp_register_congestion_control(&dctcp);
+}
+
+static void __exit dctcp_unregister(void)
+{
+ tcp_unregister_congestion_control(&dctcp);
+}
+
+module_init(dctcp_register);
+module_exit(dctcp_unregister);
+
+MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_AUTHOR("Glenn Judd <glenn.judd@morganstanley.com>");
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DataCenter TCP (DCTCP)");
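
The heart of dctcp_update_alpha() is the EWMA alpha = (1 - g) * alpha + g * F, with g = 1/2^dctcp_shift_g and F the fraction of CE-marked bytes over the last RTT, kept in 10-bit fixed point (1024 == 1.0). A standalone user-space sketch of that arithmetic:

#include <stdio.h>
#include <stdint.h>

#define DCTCP_MAX_ALPHA 1024U	/* 1.0 in 10-bit fixed point */

/* alpha = alpha - alpha/2^g + F/2^g, with F pre-scaled to 10 bits. */
static uint32_t update_alpha(uint32_t alpha, unsigned int g,
			     uint32_t bytes_ecn, uint32_t bytes_total)
{
	if (bytes_total == 0)		/* avoid a zero denominator */
		bytes_total = 1;
	alpha = alpha - (alpha >> g) +
		((bytes_ecn << (10U - g)) / bytes_total);
	return alpha > DCTCP_MAX_ALPHA ? DCTCP_MAX_ALPHA : alpha;
}

int main(void)
{
	/* Half the bytes CE-marked each RTT: alpha converges on
	 * 512, i.e. roughly 0.5, from the initial 1024. */
	uint32_t alpha = DCTCP_MAX_ALPHA;
	int i;

	for (i = 0; i < 8; i++) {
		alpha = update_alpha(alpha, 4, 50000, 100000);
		printf("rtt %d: alpha = %u/1024\n", i, alpha);
	}
	return 0;
}

At alpha == 1024, dctcp_ssthresh() above reduces cwnd by cwnd * 1024 >> 11, i.e. halves it, matching Reno under persistent congestion.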
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index ed3f2ad42e0f..0d73f9ddb55b 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -9,7 +9,6 @@
* 2 of the License, or (at your option) any later version.
*/
-
#include <linux/module.h>
#include <linux/inet_diag.h>
@@ -35,13 +34,13 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
}
static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
- struct inet_diag_req_v2 *r, struct nlattr *bc)
+ struct inet_diag_req_v2 *r, struct nlattr *bc)
{
inet_diag_dump_icsk(&tcp_hashinfo, skb, cb, r, bc);
}
static int tcp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh,
- struct inet_diag_req_v2 *req)
+ struct inet_diag_req_v2 *req)
{
return inet_diag_dump_one_icsk(&tcp_hashinfo, in_skb, nlh, req);
}
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 9771563ab564..815c85e3b1e0 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -115,7 +115,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
struct in6_addr *buf = (struct in6_addr *) tmp.val;
- int i = 4;
+ int i;
for (i = 0; i < 4; i++)
buf->s6_addr32[i] ^= ip6h->daddr.s6_addr32[i];
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 1c4908280d92..882c08aae2f5 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <net/tcp.h>
-
/* From AIMD tables from RFC 3649 appendix B,
* with fixed-point MD scaled <<8.
*/
@@ -17,78 +16,78 @@ static const struct hstcp_aimd_val {
unsigned int cwnd;
unsigned int md;
} hstcp_aimd_vals[] = {
- { 38, 128, /* 0.50 */ },
- { 118, 112, /* 0.44 */ },
- { 221, 104, /* 0.41 */ },
- { 347, 98, /* 0.38 */ },
- { 495, 93, /* 0.37 */ },
- { 663, 89, /* 0.35 */ },
- { 851, 86, /* 0.34 */ },
- { 1058, 83, /* 0.33 */ },
- { 1284, 81, /* 0.32 */ },
- { 1529, 78, /* 0.31 */ },
- { 1793, 76, /* 0.30 */ },
- { 2076, 74, /* 0.29 */ },
- { 2378, 72, /* 0.28 */ },
- { 2699, 71, /* 0.28 */ },
- { 3039, 69, /* 0.27 */ },
- { 3399, 68, /* 0.27 */ },
- { 3778, 66, /* 0.26 */ },
- { 4177, 65, /* 0.26 */ },
- { 4596, 64, /* 0.25 */ },
- { 5036, 62, /* 0.25 */ },
- { 5497, 61, /* 0.24 */ },
- { 5979, 60, /* 0.24 */ },
- { 6483, 59, /* 0.23 */ },
- { 7009, 58, /* 0.23 */ },
- { 7558, 57, /* 0.22 */ },
- { 8130, 56, /* 0.22 */ },
- { 8726, 55, /* 0.22 */ },
- { 9346, 54, /* 0.21 */ },
- { 9991, 53, /* 0.21 */ },
- { 10661, 52, /* 0.21 */ },
- { 11358, 52, /* 0.20 */ },
- { 12082, 51, /* 0.20 */ },
- { 12834, 50, /* 0.20 */ },
- { 13614, 49, /* 0.19 */ },
- { 14424, 48, /* 0.19 */ },
- { 15265, 48, /* 0.19 */ },
- { 16137, 47, /* 0.19 */ },
- { 17042, 46, /* 0.18 */ },
- { 17981, 45, /* 0.18 */ },
- { 18955, 45, /* 0.18 */ },
- { 19965, 44, /* 0.17 */ },
- { 21013, 43, /* 0.17 */ },
- { 22101, 43, /* 0.17 */ },
- { 23230, 42, /* 0.17 */ },
- { 24402, 41, /* 0.16 */ },
- { 25618, 41, /* 0.16 */ },
- { 26881, 40, /* 0.16 */ },
- { 28193, 39, /* 0.16 */ },
- { 29557, 39, /* 0.15 */ },
- { 30975, 38, /* 0.15 */ },
- { 32450, 38, /* 0.15 */ },
- { 33986, 37, /* 0.15 */ },
- { 35586, 36, /* 0.14 */ },
- { 37253, 36, /* 0.14 */ },
- { 38992, 35, /* 0.14 */ },
- { 40808, 35, /* 0.14 */ },
- { 42707, 34, /* 0.13 */ },
- { 44694, 33, /* 0.13 */ },
- { 46776, 33, /* 0.13 */ },
- { 48961, 32, /* 0.13 */ },
- { 51258, 32, /* 0.13 */ },
- { 53677, 31, /* 0.12 */ },
- { 56230, 30, /* 0.12 */ },
- { 58932, 30, /* 0.12 */ },
- { 61799, 29, /* 0.12 */ },
- { 64851, 28, /* 0.11 */ },
- { 68113, 28, /* 0.11 */ },
- { 71617, 27, /* 0.11 */ },
- { 75401, 26, /* 0.10 */ },
- { 79517, 26, /* 0.10 */ },
- { 84035, 25, /* 0.10 */ },
- { 89053, 24, /* 0.10 */ },
+ { 38, 128, /* 0.50 */ },
+ { 118, 112, /* 0.44 */ },
+ { 221, 104, /* 0.41 */ },
+ { 347, 98, /* 0.38 */ },
+ { 495, 93, /* 0.37 */ },
+ { 663, 89, /* 0.35 */ },
+ { 851, 86, /* 0.34 */ },
+ { 1058, 83, /* 0.33 */ },
+ { 1284, 81, /* 0.32 */ },
+ { 1529, 78, /* 0.31 */ },
+ { 1793, 76, /* 0.30 */ },
+ { 2076, 74, /* 0.29 */ },
+ { 2378, 72, /* 0.28 */ },
+ { 2699, 71, /* 0.28 */ },
+ { 3039, 69, /* 0.27 */ },
+ { 3399, 68, /* 0.27 */ },
+ { 3778, 66, /* 0.26 */ },
+ { 4177, 65, /* 0.26 */ },
+ { 4596, 64, /* 0.25 */ },
+ { 5036, 62, /* 0.25 */ },
+ { 5497, 61, /* 0.24 */ },
+ { 5979, 60, /* 0.24 */ },
+ { 6483, 59, /* 0.23 */ },
+ { 7009, 58, /* 0.23 */ },
+ { 7558, 57, /* 0.22 */ },
+ { 8130, 56, /* 0.22 */ },
+ { 8726, 55, /* 0.22 */ },
+ { 9346, 54, /* 0.21 */ },
+ { 9991, 53, /* 0.21 */ },
+ { 10661, 52, /* 0.21 */ },
+ { 11358, 52, /* 0.20 */ },
+ { 12082, 51, /* 0.20 */ },
+ { 12834, 50, /* 0.20 */ },
+ { 13614, 49, /* 0.19 */ },
+ { 14424, 48, /* 0.19 */ },
+ { 15265, 48, /* 0.19 */ },
+ { 16137, 47, /* 0.19 */ },
+ { 17042, 46, /* 0.18 */ },
+ { 17981, 45, /* 0.18 */ },
+ { 18955, 45, /* 0.18 */ },
+ { 19965, 44, /* 0.17 */ },
+ { 21013, 43, /* 0.17 */ },
+ { 22101, 43, /* 0.17 */ },
+ { 23230, 42, /* 0.17 */ },
+ { 24402, 41, /* 0.16 */ },
+ { 25618, 41, /* 0.16 */ },
+ { 26881, 40, /* 0.16 */ },
+ { 28193, 39, /* 0.16 */ },
+ { 29557, 39, /* 0.15 */ },
+ { 30975, 38, /* 0.15 */ },
+ { 32450, 38, /* 0.15 */ },
+ { 33986, 37, /* 0.15 */ },
+ { 35586, 36, /* 0.14 */ },
+ { 37253, 36, /* 0.14 */ },
+ { 38992, 35, /* 0.14 */ },
+ { 40808, 35, /* 0.14 */ },
+ { 42707, 34, /* 0.13 */ },
+ { 44694, 33, /* 0.13 */ },
+ { 46776, 33, /* 0.13 */ },
+ { 48961, 32, /* 0.13 */ },
+ { 51258, 32, /* 0.13 */ },
+ { 53677, 31, /* 0.12 */ },
+ { 56230, 30, /* 0.12 */ },
+ { 58932, 30, /* 0.12 */ },
+ { 61799, 29, /* 0.12 */ },
+ { 64851, 28, /* 0.11 */ },
+ { 68113, 28, /* 0.11 */ },
+ { 71617, 27, /* 0.11 */ },
+ { 75401, 26, /* 0.10 */ },
+ { 79517, 26, /* 0.10 */ },
+ { 84035, 25, /* 0.10 */ },
+ { 89053, 24, /* 0.10 */ },
};
#define HSTCP_AIMD_MAX ARRAY_SIZE(hstcp_aimd_vals)
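The table above pairs a cwnd threshold with a multiplicative-decrease factor md in <<8 fixed point; md/256 is the fraction of the window shed on loss, per the inline comments. A hedged userspace sketch of the lookup follows; note the real module caches an index in per-socket state instead of rescanning, and only the first and last rows of the 73-entry table are reproduced here:

    #include <stdio.h>

    struct aimd_val { unsigned int cwnd; unsigned int md; };

    /* First and last rows only; the full table is above. */
    static const struct aimd_val vals[] = {
        {    38, 128 }, {   118, 112 }, {   221, 104 }, {   347,  98 },
        { 84035,  25 }, { 89053,  24 },
    };
    #define NVALS (sizeof(vals) / sizeof(vals[0]))

    /* Pick the first row whose cwnd bound covers the current window;
     * on loss the kernel then does snd_cwnd -= (snd_cwnd * md) >> 8. */
    static unsigned int md_for_cwnd(unsigned int cwnd)
    {
        unsigned int i = 0;

        while (i < NVALS - 1 && cwnd > vals[i].cwnd)
            i++;
        return vals[i].md;
    }

    int main(void)
    {
        unsigned int cwnd = 500;
        unsigned int md = md_for_cwnd(cwnd);

        printf("cwnd=%u md=%u (~%.2f of the window shed on loss)\n",
               cwnd, md, md / 256.0);
        return 0;
    }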
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 031361311a8b..58469fff6c18 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -98,7 +98,8 @@ static inline void measure_rtt(struct sock *sk, u32 srtt)
}
}
-static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked, s32 rtt)
+static void measure_achieved_throughput(struct sock *sk,
+ u32 pkts_acked, s32 rtt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
@@ -148,8 +149,8 @@ static inline void htcp_beta_update(struct htcp *ca, u32 minRTT, u32 maxRTT)
if (use_bandwidth_switch) {
u32 maxB = ca->maxB;
u32 old_maxB = ca->old_maxB;
- ca->old_maxB = ca->maxB;
+ ca->old_maxB = ca->maxB;
if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
ca->beta = BETA_MIN;
ca->modeswitch = 0;
@@ -270,6 +271,7 @@ static void htcp_state(struct sock *sk, u8 new_state)
case TCP_CA_Open:
{
struct htcp *ca = inet_csk_ca(sk);
+
if (ca->undo_last_cong) {
ca->last_cong = jiffies;
ca->undo_last_cong = 0;
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index d8f8f05a4951..f963b274f2b0 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -29,7 +29,6 @@ static int rtt0 = 25;
module_param(rtt0, int, 0644);
MODULE_PARM_DESC(rtt0, "reference round trip time (ms)");
-
/* This is called to refresh values for hybla parameters */
static inline void hybla_recalc_param (struct sock *sk)
{
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 5999b3972e64..1d5a30a90adf 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -284,7 +284,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
if (delta >= tp->snd_cwnd) {
tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
- (u32) tp->snd_cwnd_clamp);
+ (u32)tp->snd_cwnd_clamp);
tp->snd_cwnd_cnt = 0;
}
}
@@ -299,7 +299,6 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
}
-
/* Extract info for Tcp socket info provided via netlink. */
static void tcp_illinois_info(struct sock *sk, u32 ext,
struct sk_buff *skb)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a906e0200ff2..aa38f98b7884 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -201,28 +201,25 @@ static inline bool tcp_in_quickack_mode(const struct sock *sk)
return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}
-static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
+static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
{
if (tp->ecn_flags & TCP_ECN_OK)
tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}
-static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
+static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
{
if (tcp_hdr(skb)->cwr)
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}
-static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
+static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
{
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}
-static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
+static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
- if (!(tp->ecn_flags & TCP_ECN_OK))
- return;
-
switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
case INET_ECN_NOT_ECT:
/* Funny extension: if ECT is not set on a segment,
@@ -233,30 +230,43 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *s
tcp_enter_quickack_mode((struct sock *)tp);
break;
case INET_ECN_CE:
+ if (tcp_ca_needs_ecn((struct sock *)tp))
+ tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE);
+
if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
/* Better not delay acks, sender can have a very low cwnd */
tcp_enter_quickack_mode((struct sock *)tp);
tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
}
- /* fallinto */
+ tp->ecn_flags |= TCP_ECN_SEEN;
+ break;
default:
+ if (tcp_ca_needs_ecn((struct sock *)tp))
+ tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE);
tp->ecn_flags |= TCP_ECN_SEEN;
+ break;
}
}
-static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
+static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
+{
+ if (tp->ecn_flags & TCP_ECN_OK)
+ __tcp_ecn_check_ce(tp, skb);
+}
+
+static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}
-static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
+static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}
-static bool TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
+static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
return true;
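The rework above splits the CE check in two: tcp_ecn_check_ce() is a cheap guard that tests TCP_ECN_OK, and __tcp_ecn_check_ce() carries the codepoint switch plus the new CA_EVENT_ECN_IS_CE / CA_EVENT_ECN_NO_CE notifications. A standalone sketch of that guard-plus-slow-path shape; types and flag values here are illustrative, not the kernel's:

    #include <stdio.h>

    #define ECN_OK 0x1   /* illustrative stand-in for TCP_ECN_OK */

    struct conn { unsigned int ecn_flags; };

    /* Slow path: only reached for ECN-enabled connections. */
    static void check_ce_slow(struct conn *c, unsigned int dsfield)
    {
        printf("flags=%#x, inspecting codepoint %u\n",
               c->ecn_flags, dsfield & 0x3);
    }

    /* Cheap guard: the common non-ECN case costs one flag test. */
    static inline void check_ce(struct conn *c, unsigned int dsfield)
    {
        if (c->ecn_flags & ECN_OK)
            check_ce_slow(c, dsfield);
    }

    int main(void)
    {
        struct conn on = { .ecn_flags = ECN_OK }, off = { 0 };

        check_ce(&on, 0x3);   /* CE codepoint: reaches the slow path */
        check_ce(&off, 0x3);  /* filtered out by the guard */
        return 0;
    }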
@@ -653,7 +663,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
}
icsk->icsk_ack.lrcvtime = now;
- TCP_ECN_check_ce(tp, skb);
+ tcp_ecn_check_ce(tp, skb);
if (skb->len >= 128)
tcp_grow_window(sk, skb);
@@ -1295,9 +1305,9 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
TCP_SKB_CB(prev)->end_seq += shifted;
TCP_SKB_CB(skb)->seq += shifted;
- skb_shinfo(prev)->gso_segs += pcount;
- BUG_ON(skb_shinfo(skb)->gso_segs < pcount);
- skb_shinfo(skb)->gso_segs -= pcount;
+ tcp_skb_pcount_add(prev, pcount);
+ BUG_ON(tcp_skb_pcount(skb) < pcount);
+ tcp_skb_pcount_add(skb, -pcount);
/* When we're adding to gso_segs == 1, gso_size will be zero,
* in theory this shouldn't be necessary but as long as DSACK
@@ -1310,7 +1320,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
}
/* CHECKME: To clear or not to clear? Mimics normal skb currently */
- if (skb_shinfo(skb)->gso_segs <= 1) {
+ if (tcp_skb_pcount(skb) <= 1) {
skb_shinfo(skb)->gso_size = 0;
skb_shinfo(skb)->gso_type = 0;
}
@@ -1888,21 +1898,21 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
tp->sacked_out = 0;
}
-static void tcp_clear_retrans_partial(struct tcp_sock *tp)
+void tcp_clear_retrans(struct tcp_sock *tp)
{
tp->retrans_out = 0;
tp->lost_out = 0;
-
tp->undo_marker = 0;
tp->undo_retrans = -1;
+ tp->fackets_out = 0;
+ tp->sacked_out = 0;
}
-void tcp_clear_retrans(struct tcp_sock *tp)
+static inline void tcp_init_undo(struct tcp_sock *tp)
{
- tcp_clear_retrans_partial(tp);
-
- tp->fackets_out = 0;
- tp->sacked_out = 0;
+ tp->undo_marker = tp->snd_una;
+ /* Retransmissions still in flight may cause DSACKs later. */
+ tp->undo_retrans = tp->retrans_out ? : -1;
}
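tcp_init_undo() relies on the GNU "?:" extension: "tp->retrans_out ? : -1" yields retrans_out when it is nonzero and -1 otherwise, evaluating the operand only once. A short demonstration of the equivalence (requires GCC or clang):

    #include <stdio.h>

    int main(void)
    {
        int retrans_out = 0;

        /* GNU extension: a ?: b  ==  a ? a : b, with a evaluated once. */
        int undo_retrans = retrans_out ? : -1;

        /* Portable equivalent: */
        int undo_portable = retrans_out ? retrans_out : -1;

        printf("%d %d\n", undo_retrans, undo_portable);
        return 0;
    }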
/* Enter Loss state. If we detect SACK reneging, forget all SACK information
@@ -1925,18 +1935,18 @@ void tcp_enter_loss(struct sock *sk)
tp->prior_ssthresh = tcp_current_ssthresh(sk);
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
tcp_ca_event(sk, CA_EVENT_LOSS);
+ tcp_init_undo(tp);
}
tp->snd_cwnd = 1;
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
- tcp_clear_retrans_partial(tp);
+ tp->retrans_out = 0;
+ tp->lost_out = 0;
if (tcp_is_reno(tp))
tcp_reset_reno_sack(tp);
- tp->undo_marker = tp->snd_una;
-
skb = tcp_write_queue_head(sk);
is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
if (is_reneg) {
@@ -1950,9 +1960,6 @@ void tcp_enter_loss(struct sock *sk)
if (skb == tcp_send_head(sk))
break;
- if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
- tp->undo_marker = 0;
-
TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
@@ -1972,7 +1979,7 @@ void tcp_enter_loss(struct sock *sk)
sysctl_tcp_reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
- TCP_ECN_queue_cwr(tp);
+ tcp_ecn_queue_cwr(tp);
/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
* loss recovery is underway except recurring timeout(s) on
@@ -2364,7 +2371,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
if (tp->prior_ssthresh > tp->snd_ssthresh) {
tp->snd_ssthresh = tp->prior_ssthresh;
- TCP_ECN_withdraw_cwr(tp);
+ tcp_ecn_withdraw_cwr(tp);
}
} else {
tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
@@ -2494,7 +2501,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
tp->prr_delivered = 0;
tp->prr_out = 0;
tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
- TCP_ECN_queue_cwr(tp);
+ tcp_ecn_queue_cwr(tp);
}
static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
@@ -2671,8 +2678,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
NET_INC_STATS_BH(sock_net(sk), mib_idx);
tp->prior_ssthresh = 0;
- tp->undo_marker = tp->snd_una;
- tp->undo_retrans = tp->retrans_out ? : -1;
+ tcp_init_undo(tp);
if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
if (!ece_ack)
@@ -2971,7 +2977,8 @@ void tcp_rearm_rto(struct sock *sk)
if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
struct sk_buff *skb = tcp_write_queue_head(sk);
- const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
+ const u32 rto_time_stamp =
+ tcp_skb_timestamp(skb) + rto;
s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
/* delta may not be positive if the socket is locked
* when the retrans timer fires and is rescheduled.
@@ -3211,9 +3218,10 @@ static void tcp_ack_probe(struct sock *sk)
* This function is not for random using!
*/
} else {
+ unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
+
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ when, TCP_RTO_MAX);
}
}
@@ -3364,6 +3372,14 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
}
}
+static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (icsk->icsk_ca_ops->in_ack_event)
+ icsk->icsk_ca_ops->in_ack_event(sk, flags);
+}
+
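tcp_in_ack_event() is a thin dispatcher: the in_ack_event callback is optional, so the core tests the pointer before calling. A hedged sketch of that optional-hook pattern; the struct and flag names here are modeled on, not copied from, the kernel's tcp_congestion_ops:

    #include <stdio.h>

    /* Illustrative flag values in the spirit of CA_ACK_*. */
    #define ACK_SLOWPATH   0x1
    #define ACK_WIN_UPDATE 0x2
    #define ACK_ECE        0x4

    struct ca_ops {
        /* Optional: modules that don't care leave this NULL. */
        void (*in_ack_event)(void *sk, unsigned int flags);
    };

    static void dispatch_in_ack_event(const struct ca_ops *ops, void *sk,
                                      unsigned int flags)
    {
        if (ops->in_ack_event)
            ops->in_ack_event(sk, flags);
    }

    static void demo_in_ack_event(void *sk, unsigned int flags)
    {
        (void)sk;
        if (flags & ACK_SLOWPATH)
            printf("slow-path ACK, flags=%#x\n", flags);
        else
            printf("fast-path ACK\n");
    }

    int main(void)
    {
        struct ca_ops with = { .in_ack_event = demo_in_ack_event };
        struct ca_ops without = { .in_ack_event = NULL };

        dispatch_in_ack_event(&with, NULL, ACK_SLOWPATH | ACK_ECE);
        dispatch_in_ack_event(&without, NULL, ACK_WIN_UPDATE); /* no-op */
        return 0;
    }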
/* This routine deals with incoming acks, but not outgoing ones. */
static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
{
@@ -3423,10 +3439,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tp->snd_una = ack;
flag |= FLAG_WIN_UPDATE;
- tcp_ca_event(sk, CA_EVENT_FAST_ACK);
+ tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
} else {
+ u32 ack_ev_flags = CA_ACK_SLOWPATH;
+
if (ack_seq != TCP_SKB_CB(skb)->end_seq)
flag |= FLAG_DATA;
else
@@ -3438,10 +3456,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
&sack_rtt_us);
- if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
+ if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
flag |= FLAG_ECE;
+ ack_ev_flags |= CA_ACK_ECE;
+ }
+
+ if (flag & FLAG_WIN_UPDATE)
+ ack_ev_flags |= CA_ACK_WIN_UPDATE;
- tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
+ tcp_in_ack_event(sk, ack_ev_flags);
}
/* We passed data and got it acked, remove any soft error
@@ -4063,6 +4086,44 @@ static void tcp_sack_remove(struct tcp_sock *tp)
tp->rx_opt.num_sacks = num_sacks;
}
+/**
+ * tcp_try_coalesce - try to merge skb to prior one
+ * @sk: socket
+ * @to: prior buffer
+ * @from: buffer to add in queue
+ * @fragstolen: pointer to boolean
+ *
+ * Before queueing skb @from after @to, try to merge them
+ * to reduce overall memory use and queue lengths, if cost is small.
+ * Packets in ofo or receive queues can stay a long time.
+ * Better try to coalesce them right now to avoid future collapses.
+ * Returns true if caller should free @from instead of queueing it
+ */
+static bool tcp_try_coalesce(struct sock *sk,
+ struct sk_buff *to,
+ struct sk_buff *from,
+ bool *fragstolen)
+{
+ int delta;
+
+ *fragstolen = false;
+
+ /* It's possible this segment overlaps with the prior segment in queue */
+ if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
+ return false;
+
+ if (!skb_try_coalesce(to, from, fragstolen, &delta))
+ return false;
+
+ atomic_add(delta, &sk->sk_rmem_alloc);
+ sk_mem_charge(sk, delta);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+ TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
+ TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
+ TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
+ return true;
+}
+
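tcp_try_coalesce() merges @from into @to only when the segments are exactly adjacent in sequence space; a gap or an overlap disqualifies the pair, and on success the caller frees @from. A userspace model of the adjacency test and end_seq update (sequence arithmetic simplified, no wraparound handling):

    #include <stdbool.h>
    #include <stdio.h>

    struct seg { unsigned int seq, end_seq; };

    /* Merge 'from' into 'to' only if 'from' starts exactly where 'to'
     * ends, mirroring the seq/end_seq check in tcp_try_coalesce(). */
    static bool try_coalesce(struct seg *to, const struct seg *from)
    {
        if (from->seq != to->end_seq)
            return false;   /* gap or overlap: do not merge */

        to->end_seq = from->end_seq;
        return true;        /* caller frees 'from' instead of queueing it */
    }

    int main(void)
    {
        struct seg a = { 1000, 2000 };
        struct seg adjacent = { 2000, 2500 };
        struct seg gapped   = { 2600, 2700 };

        printf("adjacent: %d -> [%u,%u)\n",
               try_coalesce(&a, &adjacent), a.seq, a.end_seq);
        printf("gapped:   %d\n", try_coalesce(&a, &gapped));
        return 0;
    }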
/* This one checks to see if we can put data from the
* out_of_order queue into the receive_queue.
*/
@@ -4070,7 +4131,8 @@ static void tcp_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
__u32 dsack_high = tp->rcv_nxt;
- struct sk_buff *skb;
+ struct sk_buff *skb, *tail;
+ bool fragstolen, eaten;
while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
@@ -4083,9 +4145,9 @@ static void tcp_ofo_queue(struct sock *sk)
tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
}
+ __skb_unlink(skb, &tp->out_of_order_queue);
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
SOCK_DEBUG(sk, "ofo packet was already received\n");
- __skb_unlink(skb, &tp->out_of_order_queue);
__kfree_skb(skb);
continue;
}
@@ -4093,11 +4155,15 @@ static void tcp_ofo_queue(struct sock *sk)
tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(skb)->end_seq);
- __skb_unlink(skb, &tp->out_of_order_queue);
- __skb_queue_tail(&sk->sk_receive_queue, skb);
+ tail = skb_peek_tail(&sk->sk_receive_queue);
+ eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
- if (tcp_hdr(skb)->fin)
+ if (!eaten)
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
tcp_fin(sk);
+ if (eaten)
+ kfree_skb_partial(skb, fragstolen);
}
}
@@ -4124,53 +4190,13 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
return 0;
}
-/**
- * tcp_try_coalesce - try to merge skb to prior one
- * @sk: socket
- * @to: prior buffer
- * @from: buffer to add in queue
- * @fragstolen: pointer to boolean
- *
- * Before queueing skb @from after @to, try to merge them
- * to reduce overall memory use and queue lengths, if cost is small.
- * Packets in ofo or receive queues can stay a long time.
- * Better try to coalesce them right now to avoid future collapses.
- * Returns true if caller should free @from instead of queueing it
- */
-static bool tcp_try_coalesce(struct sock *sk,
- struct sk_buff *to,
- struct sk_buff *from,
- bool *fragstolen)
-{
- int delta;
-
- *fragstolen = false;
-
- if (tcp_hdr(from)->fin)
- return false;
-
- /* Its possible this segment overlaps with prior segment in queue */
- if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
- return false;
-
- if (!skb_try_coalesce(to, from, fragstolen, &delta))
- return false;
-
- atomic_add(delta, &sk->sk_rmem_alloc);
- sk_mem_charge(sk, delta);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
- TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
- TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
- return true;
-}
-
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb1;
u32 seq, end_seq;
- TCP_ECN_check_ce(tp, skb);
+ tcp_ecn_check_ce(tp, skb);
if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
@@ -4309,24 +4335,19 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
{
- struct sk_buff *skb = NULL;
- struct tcphdr *th;
+ struct sk_buff *skb;
bool fragstolen;
if (size == 0)
return 0;
- skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
+ skb = alloc_skb(size, sk->sk_allocation);
if (!skb)
goto err;
- if (tcp_try_rmem_schedule(sk, skb, size + sizeof(*th)))
+ if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
goto err_free;
- th = (struct tcphdr *)skb_put(skb, sizeof(*th));
- skb_reset_transport_header(skb);
- memset(th, 0, sizeof(*th));
-
if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
goto err_free;
@@ -4334,7 +4355,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
- if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) {
+ if (tcp_queue_rcv(sk, skb, 0, &fragstolen)) {
WARN_ON_ONCE(fragstolen); /* should not happen */
__kfree_skb(skb);
}
@@ -4348,7 +4369,6 @@ err:
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
- const struct tcphdr *th = tcp_hdr(skb);
struct tcp_sock *tp = tcp_sk(sk);
int eaten = -1;
bool fragstolen = false;
@@ -4357,9 +4377,9 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
goto drop;
skb_dst_drop(skb);
- __skb_pull(skb, th->doff * 4);
+ __skb_pull(skb, tcp_hdr(skb)->doff * 4);
- TCP_ECN_accept_cwr(tp, skb);
+ tcp_ecn_accept_cwr(tp, skb);
tp->rx_opt.dsack = 0;
@@ -4401,7 +4421,7 @@ queue_and_out:
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (skb->len)
tcp_event_data_recv(sk, skb);
- if (th->fin)
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
tcp_fin(sk);
if (!skb_queue_empty(&tp->out_of_order_queue)) {
@@ -4516,7 +4536,7 @@ restart:
* - bloated or contains data before "start" or
* overlaps to the next one.
*/
- if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
+ if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) &&
(tcp_win_from_space(skb->truesize) > skb->len ||
before(TCP_SKB_CB(skb)->seq, start))) {
end_of_skbs = false;
@@ -4535,30 +4555,18 @@ restart:
/* Decided to skip this, advance start seq. */
start = TCP_SKB_CB(skb)->end_seq;
}
- if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
+ if (end_of_skbs ||
+ (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
return;
while (before(start, end)) {
+ int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
struct sk_buff *nskb;
- unsigned int header = skb_headroom(skb);
- int copy = SKB_MAX_ORDER(header, 0);
- /* Too big header? This can happen with IPv6. */
- if (copy < 0)
- return;
- if (end - start < copy)
- copy = end - start;
- nskb = alloc_skb(copy + header, GFP_ATOMIC);
+ nskb = alloc_skb(copy, GFP_ATOMIC);
if (!nskb)
return;
- skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head);
- skb_set_network_header(nskb, (skb_network_header(skb) -
- skb->head));
- skb_set_transport_header(nskb, (skb_transport_header(skb) -
- skb->head));
- skb_reserve(nskb, header);
- memcpy(nskb->head, skb->head, header);
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
__skb_queue_before(list, skb, nskb);
@@ -4582,8 +4590,7 @@ restart:
skb = tcp_collapse_one(sk, skb, list);
if (!skb ||
skb == tail ||
- tcp_hdr(skb)->syn ||
- tcp_hdr(skb)->fin)
+ (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
return;
}
}
@@ -5453,7 +5460,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* state to ESTABLISHED..."
*/
- TCP_ECN_rcv_synack(tp, th);
+ tcp_ecn_rcv_synack(tp, th);
tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
tcp_ack(sk, skb, FLAG_SLOWPATH);
@@ -5572,7 +5579,7 @@ discard:
tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
tp->max_window = tp->snd_wnd;
- TCP_ECN_rcv_syn(tp, th);
+ tcp_ecn_rcv_syn(tp, th);
tcp_mtup_init(sk);
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
@@ -5902,6 +5909,40 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
#endif
}
+/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
+ *
+ * If we receive a SYN packet with these bits set, it means a
+ * network is playing bad games with TOS bits. In order to
+ * avoid possible false congestion notifications, we disable
+ * TCP ECN negotiation.
+ *
+ * Exception: tcp_ca wants ECN. This is required for DCTCP
+ * congestion control; it requires setting ECT on all packets,
+ * including SYN. We invert the test in this case: if our
+ * local socket wants ECN but the peer only set ece/cwr (and not
+ * ECT in the IP header), it is probably a non-DCTCP-aware sender.
+ */
+static void tcp_ecn_create_request(struct request_sock *req,
+ const struct sk_buff *skb,
+ const struct sock *listen_sk)
+{
+ const struct tcphdr *th = tcp_hdr(skb);
+ const struct net *net = sock_net(listen_sk);
+ bool th_ecn = th->ece && th->cwr;
+ bool ect, need_ecn;
+
+ if (!th_ecn)
+ return;
+
+ ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
+ need_ecn = tcp_ca_needs_ecn(listen_sk);
+
+ if (!ect && !need_ecn && net->ipv4.sysctl_tcp_ecn)
+ inet_rsk(req)->ecn_ok = 1;
+ else if (ect && need_ecn)
+ inet_rsk(req)->ecn_ok = 1;
+}
+
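tcp_ecn_create_request() accepts ECN in two disjoint cases: a classic RFC 3168 peer (ece+cwr set, no ECT on the SYN) when the sysctl allows it and the local congestion control does not demand ECN, or a DCTCP-style peer (ECT set on the SYN) when it does. The decision restated as a standalone predicate, matching the branches above:

    #include <stdbool.h>
    #include <stdio.h>

    /* th_ecn:   ece && cwr set on the SYN
     * ect:      ECT codepoint present in the IP header
     * need_ecn: local congestion control requires ECN (e.g. DCTCP)
     * sysctl:   net.ipv4.tcp_ecn enabled */
    static bool ecn_ok(bool th_ecn, bool ect, bool need_ecn, bool sysctl)
    {
        if (!th_ecn)
            return false;
        if (!ect && !need_ecn && sysctl)
            return true;    /* classic RFC 3168 negotiation */
        if (ect && need_ecn)
            return true;    /* DCTCP-style peer */
        return false;
    }

    int main(void)
    {
        printf("classic peer: %d\n", ecn_ok(true, false, false, true));
        printf("DCTCP peer:   %d\n", ecn_ok(true, true, true, false));
        printf("bogus TOS:    %d\n", ecn_ok(true, true, false, true));
        return 0;
    }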
int tcp_conn_request(struct request_sock_ops *rsk_ops,
const struct tcp_request_sock_ops *af_ops,
struct sock *sk, struct sk_buff *skb)
@@ -5910,7 +5951,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
struct request_sock *req;
struct tcp_sock *tp = tcp_sk(sk);
struct dst_entry *dst = NULL;
- __u32 isn = TCP_SKB_CB(skb)->when;
+ __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
bool want_cookie = false, fastopen;
struct flowi fl;
struct tcp_fastopen_cookie foc = { .len = -1 };
@@ -5962,7 +6003,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
goto drop_and_free;
if (!want_cookie || tmp_opt.tstamp_ok)
- TCP_ECN_create_request(req, skb, sock_net(sk));
+ tcp_ecn_create_request(req, skb, sk);
if (want_cookie) {
isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index cd17f009aede..9ce3eac02957 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -90,7 +90,6 @@ int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
-
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, const struct tcphdr *th);
@@ -431,15 +430,16 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
break;
icsk->icsk_backoff--;
- inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
- TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
- tcp_bound_rto(sk);
+ icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
+ TCP_TIMEOUT_INIT;
+ icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
skb = tcp_write_queue_head(sk);
BUG_ON(!skb);
- remaining = icsk->icsk_rto - min(icsk->icsk_rto,
- tcp_time_stamp - TCP_SKB_CB(skb)->when);
+ remaining = icsk->icsk_rto -
+ min(icsk->icsk_rto,
+ tcp_time_stamp - tcp_skb_timestamp(skb));
if (remaining) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
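Both replacements in this hunk lean on inet_csk_rto_backoff(), which computes icsk_rto << icsk_backoff clamped to a maximum, doing the shift in 64 bits so a large backoff cannot overflow. A userspace model of that arithmetic; treat it as a sketch, the helper's exact definition lives in the kernel headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the shift-and-clamp backoff: compute rto << backoff in
     * 64 bits so a large backoff cannot overflow, then clamp to max. */
    static unsigned long rto_backoff(unsigned long rto_ms,
                                     unsigned int backoff,
                                     unsigned long max_ms)
    {
        uint64_t when = (uint64_t)rto_ms << backoff;

        return when > max_ms ? max_ms : (unsigned long)when;
    }

    int main(void)
    {
        const unsigned long rto_max_ms = 120000;  /* ~TCP_RTO_MAX */

        for (unsigned int b = 0; b <= 12; b++)
            printf("backoff=%2u -> %lu ms\n", b,
                   rto_backoff(200, b, rto_max_ms));
        return 0;
    }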
@@ -681,8 +681,9 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
net = dev_net(skb_dst(skb)->dev);
arg.tos = ip_hdr(skb)->tos;
- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
+ ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ &arg, arg.iov[0].iov_len);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -764,8 +765,9 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
if (oif)
arg.bound_dev_if = oif;
arg.tos = tos;
- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
- ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
+ ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+ ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+ &arg, arg.iov[0].iov_len);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
@@ -884,18 +886,16 @@ EXPORT_SYMBOL(tcp_syn_flood_action);
*/
static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
{
- const struct ip_options *opt = &(IPCB(skb)->opt);
+ const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
struct ip_options_rcu *dopt = NULL;
if (opt && opt->optlen) {
int opt_size = sizeof(*dopt) + opt->optlen;
dopt = kmalloc(opt_size, GFP_ATOMIC);
- if (dopt) {
- if (ip_options_echo(&dopt->opt, skb)) {
- kfree(dopt);
- dopt = NULL;
- }
+ if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
+ kfree(dopt);
+ dopt = NULL;
}
}
return dopt;
@@ -1269,7 +1269,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.send_ack = tcp_v4_reqsk_send_ack,
.destructor = tcp_v4_reqsk_destructor,
.send_reset = tcp_v4_send_reset,
- .syn_ack_timeout = tcp_syn_ack_timeout,
+ .syn_ack_timeout = tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
@@ -1429,7 +1429,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
#ifdef CONFIG_SYN_COOKIES
if (!th->syn)
- sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
+ sk = cookie_v4_check(sk, skb, &TCP_SKB_CB(skb)->header.h4.opt);
#endif
return sk;
}
@@ -1559,7 +1559,17 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
skb_queue_len(&tp->ucopy.prequeue) == 0)
return false;
- skb_dst_force(skb);
+ /* Before escaping the RCU-protected region, we need to take care of the
+ * skb dst. Prequeue is only enabled for established sockets.
+ * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
+ * Instead of doing a full sk_rx_dst validity check here, let's perform
+ * an optimistic one.
+ */
+ if (likely(sk->sk_rx_dst))
+ skb_dst_drop(skb);
+ else
+ skb_dst_force(skb);
+
__skb_queue_tail(&tp->ucopy.prequeue, skb);
tp->ucopy.memory += skb->truesize;
if (tp->ucopy.memory > sk->sk_rcvbuf) {
@@ -1624,11 +1634,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
th = tcp_hdr(skb);
iph = ip_hdr(skb);
+ /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
+ * barrier() makes sure the compiler won't play fool^Waliasing games.
+ */
+ memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
+ sizeof(struct inet_skb_parm));
+ barrier();
+
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb->len - th->doff * 4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
- TCP_SKB_CB(skb)->when = 0;
+ TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+ TCP_SKB_CB(skb)->tcp_tw_isn = 0;
TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
TCP_SKB_CB(skb)->sacked = 0;
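The memmove above exists because the reworked tcp_skb_cb no longer keeps the header union at offset 0 of skb->cb, where the IP layer wrote its control block; the data must be relocated before TCP starts writing its own fields over the front bytes, and barrier() stops the compiler from reusing stale reads of the old location. A hedged userspace model of the relocation (field sizes and offsets are illustrative, not the kernel's):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for struct inet_skb_parm. */
    struct ip_parm { int iif; unsigned char opt[12]; };

    /* Model of the reworked tcp_skb_cb: the header union no longer
     * sits at offset 0 of skb->cb, so the IP parm must be moved. */
    struct tcp_cb {
        unsigned int seq, end_seq, ack_seq;
        struct ip_parm header;
    };

    int main(void)
    {
        unsigned char cb[48] = { 0 };

        /* The IP layer wrote its control block at offset 0... */
        struct ip_parm *src = (struct ip_parm *)cb;
        src->iif = 3;

        /* ...so TCP relocates it into the header field's new position
         * before writing seq/end_seq/ack_seq over the front bytes.
         * The regions may overlap, hence memmove, not memcpy. */
        struct tcp_cb *tcb = (struct tcp_cb *)cb;
        memmove(&tcb->header, src, sizeof(*src));

        tcb->seq = 1000;   /* safe: no longer clobbers the IP parm */
        printf("iif after move: %d\n", tcb->header.iif);
        return 0;
    }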
@@ -1765,9 +1783,11 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- dst_hold(dst);
- sk->sk_rx_dst = dst;
- inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+ if (dst) {
+ dst_hold(dst);
+ sk->sk_rx_dst = dst;
+ inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+ }
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
@@ -2183,7 +2203,7 @@ int tcp_seq_open(struct inode *inode, struct file *file)
s = ((struct seq_file *)file->private_data)->private;
s->family = afinfo->family;
- s->last_pos = 0;
+ s->last_pos = 0;
return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 1649988bd1b6..63d2680b65db 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -232,7 +232,7 @@ kill:
u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
if (isn == 0)
isn++;
- TCP_SKB_CB(skb)->when = isn;
+ TCP_SKB_CB(skb)->tcp_tw_isn = isn;
return TCP_TW_SYN;
}
@@ -393,8 +393,8 @@ void tcp_openreq_init_rwin(struct request_sock *req,
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
-static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
- struct request_sock *req)
+static void tcp_ecn_openreq_child(struct tcp_sock *tp,
+ const struct request_sock *req)
{
tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}
@@ -451,9 +451,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->snd_cwnd = TCP_INIT_CWND;
newtp->snd_cwnd_cnt = 0;
- if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
- !try_module_get(newicsk->icsk_ca_ops->owner))
- newicsk->icsk_ca_ops = &tcp_init_congestion_ops;
+ if (!try_module_get(newicsk->icsk_ca_ops->owner))
+ tcp_assign_congestion_control(newsk);
tcp_set_ca_state(newsk, TCP_CA_Open);
tcp_init_xmit_timers(newsk);
@@ -508,7 +507,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
- TCP_ECN_openreq_child(newtp, req);
+ tcp_ecn_openreq_child(newtp, req);
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index bc1b83cb8309..5b90f2f447a5 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -29,6 +29,28 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
}
}
+struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
+ return ERR_PTR(-EINVAL);
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th = tcp_hdr(skb);
+
+ /* Set up the checksum pseudo header; we usually expect the
+ * stack to have done this already.
+ */
+
+ th->check = 0;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
+ }
+
+ return tcp_gso_segment(skb, features);
+}
+
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -44,9 +66,6 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
__sum16 newcheck;
bool ooo_okay, copy_destructor;
- if (!pskb_may_pull(skb, sizeof(*th)))
- goto out;
-
th = tcp_hdr(skb);
thlen = th->doff * 4;
if (thlen < sizeof(*th))
@@ -269,54 +288,16 @@ int tcp_gro_complete(struct sk_buff *skb)
}
EXPORT_SYMBOL(tcp_gro_complete);
-static int tcp_v4_gso_send_check(struct sk_buff *skb)
-{
- const struct iphdr *iph;
- struct tcphdr *th;
-
- if (!pskb_may_pull(skb, sizeof(*th)))
- return -EINVAL;
-
- iph = ip_hdr(skb);
- th = tcp_hdr(skb);
-
- th->check = 0;
- skb->ip_summed = CHECKSUM_PARTIAL;
- __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
- return 0;
-}
-
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
- /* Use the IP hdr immediately proceeding for this transport */
- const struct iphdr *iph = skb_gro_network_header(skb);
- __wsum wsum;
-
/* Don't bother verifying checksum if we're going to flush anyway. */
- if (NAPI_GRO_CB(skb)->flush)
- goto skip_csum;
-
- wsum = NAPI_GRO_CB(skb)->csum;
-
- switch (skb->ip_summed) {
- case CHECKSUM_NONE:
- wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
- 0);
-
- /* fall through */
-
- case CHECKSUM_COMPLETE:
- if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
- wsum)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- }
-
+ if (!NAPI_GRO_CB(skb)->flush &&
+ skb_gro_checksum_validate(skb, IPPROTO_TCP,
+ inet_gro_compute_pseudo)) {
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}
-skip_csum:
return tcp_gro_receive(head, skb);
}
@@ -334,8 +315,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
static const struct net_offload tcpv4_offload = {
.callbacks = {
- .gso_send_check = tcp_v4_gso_send_check,
- .gso_segment = tcp_gso_segment,
+ .gso_segment = tcp4_gso_segment,
.gro_receive = tcp4_gro_receive,
.gro_complete = tcp4_gro_complete,
},
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5a7c41fbc6d3..8d4eac793700 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -318,36 +318,47 @@ static u16 tcp_select_window(struct sock *sk)
}
/* Packet ECN state for a SYN-ACK */
-static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
+static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
+ const struct tcp_sock *tp = tcp_sk(sk);
+
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
if (!(tp->ecn_flags & TCP_ECN_OK))
TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
+ else if (tcp_ca_needs_ecn(sk))
+ INET_ECN_xmit(sk);
}
/* Packet ECN state for a SYN. */
-static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
+static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
tp->ecn_flags = 0;
- if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
+ if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
+ tcp_ca_needs_ecn(sk)) {
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
tp->ecn_flags = TCP_ECN_OK;
+ if (tcp_ca_needs_ecn(sk))
+ INET_ECN_xmit(sk);
}
}
-static __inline__ void
-TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
+static void
+tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th,
+ struct sock *sk)
{
- if (inet_rsk(req)->ecn_ok)
+ if (inet_rsk(req)->ecn_ok) {
th->ece = 1;
+ if (tcp_ca_needs_ecn(sk))
+ INET_ECN_xmit(sk);
+ }
}
/* Set up ECN state for a packet on a ESTABLISHED socket that is about to
* be sent.
*/
-static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
+static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
int tcp_header_len)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -362,7 +373,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
tcp_hdr(skb)->cwr = 1;
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
}
- } else {
+ } else if (!tcp_ca_needs_ecn(sk)) {
/* ACK or retransmitted segment: clear ECT|CE */
INET_ECN_dontxmit(sk);
}
@@ -384,7 +395,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
TCP_SKB_CB(skb)->tcp_flags = flags;
TCP_SKB_CB(skb)->sacked = 0;
- shinfo->gso_segs = 1;
+ tcp_skb_pcount_set(skb, 1);
shinfo->gso_size = 0;
shinfo->gso_type = 0;
@@ -550,7 +561,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
opts->options |= OPTION_TS;
- opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
+ opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
opts->tsecr = tp->rx_opt.ts_recent;
remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
@@ -618,7 +629,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
}
if (likely(ireq->tstamp_ok)) {
opts->options |= OPTION_TS;
- opts->tsval = TCP_SKB_CB(skb)->when;
+ opts->tsval = tcp_skb_timestamp(skb);
opts->tsecr = req->ts_recent;
remaining -= TCPOLEN_TSTAMP_ALIGNED;
}
@@ -647,7 +658,6 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
struct tcp_out_options *opts,
struct tcp_md5sig_key **md5)
{
- struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
struct tcp_sock *tp = tcp_sk(sk);
unsigned int size = 0;
unsigned int eff_sacks;
@@ -666,7 +676,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
if (likely(tp->rx_opt.tstamp_ok)) {
opts->options |= OPTION_TS;
- opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
+ opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
opts->tsecr = tp->rx_opt.ts_recent;
size += TCPOLEN_TSTAMP_ALIGNED;
}
@@ -886,8 +896,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
skb = skb_clone(skb, gfp_mask);
if (unlikely(!skb))
return -ENOBUFS;
- /* Our usage of tstamp should remain private */
- skb->tstamp.tv64 = 0;
}
inet = inet_sk(sk);
@@ -952,7 +960,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
tcp_options_write((__be32 *)(th + 1), tp, &opts);
if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
- TCP_ECN_send(sk, skb, tcp_header_size);
+ tcp_ecn_send(sk, skb, tcp_header_size);
#ifdef CONFIG_TCP_MD5SIG
/* Calculate the MD5 hash, as we have all we need now */
@@ -975,7 +983,18 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
tcp_skb_pcount(skb));
+ /* OK, it's time to fill skb_shinfo(skb)->gso_segs */
+ skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
+
+ /* Our usage of tstamp should remain private */
+ skb->tstamp.tv64 = 0;
+
+ /* Cleanup our debris for IP stacks */
+ memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
+ sizeof(struct inet6_skb_parm)));
+
err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
+
if (likely(err <= 0))
return err;
@@ -995,7 +1014,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
/* Advance write_seq and place onto the write_queue. */
tp->write_seq = TCP_SKB_CB(skb)->end_seq;
- skb_header_release(skb);
+ __skb_header_release(skb);
tcp_add_write_queue_tail(sk, skb);
sk->sk_wmem_queued += skb->truesize;
sk_mem_charge(sk, skb->truesize);
@@ -1014,11 +1033,11 @@ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
/* Avoid the costly divide in the normal
* non-TSO case.
*/
- shinfo->gso_segs = 1;
+ tcp_skb_pcount_set(skb, 1);
shinfo->gso_size = 0;
shinfo->gso_type = 0;
} else {
- shinfo->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
+ tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
shinfo->gso_size = mss_now;
shinfo->gso_type = sk->sk_gso_type;
}
@@ -1146,10 +1165,6 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
buff->ip_summed = skb->ip_summed;
- /* Looks stupid, but our code really uses when of
- * skbs, which it never sent before. --ANK
- */
- TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
buff->tstamp = skb->tstamp;
tcp_fragment_tstamp(skb, buff);
@@ -1171,7 +1186,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
}
/* Link BUFF into the send queue. */
- skb_header_release(buff);
+ __skb_header_release(buff);
tcp_insert_write_queue_after(skb, buff, sk);
return 0;
@@ -1675,7 +1690,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
tcp_set_skb_tso_segs(sk, buff, mss_now);
/* Link BUFF into the send queue. */
- skb_header_release(buff);
+ __skb_header_release(buff);
tcp_insert_write_queue_after(skb, buff, sk);
return 0;
@@ -1874,8 +1889,8 @@ static int tcp_mtu_probe(struct sock *sk)
tcp_init_tso_segs(sk, nskb, nskb->len);
/* We're ready to send. If this fails, the probe will
- * be resegmented into mss-sized pieces by tcp_write_xmit(). */
- TCP_SKB_CB(nskb)->when = tcp_time_stamp;
+ * be resegmented into mss-sized pieces by tcp_write_xmit().
+ */
if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
/* Decrement cwnd here because we are sending
* effectively two packets. */
@@ -1935,8 +1950,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
BUG_ON(!tso_segs);
if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
- /* "when" is used as a start point for the retransmit timer */
- TCP_SKB_CB(skb)->when = tcp_time_stamp;
+ /* "skb_mstamp" is used as a start point for the retransmit timer */
+ skb_mstamp_get(&skb->skb_mstamp);
goto repair; /* Skip network transmission */
}
@@ -2000,8 +2015,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
- TCP_SKB_CB(skb)->when = tcp_time_stamp;
-
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;
@@ -2097,10 +2110,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
static bool skb_still_in_host_queue(const struct sock *sk,
const struct sk_buff *skb)
{
- const struct sk_buff *fclone = skb + 1;
-
- if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
- fclone->fclone == SKB_FCLONE_CLONE)) {
+ if (unlikely(skb_fclone_busy(skb))) {
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
return true;
@@ -2499,7 +2509,6 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
/* Make a copy, if the first transmission SKB clone we made
* is still in somebody's hands, else make a clone.
*/
- TCP_SKB_CB(skb)->when = tcp_time_stamp;
/* make sure skb->data is aligned on arches that require it
* and check if ack-trimming & collapsing extended the headroom
@@ -2544,7 +2553,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
/* Save stamp of the first retransmit. */
if (!tp->retrans_stamp)
- tp->retrans_stamp = TCP_SKB_CB(skb)->when;
+ tp->retrans_stamp = tcp_skb_timestamp(skb);
/* snd_nxt is stored to detect loss of retransmitted segment,
* see tcp_input.c tcp_sacktag_write_queue().
@@ -2752,7 +2761,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
TCPHDR_ACK | TCPHDR_RST);
/* Send it off. */
- TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (tcp_transmit_skb(sk, skb, 0, priority))
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
@@ -2780,7 +2788,7 @@ int tcp_send_synack(struct sock *sk)
if (nskb == NULL)
return -ENOMEM;
tcp_unlink_write_queue(skb, sk);
- skb_header_release(nskb);
+ __skb_header_release(nskb);
__tcp_add_write_queue_head(sk, nskb);
sk_wmem_free_skb(sk, skb);
sk->sk_wmem_queued += nskb->truesize;
@@ -2789,9 +2797,8 @@ int tcp_send_synack(struct sock *sk)
}
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
- TCP_ECN_send_synack(tcp_sk(sk), skb);
+ tcp_ecn_send_synack(sk, skb);
}
- TCP_SKB_CB(skb)->when = tcp_time_stamp;
return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
@@ -2835,10 +2842,10 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
memset(&opts, 0, sizeof(opts));
#ifdef CONFIG_SYN_COOKIES
if (unlikely(req->cookie_ts))
- TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
+ skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
else
#endif
- TCP_SKB_CB(skb)->when = tcp_time_stamp;
+ skb_mstamp_get(&skb->skb_mstamp);
tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
foc) + sizeof(*th);
@@ -2849,7 +2856,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
memset(th, 0, sizeof(struct tcphdr));
th->syn = 1;
th->ack = 1;
- TCP_ECN_make_synack(req, th);
+ tcp_ecn_make_synack(req, th, sk);
th->source = htons(ireq->ir_num);
th->dest = ireq->ir_rmt_port;
/* Setting of flags are superfluous here for callers (and ECE is
@@ -2956,7 +2963,7 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
tcb->end_seq += skb->len;
- skb_header_release(skb);
+ __skb_header_release(skb);
__tcp_add_write_queue_tail(sk, skb);
sk->sk_wmem_queued += skb->truesize;
sk_mem_charge(sk, skb->truesize);
@@ -3086,9 +3093,9 @@ int tcp_connect(struct sock *sk)
skb_reserve(buff, MAX_TCP_HEADER);
tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
- tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
+ tp->retrans_stamp = tcp_time_stamp;
tcp_connect_queue_skb(sk, buff);
- TCP_ECN_send_syn(sk, buff);
+ tcp_ecn_send_syn(sk, buff);
/* Send off SYN; include data in Fast Open. */
err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
@@ -3120,6 +3127,8 @@ void tcp_send_delayed_ack(struct sock *sk)
int ato = icsk->icsk_ack.ato;
unsigned long timeout;
+ tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
+
if (ato > TCP_DELACK_MIN) {
const struct tcp_sock *tp = tcp_sk(sk);
int max_ato = HZ / 2;
@@ -3176,6 +3185,8 @@ void tcp_send_ack(struct sock *sk)
if (sk->sk_state == TCP_CLOSE)
return;
+ tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
+
/* We are not putting this on the write queue, so
* tcp_transmit_skb() will set the ownership to this
* sock.
@@ -3194,9 +3205,10 @@ void tcp_send_ack(struct sock *sk)
tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
/* Send it off, this clears delayed acks for us. */
- TCP_SKB_CB(buff)->when = tcp_time_stamp;
+ skb_mstamp_get(&buff->skb_mstamp);
tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
}
+EXPORT_SYMBOL_GPL(tcp_send_ack);
/* This routine sends a packet with an out of date sequence
* number. It assumes the other end will try to ack it.
@@ -3226,7 +3238,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
* send it.
*/
tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
- TCP_SKB_CB(skb)->when = tcp_time_stamp;
+ skb_mstamp_get(&skb->skb_mstamp);
return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
@@ -3270,7 +3282,6 @@ int tcp_write_wakeup(struct sock *sk)
tcp_set_skb_tso_segs(sk, skb, mss);
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
- TCP_SKB_CB(skb)->when = tcp_time_stamp;
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
if (!err)
tcp_event_new_data_sent(sk, skb);
@@ -3289,6 +3300,7 @@ void tcp_send_probe0(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long probe_max;
int err;
err = tcp_write_wakeup(sk);
@@ -3304,9 +3316,7 @@ void tcp_send_probe0(struct sock *sk)
if (icsk->icsk_backoff < sysctl_tcp_retries2)
icsk->icsk_backoff++;
icsk->icsk_probes_out++;
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ probe_max = TCP_RTO_MAX;
} else {
/* If packet was not sent due to local congestion,
* do not backoff and do not remember icsk_probes_out.
@@ -3316,11 +3326,11 @@ void tcp_send_probe0(struct sock *sk)
*/
if (!icsk->icsk_probes_out)
icsk->icsk_probes_out = 1;
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff,
- TCP_RESOURCE_PROBE_INTERVAL),
- TCP_RTO_MAX);
+ probe_max = TCP_RESOURCE_PROBE_INTERVAL;
}
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+ inet_csk_rto_backoff(icsk, probe_max),
+ TCP_RTO_MAX);
}
int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 3b66610d4156..ebf5ff57526e 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -83,7 +83,6 @@ static struct {
struct tcp_log *log;
} tcp_probe;
-
static inline int tcp_probe_used(void)
{
return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1);
@@ -101,7 +100,6 @@ static inline int tcp_probe_avail(void)
si4.sin_addr.s_addr = inet->inet_##mem##addr; \
} while (0) \
-
/*
* Hook inserted to be called before each receive packet.
* Note: arguments must match tcp_rcv_established()!
@@ -194,8 +192,8 @@ static int tcpprobe_sprint(char *tbuf, int n)
return scnprintf(tbuf, n,
"%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n",
- (unsigned long) tv.tv_sec,
- (unsigned long) tv.tv_nsec,
+ (unsigned long)tv.tv_sec,
+ (unsigned long)tv.tv_nsec,
&p->src, &p->dst, p->length, p->snd_nxt, p->snd_una,
p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd);
}
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 8250949b8853..6824afb65d93 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -31,10 +31,10 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
static u32 tcp_scalable_ssthresh(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+
return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
}
-
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
.ssthresh = tcp_scalable_ssthresh,
.cong_avoid = tcp_scalable_cong_avoid,
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index df90cd1ce37f..9b21ae8b2e31 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -52,7 +52,7 @@ static void tcp_write_err(struct sock *sk)
* limit.
* 2. If we have strong memory pressure.
*/
-static int tcp_out_of_resources(struct sock *sk, int do_reset)
+static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
struct tcp_sock *tp = tcp_sk(sk);
int shift = 0;
@@ -72,7 +72,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
/* 2. Window is closed. */
(!tp->snd_wnd && !tp->packets_out))
- do_reset = 1;
+ do_reset = true;
if (do_reset)
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_done(sk);
@@ -135,10 +135,9 @@ static bool retransmits_timed_out(struct sock *sk,
if (!inet_csk(sk)->icsk_retransmits)
return false;
- if (unlikely(!tcp_sk(sk)->retrans_stamp))
- start_ts = TCP_SKB_CB(tcp_write_queue_head(sk))->when;
- else
- start_ts = tcp_sk(sk)->retrans_stamp;
+ start_ts = tcp_sk(sk)->retrans_stamp;
+ if (unlikely(!start_ts))
+ start_ts = tcp_skb_timestamp(tcp_write_queue_head(sk));
if (likely(timeout == 0)) {
linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
@@ -181,7 +180,7 @@ static int tcp_write_timeout(struct sock *sk)
retry_until = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
- const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
+ const int alive = icsk->icsk_rto < TCP_RTO_MAX;
retry_until = tcp_orphan_retries(sk, alive);
do_reset = alive ||
@@ -271,40 +270,41 @@ static void tcp_probe_timer(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int max_probes;
+ u32 start_ts;
if (tp->packets_out || !tcp_send_head(sk)) {
icsk->icsk_probes_out = 0;
return;
}
- /* *WARNING* RFC 1122 forbids this
- *
- * It doesn't AFAIK, because we kill the retransmit timer -AK
- *
- * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
- * this behaviour in Solaris down as a bug fix. [AC]
- *
- * Let me to explain. icsk_probes_out is zeroed by incoming ACKs
- * even if they advertise zero window. Hence, connection is killed only
- * if we received no ACKs for normal connection timeout. It is not killed
- * only because window stays zero for some time, window may be zero
- * until armageddon and even later. We are in full accordance
- * with RFCs, only probe timer combines both retransmission timeout
- * and probe timeout in one bottle. --ANK
+ /* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
+ * long as the receiver continues to respond to probes. We support this by
+ * default and reset icsk_probes_out with incoming ACKs. But if the
+ * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
+ * kill the socket when the retry count and the elapsed time exceed the
+ * corresponding system limits. We also implement a similar policy when
+ * we use RTO to probe the window in tcp_retransmit_timer().
*/
- max_probes = sysctl_tcp_retries2;
+ start_ts = tcp_skb_timestamp(tcp_send_head(sk));
+ if (!start_ts)
+ skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
+ else if (icsk->icsk_user_timeout &&
+ (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
+ goto abort;
+ max_probes = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
- const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
+ const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
max_probes = tcp_orphan_retries(sk, alive);
-
- if (tcp_out_of_resources(sk, alive || icsk->icsk_probes_out <= max_probes))
+ if (!alive && icsk->icsk_backoff >= max_probes)
+ goto abort;
+ if (tcp_out_of_resources(sk, true))
return;
}
if (icsk->icsk_probes_out > max_probes) {
- tcp_write_err(sk);
+abort: tcp_write_err(sk);
} else {
/* Only send another probe if we didn't close things up. */
tcp_send_probe0(sk);
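The rewritten probe timer compares elapsed time with "(s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout", the standard wraparound-safe idiom for free-running 32-bit tick counters. A small demonstration of why the signed cast makes the test survive counter wrap:

    #include <stdint.h>
    #include <stdio.h>

    /* Wraparound-safe "elapsed > timeout" test for a free-running
     * 32-bit tick counter, as used against icsk_user_timeout above. */
    static int timed_out(uint32_t now, uint32_t start, int32_t timeout)
    {
        return (int32_t)(now - start) > timeout;
    }

    int main(void)
    {
        uint32_t start = 0xfffffff0u;  /* just before the counter wraps */
        uint32_t now   = 0x00000020u;  /* just after it wraps */

        /* 0x30 ticks elapsed even though now < start numerically. */
        printf("elapsed=%d timed_out=%d\n",
               (int32_t)(now - start), timed_out(now, start, 0x10));
        return 0;
    }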
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index b40ad897f945..a6afde666ab1 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -51,7 +51,6 @@ MODULE_PARM_DESC(beta, "upper bound of packets in network");
module_param(gamma, int, 0644);
MODULE_PARM_DESC(gamma, "limit on increase (scale by 2)");
-
/* There are several situations when we must "re-start" Vegas:
*
* o when a connection is established
@@ -133,7 +132,6 @@ EXPORT_SYMBOL_GPL(tcp_vegas_pkts_acked);
void tcp_vegas_state(struct sock *sk, u8 ca_state)
{
-
if (ca_state == TCP_CA_Open)
vegas_enable(sk);
else
@@ -285,7 +283,6 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
/* Use normal slow start */
else if (tp->snd_cwnd <= tp->snd_ssthresh)
tcp_slow_start(tp, acked);
-
}
/* Extract info for Tcp socket info provided via netlink. */
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 8276977d2c85..a4d2d2d88dca 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -175,7 +175,6 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
} else
tp->snd_cwnd_cnt++;
}
-
}
if (tp->snd_cwnd < 2)
tp->snd_cwnd = 2;
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index b94a04ae2ed5..bb63fba47d47 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -42,7 +42,6 @@ struct westwood {
u8 reset_rtt_min; /* Reset RTT min to next RTT sample*/
};
-
/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN (HZ/20) /* 50ms */
#define TCP_WESTWOOD_INIT_RTT (20*HZ) /* maybe too conservative?! */
@@ -153,7 +152,6 @@ static inline void update_rtt_min(struct westwood *w)
w->rtt_min = min(w->rtt, w->rtt_min);
}
-
/*
* @westwood_fast_bw
* It is called when we are in fast path. In particular it is called when
@@ -208,7 +206,6 @@ static inline u32 westwood_acked_count(struct sock *sk)
return w->cumul_ack;
}
-
/*
* TCP Westwood
* Here limit is evaluated as Bw estimation*RTTmin (for obtaining it
@@ -219,47 +216,51 @@ static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct westwood *w = inet_csk_ca(sk);
+
return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
+static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
+{
+ if (ack_flags & CA_ACK_SLOWPATH) {
+ struct westwood *w = inet_csk_ca(sk);
+
+ westwood_update_window(sk);
+ w->bk += westwood_acked_count(sk);
+
+ update_rtt_min(w);
+ return;
+ }
+
+ westwood_fast_bw(sk);
+}
+
static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
struct tcp_sock *tp = tcp_sk(sk);
struct westwood *w = inet_csk_ca(sk);
switch (event) {
- case CA_EVENT_FAST_ACK:
- westwood_fast_bw(sk);
- break;
-
case CA_EVENT_COMPLETE_CWR:
tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
break;
-
case CA_EVENT_LOSS:
tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
/* Update RTT_min when next ack arrives */
w->reset_rtt_min = 1;
break;
-
- case CA_EVENT_SLOW_ACK:
- westwood_update_window(sk);
- w->bk += westwood_acked_count(sk);
- update_rtt_min(w);
- break;
-
default:
/* don't care */
break;
}
}
-
/* Extract info for Tcp socket info provided via netlink. */
static void tcp_westwood_info(struct sock *sk, u32 ext,
struct sk_buff *skb)
{
const struct westwood *ca = inet_csk_ca(sk);
+
if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
struct tcpvegas_info info = {
.tcpv_enabled = 1,
@@ -271,12 +272,12 @@ static void tcp_westwood_info(struct sock *sk, u32 ext,
}
}
-
static struct tcp_congestion_ops tcp_westwood __read_mostly = {
.init = tcp_westwood_init,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_reno_cong_avoid,
.cwnd_event = tcp_westwood_event,
+ .in_ack_event = tcp_westwood_ack,
.get_info = tcp_westwood_info,
.pkts_acked = tcp_westwood_pkts_acked,
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 599b79b8eac0..cd7273218598 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -54,10 +54,8 @@ static void tcp_yeah_init(struct sock *sk)
/* Ensure the MD arithmetic works. This is somewhat pedantic,
* since I don't think we will see a cwnd this large. :) */
tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
-
}
-
static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -84,7 +82,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
/* Scalable */
tp->snd_cwnd_cnt += yeah->pkts_acked;
- if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){
+ if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)) {
if (tp->snd_cwnd < tp->snd_cwnd_clamp)
tp->snd_cwnd++;
tp->snd_cwnd_cnt = 0;
@@ -120,7 +118,6 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
*/
if (after(ack, yeah->vegas.beg_snd_nxt)) {
-
/* We do the Vegas calculations only if we got enough RTT
* samples that we can be reasonably sure that we got
* at least one RTT sample that wasn't from a delayed ACK.
@@ -189,7 +186,6 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
}
yeah->lastQ = queue;
-
}
/* Save the extent of the current window so we can use this
@@ -205,7 +201,8 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
}
}
-static u32 tcp_yeah_ssthresh(struct sock *sk) {
+static u32 tcp_yeah_ssthresh(struct sock *sk)
+{
const struct tcp_sock *tp = tcp_sk(sk);
struct yeah *yeah = inet_csk_ca(sk);
u32 reduction;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f57c0e4c2326..cd0db5471bb5 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -99,6 +99,7 @@
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
+#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
@@ -224,7 +225,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
remaining = (high - low) + 1;
rand = prandom_u32();
- first = (((u64)rand * remaining) >> 32) + low;
+ first = reciprocal_scale(rand, remaining) + low;
/*
* force rand to be an odd multiple of UDP_HTABLE_SIZE
*/
@@ -448,7 +449,7 @@ begin:
}
} else if (score == badness && reuseport) {
matches++;
- if (((u64)hash * matches) >> 32 == 0)
+ if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
@@ -529,7 +530,7 @@ begin:
}
} else if (score == badness && reuseport) {
matches++;
- if (((u64)hash * matches) >> 32 == 0)
+ if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
@@ -1787,6 +1788,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (sk != NULL) {
int ret;
+ if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk))
+ skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+ inet_compute_pseudo);
+
ret = udp_queue_rcv_skb(sk, skb);
sock_put(sk);
@@ -1967,7 +1972,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
return;
skb->sk = sk;
- skb->destructor = sock_edemux;
+ skb->destructor = sock_efree;
dst = sk->sk_rx_dst;
if (dst)
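
The udp.c hunks that swap the open-coded ((u64)x * n) >> 32 for reciprocal_scale() are behavior-preserving: that expression is exactly the arithmetic the helper performs, scaling a uniform 32-bit value into [0, n) without a division. A standalone check, assuming the helper's usual definition:

        #include <stdint.h>
        #include <stdio.h>

        /* same arithmetic as the open-coded form being replaced */
        static uint32_t reciprocal_scale_demo(uint32_t val, uint32_t ep_ro)
        {
                return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
        }

        int main(void)
        {
                uint32_t remaining = 28232;     /* e.g. an ephemeral port span */

                /* endpoints: 0 -> 0, 0xffffffff -> remaining - 1 */
                printf("%u %u\n",
                       reciprocal_scale_demo(0, remaining),
                       reciprocal_scale_demo(0xffffffffu, remaining));
                return 0;
        }

The remaining hunks are rx-side: sockets with convert_csum set opportunistically turn a validated checksum into CHECKSUM_COMPLETE via skb_checksum_try_convert(), and early-demuxed skbs switch to the sock_efree destructor.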
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 59035bc3008d..507310ef4b56 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -25,30 +25,11 @@ struct udp_offload_priv {
struct udp_offload_priv __rcu *next;
};
-static int udp4_ufo_send_check(struct sk_buff *skb)
-{
- if (!pskb_may_pull(skb, sizeof(struct udphdr)))
- return -EINVAL;
-
- if (likely(!skb->encapsulation)) {
- const struct iphdr *iph;
- struct udphdr *uh;
-
- iph = ip_hdr(skb);
- uh = udp_hdr(skb);
-
- uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
- IPPROTO_UDP, 0);
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- skb->ip_summed = CHECKSUM_PARTIAL;
- }
-
- return 0;
-}
-
-struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
- netdev_features_t features)
+static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
+ netdev_features_t features,
+ struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
+ netdev_features_t features),
+ __be16 new_protocol)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
u16 mac_offset = skb->mac_header;
@@ -70,7 +51,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb_inner_network_offset(skb));
skb->mac_len = skb_inner_network_offset(skb);
- skb->protocol = htons(ETH_P_TEB);
+ skb->protocol = new_protocol;
need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
if (need_csum)
@@ -78,7 +59,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
/* segment inner packet. */
enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
- segs = skb_mac_gso_segment(skb, enc_features);
+ segs = gso_inner_segment(skb, enc_features);
if (IS_ERR_OR_NULL(segs)) {
skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
mac_len);
@@ -123,21 +104,63 @@ out:
return segs;
}
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+ netdev_features_t features,
+ bool is_ipv6)
+{
+ __be16 protocol = skb->protocol;
+ const struct net_offload **offloads;
+ const struct net_offload *ops;
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
+ netdev_features_t features);
+
+ rcu_read_lock();
+
+ switch (skb->inner_protocol_type) {
+ case ENCAP_TYPE_ETHER:
+ protocol = skb->inner_protocol;
+ gso_inner_segment = skb_mac_gso_segment;
+ break;
+ case ENCAP_TYPE_IPPROTO:
+ offloads = is_ipv6 ? inet6_offloads : inet_offloads;
+ ops = rcu_dereference(offloads[skb->inner_ipproto]);
+ if (!ops || !ops->callbacks.gso_segment)
+ goto out_unlock;
+ gso_inner_segment = ops->callbacks.gso_segment;
+ break;
+ default:
+ goto out_unlock;
+ }
+
+ segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
+ protocol);
+
+out_unlock:
+ rcu_read_unlock();
+
+ return segs;
+}
+
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
- int offset;
__wsum csum;
+ struct udphdr *uh;
+ struct iphdr *iph;
if (skb->encapsulation &&
(skb_shinfo(skb)->gso_type &
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
- segs = skb_udp_tunnel_segment(skb, features);
+ segs = skb_udp_tunnel_segment(skb, features, false);
goto out;
}
+ if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+ goto out;
+
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
goto out;
@@ -165,10 +188,16 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
* HW cannot do checksum of UDP packets sent as multiple
* IP fragments.
*/
- offset = skb_checksum_start_offset(skb);
- csum = skb_checksum(skb, offset, skb->len - offset, 0);
- offset += skb->csum_offset;
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+
+ uh = udp_hdr(skb);
+ iph = ip_hdr(skb);
+
+ uh->check = 0;
+ csum = skb_checksum(skb, 0, skb->len, 0);
+ uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
skb->ip_summed = CHECKSUM_NONE;
/* Fragment the skb. IP headers of the fragments are updated in
@@ -228,30 +257,24 @@ unlock:
}
EXPORT_SYMBOL(udp_del_offload);
-static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+ struct udphdr *uh)
{
struct udp_offload_priv *uo_priv;
struct sk_buff *p, **pp = NULL;
- struct udphdr *uh, *uh2;
- unsigned int hlen, off;
+ struct udphdr *uh2;
+ unsigned int off = skb_gro_offset(skb);
int flush = 1;
if (NAPI_GRO_CB(skb)->udp_mark ||
- (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
+ (skb->ip_summed != CHECKSUM_PARTIAL &&
+ NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ !NAPI_GRO_CB(skb)->csum_valid))
goto out;
/* mark that this skb passed once through the udp gro layer */
NAPI_GRO_CB(skb)->udp_mark = 1;
- off = skb_gro_offset(skb);
- hlen = off + sizeof(*uh);
- uh = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen)) {
- uh = skb_gro_header_slow(skb, hlen, off);
- if (unlikely(!uh))
- goto out;
- }
-
rcu_read_lock();
uo_priv = rcu_dereference(udp_offload_base);
for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
@@ -269,7 +292,12 @@ unflush:
continue;
uh2 = (struct udphdr *)(p->data + off);
- if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
+
+ /* Match ports, and require that checksums are either both zero
+ * or both nonzero.
+ */
+ if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
+ (!uh->check ^ !uh2->check)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
@@ -277,6 +305,7 @@ unflush:
skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
+ NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
pp = uo_priv->offload->callbacks.gro_receive(head, skb);
out_unlock:
@@ -286,7 +315,34 @@ out:
return pp;
}
-static int udp_gro_complete(struct sk_buff *skb, int nhoff)
+static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_gro_udphdr(skb);
+
+ if (unlikely(!uh))
+ goto flush;
+
+ /* Don't bother verifying checksum if we're going to flush anyway. */
+ if (NAPI_GRO_CB(skb)->flush)
+ goto skip;
+
+ if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
+ inet_gro_compute_pseudo))
+ goto flush;
+ else if (uh->check)
+ skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+ inet_gro_compute_pseudo);
+skip:
+ NAPI_GRO_CB(skb)->is_ipv6 = 0;
+ return udp_gro_receive(head, skb, uh);
+
+flush:
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+}
+
+int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
struct udp_offload_priv *uo_priv;
__be16 newlen = htons(skb->len - nhoff);
@@ -304,19 +360,32 @@ static int udp_gro_complete(struct sk_buff *skb, int nhoff)
break;
}
- if (uo_priv != NULL)
+ if (uo_priv != NULL) {
+ NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));
+ }
rcu_read_unlock();
return err;
}
+static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+
+ if (uh->check)
+ uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
+ iph->daddr, 0);
+
+ return udp_gro_complete(skb, nhoff);
+}
+
static const struct net_offload udpv4_offload = {
.callbacks = {
- .gso_send_check = udp4_ufo_send_check,
.gso_segment = udp4_ufo_fragment,
- .gro_receive = udp_gro_receive,
- .gro_complete = udp_gro_complete,
+ .gro_receive = udp4_gro_receive,
+ .gro_complete = udp4_gro_complete,
},
};
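
One subtlety in the udp_gro_receive() flow match above: packets with and without UDP checksums must not be aggregated, since the merged super-packet could not carry a single consistent checksum state. The added (!uh->check ^ !uh2->check) term flushes exactly the mixed case; a standalone truth-table check:

        #include <assert.h>

        /* 1 iff exactly one of the two checksum fields is zero */
        static int mixed_zero(unsigned a, unsigned b)
        {
                return (!a) ^ (!b);
        }

        int main(void)
        {
                assert(mixed_zero(0, 0) == 0);           /* both zero: may merge */
                assert(mixed_zero(0x1234, 0xbeef) == 0); /* both set: may merge */
                assert(mixed_zero(0, 0xbeef) == 1);      /* mixed: flush */
                assert(mixed_zero(0x1234, 0) == 1);      /* mixed: flush */
                return 0;
        }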
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
index 61ec1a65207e..1671263e5fa0 100644
--- a/net/ipv4/udp_tunnel.c
+++ b/net/ipv4/udp_tunnel.c
@@ -8,83 +8,40 @@
#include <net/udp_tunnel.h>
#include <net/net_namespace.h>
-int udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
- struct socket **sockp)
+int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp)
{
- int err = -EINVAL;
+ int err;
struct socket *sock = NULL;
+ struct sockaddr_in udp_addr;
-#if IS_ENABLED(CONFIG_IPV6)
- if (cfg->family == AF_INET6) {
- struct sockaddr_in6 udp6_addr;
+ err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
+ if (err < 0)
+ goto error;
- err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
- if (err < 0)
- goto error;
-
- sk_change_net(sock->sk, net);
-
- udp6_addr.sin6_family = AF_INET6;
- memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
- sizeof(udp6_addr.sin6_addr));
- udp6_addr.sin6_port = cfg->local_udp_port;
- err = kernel_bind(sock, (struct sockaddr *)&udp6_addr,
- sizeof(udp6_addr));
- if (err < 0)
- goto error;
-
- if (cfg->peer_udp_port) {
- udp6_addr.sin6_family = AF_INET6;
- memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
- sizeof(udp6_addr.sin6_addr));
- udp6_addr.sin6_port = cfg->peer_udp_port;
- err = kernel_connect(sock,
- (struct sockaddr *)&udp6_addr,
- sizeof(udp6_addr), 0);
- }
- if (err < 0)
- goto error;
+ sk_change_net(sock->sk, net);
- udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums);
- udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums);
- } else
-#endif
- if (cfg->family == AF_INET) {
- struct sockaddr_in udp_addr;
-
- err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
- if (err < 0)
- goto error;
-
- sk_change_net(sock->sk, net);
+ udp_addr.sin_family = AF_INET;
+ udp_addr.sin_addr = cfg->local_ip;
+ udp_addr.sin_port = cfg->local_udp_port;
+ err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
+ sizeof(udp_addr));
+ if (err < 0)
+ goto error;
+ if (cfg->peer_udp_port) {
udp_addr.sin_family = AF_INET;
- udp_addr.sin_addr = cfg->local_ip;
- udp_addr.sin_port = cfg->local_udp_port;
- err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
- sizeof(udp_addr));
+ udp_addr.sin_addr = cfg->peer_ip;
+ udp_addr.sin_port = cfg->peer_udp_port;
+ err = kernel_connect(sock, (struct sockaddr *)&udp_addr,
+ sizeof(udp_addr), 0);
if (err < 0)
goto error;
-
- if (cfg->peer_udp_port) {
- udp_addr.sin_family = AF_INET;
- udp_addr.sin_addr = cfg->peer_ip;
- udp_addr.sin_port = cfg->peer_udp_port;
- err = kernel_connect(sock,
- (struct sockaddr *)&udp_addr,
- sizeof(udp_addr), 0);
- if (err < 0)
- goto error;
- }
-
- sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;
- } else {
- return -EPFNOSUPPORT;
}
+ sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;
*sockp = sock;
-
return 0;
error:
@@ -95,6 +52,57 @@ error:
*sockp = NULL;
return err;
}
-EXPORT_SYMBOL(udp_sock_create);
+EXPORT_SYMBOL(udp_sock_create4);
+
+void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
+ struct udp_tunnel_sock_cfg *cfg)
+{
+ struct sock *sk = sock->sk;
+
+ /* Disable multicast loopback */
+ inet_sk(sk)->mc_loop = 0;
+
+ /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
+ udp_set_convert_csum(sk, true);
+
+ rcu_assign_sk_user_data(sk, cfg->sk_user_data);
+
+ udp_sk(sk)->encap_type = cfg->encap_type;
+ udp_sk(sk)->encap_rcv = cfg->encap_rcv;
+ udp_sk(sk)->encap_destroy = cfg->encap_destroy;
+
+ udp_tunnel_encap_enable(sock);
+}
+EXPORT_SYMBOL_GPL(setup_udp_tunnel_sock);
+
+int udp_tunnel_xmit_skb(struct socket *sock, struct rtable *rt,
+ struct sk_buff *skb, __be32 src, __be32 dst,
+ __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
+ __be16 dst_port, bool xnet)
+{
+ struct udphdr *uh;
+
+ __skb_push(skb, sizeof(*uh));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+
+ uh->dest = dst_port;
+ uh->source = src_port;
+ uh->len = htons(skb->len);
+
+ udp_set_csum(sock->sk->sk_no_check_tx, skb, src, dst, skb->len);
+
+ return iptunnel_xmit(sock->sk, rt, skb, src, dst, IPPROTO_UDP,
+ tos, ttl, df, xnet);
+}
+EXPORT_SYMBOL_GPL(udp_tunnel_xmit_skb);
+
+void udp_tunnel_sock_release(struct socket *sock)
+{
+ rcu_assign_sk_user_data(sock->sk, NULL);
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sk_release_kernel(sock->sk);
+}
+EXPORT_SYMBOL_GPL(udp_tunnel_sock_release);
MODULE_LICENSE("GPL");
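
The new udp_tunnel.c surface splits socket creation per family and adds an explicit configuration step. A minimal consumer sketch; the struct field names follow the uses visible in this patch, while the callback, port number and encap_type value are purely illustrative:

        /* hypothetical receive hook matching the encap_rcv signature */
        static int my_encap_rcv(struct sock *sk, struct sk_buff *skb);

        static struct socket *my_tunnel_open(struct net *net)
        {
                struct udp_port_cfg port_cfg = {
                        .local_ip.s_addr   = htonl(INADDR_ANY),
                        .local_udp_port    = htons(4789),  /* example port */
                        .use_udp_checksums = true,
                };
                struct udp_tunnel_sock_cfg tnl_cfg = {
                        .encap_type = 1,            /* a UDP_ENCAP_* value */
                        .encap_rcv  = my_encap_rcv,
                };
                struct socket *sock;

                if (udp_sock_create4(net, &port_cfg, &sock) < 0)
                        return NULL;

                setup_udp_tunnel_sock(net, sock, &tnl_cfg);
                return sock;    /* later paired with udp_tunnel_sock_release() */
        }

Transmit then goes through udp_tunnel_xmit_skb(), which pushes the UDP header, fills ports and length, sets the checksum according to sk_no_check_tx, and hands off to iptunnel_xmit().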
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 2fe68364bb20..2e8c06108ab9 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -45,3 +45,7 @@ obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o ip6_icmp.o
obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
+
+ifneq ($(CONFIG_IPV6),)
+obj-$(CONFIG_NET_UDP_TUNNEL) += ip6_udp_tunnel.o
+endif
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3e118dfddd02..725c763270a0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -180,7 +180,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
.rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
- .use_tempaddr = 0,
+ .use_tempaddr = 0,
.temp_valid_lft = TEMP_VALID_LIFETIME,
.temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
.regen_max_retry = REGEN_MAX_RETRY,
@@ -1105,8 +1105,8 @@ retry:
spin_unlock_bh(&ifp->lock);
regen_advance = idev->cnf.regen_max_retry *
- idev->cnf.dad_transmits *
- NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
+ idev->cnf.dad_transmits *
+ NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
write_unlock_bh(&idev->lock);
/* A temporary address is created only if this calculated Preferred
@@ -1725,7 +1725,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
return;
- ipv6_dev_ac_inc(ifp->idev->dev, &addr);
+ __ipv6_dev_ac_inc(ifp->idev, &addr);
}
/* caller must hold RTNL */
@@ -2844,6 +2844,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
if (dev->flags & IFF_SLAVE)
break;
+ if (idev && idev->cnf.disable_ipv6)
+ break;
+
if (event == NETDEV_UP) {
if (!addrconf_qdisc_ok(dev)) {
/* device is not ready yet. */
@@ -3030,7 +3033,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
struct hlist_head *h = &inet6_addr_lst[i];
spin_lock_bh(&addrconf_hash_lock);
- restart:
+restart:
hlist_for_each_entry_rcu(ifa, h, addr_lst) {
if (ifa->idev == idev) {
hlist_del_init_rcu(&ifa->addr_lst);
@@ -3544,8 +3547,8 @@ static void __net_exit if6_proc_net_exit(struct net *net)
}
static struct pernet_operations if6_proc_net_ops = {
- .init = if6_proc_net_init,
- .exit = if6_proc_net_exit,
+ .init = if6_proc_net_init,
+ .exit = if6_proc_net_exit,
};
int __init if6_proc_init(void)
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 2daa3a133e49..e8c4400f23e9 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -7,15 +7,15 @@
*
* Adapted from linux/net/ipv4/af_inet.c
*
- * Fixes:
+ * Fixes:
* piggy, Karl Knutson : Socket protocol table
- * Hideaki YOSHIFUJI : sin6_scope_id support
- * Arnaldo Melo : check proc_net_create return, cleanups
+ * Hideaki YOSHIFUJI : sin6_scope_id support
+ * Arnaldo Melo : check proc_net_create return, cleanups
*
* This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) "IPv6: " fmt
@@ -302,7 +302,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* Reproduce AF_INET checks to make the bindings consistent */
v4addr = addr->sin6_addr.s6_addr32[3];
chk_addr_ret = inet_addr_type(net, v4addr);
- if (!sysctl_ip_nonlocal_bind &&
+ if (!net->ipv4.sysctl_ip_nonlocal_bind &&
!(inet->freebind || inet->transparent) &&
v4addr != htonl(INADDR_ANY) &&
chk_addr_ret != RTN_LOCAL &&
@@ -672,10 +672,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
}
EXPORT_SYMBOL_GPL(inet6_sk_rebuild_header);
-bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb)
+bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
+ const struct inet6_skb_parm *opt)
{
const struct ipv6_pinfo *np = inet6_sk(sk);
- const struct inet6_skb_parm *opt = IP6CB(skb);
if (np->rxopt.all) {
if ((opt->hop && (np->rxopt.bits.hopopts ||
@@ -766,7 +766,7 @@ static int __net_init inet6_net_init(struct net *net)
net->ipv6.sysctl.icmpv6_time = 1*HZ;
net->ipv6.sysctl.flowlabel_consistency = 1;
net->ipv6.sysctl.auto_flowlabels = 0;
- atomic_set(&net->ipv6.rt_genid, 0);
+ atomic_set(&net->ipv6.fib6_sernum, 1);
err = ipv6_init_mibs(net);
if (err)
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 72a4930bdc0a..6d16eb0e0c7f 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -17,10 +17,10 @@
* Authors
*
* Mitsuru KANDA @USAGI : IPv6 Support
- * Kazunori MIYAZAWA @USAGI :
- * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
+ * Kazunori MIYAZAWA @USAGI :
+ * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
*
- * This file is derived from net/ipv4/ah.c.
+ * This file is derived from net/ipv4/ah.c.
*/
#define pr_fmt(fmt) "IPv6: " fmt
@@ -284,7 +284,7 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
ipv6_rearrange_rthdr(iph, exthdr.rth);
break;
- default :
+ default:
return 0;
}
@@ -478,7 +478,7 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
auth_data = ah_tmp_auth(work_iph, hdr_len);
icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
- err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
+ err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
if (err)
goto out;
@@ -622,7 +622,7 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
goto out_free;
}
- err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG: 0;
+ err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
if (err)
goto out_free;
@@ -647,8 +647,8 @@ static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct net *net = dev_net(skb->dev);
- struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
- struct ip_auth_hdr *ah = (struct ip_auth_hdr*)(skb->data+offset);
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+ struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+offset);
struct xfrm_state *x;
if (type != ICMPV6_PKT_TOOBIG &&
@@ -713,8 +713,6 @@ static int ah6_init_state(struct xfrm_state *x)
ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;
- BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);
-
x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
ahp->icv_trunc_len);
switch (x->props.mode) {
@@ -755,11 +753,10 @@ static int ah6_rcv_cb(struct sk_buff *skb, int err)
return 0;
}
-static const struct xfrm_type ah6_type =
-{
+static const struct xfrm_type ah6_type = {
.description = "AH6",
.owner = THIS_MODULE,
- .proto = IPPROTO_AH,
+ .proto = IPPROTO_AH,
.flags = XFRM_TYPE_REPLAY_PROT,
.init_state = ah6_init_state,
.destructor = ah6_destroy,
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 9a386842fd62..f5e319a8d4e2 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -46,10 +46,6 @@
static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
-/* Big ac list lock for all the sockets */
-static DEFINE_SPINLOCK(ipv6_sk_ac_lock);
-
-
/*
* socket join an anycast group
*/
@@ -78,7 +74,6 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
pac->acl_addr = *addr;
rtnl_lock();
- rcu_read_lock();
if (ifindex == 0) {
struct rt6_info *rt;
@@ -91,11 +86,11 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
goto error;
} else {
/* router, no matching interface: just pick one */
- dev = dev_get_by_flags_rcu(net, IFF_UP,
- IFF_UP | IFF_LOOPBACK);
+ dev = __dev_get_by_flags(net, IFF_UP,
+ IFF_UP | IFF_LOOPBACK);
}
} else
- dev = dev_get_by_index_rcu(net, ifindex);
+ dev = __dev_get_by_index(net, ifindex);
if (dev == NULL) {
err = -ENODEV;
@@ -127,17 +122,14 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
goto error;
}
- err = ipv6_dev_ac_inc(dev, addr);
+ err = __ipv6_dev_ac_inc(idev, addr);
if (!err) {
- spin_lock_bh(&ipv6_sk_ac_lock);
pac->acl_next = np->ipv6_ac_list;
np->ipv6_ac_list = pac;
- spin_unlock_bh(&ipv6_sk_ac_lock);
pac = NULL;
}
error:
- rcu_read_unlock();
rtnl_unlock();
if (pac)
sock_kfree_s(sk, pac, sizeof(*pac));
@@ -154,7 +146,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
struct ipv6_ac_socklist *pac, *prev_pac;
struct net *net = sock_net(sk);
- spin_lock_bh(&ipv6_sk_ac_lock);
+ rtnl_lock();
prev_pac = NULL;
for (pac = np->ipv6_ac_list; pac; pac = pac->acl_next) {
if ((ifindex == 0 || pac->acl_ifindex == ifindex) &&
@@ -163,7 +155,7 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
prev_pac = pac;
}
if (!pac) {
- spin_unlock_bh(&ipv6_sk_ac_lock);
+ rtnl_unlock();
return -ENOENT;
}
if (prev_pac)
@@ -171,14 +163,9 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
else
np->ipv6_ac_list = pac->acl_next;
- spin_unlock_bh(&ipv6_sk_ac_lock);
-
- rtnl_lock();
- rcu_read_lock();
- dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
+ dev = __dev_get_by_index(net, pac->acl_ifindex);
if (dev)
ipv6_dev_ac_dec(dev, &pac->acl_addr);
- rcu_read_unlock();
rtnl_unlock();
sock_kfree_s(sk, pac, sizeof(*pac));
@@ -196,19 +183,16 @@ void ipv6_sock_ac_close(struct sock *sk)
if (!np->ipv6_ac_list)
return;
- spin_lock_bh(&ipv6_sk_ac_lock);
+ rtnl_lock();
pac = np->ipv6_ac_list;
np->ipv6_ac_list = NULL;
- spin_unlock_bh(&ipv6_sk_ac_lock);
prev_index = 0;
- rtnl_lock();
- rcu_read_lock();
while (pac) {
struct ipv6_ac_socklist *next = pac->acl_next;
if (pac->acl_ifindex != prev_index) {
- dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
+ dev = __dev_get_by_index(net, pac->acl_ifindex);
prev_index = pac->acl_ifindex;
}
if (dev)
@@ -216,10 +200,14 @@ void ipv6_sock_ac_close(struct sock *sk)
sock_kfree_s(sk, pac, sizeof(*pac));
pac = next;
}
- rcu_read_unlock();
rtnl_unlock();
}
+static void aca_get(struct ifacaddr6 *aca)
+{
+ atomic_inc(&aca->aca_refcnt);
+}
+
static void aca_put(struct ifacaddr6 *ac)
{
if (atomic_dec_and_test(&ac->aca_refcnt)) {
@@ -229,23 +217,40 @@ static void aca_put(struct ifacaddr6 *ac)
}
}
+static struct ifacaddr6 *aca_alloc(struct rt6_info *rt,
+ const struct in6_addr *addr)
+{
+ struct inet6_dev *idev = rt->rt6i_idev;
+ struct ifacaddr6 *aca;
+
+ aca = kzalloc(sizeof(*aca), GFP_ATOMIC);
+ if (aca == NULL)
+ return NULL;
+
+ aca->aca_addr = *addr;
+ in6_dev_hold(idev);
+ aca->aca_idev = idev;
+ aca->aca_rt = rt;
+ aca->aca_users = 1;
+ /* aca_tstamp should be updated upon changes */
+ aca->aca_cstamp = aca->aca_tstamp = jiffies;
+ atomic_set(&aca->aca_refcnt, 1);
+ spin_lock_init(&aca->aca_lock);
+
+ return aca;
+}
+
/*
* device anycast group inc (add if not found)
*/
-int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
+int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct ifacaddr6 *aca;
- struct inet6_dev *idev;
struct rt6_info *rt;
int err;
ASSERT_RTNL();
- idev = in6_dev_get(dev);
-
- if (idev == NULL)
- return -EINVAL;
-
write_lock_bh(&idev->lock);
if (idev->dead) {
err = -ENODEV;
@@ -260,46 +265,35 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
}
}
- /*
- * not found: create a new one.
- */
-
- aca = kzalloc(sizeof(struct ifacaddr6), GFP_ATOMIC);
-
- if (aca == NULL) {
- err = -ENOMEM;
- goto out;
- }
-
rt = addrconf_dst_alloc(idev, addr, true);
if (IS_ERR(rt)) {
- kfree(aca);
err = PTR_ERR(rt);
goto out;
}
-
- aca->aca_addr = *addr;
- aca->aca_idev = idev;
- aca->aca_rt = rt;
- aca->aca_users = 1;
- /* aca_tstamp should be updated upon changes */
- aca->aca_cstamp = aca->aca_tstamp = jiffies;
- atomic_set(&aca->aca_refcnt, 2);
- spin_lock_init(&aca->aca_lock);
+ aca = aca_alloc(rt, addr);
+ if (aca == NULL) {
+ ip6_rt_put(rt);
+ err = -ENOMEM;
+ goto out;
+ }
aca->aca_next = idev->ac_list;
idev->ac_list = aca;
+
+ /* Hold this for addrconf_join_solict() below before we unlock,
+ * since it is already exposed via idev->ac_list.
+ */
+ aca_get(aca);
write_unlock_bh(&idev->lock);
ip6_ins_rt(rt);
- addrconf_join_solict(dev, &aca->aca_addr);
+ addrconf_join_solict(idev->dev, &aca->aca_addr);
aca_put(aca);
return 0;
out:
write_unlock_bh(&idev->lock);
- in6_dev_put(idev);
return err;
}
@@ -341,7 +335,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
return 0;
}
-/* called with rcu_read_lock() */
+/* called with rtnl_lock() */
static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
{
struct inet6_dev *idev = __in6_dev_get(dev);
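
The anycast.c rework replaces the socket-list spinlock plus RCU device lookups with plain RTNL, and splits allocation into aca_alloc(). Note the refcount choreography: aca_alloc() returns the object with refcnt 1 (the reference owned by idev->ac_list), and aca_get() takes one extra hold before the lock drops so the entry cannot disappear while ip6_ins_rt() and addrconf_join_solict() run unlocked; aca_put() then releases that hold. The generic publish-then-use pattern, as standalone C with a toy atomic:

        #include <stdatomic.h>
        #include <stdlib.h>

        struct obj {
                atomic_int refcnt;  /* 1 == the reference owned by the list */
        };

        static void obj_get(struct obj *o) { atomic_fetch_add(&o->refcnt, 1); }

        static void obj_put(struct obj *o)
        {
                if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                        free(o);    /* last reference dropped */
        }

        /* usage, mirroring __ipv6_dev_ac_inc():
         *   lock(); publish(o); obj_get(o); unlock();
         *   ...unlocked follow-up work on o...
         *   obj_put(o);
         */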
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 2753319524f1..2cdc38338be3 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -43,13 +43,13 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
- struct inet_sock *inet = inet_sk(sk);
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct in6_addr *daddr, *final_p, final;
+ struct inet_sock *inet = inet_sk(sk);
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct in6_addr *daddr, *final_p, final;
struct dst_entry *dst;
struct flowi6 fl6;
struct ip6_flowlabel *flowlabel = NULL;
- struct ipv6_txoptions *opt;
+ struct ipv6_txoptions *opt;
int addr_type;
int err;
@@ -332,7 +332,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sock_exterr_skb *serr;
- struct sk_buff *skb, *skb2;
+ struct sk_buff *skb;
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin, msg->msg_name);
struct {
struct sock_extended_err ee;
@@ -342,7 +342,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
int copied;
err = -EAGAIN;
- skb = skb_dequeue(&sk->sk_error_queue);
+ skb = sock_dequeue_err_skb(sk);
if (skb == NULL)
goto out;
@@ -415,17 +415,6 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
msg->msg_flags |= MSG_ERRQUEUE;
err = copied;
- /* Reset and regenerate socket error */
- spin_lock_bh(&sk->sk_error_queue.lock);
- sk->sk_err = 0;
- if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
- sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_bh(&sk->sk_error_queue.lock);
- sk->sk_error_report(sk);
- } else {
- spin_unlock_bh(&sk->sk_error_queue.lock);
- }
-
out_free_skb:
kfree_skb(skb);
out:
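
The datagram.c hunks lean on the new sock_dequeue_err_skb() helper: the open-coded "reset and regenerate socket error" block is deleted because the helper is expected to re-arm sk->sk_err from the next queued error itself. A sketch of the shape such a helper needs (not the actual net/core implementation), assuming it works under the error-queue lock:

        struct sk_buff *sock_dequeue_err_skb_sketch(struct sock *sk)
        {
                struct sk_buff_head *q = &sk->sk_error_queue;
                struct sk_buff *skb, *next;
                int err = 0;

                spin_lock_bh(&q->lock);
                skb = __skb_dequeue(q);
                next = skb_peek(q);
                if (skb && next)
                        err = SKB_EXT_ERR(next)->ee.ee_errno;
                spin_unlock_bh(&q->lock);

                sk->sk_err = err;
                if (err)
                        sk->sk_error_report(sk);

                return skb;
        }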
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index d15da1377149..83fc3a385a26 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -17,10 +17,10 @@
* Authors
*
* Mitsuru KANDA @USAGI : IPv6 Support
- * Kazunori MIYAZAWA @USAGI :
- * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
+ * Kazunori MIYAZAWA @USAGI :
+ * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
*
- * This file is derived from net/ipv4/esp.c
+ * This file is derived from net/ipv4/esp.c
*/
#define pr_fmt(fmt) "IPv6: " fmt
@@ -598,7 +598,7 @@ static int esp6_init_state(struct xfrm_state *x)
case XFRM_MODE_BEET:
if (x->sel.family != AF_INET6)
x->props.header_len += IPV4_BEET_PHMAXLEN +
- (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
+ (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
break;
case XFRM_MODE_TRANSPORT:
break;
@@ -621,11 +621,10 @@ static int esp6_rcv_cb(struct sk_buff *skb, int err)
return 0;
}
-static const struct xfrm_type esp6_type =
-{
+static const struct xfrm_type esp6_type = {
.description = "ESP6",
- .owner = THIS_MODULE,
- .proto = IPPROTO_ESP,
+ .owner = THIS_MODULE,
+ .proto = IPPROTO_ESP,
.flags = XFRM_TYPE_REPLAY_PROT,
.init_state = esp6_init_state,
.destructor = esp6_destroy,
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 8d67900aa003..bfde361b6134 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -142,7 +142,7 @@ static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
default: /* Other TLV code so scan list */
if (optlen > len)
goto bad;
- for (curr=procs; curr->type >= 0; curr++) {
+ for (curr = procs; curr->type >= 0; curr++) {
if (curr->type == nh[off]) {
/* type specific length/alignment
checks will be performed in the
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 06ba3e58320b..97ae70077a4f 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -170,11 +170,11 @@ static bool is_ineligible(const struct sk_buff *skb)
/*
* Check the ICMP output rate limit
*/
-static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
- struct flowi6 *fl6)
+static bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
+ struct flowi6 *fl6)
{
- struct dst_entry *dst;
struct net *net = sock_net(sk);
+ struct dst_entry *dst;
bool res = false;
/* Informational messages are not limited. */
@@ -199,16 +199,20 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
} else {
struct rt6_info *rt = (struct rt6_info *)dst;
int tmo = net->ipv6.sysctl.icmpv6_time;
- struct inet_peer *peer;
/* Give more bandwidth to wider prefixes. */
if (rt->rt6i_dst.plen < 128)
tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
- peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
- res = inet_peer_xrlim_allow(peer, tmo);
- if (peer)
- inet_putpeer(peer);
+ if (icmp_global_allow()) {
+ struct inet_peer *peer;
+
+ peer = inet_getpeer_v6(net->ipv6.peers,
+ &rt->rt6i_dst.addr, 1);
+ res = inet_peer_xrlim_allow(peer, tmo);
+ if (peer)
+ inet_putpeer(peer);
+ }
}
dst_release(dst);
return res;
@@ -503,7 +507,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
msg.type = type;
len = skb->len - msg.offset;
- len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) -sizeof(struct icmp6hdr));
+ len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr));
if (len < 0) {
LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n");
goto out_dst_release;
@@ -636,7 +640,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
/* now skip over extension headers */
inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
&nexthdr, &frag_off);
- if (inner_offset<0)
+ if (inner_offset < 0)
goto out;
} else {
inner_offset = sizeof(struct ipv6hdr);
@@ -773,12 +777,12 @@ static int icmpv6_rcv(struct sk_buff *skb)
break;
default:
- LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
-
/* informational */
if (type & ICMPV6_INFOMSG_MASK)
break;
+ LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n");
+
/*
* error of unknown type.
* must pass to upper level
@@ -808,7 +812,7 @@ void icmpv6_flow_init(struct sock *sk, struct flowi6 *fl6,
memset(fl6, 0, sizeof(*fl6));
fl6->saddr = *saddr;
fl6->daddr = *daddr;
- fl6->flowi6_proto = IPPROTO_ICMPV6;
+ fl6->flowi6_proto = IPPROTO_ICMPV6;
fl6->fl6_icmp_type = type;
fl6->fl6_icmp_code = 0;
fl6->flowi6_oif = oif;
@@ -875,8 +879,8 @@ static void __net_exit icmpv6_sk_exit(struct net *net)
}
static struct pernet_operations icmpv6_sk_ops = {
- .init = icmpv6_sk_init,
- .exit = icmpv6_sk_exit,
+ .init = icmpv6_sk_init,
+ .exit = icmpv6_sk_exit,
};
int __init icmpv6_init(void)
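
In icmp.c the per-peer limiter is now gated behind icmp_global_allow(): the cheap global check runs first, and the inet_peer lookup (an allocation plus tree walk) only happens for traffic the global budget admits. A toy token bucket of the same shape, standalone C (the kernel's global limiter keeps its own state and locking):

        #include <stdbool.h>
        #include <stdint.h>

        struct bucket {
                uint64_t tokens;
                uint64_t last_ns;
        };

        static bool bucket_allow(struct bucket *b, uint64_t now_ns,
                                 uint64_t rate_per_sec, uint64_t burst)
        {
                uint64_t add = (now_ns - b->last_ns) * rate_per_sec / 1000000000ull;

                b->last_ns = now_ns;
                b->tokens = b->tokens + add > burst ? burst : b->tokens + add;
                if (!b->tokens)
                        return false;   /* over budget: suppress the ICMP */
                b->tokens--;
                return true;
        }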
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index a245e5ddffbd..29b32206e494 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -63,7 +63,6 @@ int inet6_csk_bind_conflict(const struct sock *sk,
return sk2 != NULL;
}
-
EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
struct dst_entry *inet6_csk_route_req(struct sock *sk,
@@ -144,7 +143,6 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk,
return NULL;
}
-
EXPORT_SYMBOL_GPL(inet6_csk_search_req);
void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
@@ -160,10 +158,9 @@ void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
inet_csk_reqsk_queue_added(sk, timeout);
}
-
EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);
-void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
+void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
@@ -175,7 +172,6 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
sk->sk_bound_dev_if);
}
-
EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
static inline
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 262e13c02ec2..051dffb49c90 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -6,7 +6,7 @@
* Generic INET6 transport hashtables
*
* Authors: Lotsa people, from code originally in tcp, generalised here
- * by Arnaldo Carvalho de Melo <acme@mandriva.com>
+ * by Arnaldo Carvalho de Melo <acme@mandriva.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -198,7 +198,7 @@ begin:
}
} else if (score == hiscore && reuseport) {
matches++;
- if (((u64)phash * matches) >> 32 == 0)
+ if (reciprocal_scale(phash, matches) == 0)
result = sk;
phash = next_pseudo_random32(phash);
}
@@ -222,7 +222,6 @@ begin:
rcu_read_unlock();
return result;
}
-
EXPORT_SYMBOL_GPL(inet6_lookup_listener);
struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
@@ -238,7 +237,6 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
return sk;
}
-
EXPORT_SYMBOL_GPL(inet6_lookup);
static int __inet6_check_established(struct inet_timewait_death_row *death_row,
@@ -324,5 +322,4 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row,
return __inet_hash_connect(death_row, sk, inet6_sk_port_offset(sk),
__inet6_check_established, __inet6_hash);
}
-
EXPORT_SYMBOL_GPL(inet6_hash_connect);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 97b9fa8de377..b2d1838897c9 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -46,20 +46,11 @@
static struct kmem_cache *fib6_node_kmem __read_mostly;
-enum fib_walk_state_t {
-#ifdef CONFIG_IPV6_SUBTREES
- FWS_S,
-#endif
- FWS_L,
- FWS_R,
- FWS_C,
- FWS_U
-};
-
-struct fib6_cleaner_t {
- struct fib6_walker_t w;
+struct fib6_cleaner {
+ struct fib6_walker w;
struct net *net;
int (*func)(struct rt6_info *, void *arg);
+ int sernum;
void *arg;
};
@@ -74,8 +65,8 @@ static DEFINE_RWLOCK(fib6_walker_lock);
static void fib6_prune_clones(struct net *net, struct fib6_node *fn);
static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
-static int fib6_walk(struct fib6_walker_t *w);
-static int fib6_walk_continue(struct fib6_walker_t *w);
+static int fib6_walk(struct fib6_walker *w);
+static int fib6_walk_continue(struct fib6_walker *w);
/*
* A routing update causes an increase of the serial number on the
@@ -84,34 +75,41 @@ static int fib6_walk_continue(struct fib6_walker_t *w);
* result of redirects, path MTU changes, etc.
*/
-static __u32 rt_sernum;
-
static void fib6_gc_timer_cb(unsigned long arg);
static LIST_HEAD(fib6_walkers);
#define FOR_WALKERS(w) list_for_each_entry(w, &fib6_walkers, lh)
-static inline void fib6_walker_link(struct fib6_walker_t *w)
+static void fib6_walker_link(struct fib6_walker *w)
{
write_lock_bh(&fib6_walker_lock);
list_add(&w->lh, &fib6_walkers);
write_unlock_bh(&fib6_walker_lock);
}
-static inline void fib6_walker_unlink(struct fib6_walker_t *w)
+static void fib6_walker_unlink(struct fib6_walker *w)
{
write_lock_bh(&fib6_walker_lock);
list_del(&w->lh);
write_unlock_bh(&fib6_walker_lock);
}
-static __inline__ u32 fib6_new_sernum(void)
+
+static int fib6_new_sernum(struct net *net)
{
- u32 n = ++rt_sernum;
- if ((__s32)n <= 0)
- rt_sernum = n = 1;
- return n;
+ int new, old;
+
+ do {
+ old = atomic_read(&net->ipv6.fib6_sernum);
+ new = old < INT_MAX ? old + 1 : 1;
+ } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
+ old, new) != old);
+ return new;
}
+enum {
+ FIB6_NO_SERNUM_CHANGE = 0,
+};
+
/*
* Auxiliary address test functions for the radix tree.
*
@@ -128,7 +126,7 @@ static __inline__ u32 fib6_new_sernum(void)
# define BITOP_BE32_SWIZZLE 0
#endif
-static __inline__ __be32 addr_bit_set(const void *token, int fn_bit)
+static __be32 addr_bit_set(const void *token, int fn_bit)
{
const __be32 *addr = token;
/*
@@ -142,7 +140,7 @@ static __inline__ __be32 addr_bit_set(const void *token, int fn_bit)
addr[fn_bit >> 5];
}
-static __inline__ struct fib6_node *node_alloc(void)
+static struct fib6_node *node_alloc(void)
{
struct fib6_node *fn;
@@ -151,12 +149,12 @@ static __inline__ struct fib6_node *node_alloc(void)
return fn;
}
-static __inline__ void node_free(struct fib6_node *fn)
+static void node_free(struct fib6_node *fn)
{
kmem_cache_free(fib6_node_kmem, fn);
}
-static __inline__ void rt6_release(struct rt6_info *rt)
+static void rt6_release(struct rt6_info *rt)
{
if (atomic_dec_and_test(&rt->rt6i_ref))
dst_free(&rt->dst);
@@ -267,7 +265,7 @@ static void __net_init fib6_tables_init(struct net *net)
#endif
-static int fib6_dump_node(struct fib6_walker_t *w)
+static int fib6_dump_node(struct fib6_walker *w)
{
int res;
struct rt6_info *rt;
@@ -287,7 +285,7 @@ static int fib6_dump_node(struct fib6_walker_t *w)
static void fib6_dump_end(struct netlink_callback *cb)
{
- struct fib6_walker_t *w = (void *)cb->args[2];
+ struct fib6_walker *w = (void *)cb->args[2];
if (w) {
if (cb->args[4]) {
@@ -310,7 +308,7 @@ static int fib6_dump_done(struct netlink_callback *cb)
static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
struct netlink_callback *cb)
{
- struct fib6_walker_t *w;
+ struct fib6_walker *w;
int res;
w = (void *)cb->args[2];
@@ -355,7 +353,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
unsigned int h, s_h;
unsigned int e = 0, s_e;
struct rt6_rtnl_dump_arg arg;
- struct fib6_walker_t *w;
+ struct fib6_walker *w;
struct fib6_table *tb;
struct hlist_head *head;
int res = 0;
@@ -423,14 +421,13 @@ out:
static struct fib6_node *fib6_add_1(struct fib6_node *root,
struct in6_addr *addr, int plen,
int offset, int allow_create,
- int replace_required)
+ int replace_required, int sernum)
{
struct fib6_node *fn, *in, *ln;
struct fib6_node *pn = NULL;
struct rt6key *key;
int bit;
__be32 dir = 0;
- __u32 sernum = fib6_new_sernum();
RT6_TRACE("fib6_add_1\n");
@@ -627,7 +624,7 @@ insert_above:
return ln;
}
-static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
+static bool rt6_qualify_for_ecmp(struct rt6_info *rt)
{
return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
RTF_GATEWAY;
@@ -820,7 +817,7 @@ add:
return 0;
}
-static __inline__ void fib6_start_gc(struct net *net, struct rt6_info *rt)
+static void fib6_start_gc(struct net *net, struct rt6_info *rt)
{
if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
(rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE)))
@@ -848,6 +845,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
int err = -ENOMEM;
int allow_create = 1;
int replace_required = 0;
+ int sernum = fib6_new_sernum(info->nl_net);
if (info->nlh) {
if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
@@ -860,7 +858,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
offsetof(struct rt6_info, rt6i_dst), allow_create,
- replace_required);
+ replace_required, sernum);
if (IS_ERR(fn)) {
err = PTR_ERR(fn);
fn = NULL;
@@ -894,14 +892,14 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
sfn->fn_flags = RTN_ROOT;
- sfn->fn_sernum = fib6_new_sernum();
+ sfn->fn_sernum = sernum;
/* Now add the first leaf node to new subtree */
sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
- allow_create, replace_required);
+ allow_create, replace_required, sernum);
if (IS_ERR(sn)) {
/* If it is failed, discard just allocated
@@ -920,7 +918,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
- allow_create, replace_required);
+ allow_create, replace_required, sernum);
if (IS_ERR(sn)) {
err = PTR_ERR(sn);
@@ -1174,7 +1172,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
int children;
int nstate;
struct fib6_node *child, *pn;
- struct fib6_walker_t *w;
+ struct fib6_walker *w;
int iter = 0;
for (;;) {
@@ -1276,7 +1274,7 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
struct nl_info *info)
{
- struct fib6_walker_t *w;
+ struct fib6_walker *w;
struct rt6_info *rt = *rtp;
struct net *net = info->nl_net;
@@ -1414,7 +1412,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
* <0 -> walk is terminated by an error.
*/
-static int fib6_walk_continue(struct fib6_walker_t *w)
+static int fib6_walk_continue(struct fib6_walker *w)
{
struct fib6_node *fn, *pn;
@@ -1498,7 +1496,7 @@ skip:
}
}
-static int fib6_walk(struct fib6_walker_t *w)
+static int fib6_walk(struct fib6_walker *w)
{
int res;
@@ -1512,15 +1510,25 @@ static int fib6_walk(struct fib6_walker_t *w)
return res;
}
-static int fib6_clean_node(struct fib6_walker_t *w)
+static int fib6_clean_node(struct fib6_walker *w)
{
int res;
struct rt6_info *rt;
- struct fib6_cleaner_t *c = container_of(w, struct fib6_cleaner_t, w);
+ struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w);
struct nl_info info = {
.nl_net = c->net,
};
+ if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
+ w->node->fn_sernum != c->sernum)
+ w->node->fn_sernum = c->sernum;
+
+ if (!c->func) {
+ WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
+ w->leaf = NULL;
+ return 0;
+ }
+
for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
res = c->func(rt, c->arg);
if (res < 0) {
@@ -1554,9 +1562,9 @@ static int fib6_clean_node(struct fib6_walker_t *w)
static void fib6_clean_tree(struct net *net, struct fib6_node *root,
int (*func)(struct rt6_info *, void *arg),
- int prune, void *arg)
+ bool prune, int sernum, void *arg)
{
- struct fib6_cleaner_t c;
+ struct fib6_cleaner c;
c.w.root = root;
c.w.func = fib6_clean_node;
@@ -1564,14 +1572,16 @@ static void fib6_clean_tree(struct net *net, struct fib6_node *root,
c.w.count = 0;
c.w.skip = 0;
c.func = func;
+ c.sernum = sernum;
c.arg = arg;
c.net = net;
fib6_walk(&c.w);
}
-void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
- void *arg)
+static void __fib6_clean_all(struct net *net,
+ int (*func)(struct rt6_info *, void *),
+ int sernum, void *arg)
{
struct fib6_table *table;
struct hlist_head *head;
@@ -1583,13 +1593,19 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
hlist_for_each_entry_rcu(table, head, tb6_hlist) {
write_lock_bh(&table->tb6_lock);
fib6_clean_tree(net, &table->tb6_root,
- func, 0, arg);
+ func, false, sernum, arg);
write_unlock_bh(&table->tb6_lock);
}
}
rcu_read_unlock();
}
+void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *),
+ void *arg)
+{
+ __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg);
+}
+
static int fib6_prune_clone(struct rt6_info *rt, void *arg)
{
if (rt->rt6i_flags & RTF_CACHE) {
@@ -1602,25 +1618,15 @@ static int fib6_prune_clone(struct rt6_info *rt, void *arg)
static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
{
- fib6_clean_tree(net, fn, fib6_prune_clone, 1, NULL);
-}
-
-static int fib6_update_sernum(struct rt6_info *rt, void *arg)
-{
- __u32 sernum = *(__u32 *)arg;
-
- if (rt->rt6i_node &&
- rt->rt6i_node->fn_sernum != sernum)
- rt->rt6i_node->fn_sernum = sernum;
-
- return 0;
+ fib6_clean_tree(net, fn, fib6_prune_clone, true,
+ FIB6_NO_SERNUM_CHANGE, NULL);
}
static void fib6_flush_trees(struct net *net)
{
- __u32 new_sernum = fib6_new_sernum();
+ int new_sernum = fib6_new_sernum(net);
- fib6_clean_all(net, fib6_update_sernum, &new_sernum);
+ __fib6_clean_all(net, NULL, new_sernum, NULL);
}
/*
@@ -1828,10 +1834,10 @@ void fib6_gc_cleanup(void)
struct ipv6_route_iter {
struct seq_net_private p;
- struct fib6_walker_t w;
+ struct fib6_walker w;
loff_t skip;
struct fib6_table *tbl;
- __u32 sernum;
+ int sernum;
};
static int ipv6_route_seq_show(struct seq_file *seq, void *v)
@@ -1859,7 +1865,7 @@ static int ipv6_route_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static int ipv6_route_yield(struct fib6_walker_t *w)
+static int ipv6_route_yield(struct fib6_walker *w)
{
struct ipv6_route_iter *iter = w->args;
@@ -1980,7 +1986,7 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
{
- struct fib6_walker_t *w = &iter->w;
+ struct fib6_walker *w = &iter->w;
return w->node && !(w->state == FWS_U && w->node == w->root);
}
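
The ip6_fib.c conversion retires the racy global rt_sernum in favor of a per-netns atomic bumped in a cmpxchg loop; the value wraps from INT_MAX back to 1, keeping 0 free to mean FIB6_NO_SERNUM_CHANGE. The same loop expressed with C11 atomics, standalone:

        #include <limits.h>
        #include <stdatomic.h>

        static int sernum_bump(atomic_int *sernum)
        {
                int old, new;

                do {
                        old = atomic_load(sernum);
                        new = old < INT_MAX ? old + 1 : 1;  /* never 0 or negative */
                } while (!atomic_compare_exchange_weak(sernum, &old, new));

                return new;
        }

Flushing the trees then becomes __fib6_clean_all() with a NULL func: fib6_clean_node() stamps the new sernum on every node it visits and skips the per-route callback entirely, which lets the separate fib6_update_sernum() walker be deleted.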
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 4052694c6f2c..3dd7d4ebd7cd 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -136,7 +136,7 @@ static void ip6_fl_gc(unsigned long dummy)
spin_lock(&ip6_fl_lock);
- for (i=0; i<=FL_HASH_MASK; i++) {
+ for (i = 0; i <= FL_HASH_MASK; i++) {
struct ip6_flowlabel *fl;
struct ip6_flowlabel __rcu **flp;
@@ -239,7 +239,7 @@ static struct ip6_flowlabel *fl_intern(struct net *net,
/* Socket flowlabel lists */
-struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label)
+struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
struct ipv6_fl_socklist *sfl;
struct ipv6_pinfo *np = inet6_sk(sk);
@@ -259,7 +259,6 @@ struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label)
rcu_read_unlock_bh();
return NULL;
}
-
EXPORT_SYMBOL_GPL(fl6_sock_lookup);
void fl6_free_socklist(struct sock *sk)
@@ -293,11 +292,11 @@ void fl6_free_socklist(struct sock *sk)
following rthdr.
*/
-struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
- struct ip6_flowlabel * fl,
- struct ipv6_txoptions * fopt)
+struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+ struct ip6_flowlabel *fl,
+ struct ipv6_txoptions *fopt)
{
- struct ipv6_txoptions * fl_opt = fl->opt;
+ struct ipv6_txoptions *fl_opt = fl->opt;
if (fopt == NULL || fopt->opt_flen == 0)
return fl_opt;
@@ -388,7 +387,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
goto done;
msg.msg_controllen = olen;
- msg.msg_control = (void*)(fl->opt+1);
+ msg.msg_control = (void *)(fl->opt+1);
memset(&flowi6, 0, sizeof(flowi6));
err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
@@ -517,7 +516,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
struct net *net = sock_net(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_flowlabel_req freq;
- struct ipv6_fl_socklist *sfl1=NULL;
+ struct ipv6_fl_socklist *sfl1 = NULL;
struct ipv6_fl_socklist *sfl;
struct ipv6_fl_socklist __rcu **sflp;
struct ip6_flowlabel *fl, *fl1 = NULL;
@@ -542,7 +541,7 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
}
spin_lock_bh(&ip6_sk_fl_lock);
for (sflp = &np->ipv6_fl_list;
- (sfl = rcu_dereference(*sflp))!=NULL;
+ (sfl = rcu_dereference(*sflp)) != NULL;
sflp = &sfl->next) {
if (sfl->fl->label == freq.flr_label) {
if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 97299d76c1b0..12c3c8ef3849 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -618,6 +618,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
int err = -1;
u8 proto;
struct sk_buff *new_skb;
+ __be16 protocol;
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
@@ -734,8 +735,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
ipv6h->daddr = fl6->daddr;
((__be16 *)(ipv6h + 1))[0] = tunnel->parms.o_flags;
- ((__be16 *)(ipv6h + 1))[1] = (dev->type == ARPHRD_ETHER) ?
- htons(ETH_P_TEB) : skb->protocol;
+ protocol = (dev->type == ARPHRD_ETHER) ?
+ htons(ETH_P_TEB) : skb->protocol;
+ ((__be16 *)(ipv6h + 1))[1] = protocol;
if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
__be32 *ptr = (__be32 *)(((u8 *)ipv6h) + tunnel->hlen - 4);
@@ -756,6 +758,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
}
}
+ skb_set_inner_protocol(skb, protocol);
+
ip6tunnel_xmit(skb, dev);
if (ndst)
ip6_tnl_dst_store(tunnel, ndst);
@@ -1238,7 +1242,7 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->iflink = 0;
dev->addr_len = sizeof(struct in6_addr);
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ netif_keep_dst(dev);
}
static int ip6gre_tunnel_init(struct net_device *dev)
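
Besides the netif_keep_dst() rename, ip6gre_xmit2() now records the payload type with skb_set_inner_protocol() before transmit, the same skb->inner_protocol / inner_protocol_type pair that the reworked skb_udp_tunnel_segment() above dispatches on instead of hard-coding ETH_P_TEB. A plausible shape for the helper, assuming it tags the skb for the ENCAP_TYPE_ETHER branch:

        static inline void skb_set_inner_protocol_sketch(struct sk_buff *skb,
                                                         __be16 protocol)
        {
                skb->inner_protocol      = protocol;
                skb->inner_protocol_type = ENCAP_TYPE_ETHER;
        }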
diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c
index 4578e23834f7..14dacc544c3e 100644
--- a/net/ipv6/ip6_icmp.c
+++ b/net/ipv6/ip6_icmp.c
@@ -13,7 +13,7 @@ static ip6_icmp_send_t __rcu *ip6_icmp_send;
int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
{
return (cmpxchg((ip6_icmp_send_t **)&ip6_icmp_send, NULL, fn) == NULL) ?
- 0 : -EBUSY;
+ 0 : -EBUSY;
}
EXPORT_SYMBOL(inet6_register_icmp_sender);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 51d54dc376f3..a3084ab5df6c 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -15,8 +15,8 @@
*/
/* Changes
*
- * Mitsuru KANDA @USAGI and
- * YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
+ * Mitsuru KANDA @USAGI and
+ * YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
*/
#include <linux/errno.h>
@@ -65,7 +65,7 @@ int ip6_rcv_finish(struct sk_buff *skb)
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
const struct ipv6hdr *hdr;
- u32 pkt_len;
+ u32 pkt_len;
struct inet6_dev *idev;
struct net *net = dev_net(skb->dev);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 65eda2a8af48..9034f76ae013 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -53,31 +53,6 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
return proto;
}
-static int ipv6_gso_send_check(struct sk_buff *skb)
-{
- const struct ipv6hdr *ipv6h;
- const struct net_offload *ops;
- int err = -EINVAL;
-
- if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
- goto out;
-
- ipv6h = ipv6_hdr(skb);
- __skb_pull(skb, sizeof(*ipv6h));
- err = -EPROTONOSUPPORT;
-
- ops = rcu_dereference(inet6_offloads[
- ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
-
- if (likely(ops && ops->callbacks.gso_send_check)) {
- skb_reset_transport_header(skb);
- err = ops->callbacks.gso_send_check(skb);
- }
-
-out:
- return err;
-}
-
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -244,7 +219,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
continue;
iph2 = (struct ipv6hdr *)(p->data + off);
- first_word = *(__be32 *)iph ^ *(__be32 *)iph2 ;
+ first_word = *(__be32 *)iph ^ *(__be32 *)iph2;
/* All fields must match except length and Traffic Class.
* XXX skbs on the gro_list have all been parsed and pulled
@@ -261,6 +236,9 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
/* flush if Traffic Class fields are different */
NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
NAPI_GRO_CB(p)->flush |= flush;
+
+ /* Clear flush_id; there is no concept of an IP ID in IPv6. */
+ NAPI_GRO_CB(p)->flush_id = 0;
}
NAPI_GRO_CB(skb)->flush |= flush;
@@ -303,7 +281,6 @@ out_unlock:
static struct packet_offload ipv6_packet_offload __read_mostly = {
.type = cpu_to_be16(ETH_P_IPV6),
.callbacks = {
- .gso_send_check = ipv6_gso_send_check,
.gso_segment = ipv6_gso_segment,
.gro_receive = ipv6_gro_receive,
.gro_complete = ipv6_gro_complete,
@@ -312,8 +289,9 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
static const struct net_offload sit_offload = {
.callbacks = {
- .gso_send_check = ipv6_gso_send_check,
.gso_segment = ipv6_gso_segment,
+ .gro_receive = ipv6_gro_receive,
+ .gro_complete = ipv6_gro_complete,
},
};
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 0a3448b2888f..8e950c250ada 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -20,7 +20,7 @@
* etc.
*
* H. von Brand : Added missing #include <linux/string.h>
- * Imran Patel : frag id should be in NBO
+ * Imran Patel : frag id should be in NBO
* Kazunori MIYAZAWA @USAGI
* : add ip6_append_data and related functions
* for datagram xmit
@@ -233,7 +233,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
kfree_skb(skb);
return -EMSGSIZE;
}
-
EXPORT_SYMBOL(ip6_xmit);
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
@@ -555,14 +554,14 @@ static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
struct sk_buff *frag;
- struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
+ struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
struct ipv6hdr *tmp_hdr;
struct frag_hdr *fh;
unsigned int mtu, hlen, left, len;
int hroom, troom;
__be32 frag_id = 0;
- int ptr, offset = 0, err=0;
+ int ptr, offset = 0, err = 0;
u8 *prevhdr, nexthdr = 0;
struct net *net = dev_net(skb_dst(skb)->dev);
@@ -637,7 +636,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
}
__skb_pull(skb, hlen);
- fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
+ fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
__skb_push(skb, hlen);
skb_reset_network_header(skb);
memcpy(skb_network_header(skb), tmp_hdr, hlen);
@@ -662,7 +661,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
if (frag) {
frag->ip_summed = CHECKSUM_NONE;
skb_reset_transport_header(frag);
- fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
+ fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
__skb_push(frag, hlen);
skb_reset_network_header(frag);
memcpy(skb_network_header(frag), tmp_hdr,
@@ -681,7 +680,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
}
err = output(skb);
- if(!err)
+ if (!err)
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGCREATES);
@@ -702,11 +701,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
return 0;
}
- while (frag) {
- skb = frag->next;
- kfree_skb(frag);
- frag = skb;
- }
+ kfree_skb_list(frag);
IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
IPSTATS_MIB_FRAGFAILS);
@@ -742,7 +737,7 @@ slow_path:
/*
* Keep copying data until we run out.
*/
- while(left > 0) {
+ while (left > 0) {
len = left;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > mtu)
@@ -865,7 +860,7 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
/* Yes, checking route validity in not connected
* case is not very simple. Take into account,
* that we do not support routing by source, TOS,
- * and MSG_DONTROUTE --ANK (980726)
+ * and MSG_DONTROUTE --ANK (980726)
*
* 1. ip6_rt_check(): If route was host route,
* check that cached destination is current.
@@ -1049,7 +1044,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
int getfrag(void *from, char *to, int offset, int len,
int odd, struct sk_buff *skb),
void *from, int length, int hh_len, int fragheaderlen,
- int transhdrlen, int mtu,unsigned int flags,
+ int transhdrlen, int mtu, unsigned int flags,
struct rt6_info *rt)
{
@@ -1072,7 +1067,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
skb_reserve(skb, hh_len);
/* create space for UDP/IP header */
- skb_put(skb,fragheaderlen + transhdrlen);
+ skb_put(skb, fragheaderlen + transhdrlen);
/* initialize network header pointer */
skb_reset_network_header(skb);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 69a84b464009..9409887fb664 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -412,12 +412,12 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
__u8 nexthdr = ipv6h->nexthdr;
- __u16 off = sizeof (*ipv6h);
+ __u16 off = sizeof(*ipv6h);
while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
__u16 optlen = 0;
struct ipv6_opt_hdr *hdr;
- if (raw + off + sizeof (*hdr) > skb->data &&
+ if (raw + off + sizeof(*hdr) > skb->data &&
!pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
break;
@@ -534,7 +534,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
mtu = IPV6_MIN_MTU;
t->dev->mtu = mtu;
- if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
+ if ((len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
rel_type = ICMPV6_PKT_TOOBIG;
rel_code = 0;
rel_info = mtu;
@@ -995,7 +995,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
t->parms.name);
goto tx_err_dst_release;
}
- mtu = dst_mtu(dst) - sizeof (*ipv6h);
+ mtu = dst_mtu(dst) - sizeof(*ipv6h);
if (encap_limit >= 0) {
max_headroom += 8;
mtu -= 8;
@@ -1087,7 +1087,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
encap_limit = t->parms.encap_limit;
- memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
+ memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_IPIP;
dsfield = ipv4_get_dsfield(iph);
@@ -1139,7 +1139,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
encap_limit = t->parms.encap_limit;
- memcpy(&fl6, &t->fl.u.ip6, sizeof (fl6));
+ memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_IPV6;
dsfield = ipv6_get_dsfield(ipv6h);
@@ -1233,11 +1233,11 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
if (rt->dst.dev) {
dev->hard_header_len = rt->dst.dev->hard_header_len +
- sizeof (struct ipv6hdr);
+ sizeof(struct ipv6hdr);
- dev->mtu = rt->dst.dev->mtu - sizeof (struct ipv6hdr);
+ dev->mtu = rt->dst.dev->mtu - sizeof(struct ipv6hdr);
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- dev->mtu-=8;
+ dev->mtu -= 8;
if (dev->mtu < IPV6_MIN_MTU)
dev->mtu = IPV6_MIN_MTU;
@@ -1354,7 +1354,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
switch (cmd) {
case SIOCGETTUNNEL:
if (dev == ip6n->fb_tnl_dev) {
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
err = -EFAULT;
break;
}
@@ -1366,7 +1366,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
memset(&p, 0, sizeof(p));
}
ip6_tnl_parm_to_user(&p, &t->parms);
- if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
err = -EFAULT;
}
break;
@@ -1376,7 +1376,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
break;
err = -EFAULT;
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
break;
err = -EINVAL;
if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
@@ -1411,7 +1411,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (dev == ip6n->fb_tnl_dev) {
err = -EFAULT;
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
break;
err = -ENOENT;
ip6_tnl_parm_from_user(&p1, &p);
@@ -1486,14 +1486,14 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
dev->destructor = ip6_dev_free;
dev->type = ARPHRD_TUNNEL6;
- dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
- dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
+ dev->hard_header_len = LL_MAX_HEADER + sizeof(struct ipv6hdr);
+ dev->mtu = ETH_DATA_LEN - sizeof(struct ipv6hdr);
t = netdev_priv(dev);
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
- dev->mtu-=8;
+ dev->mtu -= 8;
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ netif_keep_dst(dev);
/* This perm addr will be used as interface identifier by IPv6 */
dev->addr_assign_type = NET_ADDR_RANDOM;
eth_random_addr(dev->perm_addr);
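netif_keep_dst() replaces the open-coded priv_flags twiddling on the removed lines here and in ip6_vti.c below. Judging from what it substitutes for, it is presumably a small netdevice.h helper along these lines:

/* Sketch inferred from the line it replaces; any behaviour beyond
 * clearing IFF_XMIT_DST_RELEASE is an assumption.
 */
static inline void netif_keep_dst(struct net_device *dev)
{
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}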
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
new file mode 100644
index 000000000000..b04ed72c4542
--- /dev/null
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -0,0 +1,107 @@
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/udp.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/in6.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/ip6_tunnel.h>
+#include <net/ip6_checksum.h>
+
+int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
+ struct socket **sockp)
+{
+ struct sockaddr_in6 udp6_addr;
+ int err;
+ struct socket *sock = NULL;
+
+ err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
+ if (err < 0)
+ goto error;
+
+ sk_change_net(sock->sk, net);
+
+ udp6_addr.sin6_family = AF_INET6;
+ memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
+ sizeof(udp6_addr.sin6_addr));
+ udp6_addr.sin6_port = cfg->local_udp_port;
+ err = kernel_bind(sock, (struct sockaddr *)&udp6_addr,
+ sizeof(udp6_addr));
+ if (err < 0)
+ goto error;
+
+ if (cfg->peer_udp_port) {
+ udp6_addr.sin6_family = AF_INET6;
+ memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
+ sizeof(udp6_addr.sin6_addr));
+ udp6_addr.sin6_port = cfg->peer_udp_port;
+ err = kernel_connect(sock,
+ (struct sockaddr *)&udp6_addr,
+ sizeof(udp6_addr), 0);
+ }
+ if (err < 0)
+ goto error;
+
+ udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums);
+ udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums);
+
+ *sockp = sock;
+ return 0;
+
+error:
+ if (sock) {
+ kernel_sock_shutdown(sock, SHUT_RDWR);
+ sk_release_kernel(sock->sk);
+ }
+ *sockp = NULL;
+ return err;
+}
+EXPORT_SYMBOL_GPL(udp_sock_create6);
+
+int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
+ struct sk_buff *skb, struct net_device *dev,
+ struct in6_addr *saddr, struct in6_addr *daddr,
+ __u8 prio, __u8 ttl, __be16 src_port, __be16 dst_port)
+{
+ struct udphdr *uh;
+ struct ipv6hdr *ip6h;
+ struct sock *sk = sock->sk;
+
+ __skb_push(skb, sizeof(*uh));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+
+ uh->dest = dst_port;
+ uh->source = src_port;
+
+ uh->len = htons(skb->len);
+ uh->check = 0;
+
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
+ | IPSKB_REROUTED);
+ skb_dst_set(skb, dst);
+
+ udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr,
+ &sk->sk_v6_daddr, skb->len);
+
+ __skb_push(skb, sizeof(*ip6h));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6_flow_hdr(ip6h, prio, htonl(0));
+ ip6h->payload_len = htons(skb->len);
+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->hop_limit = ttl;
+ ip6h->daddr = *daddr;
+ ip6h->saddr = *saddr;
+
+ ip6tunnel_xmit(skb, dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(udp_tunnel6_xmit_skb);
+
+MODULE_LICENSE("GPL");
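For context, a hedged sketch of how a tunnel driver might call udp_sock_create6(); the caller and its names are hypothetical, and only the udp_port_cfg fields actually exercised above are touched:

/* Hypothetical caller of udp_sock_create6(). */
static struct socket *my_tunnel_open6(struct net *net,
				      const struct in6_addr *laddr,
				      __be16 port)
{
	struct udp_port_cfg cfg = { };
	struct socket *sock;
	int err;

	cfg.local_ip6 = *laddr;		/* bind address */
	cfg.local_udp_port = port;	/* already big-endian */
	cfg.use_udp6_rx_checksums = true;

	err = udp_sock_create6(net, &cfg, &sock);
	if (err < 0)
		return ERR_PTR(err);
	return sock;
}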
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 5833a2244467..d440bb585524 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -807,7 +807,7 @@ static void vti6_dev_setup(struct net_device *dev)
dev->mtu = ETH_DATA_LEN;
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ netif_keep_dst(dev);
}
/**
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index f9a3fd320d1d..0171f08325c3 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -845,7 +845,7 @@ static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
atomic_dec(&mrt->cache_resolve_queue_len);
- while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
+ while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
if (ipv6_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
nlh->nlmsg_type = NLMSG_ERROR;
@@ -1103,7 +1103,7 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
* Play the pending entries through our router
*/
- while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
+ while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
if (ipv6_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index d1c793cffcb5..1b9316e1386a 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -181,8 +181,7 @@ static int ipcomp6_rcv_cb(struct sk_buff *skb, int err)
return 0;
}
-static const struct xfrm_type ipcomp6_type =
-{
+static const struct xfrm_type ipcomp6_type = {
.description = "IPCOMP6",
.owner = THIS_MODULE,
.proto = IPPROTO_COMP,
@@ -193,8 +192,7 @@ static const struct xfrm_type ipcomp6_type =
.hdr_offset = xfrm6_find_1stfragopt,
};
-static struct xfrm6_protocol ipcomp6_protocol =
-{
+static struct xfrm6_protocol ipcomp6_protocol = {
.handler = xfrm6_rcv,
.cb_handler = ipcomp6_rcv_cb,
.err_handler = ipcomp6_err,
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0c289982796d..e1a9583bb419 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -66,12 +66,12 @@ int ip6_ra_control(struct sock *sk, int sel)
if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num != IPPROTO_RAW)
return -ENOPROTOOPT;
- new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
+ new_ra = (sel >= 0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
write_lock_bh(&ip6_ra_lock);
- for (rap = &ip6_ra_chain; (ra=*rap) != NULL; rap = &ra->next) {
+ for (rap = &ip6_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
if (ra->sk == sk) {
- if (sel>=0) {
+ if (sel >= 0) {
write_unlock_bh(&ip6_ra_lock);
kfree(new_ra);
return -EADDRINUSE;
@@ -130,7 +130,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
int retv = -ENOPROTOOPT;
if (optval == NULL)
- val=0;
+ val = 0;
else {
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
@@ -139,7 +139,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
val = 0;
}
- valbool = (val!=0);
+ valbool = (val != 0);
if (ip6_mroute_opt(optname))
return ip6_mroute_setsockopt(sk, optname, optval, optlen);
@@ -474,7 +474,7 @@ sticky_done:
goto done;
msg.msg_controllen = optlen;
- msg.msg_control = (void*)(opt+1);
+ msg.msg_control = (void *)(opt+1);
retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
&junk, &junk);
@@ -687,7 +687,7 @@ done:
retv = -ENOBUFS;
break;
}
- gsf = kmalloc(optlen,GFP_KERNEL);
+ gsf = kmalloc(optlen, GFP_KERNEL);
if (!gsf) {
retv = -ENOBUFS;
break;
@@ -873,7 +873,6 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
#endif
return err;
}
-
EXPORT_SYMBOL(ipv6_setsockopt);
#ifdef CONFIG_COMPAT
@@ -909,7 +908,6 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
#endif
return err;
}
-
EXPORT_SYMBOL(compat_ipv6_setsockopt);
#endif
@@ -921,7 +919,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
if (!opt)
return 0;
- switch(optname) {
+ switch (optname) {
case IPV6_HOPOPTS:
hdr = opt->hopopt;
break;
@@ -1284,9 +1282,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
return -ENOPROTOOPT;
}
len = min_t(unsigned int, sizeof(int), len);
- if(put_user(len, optlen))
+ if (put_user(len, optlen))
return -EFAULT;
- if(copy_to_user(optval,&val,len))
+ if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
@@ -1299,7 +1297,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
if (level == SOL_IP && sk->sk_type != SOCK_RAW)
return udp_prot.getsockopt(sk, level, optname, optval, optlen);
- if(level != SOL_IPV6)
+ if (level != SOL_IPV6)
return -ENOPROTOOPT;
err = do_ipv6_getsockopt(sk, level, optname, optval, optlen, 0);
@@ -1321,7 +1319,6 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
#endif
return err;
}
-
EXPORT_SYMBOL(ipv6_getsockopt);
#ifdef CONFIG_COMPAT
@@ -1364,7 +1361,6 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
#endif
return err;
}
-
EXPORT_SYMBOL(compat_ipv6_getsockopt);
#endif
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index a23b655a7627..9648de2b6745 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -64,15 +64,6 @@
#include <net/ip6_checksum.h>
-/* Set to 3 to get tracing... */
-#define MCAST_DEBUG 2
-
-#if MCAST_DEBUG >= 3
-#define MDBG(x) printk x
-#else
-#define MDBG(x)
-#endif
-
/* Ensure that we have struct in6_addr aligned on 32bit word. */
static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
BUILD_BUG_ON_NULL(offsetof(struct mld2_query, mld2q_srcs) % 4),
@@ -82,9 +73,6 @@ static void *__mld2_query_bugs[] __attribute__((__unused__)) = {
static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
-/* Big mc list lock for all the sockets */
-static DEFINE_SPINLOCK(ipv6_sk_mc_lock);
-
static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
static void igmp6_timer_handler(unsigned long data);
@@ -121,6 +109,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
#define IPV6_MLD_MAX_MSF 64
int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
+int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
/*
* socket join on multicast group
@@ -173,7 +162,6 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
mc_lst->addr = *addr;
rtnl_lock();
- rcu_read_lock();
if (ifindex == 0) {
struct rt6_info *rt;
rt = rt6_lookup(net, addr, NULL, 0, 0);
@@ -182,10 +170,9 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
ip6_rt_put(rt);
}
} else
- dev = dev_get_by_index_rcu(net, ifindex);
+ dev = __dev_get_by_index(net, ifindex);
if (dev == NULL) {
- rcu_read_unlock();
rtnl_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return -ENODEV;
@@ -203,18 +190,14 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
err = ipv6_dev_mc_inc(dev, addr);
if (err) {
- rcu_read_unlock();
rtnl_unlock();
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
return err;
}
- spin_lock(&ipv6_sk_mc_lock);
mc_lst->next = np->ipv6_mc_list;
rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
- spin_unlock(&ipv6_sk_mc_lock);
- rcu_read_unlock();
rtnl_unlock();
return 0;
@@ -234,20 +217,16 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
return -EINVAL;
rtnl_lock();
- spin_lock(&ipv6_sk_mc_lock);
for (lnk = &np->ipv6_mc_list;
- (mc_lst = rcu_dereference_protected(*lnk,
- lockdep_is_held(&ipv6_sk_mc_lock))) !=NULL ;
+ (mc_lst = rtnl_dereference(*lnk)) != NULL;
lnk = &mc_lst->next) {
if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
ipv6_addr_equal(&mc_lst->addr, addr)) {
struct net_device *dev;
*lnk = mc_lst->next;
- spin_unlock(&ipv6_sk_mc_lock);
- rcu_read_lock();
- dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
+ dev = __dev_get_by_index(net, mc_lst->ifindex);
if (dev != NULL) {
struct inet6_dev *idev = __in6_dev_get(dev);
@@ -256,7 +235,6 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
__ipv6_dev_mc_dec(idev, &mc_lst->addr);
} else
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
- rcu_read_unlock();
rtnl_unlock();
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
@@ -264,7 +242,6 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
return 0;
}
}
- spin_unlock(&ipv6_sk_mc_lock);
rtnl_unlock();
return -EADDRNOTAVAIL;
@@ -311,16 +288,12 @@ void ipv6_sock_mc_close(struct sock *sk)
return;
rtnl_lock();
- spin_lock(&ipv6_sk_mc_lock);
- while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
- lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
+ while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
struct net_device *dev;
np->ipv6_mc_list = mc_lst->next;
- spin_unlock(&ipv6_sk_mc_lock);
- rcu_read_lock();
- dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
+ dev = __dev_get_by_index(net, mc_lst->ifindex);
if (dev) {
struct inet6_dev *idev = __in6_dev_get(dev);
@@ -329,14 +302,11 @@ void ipv6_sock_mc_close(struct sock *sk)
__ipv6_dev_mc_dec(idev, &mc_lst->addr);
} else
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
- rcu_read_unlock();
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
kfree_rcu(mc_lst, rcu);
- spin_lock(&ipv6_sk_mc_lock);
}
- spin_unlock(&ipv6_sk_mc_lock);
rtnl_unlock();
}
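The pattern across ipv6_sock_mc_join(), _drop() and _close() is the same: writes to the per-socket multicast list are now serialized by RTNL (readers keep using RCU), so the dedicated ipv6_sk_mc_lock spinlock and the rcu_read_lock around the device lookups become redundant, and rtnl_dereference()/__dev_get_by_index() document the new rule. A minimal sketch of the resulting walk, assuming the caller holds RTNL:

/* Sketch: traversing np->ipv6_mc_list under RTNL, as the
 * rewritten functions above now do.
 */
static void walk_sock_mc_list(struct ipv6_pinfo *np)
{
	struct ipv6_mc_socklist __rcu **lnk;
	struct ipv6_mc_socklist *mc_lst;

	ASSERT_RTNL();	/* writers serialize on RTNL, readers use RCU */
	for (lnk = &np->ipv6_mc_list;
	     (mc_lst = rtnl_dereference(*lnk)) != NULL;
	     lnk = &mc_lst->next)
		;	/* inspect or unlink *lnk here */
}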
@@ -400,7 +370,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
if (!psl)
goto done; /* err = -EADDRNOTAVAIL */
rv = !0;
- for (i=0; i<psl->sl_count; i++) {
+ for (i = 0; i < psl->sl_count; i++) {
rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
if (rv == 0)
break;
@@ -417,7 +387,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
/* update the interface filter */
ip6_mc_del_src(idev, group, omode, 1, source, 1);
- for (j=i+1; j<psl->sl_count; j++)
+ for (j = i+1; j < psl->sl_count; j++)
psl->sl_addr[j-1] = psl->sl_addr[j];
psl->sl_count--;
err = 0;
@@ -443,19 +413,19 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
newpsl->sl_max = count;
newpsl->sl_count = count - IP6_SFBLOCK;
if (psl) {
- for (i=0; i<psl->sl_count; i++)
+ for (i = 0; i < psl->sl_count; i++)
newpsl->sl_addr[i] = psl->sl_addr[i];
sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
}
pmc->sflist = psl = newpsl;
}
rv = 1; /* > 0 for insert logic below if sl_count is 0 */
- for (i=0; i<psl->sl_count; i++) {
+ for (i = 0; i < psl->sl_count; i++) {
rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
if (rv == 0) /* There is an error in the address. */
goto done;
}
- for (j=psl->sl_count-1; j>=i; j--)
+ for (j = psl->sl_count-1; j >= i; j--)
psl->sl_addr[j+1] = psl->sl_addr[j];
psl->sl_addr[i] = *source;
psl->sl_count++;
@@ -524,7 +494,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
goto done;
}
newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
- for (i=0; i<newpsl->sl_count; ++i) {
+ for (i = 0; i < newpsl->sl_count; ++i) {
struct sockaddr_in6 *psin6;
psin6 = (struct sockaddr_in6 *)&gsf->gf_slist[i];
@@ -586,9 +556,8 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
}
err = -EADDRNOTAVAIL;
- /*
- * changes to the ipv6_mc_list require the socket lock and
- * a read lock on ip6_sk_mc_lock. We have the socket lock,
+ /* changes to the ipv6_mc_list require the socket lock and
+ * rtnl lock. We have the socket lock and rcu read lock,
* so reading the list is safe.
*/
@@ -612,11 +581,10 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
return -EFAULT;
}
- /* changes to psl require the socket lock, a read lock on
- * on ipv6_sk_mc_lock and a write lock on pmc->sflock. We
- * have the socket lock, so reading here is safe.
+ /* changes to psl require the socket lock, and a write lock
+ * on pmc->sflock. We have the socket lock so reading here is safe.
*/
- for (i=0; i<copycount; i++) {
+ for (i = 0; i < copycount; i++) {
struct sockaddr_in6 *psin6;
struct sockaddr_storage ss;
@@ -658,7 +626,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
} else {
int i;
- for (i=0; i<psl->sl_count; i++) {
+ for (i = 0; i < psl->sl_count; i++) {
if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
break;
}
@@ -673,14 +641,6 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
return rv;
}
-static void ma_put(struct ifmcaddr6 *mc)
-{
- if (atomic_dec_and_test(&mc->mca_refcnt)) {
- in6_dev_put(mc->idev);
- kfree(mc);
- }
-}
-
static void igmp6_group_added(struct ifmcaddr6 *mc)
{
struct net_device *dev = mc->idev->dev;
@@ -772,7 +732,7 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
pmc->mca_tomb = im->mca_tomb;
pmc->mca_sources = im->mca_sources;
im->mca_tomb = im->mca_sources = NULL;
- for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
+ for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
psf->sf_crcount = pmc->mca_crcount;
}
spin_unlock_bh(&im->mca_lock);
@@ -790,7 +750,7 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
spin_lock_bh(&idev->mc_lock);
pmc_prev = NULL;
- for (pmc=idev->mc_tomb; pmc; pmc=pmc->next) {
+ for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
if (ipv6_addr_equal(&pmc->mca_addr, pmca))
break;
pmc_prev = pmc;
@@ -804,7 +764,7 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
spin_unlock_bh(&idev->mc_lock);
if (pmc) {
- for (psf=pmc->mca_tomb; psf; psf=psf_next) {
+ for (psf = pmc->mca_tomb; psf; psf = psf_next) {
psf_next = psf->sf_next;
kfree(psf);
}
@@ -831,14 +791,14 @@ static void mld_clear_delrec(struct inet6_dev *idev)
/* clear dead sources, too */
read_lock_bh(&idev->lock);
- for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
+ for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
struct ip6_sf_list *psf, *psf_next;
spin_lock_bh(&pmc->mca_lock);
psf = pmc->mca_tomb;
pmc->mca_tomb = NULL;
spin_unlock_bh(&pmc->mca_lock);
- for (; psf; psf=psf_next) {
+ for (; psf; psf = psf_next) {
psf_next = psf->sf_next;
kfree(psf);
}
@@ -846,6 +806,48 @@ static void mld_clear_delrec(struct inet6_dev *idev)
read_unlock_bh(&idev->lock);
}
+static void mca_get(struct ifmcaddr6 *mc)
+{
+ atomic_inc(&mc->mca_refcnt);
+}
+
+static void ma_put(struct ifmcaddr6 *mc)
+{
+ if (atomic_dec_and_test(&mc->mca_refcnt)) {
+ in6_dev_put(mc->idev);
+ kfree(mc);
+ }
+}
+
+static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
+ const struct in6_addr *addr)
+{
+ struct ifmcaddr6 *mc;
+
+ mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
+ if (mc == NULL)
+ return NULL;
+
+ setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
+
+ mc->mca_addr = *addr;
+ mc->idev = idev; /* reference taken by caller */
+ mc->mca_users = 1;
+ /* mca_stamp should be updated upon changes */
+ mc->mca_cstamp = mc->mca_tstamp = jiffies;
+ atomic_set(&mc->mca_refcnt, 1);
+ spin_lock_init(&mc->mca_lock);
+
+ /* initial mode is (EX, empty) */
+ mc->mca_sfmode = MCAST_EXCLUDE;
+ mc->mca_sfcount[MCAST_EXCLUDE] = 1;
+
+ if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
+ IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
+ mc->mca_flags |= MAF_NOREPORT;
+
+ return mc;
+}

/*
* device multicast group inc (add if not found)
@@ -881,38 +883,20 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
}
}
- /*
- * not found: create a new one.
- */
-
- mc = kzalloc(sizeof(struct ifmcaddr6), GFP_ATOMIC);
-
- if (mc == NULL) {
+ mc = mca_alloc(idev, addr);
+ if (!mc) {
write_unlock_bh(&idev->lock);
in6_dev_put(idev);
return -ENOMEM;
}
- setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
-
- mc->mca_addr = *addr;
- mc->idev = idev; /* (reference taken) */
- mc->mca_users = 1;
- /* mca_stamp should be updated upon changes */
- mc->mca_cstamp = mc->mca_tstamp = jiffies;
- atomic_set(&mc->mca_refcnt, 2);
- spin_lock_init(&mc->mca_lock);
-
- /* initial mode is (EX, empty) */
- mc->mca_sfmode = MCAST_EXCLUDE;
- mc->mca_sfcount[MCAST_EXCLUDE] = 1;
-
- if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
- IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
- mc->mca_flags |= MAF_NOREPORT;
-
mc->next = idev->mc_list;
idev->mc_list = mc;
+
+ /* Hold a reference for the code below before we unlock;
+ * the entry is already exposed via idev->mc_list.
+ */
+ mca_get(mc);
write_unlock_bh(&idev->lock);
mld_del_delrec(idev, &mc->mca_addr);
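The refactor also makes the reference counting explicit: mca_alloc() returns an entry with refcount 1 (the reference owned by the idev->mc_list link), and the second, temporary reference for the code that runs after the unlock is taken visibly via mca_get(), instead of the old opaque atomic_set(&mc->mca_refcnt, 2). The resulting lifecycle, sketched (the trailing ma_put() is the pre-existing tail of ipv6_dev_mc_inc()):

mc = mca_alloc(idev, addr);	/* refcnt == 1: the list's reference */
if (!mc)
	return -ENOMEM;
mc->next = idev->mc_list;
idev->mc_list = mc;		/* entry now visible on the list */
mca_get(mc);			/* refcnt == 2: pin across the unlock */
write_unlock_bh(&idev->lock);
/* ... mld_del_delrec(), igmp6_group_added() ... */
ma_put(mc);			/* drop the temporary pin */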
@@ -931,7 +915,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
ASSERT_RTNL();
write_lock_bh(&idev->lock);
- for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) {
+ for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) {
if (ipv6_addr_equal(&ma->mca_addr, addr)) {
if (--ma->mca_users == 0) {
*map = ma->next;
@@ -956,7 +940,7 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
struct inet6_dev *idev;
int err;
- rcu_read_lock();
+ ASSERT_RTNL();
idev = __in6_dev_get(dev);
if (!idev)
@@ -964,7 +948,6 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
else
err = __ipv6_dev_mc_dec(idev, addr);
- rcu_read_unlock();
return err;
}
@@ -982,7 +965,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
idev = __in6_dev_get(dev);
if (idev) {
read_lock_bh(&idev->lock);
- for (mc = idev->mc_list; mc; mc=mc->next) {
+ for (mc = idev->mc_list; mc; mc = mc->next) {
if (ipv6_addr_equal(&mc->mca_addr, group))
break;
}
@@ -991,7 +974,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
struct ip6_sf_list *psf;
spin_lock_bh(&mc->mca_lock);
- for (psf=mc->mca_sources;psf;psf=psf->sf_next) {
+ for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
if (ipv6_addr_equal(&psf->sf_addr, src_addr))
break;
}
@@ -1000,7 +983,7 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
psf->sf_count[MCAST_EXCLUDE] !=
mc->mca_sfcount[MCAST_EXCLUDE];
else
- rv = mc->mca_sfcount[MCAST_EXCLUDE] !=0;
+ rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
spin_unlock_bh(&mc->mca_lock);
} else
rv = true; /* don't filter unspecified source */
@@ -1091,10 +1074,10 @@ static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
int i, scount;
scount = 0;
- for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
if (scount == nsrcs)
break;
- for (i=0; i<nsrcs; i++) {
+ for (i = 0; i < nsrcs; i++) {
/* skip inactive filters */
if (psf->sf_count[MCAST_INCLUDE] ||
pmc->mca_sfcount[MCAST_EXCLUDE] !=
@@ -1124,10 +1107,10 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
/* mark INCLUDE-mode sources */
scount = 0;
- for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
if (scount == nsrcs)
break;
- for (i=0; i<nsrcs; i++) {
+ for (i = 0; i < nsrcs; i++) {
if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
psf->sf_gsresp = 1;
scount++;
@@ -1205,15 +1188,16 @@ static void mld_update_qrv(struct inet6_dev *idev,
* and SHOULD NOT be one. Catch this here if we ever run
* into such a case in future.
*/
+ const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
WARN_ON(idev->mc_qrv == 0);
if (mlh2->mld2q_qrv > 0)
idev->mc_qrv = mlh2->mld2q_qrv;
- if (unlikely(idev->mc_qrv < 2)) {
+ if (unlikely(idev->mc_qrv < min_qrv)) {
net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
- idev->mc_qrv, MLD_QRV_DEFAULT);
- idev->mc_qrv = MLD_QRV_DEFAULT;
+ idev->mc_qrv, min_qrv);
+ idev->mc_qrv = min_qrv;
}
}
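With the new sysctl, the lower clamp bound becomes min(MLD_QRV_DEFAULT, sysctl_mld_qrv) instead of a hard-coded 2, so an administrator may deliberately lower the robustness variable while bogus router advertisements are still clamped. A sketch of the clamp with illustrative values:

/* Sketch of the clamp; MLD_QRV_DEFAULT is the RFC 3810 default of 2. */
static int clamp_qrv(int advertised, int sysctl_qrv)
{
	const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_qrv);

	/* e.g. advertised == 1, sysctl_qrv == 2  ->  returns 2 */
	return advertised < min_qrv ? min_qrv : advertised;
}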
@@ -1253,7 +1237,7 @@ static void mld_update_qri(struct inet6_dev *idev,
}
static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
- unsigned long *max_delay)
+ unsigned long *max_delay, bool v1_query)
{
unsigned long mldv1_md;
@@ -1261,11 +1245,32 @@ static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
if (mld_in_v2_mode_only(idev))
return -EINVAL;
- /* MLDv1 router present */
mldv1_md = ntohs(mld->mld_maxdelay);
+
+ /* When we are in MLDv1 fallback and an MLDv2 router starts up
+ * unaware of the current MLDv1 operation, the MRC == MRD mapping
+ * only works when the exponential algorithm is not being
+ * used (MLDv1 knows nothing of such things).
+ *
+ * According to the RFC author, the MLDv2 implementations
+ * he is aware of all use an MRC < 32768 on start-up queries.
+ *
+ * Thus, should we *ever* encounter something larger than
+ * that, just assume the maximum possible within our
+ * reach.
+ */
+ if (!v1_query)
+ mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);
+
*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
- mld_set_v1_mode(idev);
+ /* MLDv1 router present: we need to go into v1 mode *only*
+ * when an MLDv1 query is received as per section 9.12. of
+ * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
+ * queries MUST be of exactly 24 octets.
+ */
+ if (v1_query)
+ mld_set_v1_mode(idev);
/* cancel MLDv2 report timer */
mld_gq_stop_timer(idev);
@@ -1280,10 +1285,6 @@ static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
unsigned long *max_delay)
{
- /* hosts need to stay in MLDv1 mode, discard MLDv2 queries */
- if (mld_in_v1_mode(idev))
- return -EINVAL;
-
*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
mld_update_qrv(idev, mld);
@@ -1340,8 +1341,11 @@ int igmp6_event_query(struct sk_buff *skb)
!(group_type&IPV6_ADDR_MULTICAST))
return -EINVAL;
- if (len == MLD_V1_QUERY_LEN) {
- err = mld_process_v1(idev, mld, &max_delay);
+ if (len < MLD_V1_QUERY_LEN) {
+ return -EINVAL;
+ } else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
+ err = mld_process_v1(idev, mld, &max_delay,
+ len == MLD_V1_QUERY_LEN);
if (err < 0)
return err;
} else if (len >= MLD_V2_QUERY_LEN_MIN) {
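The dispatch now keys on both query length and current mode: an MLDv1 query is exactly 24 octets (MLD_V1_QUERY_LEN, per RFC 2710 section 3.7), an MLDv2 query is at least MLD_V2_QUERY_LEN_MIN octets, and while in v1 fallback even a v2-sized query is routed through mld_process_v1() with v1_query == false, so the MLDV1_MRD_MAX_COMPAT clamp applies without re-arming v1 mode. A sketch of the selection predicate:

/* Sketch: when a received query is handled by the v1 path. */
static bool treat_as_v1_query(struct inet6_dev *idev, unsigned int len)
{
	return len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev);
}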
@@ -1373,18 +1377,19 @@ int igmp6_event_query(struct sk_buff *skb)
mlh2 = (struct mld2_query *)skb_transport_header(skb);
mark = 1;
}
- } else
+ } else {
return -EINVAL;
+ }
read_lock_bh(&idev->lock);
if (group_type == IPV6_ADDR_ANY) {
- for (ma = idev->mc_list; ma; ma=ma->next) {
+ for (ma = idev->mc_list; ma; ma = ma->next) {
spin_lock_bh(&ma->mca_lock);
igmp6_group_queried(ma, max_delay);
spin_unlock_bh(&ma->mca_lock);
}
} else {
- for (ma = idev->mc_list; ma; ma=ma->next) {
+ for (ma = idev->mc_list; ma; ma = ma->next) {
if (!ipv6_addr_equal(group, &ma->mca_addr))
continue;
spin_lock_bh(&ma->mca_lock);
@@ -1448,7 +1453,7 @@ int igmp6_event_report(struct sk_buff *skb)
*/
read_lock_bh(&idev->lock);
- for (ma = idev->mc_list; ma; ma=ma->next) {
+ for (ma = idev->mc_list; ma; ma = ma->next) {
if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
spin_lock(&ma->mca_lock);
if (del_timer(&ma->mca_timer))
@@ -1512,7 +1517,7 @@ mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
struct ip6_sf_list *psf;
int scount = 0;
- for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
if (!is_in(pmc, psf, type, gdeleted, sdeleted))
continue;
scount++;
@@ -1726,7 +1731,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
}
first = 1;
psf_prev = NULL;
- for (psf=*psf_list; psf; psf=psf_next) {
+ for (psf = *psf_list; psf; psf = psf_next) {
struct in6_addr *psrc;
psf_next = psf->sf_next;
@@ -1805,7 +1810,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
read_lock_bh(&idev->lock);
if (!pmc) {
- for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
+ for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
if (pmc->mca_flags & MAF_NOREPORT)
continue;
spin_lock_bh(&pmc->mca_lock);
@@ -1838,7 +1843,7 @@ static void mld_clear_zeros(struct ip6_sf_list **ppsf)
struct ip6_sf_list *psf_prev, *psf_next, *psf;
psf_prev = NULL;
- for (psf=*ppsf; psf; psf = psf_next) {
+ for (psf = *ppsf; psf; psf = psf_next) {
psf_next = psf->sf_next;
if (psf->sf_crcount == 0) {
if (psf_prev)
@@ -1862,7 +1867,7 @@ static void mld_send_cr(struct inet6_dev *idev)
/* deleted MCA's */
pmc_prev = NULL;
- for (pmc=idev->mc_tomb; pmc; pmc=pmc_next) {
+ for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
pmc_next = pmc->next;
if (pmc->mca_sfmode == MCAST_INCLUDE) {
type = MLD2_BLOCK_OLD_SOURCES;
@@ -1895,7 +1900,7 @@ static void mld_send_cr(struct inet6_dev *idev)
spin_unlock(&idev->mc_lock);
/* change recs */
- for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
+ for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
spin_lock_bh(&pmc->mca_lock);
if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
type = MLD2_BLOCK_OLD_SOURCES;
@@ -2032,7 +2037,7 @@ static void mld_send_initial_cr(struct inet6_dev *idev)
skb = NULL;
read_lock_bh(&idev->lock);
- for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
+ for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
spin_lock_bh(&pmc->mca_lock);
if (pmc->mca_sfcount[MCAST_EXCLUDE])
type = MLD2_CHANGE_TO_EXCLUDE;
@@ -2077,7 +2082,7 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
int rv = 0;
psf_prev = NULL;
- for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
break;
psf_prev = psf;
@@ -2118,7 +2123,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
if (!idev)
return -ENODEV;
read_lock_bh(&idev->lock);
- for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
+ for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
if (ipv6_addr_equal(pmca, &pmc->mca_addr))
break;
}
@@ -2138,7 +2143,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
pmc->mca_sfcount[sfmode]--;
}
err = 0;
- for (i=0; i<sfcount; i++) {
+ for (i = 0; i < sfcount; i++) {
int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
changerec |= rv > 0;
@@ -2154,7 +2159,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
pmc->mca_sfmode = MCAST_INCLUDE;
pmc->mca_crcount = idev->mc_qrv;
idev->mc_ifc_count = pmc->mca_crcount;
- for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
+ for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
mld_ifc_event(pmc->idev);
} else if (sf_setstate(pmc) || changerec)
@@ -2173,7 +2178,7 @@ static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
struct ip6_sf_list *psf, *psf_prev;
psf_prev = NULL;
- for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
break;
psf_prev = psf;
@@ -2198,7 +2203,7 @@ static void sf_markstate(struct ifmcaddr6 *pmc)
struct ip6_sf_list *psf;
int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
- for (psf=pmc->mca_sources; psf; psf=psf->sf_next)
+ for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
psf->sf_oldin = mca_xcount ==
psf->sf_count[MCAST_EXCLUDE] &&
@@ -2215,7 +2220,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
int new_in, rv;
rv = 0;
- for (psf=pmc->mca_sources; psf; psf=psf->sf_next) {
+ for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
!psf->sf_count[MCAST_INCLUDE];
@@ -2225,8 +2230,8 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
if (!psf->sf_oldin) {
struct ip6_sf_list *prev = NULL;
- for (dpsf=pmc->mca_tomb; dpsf;
- dpsf=dpsf->sf_next) {
+ for (dpsf = pmc->mca_tomb; dpsf;
+ dpsf = dpsf->sf_next) {
if (ipv6_addr_equal(&dpsf->sf_addr,
&psf->sf_addr))
break;
@@ -2248,7 +2253,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
* add or update "delete" records if an active filter
* is now inactive
*/
- for (dpsf=pmc->mca_tomb; dpsf; dpsf=dpsf->sf_next)
+ for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
if (ipv6_addr_equal(&dpsf->sf_addr,
&psf->sf_addr))
break;
@@ -2282,7 +2287,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
if (!idev)
return -ENODEV;
read_lock_bh(&idev->lock);
- for (pmc=idev->mc_list; pmc; pmc=pmc->next) {
+ for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
if (ipv6_addr_equal(pmca, &pmc->mca_addr))
break;
}
@@ -2298,7 +2303,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
if (!delta)
pmc->mca_sfcount[sfmode]++;
err = 0;
- for (i=0; i<sfcount; i++) {
+ for (i = 0; i < sfcount; i++) {
err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
if (err)
break;
@@ -2308,7 +2313,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
if (!delta)
pmc->mca_sfcount[sfmode]--;
- for (j=0; j<i; j++)
+ for (j = 0; j < i; j++)
ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
struct ip6_sf_list *psf;
@@ -2322,7 +2327,7 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
pmc->mca_crcount = idev->mc_qrv;
idev->mc_ifc_count = pmc->mca_crcount;
- for (psf=pmc->mca_sources; psf; psf = psf->sf_next)
+ for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
mld_ifc_event(idev);
} else if (sf_setstate(pmc))
@@ -2336,12 +2341,12 @@ static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
{
struct ip6_sf_list *psf, *nextpsf;
- for (psf=pmc->mca_tomb; psf; psf=nextpsf) {
+ for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
nextpsf = psf->sf_next;
kfree(psf);
}
pmc->mca_tomb = NULL;
- for (psf=pmc->mca_sources; psf; psf=nextpsf) {
+ for (psf = pmc->mca_sources; psf; psf = nextpsf) {
nextpsf = psf->sf_next;
kfree(psf);
}
@@ -2380,7 +2385,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
{
int err;
- /* callers have the socket lock and a write lock on ipv6_sk_mc_lock,
+ /* callers have the socket lock and rtnl lock
* so no other readers or writers of iml or its sflist
*/
if (!iml->sflist) {
@@ -2485,13 +2490,21 @@ void ipv6_mc_down(struct inet6_dev *idev)
mld_gq_stop_timer(idev);
mld_dad_stop_timer(idev);
- for (i = idev->mc_list; i; i=i->next)
+ for (i = idev->mc_list; i; i = i->next)
igmp6_group_dropped(i);
read_unlock_bh(&idev->lock);
mld_clear_delrec(idev);
}
+static void ipv6_mc_reset(struct inet6_dev *idev)
+{
+ idev->mc_qrv = sysctl_mld_qrv;
+ idev->mc_qi = MLD_QI_DEFAULT;
+ idev->mc_qri = MLD_QRI_DEFAULT;
+ idev->mc_v1_seen = 0;
+ idev->mc_maxdelay = unsolicited_report_interval(idev);
+}
/* Device going up */
@@ -2502,7 +2515,8 @@ void ipv6_mc_up(struct inet6_dev *idev)
/* Install multicast list, except for all-nodes (already installed) */
read_lock_bh(&idev->lock);
- for (i = idev->mc_list; i; i=i->next)
+ ipv6_mc_reset(idev);
+ for (i = idev->mc_list; i; i = i->next)
igmp6_group_added(i);
read_unlock_bh(&idev->lock);
}
@@ -2522,13 +2536,7 @@ void ipv6_mc_init_dev(struct inet6_dev *idev)
(unsigned long)idev);
setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
(unsigned long)idev);
-
- idev->mc_qrv = MLD_QRV_DEFAULT;
- idev->mc_qi = MLD_QI_DEFAULT;
- idev->mc_qri = MLD_QRI_DEFAULT;
-
- idev->mc_maxdelay = unsolicited_report_interval(idev);
- idev->mc_v1_seen = 0;
+ ipv6_mc_reset(idev);
write_unlock_bh(&idev->lock);
}
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index db9b6cbc9db3..f61429d391d3 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -336,11 +336,10 @@ static void mip6_destopt_destroy(struct xfrm_state *x)
{
}
-static const struct xfrm_type mip6_destopt_type =
-{
+static const struct xfrm_type mip6_destopt_type = {
.description = "MIP6DESTOPT",
.owner = THIS_MODULE,
- .proto = IPPROTO_DSTOPTS,
+ .proto = IPPROTO_DSTOPTS,
.flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_LOCAL_COADDR,
.init_state = mip6_destopt_init_state,
.destructor = mip6_destopt_destroy,
@@ -469,11 +468,10 @@ static void mip6_rthdr_destroy(struct xfrm_state *x)
{
}
-static const struct xfrm_type mip6_rthdr_type =
-{
+static const struct xfrm_type mip6_rthdr_type = {
.description = "MIP6RT",
.owner = THIS_MODULE,
- .proto = IPPROTO_ROUTING,
+ .proto = IPPROTO_ROUTING,
.flags = XFRM_TYPE_NON_FRAGMENT | XFRM_TYPE_REMOTE_COADDR,
.init_state = mip6_rthdr_init_state,
.destructor = mip6_rthdr_destroy,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 339078f95d1b..4cb45c1079a2 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -175,7 +175,7 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
type = cur->nd_opt_type;
do {
cur = ((void *)cur) + (cur->nd_opt_len << 3);
- } while(cur < end && cur->nd_opt_type != type);
+ } while (cur < end && cur->nd_opt_type != type);
return cur <= end && cur->nd_opt_type == type ? cur : NULL;
}
@@ -192,7 +192,7 @@ static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
return NULL;
do {
cur = ((void *)cur) + (cur->nd_opt_len << 3);
- } while(cur < end && !ndisc_is_useropt(cur));
+ } while (cur < end && !ndisc_is_useropt(cur));
return cur <= end && ndisc_is_useropt(cur) ? cur : NULL;
}
@@ -284,7 +284,6 @@ int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev,
}
return -EINVAL;
}
-
EXPORT_SYMBOL(ndisc_mc_map);
static u32 ndisc_hash(const void *pkey,
@@ -296,7 +295,7 @@ static u32 ndisc_hash(const void *pkey,
static int ndisc_constructor(struct neighbour *neigh)
{
- struct in6_addr *addr = (struct in6_addr*)&neigh->primary_key;
+ struct in6_addr *addr = (struct in6_addr *)&neigh->primary_key;
struct net_device *dev = neigh->dev;
struct inet6_dev *in6_dev;
struct neigh_parms *parms;
@@ -344,7 +343,7 @@ static int ndisc_constructor(struct neighbour *neigh)
static int pndisc_constructor(struct pneigh_entry *n)
{
- struct in6_addr *addr = (struct in6_addr*)&n->key;
+ struct in6_addr *addr = (struct in6_addr *)&n->key;
struct in6_addr maddr;
struct net_device *dev = n->dev;
@@ -357,7 +356,7 @@ static int pndisc_constructor(struct pneigh_entry *n)
static void pndisc_destructor(struct pneigh_entry *n)
{
- struct in6_addr *addr = (struct in6_addr*)&n->key;
+ struct in6_addr *addr = (struct in6_addr *)&n->key;
struct in6_addr maddr;
struct net_device *dev = n->dev;
@@ -1065,7 +1064,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
int optlen;
unsigned int pref = 0;
- __u8 * opt = (__u8 *)(ra_msg + 1);
+ __u8 *opt = (__u8 *)(ra_msg + 1);
optlen = (skb_tail_pointer(skb) - skb_transport_header(skb)) -
sizeof(struct ra_msg);
@@ -1319,7 +1318,7 @@ skip_linkparms:
continue;
if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
continue;
- rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3,
+ rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3,
&ipv6_hdr(skb)->saddr);
}
}
@@ -1352,7 +1351,7 @@ skip_routeinfo:
__be32 n;
u32 mtu;
- memcpy(&n, ((u8*)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu));
+ memcpy(&n, ((u8 *)(ndopts.nd_opts_mtu+1))+2, sizeof(mtu));
mtu = ntohl(n);
if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) {
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 2812816aabdc..6af874fc187f 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -40,18 +40,13 @@ config NFT_CHAIN_ROUTE_IPV6
fields such as the source, destination, flowlabel, hop-limit and
the packet mark.
-config NFT_CHAIN_NAT_IPV6
- depends on NF_TABLES_IPV6
- depends on NF_NAT_IPV6 && NFT_NAT
- tristate "IPv6 nf_tables nat chain support"
- help
- This option enables the "nat" chain for IPv6 in nf_tables. This
- chain type is used to perform Network Address Translation (NAT)
- packet transformations such as the source, destination address and
- source and destination ports.
+config NF_REJECT_IPV6
+ tristate "IPv6 packet rejection"
+ default m if NETFILTER_ADVANCED=n
config NFT_REJECT_IPV6
depends on NF_TABLES_IPV6
+ select NF_REJECT_IPV6
default NFT_REJECT
tristate
@@ -70,6 +65,34 @@ config NF_NAT_IPV6
forms of full Network Address Port Translation. This can be
controlled by iptables or nft.
+if NF_NAT_IPV6
+
+config NFT_CHAIN_NAT_IPV6
+ depends on NF_TABLES_IPV6
+ tristate "IPv6 nf_tables nat chain support"
+ help
+ This option enables the "nat" chain for IPv6 in nf_tables. This
+ chain type is used to perform Network Address Translation (NAT)
+ packet transformations such as the source, destination address and
+ source and destination ports.
+
+config NF_NAT_MASQUERADE_IPV6
+ tristate "IPv6 masquerade support"
+ help
+ This is the kernel functionality to provide NAT in the masquerade
+ flavour (automatic source address selection) for IPv6.
+
+config NFT_MASQ_IPV6
+ tristate "IPv6 masquerade support for nf_tables"
+ depends on NF_TABLES_IPV6
+ depends on NFT_MASQ
+ select NF_NAT_MASQUERADE_IPV6
+ help
+ This is the expression that provides IPv6 masquerading support for
+ nf_tables.
+
+endif # NF_NAT_IPV6
+
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
depends on INET && IPV6
@@ -190,6 +213,7 @@ config IP6_NF_FILTER
config IP6_NF_TARGET_REJECT
tristate "REJECT target support"
depends on IP6_NF_FILTER
+ select NF_REJECT_IPV6
default m if NETFILTER_ADVANCED=n
help
The REJECT target allows a filtering rule to specify that an ICMPv6
@@ -260,6 +284,7 @@ if IP6_NF_NAT
config IP6_NF_TARGET_MASQUERADE
tristate "MASQUERADE target support"
+ select NF_NAT_MASQUERADE_IPV6
help
Masquerading is a special case of NAT: all outgoing connections are
changed to seem to come from a particular interface's address, and
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index c3d3286db4bb..fbb25f01143c 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o
nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o
obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
+obj-$(CONFIG_NF_NAT_MASQUERADE_IPV6) += nf_nat_masquerade_ipv6.o
# defrag
nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
@@ -26,11 +27,15 @@ obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
# logging
obj-$(CONFIG_NF_LOG_IPV6) += nf_log_ipv6.o
+# reject
+obj-$(CONFIG_NF_REJECT_IPV6) += nf_reject_ipv6.o
+
# nf_tables
obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
+obj-$(CONFIG_NFT_MASQ_IPV6) += nft_masq_ipv6.o
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 3e4e92d5e157..7f9f45d829d2 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -19,33 +19,12 @@
#include <net/netfilter/nf_nat.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
+#include <net/netfilter/ipv6/nf_nat_masquerade.h>
static unsigned int
masquerade_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
- const struct nf_nat_range *range = par->targinfo;
- enum ip_conntrack_info ctinfo;
- struct in6_addr src;
- struct nf_conn *ct;
- struct nf_nat_range newrange;
-
- ct = nf_ct_get(skb, &ctinfo);
- NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
- ctinfo == IP_CT_RELATED_REPLY));
-
- if (ipv6_dev_get_saddr(dev_net(par->out), par->out,
- &ipv6_hdr(skb)->daddr, 0, &src) < 0)
- return NF_DROP;
-
- nfct_nat(ct)->masq_index = par->out->ifindex;
-
- newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
- newrange.min_addr.in6 = src;
- newrange.max_addr.in6 = src;
- newrange.min_proto = range->min_proto;
- newrange.max_proto = range->max_proto;
-
- return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+ return nf_nat_masquerade_ipv6(skb, par->targinfo, par->out);
}
static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
@@ -57,48 +36,6 @@ static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par)
return 0;
}
-static int device_cmp(struct nf_conn *ct, void *ifindex)
-{
- const struct nf_conn_nat *nat = nfct_nat(ct);
-
- if (!nat)
- return 0;
- if (nf_ct_l3num(ct) != NFPROTO_IPV6)
- return 0;
- return nat->masq_index == (int)(long)ifindex;
-}
-
-static int masq_device_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct net *net = dev_net(dev);
-
- if (event == NETDEV_DOWN)
- nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex, 0, 0);
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block masq_dev_notifier = {
- .notifier_call = masq_device_event,
-};
-
-static int masq_inet_event(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct inet6_ifaddr *ifa = ptr;
- struct netdev_notifier_info info;
-
- netdev_notifier_info_init(&info, ifa->idev->dev);
- return masq_device_event(this, event, &info);
-}
-
-static struct notifier_block masq_inet_notifier = {
- .notifier_call = masq_inet_event,
-};
-
static struct xt_target masquerade_tg6_reg __read_mostly = {
.name = "MASQUERADE",
.family = NFPROTO_IPV6,
@@ -115,17 +52,14 @@ static int __init masquerade_tg6_init(void)
int err;
err = xt_register_target(&masquerade_tg6_reg);
- if (err == 0) {
- register_netdevice_notifier(&masq_dev_notifier);
- register_inet6addr_notifier(&masq_inet_notifier);
- }
+ if (err == 0)
+ nf_nat_masquerade_ipv6_register_notifier();
return err;
}
static void __exit masquerade_tg6_exit(void)
{
- unregister_inet6addr_notifier(&masq_inet_notifier);
- unregister_netdevice_notifier(&masq_dev_notifier);
+ nf_nat_masquerade_ipv6_unregister_notifier();
xt_unregister_target(&masquerade_tg6_reg);
}
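The target body shrinks to a single call because everything the removed lines did -- asserting the conntrack state, picking a source address via ipv6_dev_get_saddr(), recording masq_index and building an NF_NAT_RANGE_MAP_IPS range -- moves into the shared helper, which nft_masq_ipv6 can now reuse. Reconstructed from the code removed above (the real version lives in net/ipv6/netfilter/nf_nat_masquerade_ipv6.c), the helper presumably reads:

/* Sketch of the shared helper, reconstructed from the code
 * removed above.
 */
unsigned int
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
		       const struct net_device *out)
{
	enum ip_conntrack_info ctinfo;
	struct in6_addr src;
	struct nf_conn *ct;
	struct nf_nat_range newrange;

	ct = nf_ct_get(skb, &ctinfo);
	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
			    ctinfo == IP_CT_RELATED_REPLY));

	if (ipv6_dev_get_saddr(dev_net(out), out,
			       &ipv6_hdr(skb)->daddr, 0, &src) < 0)
		return NF_DROP;

	nfct_nat(ct)->masq_index = out->ifindex;

	newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
	newrange.min_addr.in6 = src;
	newrange.max_addr.in6 = src;
	newrange.min_proto = range->min_proto;
	newrange.max_proto = range->max_proto;

	return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
}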
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 387d8b8fc18d..b0634ac996b7 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -30,222 +30,57 @@ static const struct xt_table nf_nat_ipv6_table = {
.af = NFPROTO_IPV6,
};
-static unsigned int alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
-{
- /* Force range to this IP; let proto decide mapping for
- * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
- */
- struct nf_nat_range range;
-
- range.flags = 0;
- pr_debug("Allocating NULL binding for %p (%pI6)\n", ct,
- HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6 :
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6);
-
- return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
-}
-
-static unsigned int nf_nat_rule_find(struct sk_buff *skb, unsigned int hooknum,
- const struct net_device *in,
- const struct net_device *out,
- struct nf_conn *ct)
+static unsigned int ip6table_nat_do_chain(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
- unsigned int ret;
- ret = ip6t_do_table(skb, hooknum, in, out, net->ipv6.ip6table_nat);
- if (ret == NF_ACCEPT) {
- if (!nf_nat_initialized(ct, HOOK2MANIP(hooknum)))
- ret = alloc_null_binding(ct, hooknum);
- }
- return ret;
+ return ip6t_do_table(skb, ops->hooknum, in, out, net->ipv6.ip6table_nat);
}
-static unsigned int
-nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int ip6table_nat_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- struct nf_conn_nat *nat;
- enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
- __be16 frag_off;
- int hdrlen;
- u8 nexthdr;
-
- ct = nf_ct_get(skb, &ctinfo);
- /* Can't track? It's not due to stress, or conntrack would
- * have dropped it. Hence it's the user's responsibilty to
- * packet filter it out, or implement conntrack/NAT for that
- * protocol. 8) --RR
- */
- if (!ct)
- return NF_ACCEPT;
-
- /* Don't try to NAT if this packet is not conntracked */
- if (nf_ct_is_untracked(ct))
- return NF_ACCEPT;
-
- nat = nf_ct_nat_ext_add(ct);
- if (nat == NULL)
- return NF_ACCEPT;
-
- switch (ctinfo) {
- case IP_CT_RELATED:
- case IP_CT_RELATED_REPLY:
- nexthdr = ipv6_hdr(skb)->nexthdr;
- hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
- &nexthdr, &frag_off);
-
- if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
- if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
- ops->hooknum,
- hdrlen))
- return NF_DROP;
- else
- return NF_ACCEPT;
- }
- /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
- case IP_CT_NEW:
- /* Seen it before? This can happen for loopback, retrans,
- * or local packets.
- */
- if (!nf_nat_initialized(ct, maniptype)) {
- unsigned int ret;
-
- ret = nf_nat_rule_find(skb, ops->hooknum, in, out, ct);
- if (ret != NF_ACCEPT)
- return ret;
- } else {
- pr_debug("Already setup manip %s for ct %p\n",
- maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
- ct);
- if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
- goto oif_changed;
- }
- break;
-
- default:
- /* ESTABLISHED */
- NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
- ctinfo == IP_CT_ESTABLISHED_REPLY);
- if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
- goto oif_changed;
- }
-
- return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
-
-oif_changed:
- nf_ct_kill_acct(ct, ctinfo, skb);
- return NF_DROP;
+ return nf_nat_ipv6_fn(ops, skb, in, out, ip6table_nat_do_chain);
}
-static unsigned int
-nf_nat_ipv6_in(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int ip6table_nat_in(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- unsigned int ret;
- struct in6_addr daddr = ipv6_hdr(skb)->daddr;
-
- ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
- skb_dst_drop(skb);
-
- return ret;
+ return nf_nat_ipv6_in(ops, skb, in, out, ip6table_nat_do_chain);
}
-static unsigned int
-nf_nat_ipv6_out(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int ip6table_nat_out(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
-#ifdef CONFIG_XFRM
- const struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- int err;
-#endif
- unsigned int ret;
-
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct ipv6hdr))
- return NF_ACCEPT;
-
- ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
-#ifdef CONFIG_XFRM
- if (ret != NF_DROP && ret != NF_STOLEN &&
- !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
- if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
- &ct->tuplehash[!dir].tuple.dst.u3) ||
- (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
- ct->tuplehash[dir].tuple.src.u.all !=
- ct->tuplehash[!dir].tuple.dst.u.all)) {
- err = nf_xfrm_me_harder(skb, AF_INET6);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
- }
-#endif
- return ret;
+ return nf_nat_ipv6_out(ops, skb, in, out, ip6table_nat_do_chain);
}
-static unsigned int
-nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int ip6table_nat_local_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- const struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- unsigned int ret;
- int err;
-
- /* root is playing with raw sockets. */
- if (skb->len < sizeof(struct ipv6hdr))
- return NF_ACCEPT;
-
- ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
- if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
- &ct->tuplehash[!dir].tuple.src.u3)) {
- err = ip6_route_me_harder(skb);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
-#ifdef CONFIG_XFRM
- else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
- ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
- ct->tuplehash[dir].tuple.dst.u.all !=
- ct->tuplehash[!dir].tuple.src.u.all) {
- err = nf_xfrm_me_harder(skb, AF_INET6);
- if (err < 0)
- ret = NF_DROP_ERR(err);
- }
-#endif
- }
- return ret;
+ return nf_nat_ipv6_local_fn(ops, skb, in, out, ip6table_nat_do_chain);
}
static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
/* Before packet filtering, change destination */
{
- .hook = nf_nat_ipv6_in,
+ .hook = ip6table_nat_in,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
@@ -253,7 +88,7 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
},
/* After packet filtering, change source */
{
- .hook = nf_nat_ipv6_out,
+ .hook = ip6table_nat_out,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
@@ -261,7 +96,7 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
},
/* Before packet filtering, change destination */
{
- .hook = nf_nat_ipv6_local_fn,
+ .hook = ip6table_nat_local_fn,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
@@ -269,7 +104,7 @@ static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = {
},
/* After packet filtering, change source */
{
- .hook = nf_nat_ipv6_fn,
+ .hook = ip6table_nat_fn,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 7b9a748c6bac..e70382e4dfb5 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -40,7 +40,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
zone = nf_ct_zone((struct nf_conn *)skb->nfct);
#endif
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (skb->nf_bridge &&
skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)
return IP6_DEFRAG_CONNTRACK_BRIDGE_IN + zone;
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
index fc8e49b2ff3e..c5812e1c1ffb 100644
--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
@@ -261,6 +261,205 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation);
+unsigned int
+nf_nat_ipv6_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct))
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn_nat *nat;
+ enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
+ __be16 frag_off;
+ int hdrlen;
+ u8 nexthdr;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ /* Can't track? It's not due to stress, or conntrack would
+ * have dropped it. Hence it's the user's responsibility to
+ * packet filter it out, or implement conntrack/NAT for that
+ * protocol. 8) --RR
+ */
+ if (!ct)
+ return NF_ACCEPT;
+
+ /* Don't try to NAT if this packet is not conntracked */
+ if (nf_ct_is_untracked(ct))
+ return NF_ACCEPT;
+
+ nat = nf_ct_nat_ext_add(ct);
+ if (nat == NULL)
+ return NF_ACCEPT;
+
+ switch (ctinfo) {
+ case IP_CT_RELATED:
+ case IP_CT_RELATED_REPLY:
+ nexthdr = ipv6_hdr(skb)->nexthdr;
+ hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
+ &nexthdr, &frag_off);
+
+ if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
+ if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
+ ops->hooknum,
+ hdrlen))
+ return NF_DROP;
+ else
+ return NF_ACCEPT;
+ }
+ /* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
+ case IP_CT_NEW:
+ /* Seen it before? This can happen for loopback, retrans,
+ * or local packets.
+ */
+ if (!nf_nat_initialized(ct, maniptype)) {
+ unsigned int ret;
+
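+ /* Run the NAT ruleset via the caller-supplied do_chain
+ * callback; this is the hook that lets iptables and
+ * nftables share this core function.
+ */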
+ ret = do_chain(ops, skb, in, out, ct);
+ if (ret != NF_ACCEPT)
+ return ret;
+
+ if (nf_nat_initialized(ct, HOOK2MANIP(ops->hooknum)))
+ break;
+
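+ /* No rule set up a mapping; allocate a null binding so
+ * the reply tuple is still reserved and replies are
+ * translated consistently.
+ */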
+ ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
+ if (ret != NF_ACCEPT)
+ return ret;
+ } else {
+ pr_debug("Already setup manip %s for ct %p\n",
+ maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
+ ct);
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+ goto oif_changed;
+ }
+ break;
+
+ default:
+ /* ESTABLISHED */
+ NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
+ ctinfo == IP_CT_ESTABLISHED_REPLY);
+ if (nf_nat_oif_changed(ops->hooknum, ctinfo, nat, out))
+ goto oif_changed;
+ }
+
+ return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+
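+/* The output interface changed under an existing mapping (e.g. after a
+ * route change); the NAT binding is stale, so kill the conntrack entry
+ * and drop the packet.
+ */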
+oif_changed:
+ nf_ct_kill_acct(ct, ctinfo, skb);
+ return NF_DROP;
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv6_fn);
+
+unsigned int
+nf_nat_ipv6_in(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct))
+{
+ unsigned int ret;
+ struct in6_addr daddr = ipv6_hdr(skb)->daddr;
+
+ ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
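+ /* If a DNAT rule rewrote the destination, any dst cached on the
+ * skb points at the old address; drop it so the stack performs a
+ * fresh route lookup.
+ */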
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
+ skb_dst_drop(skb);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv6_in);
+
+unsigned int
+nf_nat_ipv6_out(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct))
+{
+#ifdef CONFIG_XFRM
+ const struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ int err;
+#endif
+ unsigned int ret;
+
+ /* root is playing with raw sockets. */
+ if (skb->len < sizeof(struct ipv6hdr))
+ return NF_ACCEPT;
+
+ ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
+#ifdef CONFIG_XFRM
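+ /* If SNAT changed the source address or port, the packet may now
+ * match a different IPsec policy; redo the xfrm lookup in that case.
+ */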
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+ (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
+ if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
+ &ct->tuplehash[!dir].tuple.dst.u3) ||
+ (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
+ ct->tuplehash[dir].tuple.src.u.all !=
+ ct->tuplehash[!dir].tuple.dst.u.all)) {
+ err = nf_xfrm_me_harder(skb, AF_INET6);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+ }
+#endif
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv6_out);
+
+unsigned int
+nf_nat_ipv6_local_fn(const struct nf_hook_ops *ops, struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int (*do_chain)(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct))
+{
+ const struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ unsigned int ret;
+ int err;
+
+ /* root is playing with raw sockets. */
+ if (skb->len < sizeof(struct ipv6hdr))
+ return NF_ACCEPT;
+
+ ret = nf_nat_ipv6_fn(ops, skb, in, out, do_chain);
+ if (ret != NF_DROP && ret != NF_STOLEN &&
+ (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+
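+ /* DNAT on a locally generated packet changes the destination
+ * after the route was already chosen; re-route it so it leaves
+ * through the right interface.
+ */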
+ if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
+ &ct->tuplehash[!dir].tuple.src.u3)) {
+ err = ip6_route_me_harder(skb);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+#ifdef CONFIG_XFRM
+ else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
+ ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 &&
+ ct->tuplehash[dir].tuple.dst.u.all !=
+ ct->tuplehash[!dir].tuple.src.u.all) {
+ err = nf_xfrm_me_harder(skb, AF_INET6);
+ if (err < 0)
+ ret = NF_DROP_ERR(err);
+ }
+#endif
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_nat_ipv6_local_fn);
+
static int __init nf_nat_l3proto_ipv6_init(void)
{
int err;
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
new file mode 100644
index 000000000000..7745609665cd
--- /dev/null
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2011 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on Rusty Russell's IPv6 MASQUERADE target. Development of IPv6
+ * NAT funded by Astaro.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/netfilter/ipv6/nf_nat_masquerade.h>
+
+unsigned int
+nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
+ const struct net_device *out)
+{
+ enum ip_conntrack_info ctinfo;
+ struct in6_addr src;
+ struct nf_conn *ct;
+ struct nf_nat_range newrange;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
+ ctinfo == IP_CT_RELATED_REPLY));
+
+ if (ipv6_dev_get_saddr(dev_net(out), out,
+ &ipv6_hdr(skb)->daddr, 0, &src) < 0)
+ return NF_DROP;
+
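+ /* Remember the output interface so the conntrack entries can be
+ * flushed if that interface goes down (see masq_device_event).
+ */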
+ nfct_nat(ct)->masq_index = out->ifindex;
+
+ newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;
+ newrange.min_addr.in6 = src;
+ newrange.max_addr.in6 = src;
+ newrange.min_proto = range->min_proto;
+ newrange.max_proto = range->max_proto;
+
+ return nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+}
+EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
+
+static int device_cmp(struct nf_conn *ct, void *ifindex)
+{
+ const struct nf_conn_nat *nat = nfct_nat(ct);
+
+ if (!nat)
+ return 0;
+ if (nf_ct_l3num(ct) != NFPROTO_IPV6)
+ return 0;
+ return nat->masq_index == (int)(long)ifindex;
+}
+
+static int masq_device_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ const struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net *net = dev_net(dev);
+
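+ /* The interface went down: kill every conntrack entry that was
+ * masqueraded through it, since its address is no longer valid.
+ */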
+ if (event == NETDEV_DOWN)
+ nf_ct_iterate_cleanup(net, device_cmp,
+ (void *)(long)dev->ifindex, 0, 0);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block masq_dev_notifier = {
+ .notifier_call = masq_device_event,
+};
+
+static int masq_inet_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct inet6_ifaddr *ifa = ptr;
+ struct netdev_notifier_info info;
+
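+ /* Address notifications carry an inet6_ifaddr; wrap its device in
+ * a netdev notifier info so the handler above can be reused.
+ */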
+ netdev_notifier_info_init(&info, ifa->idev->dev);
+ return masq_device_event(this, event, &info);
+}
+
+static struct notifier_block masq_inet_notifier = {
+ .notifier_call = masq_inet_event,
+};
+
+static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
+
+void nf_nat_masquerade_ipv6_register_notifier(void)
+{
+ /* check if the notifier is already set */
+ if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
+ return;
+
+ register_netdevice_notifier(&masq_dev_notifier);
+ register_inet6addr_notifier(&masq_inet_notifier);
+}
+EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
+
+void nf_nat_masquerade_ipv6_unregister_notifier(void)
+{
+ /* check if the notifier still has clients */
+ if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
+ return;
+
+ unregister_inet6addr_notifier(&masq_inet_notifier);
+ unregister_netdevice_notifier(&masq_dev_notifier);
+}
+EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
new file mode 100644
index 000000000000..5f5f0438d74d
--- /dev/null
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -0,0 +1,163 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_checksum.h>
+#include <linux/netfilter_ipv6.h>
+
+void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
+{
+ struct sk_buff *nskb;
+ struct tcphdr otcph, *tcph;
+ unsigned int otcplen, hh_len;
+ int tcphoff, needs_ack;
+ const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
+ struct ipv6hdr *ip6h;
+#define DEFAULT_TOS_VALUE 0x0U
+ const __u8 tclass = DEFAULT_TOS_VALUE;
+ struct dst_entry *dst = NULL;
+ u8 proto;
+ __be16 frag_off;
+ struct flowi6 fl6;
+
+ if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
+ (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
+ pr_debug("addr is not unicast.\n");
+ return;
+ }
+
+ proto = oip6h->nexthdr;
+ tcphoff = ipv6_skip_exthdr(oldskb, ((u8 *)(oip6h + 1) - oldskb->data),
+ &proto, &frag_off);
+
+ if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
+ pr_debug("Cannot get TCP header.\n");
+ return;
+ }
+
+ otcplen = oldskb->len - tcphoff;
+
+ /* IP header checks: fragment, too short. */
+ if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
+ pr_debug("proto(%d) != IPPROTO_TCP, "
+ "or too short. otcplen = %d\n",
+ proto, otcplen);
+ return;
+ }
+
+ if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
+ BUG();
+
+ /* No RST for RST. */
+ if (otcph.rst) {
+ pr_debug("RST is set\n");
+ return;
+ }
+
+ /* Check checksum. */
+ if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
+ pr_debug("TCP checksum is invalid\n");
+ return;
+ }
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_TCP;
+ fl6.saddr = oip6h->daddr;
+ fl6.daddr = oip6h->saddr;
+ fl6.fl6_sport = otcph.dest;
+ fl6.fl6_dport = otcph.source;
+ security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst == NULL || dst->error) {
+ dst_release(dst);
+ return;
+ }
+ dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+ if (IS_ERR(dst))
+ return;
+
+ hh_len = (dst->dev->hard_header_len + 15) & ~15;
+ nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
+ + sizeof(struct tcphdr) + dst->trailer_len,
+ GFP_ATOMIC);
+
+ if (!nskb) {
+ net_dbg_ratelimited("cannot alloc skb\n");
+ dst_release(dst);
+ return;
+ }
+
+ skb_dst_set(nskb, dst);
+
+ skb_reserve(nskb, hh_len + dst->header_len);
+
+ skb_put(nskb, sizeof(struct ipv6hdr));
+ skb_reset_network_header(nskb);
+ ip6h = ipv6_hdr(nskb);
+ ip6_flow_hdr(ip6h, tclass, 0);
+ ip6h->hop_limit = ip6_dst_hoplimit(dst);
+ ip6h->nexthdr = IPPROTO_TCP;
+ ip6h->saddr = oip6h->daddr;
+ ip6h->daddr = oip6h->saddr;
+
+ skb_reset_transport_header(nskb);
+ tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
+ /* Truncate to length (no data) */
+ tcph->doff = sizeof(struct tcphdr)/4;
+ tcph->source = otcph.dest;
+ tcph->dest = otcph.source;
+
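+ /* Per RFC 793: if the offending segment carried an ACK, the RST
+ * takes its sequence number from that ACK field; otherwise SEQ is
+ * zero and we ACK everything the segment occupied.
+ */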
+ if (otcph.ack) {
+ needs_ack = 0;
+ tcph->seq = otcph.ack_seq;
+ tcph->ack_seq = 0;
+ } else {
+ needs_ack = 1;
+ tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
+ + otcplen - (otcph.doff<<2));
+ tcph->seq = 0;
+ }
+
+ /* Reset flags */
+ ((u_int8_t *)tcph)[13] = 0;
+ tcph->rst = 1;
+ tcph->ack = needs_ack;
+ tcph->window = 0;
+ tcph->urg_ptr = 0;
+ tcph->check = 0;
+
+ /* Adjust TCP checksum */
+ tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
+ &ipv6_hdr(nskb)->daddr,
+ sizeof(struct tcphdr), IPPROTO_TCP,
+ csum_partial(tcph,
+ sizeof(struct tcphdr), 0));
+
+ nf_ct_attach(nskb, oldskb);
+
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ /* If we use ip6_local_out for bridged traffic, the MAC source on
+ * the RST will be ours, instead of the destination's. This confuses
+ * some routers/firewalls, and they drop the packet. So we need to
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+ if (oldskb->nf_bridge) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
+ nskb->dev = oldskb->nf_bridge->physindev;
+ nskb->protocol = htons(ETH_P_IPV6);
+ ip6h->payload_len = htons(sizeof(struct tcphdr));
+ if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
+ oeth->h_source, oeth->h_dest, nskb->len) < 0) {
+ kfree_skb(nskb);
+ return;
+ }
+ dev_queue_xmit(nskb);
+ } else
+#endif
+ ip6_local_out(nskb);
+}
+EXPORT_SYMBOL_GPL(nf_send_reset6);
diff --git a/net/ipv6/netfilter/nft_chain_nat_ipv6.c b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
index d189fcb437fe..1c4b75dd425b 100644
--- a/net/ipv6/netfilter/nft_chain_nat_ipv6.c
+++ b/net/ipv6/netfilter/nft_chain_nat_ipv6.c
@@ -24,144 +24,53 @@
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/ipv6.h>
-/*
- * IPv6 NAT chains
- */
-
-static unsigned int nf_nat_ipv6_fn(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int nft_nat_do_chain(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ struct nf_conn *ct)
{
- enum ip_conntrack_info ctinfo;
- struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_nat *nat;
- enum nf_nat_manip_type maniptype = HOOK2MANIP(ops->hooknum);
- __be16 frag_off;
- int hdrlen;
- u8 nexthdr;
struct nft_pktinfo pkt;
- unsigned int ret;
-
- if (ct == NULL || nf_ct_is_untracked(ct))
- return NF_ACCEPT;
-
- nat = nf_ct_nat_ext_add(ct);
- if (nat == NULL)
- return NF_ACCEPT;
-
- switch (ctinfo) {
- case IP_CT_RELATED:
- case IP_CT_RELATED + IP_CT_IS_REPLY:
- nexthdr = ipv6_hdr(skb)->nexthdr;
- hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
- &nexthdr, &frag_off);
-
- if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
- if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo,
- ops->hooknum,
- hdrlen))
- return NF_DROP;
- else
- return NF_ACCEPT;
- }
- /* Fall through */
- case IP_CT_NEW:
- if (nf_nat_initialized(ct, maniptype))
- break;
-
- nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
- ret = nft_do_chain(&pkt, ops);
- if (ret != NF_ACCEPT)
- return ret;
- if (!nf_nat_initialized(ct, maniptype)) {
- ret = nf_nat_alloc_null_binding(ct, ops->hooknum);
- if (ret != NF_ACCEPT)
- return ret;
- }
- default:
- break;
- }
+ nft_set_pktinfo_ipv6(&pkt, ops, skb, in, out);
- return nf_nat_packet(ct, ctinfo, ops->hooknum, skb);
+ return nft_do_chain(&pkt, ops);
}
-static unsigned int nf_nat_ipv6_prerouting(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int nft_nat_ipv6_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- struct in6_addr daddr = ipv6_hdr(skb)->daddr;
- unsigned int ret;
-
- ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr))
- skb_dst_drop(skb);
-
- return ret;
+ return nf_nat_ipv6_fn(ops, skb, in, out, nft_nat_do_chain);
}
-static unsigned int nf_nat_ipv6_postrouting(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int nft_nat_ipv6_in(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- enum ip_conntrack_info ctinfo __maybe_unused;
- const struct nf_conn *ct __maybe_unused;
- unsigned int ret;
-
- ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
-#ifdef CONFIG_XFRM
- if (ret != NF_DROP && ret != NF_STOLEN &&
- !(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-
- if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
- &ct->tuplehash[!dir].tuple.dst.u3) ||
- (ct->tuplehash[dir].tuple.src.u.all !=
- ct->tuplehash[!dir].tuple.dst.u.all))
- if (nf_xfrm_me_harder(skb, AF_INET6) < 0)
- ret = NF_DROP;
- }
-#endif
- return ret;
+ return nf_nat_ipv6_in(ops, skb, in, out, nft_nat_do_chain);
}
-static unsigned int nf_nat_ipv6_output(const struct nf_hook_ops *ops,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int nft_nat_ipv6_out(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
- enum ip_conntrack_info ctinfo;
- const struct nf_conn *ct;
- unsigned int ret;
-
- ret = nf_nat_ipv6_fn(ops, skb, in, out, okfn);
- if (ret != NF_DROP && ret != NF_STOLEN &&
- (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ return nf_nat_ipv6_out(ops, skb, in, out, nft_nat_do_chain);
+}
- if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
- &ct->tuplehash[!dir].tuple.src.u3)) {
- if (ip6_route_me_harder(skb))
- ret = NF_DROP;
- }
-#ifdef CONFIG_XFRM
- else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
- ct->tuplehash[dir].tuple.dst.u.all !=
- ct->tuplehash[!dir].tuple.src.u.all)
- if (nf_xfrm_me_harder(skb, AF_INET6))
- ret = NF_DROP;
-#endif
- }
- return ret;
+static unsigned int nft_nat_ipv6_local_fn(const struct nf_hook_ops *ops,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return nf_nat_ipv6_local_fn(ops, skb, in, out, nft_nat_do_chain);
}
static const struct nf_chain_type nft_chain_nat_ipv6 = {
@@ -174,10 +83,10 @@ static const struct nf_chain_type nft_chain_nat_ipv6 = {
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_LOCAL_IN),
.hooks = {
- [NF_INET_PRE_ROUTING] = nf_nat_ipv6_prerouting,
- [NF_INET_POST_ROUTING] = nf_nat_ipv6_postrouting,
- [NF_INET_LOCAL_OUT] = nf_nat_ipv6_output,
- [NF_INET_LOCAL_IN] = nf_nat_ipv6_fn,
+ [NF_INET_PRE_ROUTING] = nft_nat_ipv6_in,
+ [NF_INET_POST_ROUTING] = nft_nat_ipv6_out,
+ [NF_INET_LOCAL_OUT] = nft_nat_ipv6_local_fn,
+ [NF_INET_LOCAL_IN] = nft_nat_ipv6_fn,
},
};
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
new file mode 100644
index 000000000000..556262f40761
--- /dev/null
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nft_masq.h>
+#include <net/netfilter/ipv6/nf_nat_masquerade.h>
+
+static void nft_masq_ipv6_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_masq *priv = nft_expr_priv(expr);
+ struct nf_nat_range range;
+ unsigned int verdict;
+
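+ /* Only the flags are configurable here; the source address is
+ * chosen per packet from the output device. Zero the range first
+ * so the NAT core does not read uninitialized protocol limits.
+ */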
+ memset(&range, 0, sizeof(range));
+ range.flags = priv->flags;
+
+ verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out);
+
+ data[NFT_REG_VERDICT].verdict = verdict;
+}
+
+static struct nft_expr_type nft_masq_ipv6_type;
+static const struct nft_expr_ops nft_masq_ipv6_ops = {
+ .type = &nft_masq_ipv6_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_masq)),
+ .eval = nft_masq_ipv6_eval,
+ .init = nft_masq_init,
+ .dump = nft_masq_dump,
+};
+
+static struct nft_expr_type nft_masq_ipv6_type __read_mostly = {
+ .family = NFPROTO_IPV6,
+ .name = "masq",
+ .ops = &nft_masq_ipv6_ops,
+ .policy = nft_masq_policy,
+ .maxattr = NFTA_MASQ_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_masq_ipv6_module_init(void)
+{
+ int ret;
+
+ ret = nft_register_expr(&nft_masq_ipv6_type);
+ if (ret < 0)
+ return ret;
+
+ nf_nat_masquerade_ipv6_register_notifier();
+
+ return ret;
+}
+
+static void __exit nft_masq_ipv6_module_exit(void)
+{
+ nft_unregister_expr(&nft_masq_ipv6_type);
+ nf_nat_masquerade_ipv6_unregister_notifier();
+}
+
+module_init(nft_masq_ipv6_module_init);
+module_exit(nft_masq_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "masq");
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 5ec867e4a8b7..fc24c390af05 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -35,7 +35,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
if (found_rhdr)
return offset;
break;
- default :
+ default:
return offset;
}
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 2d6f860e5c1e..1752cd0b4882 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -8,7 +8,7 @@
* except it reports the sockets in the INET6 address family.
*
* Authors: David S. Miller (davem@caip.rutgers.edu)
- * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+ * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index e048cf1bb6a2..e3770abe688a 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -51,6 +51,7 @@ EXPORT_SYMBOL(inet6_del_protocol);
#endif
const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS] __read_mostly;
+EXPORT_SYMBOL(inet6_offloads);
int inet6_add_offload(const struct net_offload *prot, unsigned char protocol)
{
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 39d44226e402..896af8807979 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -889,7 +889,7 @@ back_from_confirm:
else {
lock_sock(sk);
err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
- len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info*)dst,
+ len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst,
msg->msg_flags, dontfrag);
if (err)
@@ -902,7 +902,7 @@ done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
- return err<0?err:len;
+ return err < 0 ? err : len;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
@@ -1045,7 +1045,7 @@ static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
struct raw6_sock *rp = raw6_sk(sk);
int val, len;
- if (get_user(len,optlen))
+ if (get_user(len, optlen))
return -EFAULT;
switch (optname) {
@@ -1069,7 +1069,7 @@ static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
if (put_user(len, optlen))
return -EFAULT;
- if (copy_to_user(optval,&val,len))
+ if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index c6557d9f7808..1a157ca2ebc1 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -62,13 +62,12 @@
static const char ip6_frag_cache_name[] = "ip6-frags";
-struct ip6frag_skb_cb
-{
+struct ip6frag_skb_cb {
struct inet6_skb_parm h;
int offset;
};
-#define FRAG6_CB(skb) ((struct ip6frag_skb_cb*)((skb)->cb))
+#define FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb))
static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
{
@@ -289,7 +288,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
goto found;
}
prev = NULL;
- for(next = fq->q.fragments; next != NULL; next = next->next) {
+ for (next = fq->q.fragments; next != NULL; next = next->next) {
if (FRAG6_CB(next)->offset >= offset)
break; /* bingo! */
prev = next;
@@ -529,7 +528,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
/* Jumbo payload inhibits frag. header */
- if (hdr->payload_len==0)
+ if (hdr->payload_len == 0)
goto fail_hdr;
if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
@@ -575,8 +574,7 @@ fail_hdr:
return -1;
}
-static const struct inet6_protocol frag_protocol =
-{
+static const struct inet6_protocol frag_protocol = {
.handler = ipv6_frag_rcv,
.flags = INET6_PROTO_NOPOLICY,
};
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bafde82324c5..a318dd89b6d9 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -812,7 +812,7 @@ out:
}
-struct dst_entry * ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
int flags)
{
return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
@@ -842,7 +842,6 @@ struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
return NULL;
}
-
EXPORT_SYMBOL(rt6_lookup);
/* ip6_ins_rt is called with FREE table->tb6_lock.
@@ -1023,7 +1022,7 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
}
-struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
+struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
struct flowi6 *fl6)
{
int flags = 0;
@@ -1040,7 +1039,6 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
}
-
EXPORT_SYMBOL(ip6_route_output);
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
@@ -1145,7 +1143,7 @@ static void ip6_link_failure(struct sk_buff *skb)
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
{
- struct rt6_info *rt6 = (struct rt6_info*)dst;
+ struct rt6_info *rt6 = (struct rt6_info *)dst;
dst_confirm(dst);
if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
@@ -1920,7 +1918,7 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
return NULL;
read_lock_bh(&table->tb6_lock);
- fn = fib6_locate(&table->tb6_root, prefix ,prefixlen, NULL, 0);
+ fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
if (!fn)
goto out;
@@ -1979,7 +1977,7 @@ struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_dev
return NULL;
read_lock_bh(&table->tb6_lock);
- for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
+ for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
if (dev == rt->dst.dev &&
((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
ipv6_addr_equal(&rt->rt6i_gateway, addr))
@@ -2064,7 +2062,7 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
struct in6_rtmsg rtmsg;
int err;
- switch(cmd) {
+ switch (cmd) {
case SIOCADDRT: /* Add a route */
case SIOCDELRT: /* Delete a route */
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -2187,7 +2185,7 @@ int ip6_route_get_saddr(struct net *net,
unsigned int prefs,
struct in6_addr *saddr)
{
- struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
+ struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
int err = 0;
if (rt->rt6i_prefsrc.plen)
*saddr = rt->rt6i_prefsrc.addr;
@@ -2482,7 +2480,7 @@ beginning:
return last_err;
}
-static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh)
+static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct fib6_config cfg;
int err;
@@ -2497,7 +2495,7 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh)
return ip6_route_del(&cfg);
}
-static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh)
+static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct fib6_config cfg;
int err;
@@ -2689,7 +2687,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg)
prefix, 0, NLM_F_MULTI);
}
-static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
+static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[RTA_MAX+1];
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 6163f851dc01..6eab37cf5345 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -812,9 +812,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
const struct ipv6hdr *iph6 = ipv6_hdr(skb);
u8 tos = tunnel->parms.iph.tos;
__be16 df = tiph->frag_off;
- struct rtable *rt; /* Route to the other host */
- struct net_device *tdev; /* Device to other host */
- unsigned int max_headroom; /* The extra header space needed */
+ struct rtable *rt; /* Route to the other host */
+ struct net_device *tdev; /* Device to other host */
+ unsigned int max_headroom; /* The extra header space needed */
__be32 dst = tiph->daddr;
struct flowi4 fl4;
int mtu;
@@ -822,6 +822,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
int addr_type;
u8 ttl;
int err;
+ u8 protocol = IPPROTO_IPV6;
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
if (skb->protocol != htons(ETH_P_IPV6))
goto tx_error;
@@ -911,8 +913,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
goto tx_error;
}
+ skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
+ if (IS_ERR(skb)) {
+ ip_rt_put(rt);
+ goto out;
+ }
+
if (df) {
- mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
+ mtu = dst_mtu(&rt->dst) - t_hlen;
if (mtu < 68) {
dev->stats.collisions++;
@@ -947,7 +955,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
- max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);
+ max_headroom = LL_RESERVED_SPACE(tdev) + t_hlen;
if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
(skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
@@ -969,14 +977,15 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
ttl = iph6->hop_limit;
tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
- skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
- if (IS_ERR(skb)) {
+ if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) {
ip_rt_put(rt);
- goto out;
+ goto tx_error;
}
+ skb_set_inner_ipproto(skb, IPPROTO_IPV6);
+
err = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr,
- IPPROTO_IPV6, tos, ttl, df,
+ protocol, tos, ttl, df,
!net_eq(tunnel->net, dev_net(dev)));
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return NETDEV_TX_OK;
@@ -999,6 +1008,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (IS_ERR(skb))
goto out;
+ skb_set_inner_ipproto(skb, IPPROTO_IPIP);
+
ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
return NETDEV_TX_OK;
out:
@@ -1059,8 +1070,10 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
if (tdev) {
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+
dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
- dev->mtu = tdev->mtu - sizeof(struct iphdr);
+ dev->mtu = tdev->mtu - t_hlen;
if (dev->mtu < IPV6_MIN_MTU)
dev->mtu = IPV6_MIN_MTU;
}
@@ -1123,7 +1136,7 @@ static int ipip6_tunnel_update_6rd(struct ip_tunnel *t,
#endif
static int
-ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
+ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int err = 0;
struct ip_tunnel_parm p;
@@ -1307,7 +1320,10 @@ done:
static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
{
- if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - sizeof(struct iphdr))
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+
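+ /* t_hlen covers the outer IPv4 header plus any additional
+ * encapsulation header (e.g. FOU/GUE) configured on the tunnel.
+ */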
+ if (new_mtu < IPV6_MIN_MTU || new_mtu > 0xFFF8 - t_hlen)
return -EINVAL;
dev->mtu = new_mtu;
return 0;
@@ -1338,14 +1354,17 @@ static void ipip6_dev_free(struct net_device *dev)
static void ipip6_tunnel_setup(struct net_device *dev)
{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+
dev->netdev_ops = &ipip6_netdev_ops;
- dev->destructor = ipip6_dev_free;
+ dev->destructor = ipip6_dev_free;
dev->type = ARPHRD_SIT;
- dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
- dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr);
+ dev->hard_header_len = LL_MAX_HEADER + t_hlen;
+ dev->mtu = ETH_DATA_LEN - t_hlen;
dev->flags = IFF_NOARP;
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ netif_keep_dst(dev);
dev->iflink = 0;
dev->addr_len = 4;
dev->features |= NETIF_F_LLTX;
@@ -1466,6 +1485,40 @@ static void ipip6_netlink_parms(struct nlattr *data[],
}
+/* This function returns true when ENCAP attributes are present in the nl msg */
+static bool ipip6_netlink_encap_parms(struct nlattr *data[],
+ struct ip_tunnel_encap *ipencap)
+{
+ bool ret = false;
+
+ memset(ipencap, 0, sizeof(*ipencap));
+
+ if (!data)
+ return ret;
+
+ if (data[IFLA_IPTUN_ENCAP_TYPE]) {
+ ret = true;
+ ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
+ }
+
+ if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
+ ret = true;
+ ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
+ }
+
+ if (data[IFLA_IPTUN_ENCAP_SPORT]) {
+ ret = true;
+ ipencap->sport = nla_get_u16(data[IFLA_IPTUN_ENCAP_SPORT]);
+ }
+
+ if (data[IFLA_IPTUN_ENCAP_DPORT]) {
+ ret = true;
+ ipencap->dport = nla_get_u16(data[IFLA_IPTUN_ENCAP_DPORT]);
+ }
+
+ return ret;
+}
+
#ifdef CONFIG_IPV6_SIT_6RD
/* This function returns true when 6RD attributes are present in the nl msg */
static bool ipip6_netlink_6rd_parms(struct nlattr *data[],
@@ -1509,12 +1562,20 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
{
struct net *net = dev_net(dev);
struct ip_tunnel *nt;
+ struct ip_tunnel_encap ipencap;
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd ip6rd;
#endif
int err;
nt = netdev_priv(dev);
+
+ if (ipip6_netlink_encap_parms(data, &ipencap)) {
+ err = ip_tunnel_encap_setup(nt, &ipencap);
+ if (err < 0)
+ return err;
+ }
+
ipip6_netlink_parms(data, &nt->parms);
if (ipip6_tunnel_locate(net, &nt->parms, 0))
@@ -1537,15 +1598,23 @@ static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm p;
+ struct ip_tunnel_encap ipencap;
struct net *net = t->net;
struct sit_net *sitn = net_generic(net, sit_net_id);
#ifdef CONFIG_IPV6_SIT_6RD
struct ip_tunnel_6rd ip6rd;
#endif
+ int err;
if (dev == sitn->fb_tunnel_dev)
return -EINVAL;
+ if (ipip6_netlink_encap_parms(data, &ipencap)) {
+ err = ip_tunnel_encap_setup(t, &ipencap);
+ if (err < 0)
+ return err;
+ }
+
ipip6_netlink_parms(data, &p);
if (((dev->flags & IFF_POINTOPOINT) && !p.iph.daddr) ||
@@ -1599,6 +1668,14 @@ static size_t ipip6_get_size(const struct net_device *dev)
/* IFLA_IPTUN_6RD_RELAY_PREFIXLEN */
nla_total_size(2) +
#endif
+ /* IFLA_IPTUN_ENCAP_TYPE */
+ nla_total_size(2) +
+ /* IFLA_IPTUN_ENCAP_FLAGS */
+ nla_total_size(2) +
+ /* IFLA_IPTUN_ENCAP_SPORT */
+ nla_total_size(2) +
+ /* IFLA_IPTUN_ENCAP_DPORT */
+ nla_total_size(2) +
0;
}
@@ -1630,6 +1707,16 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev)
goto nla_put_failure;
#endif
+ if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE,
+ tunnel->encap.type) ||
+ nla_put_u16(skb, IFLA_IPTUN_ENCAP_SPORT,
+ tunnel->encap.sport) ||
+ nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT,
+ tunnel->encap.dport) ||
+ nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS,
+ tunnel->encap.flags))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
@@ -1651,6 +1738,10 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_6RD_PREFIXLEN] = { .type = NLA_U16 },
[IFLA_IPTUN_6RD_RELAY_PREFIXLEN] = { .type = NLA_U16 },
#endif
+ [IFLA_IPTUN_ENCAP_TYPE] = { .type = NLA_U16 },
+ [IFLA_IPTUN_ENCAP_FLAGS] = { .type = NLA_U16 },
+ [IFLA_IPTUN_ENCAP_SPORT] = { .type = NLA_U16 },
+ [IFLA_IPTUN_ENCAP_DPORT] = { .type = NLA_U16 },
};
static void ipip6_dellink(struct net_device *dev, struct list_head *head)
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 83cea1d39466..9a2838e93cc5 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -24,7 +24,7 @@
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
-static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS];
+static u32 syncookie6_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly;
/* RFC 2460, Section 8.3:
* [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
@@ -203,7 +203,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq->ir_num = ntohs(th->dest);
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
- if (ipv6_opt_accepted(sk, skb) ||
+ if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
atomic_inc(&skb->users);
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 0c56c93619e0..c5c10fafcfe2 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -16,6 +16,8 @@
#include <net/addrconf.h>
#include <net/inet_frag.h>
+static int one = 1;
+
static struct ctl_table ipv6_table_template[] = {
{
.procname = "bindv6only",
@@ -63,6 +65,14 @@ static struct ctl_table ipv6_rotable[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "mld_qrv",
+ .data = &sysctl_mld_qrv,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one
+ },
{ }
};
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 29964c3d363c..132bac137aed 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -93,13 +93,16 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- const struct rt6_info *rt = (const struct rt6_info *)dst;
- dst_hold(dst);
- sk->sk_rx_dst = dst;
- inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
- if (rt->rt6i_node)
- inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+ if (dst) {
+ const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+ dst_hold(dst);
+ sk->sk_rx_dst = dst;
+ inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+ if (rt->rt6i_node)
+ inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
+ }
}
static void tcp_v6_hash(struct sock *sk)
@@ -738,8 +741,9 @@ static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
ireq->ir_iif = inet6_iif(skb);
- if (!TCP_SKB_CB(skb)->when &&
- (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
+ if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
+ (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
+ np->rxopt.bits.rxinfo ||
np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
np->rxopt.bits.rxohlim || np->repflow)) {
atomic_inc(&skb->users);
@@ -1364,7 +1368,7 @@ ipv6_pktoptions:
np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
if (np->repflow)
np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
- if (ipv6_opt_accepted(sk, opt_skb)) {
+ if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
skb_set_owner_r(opt_skb, sk);
opt_skb = xchg(&np->pktoptions, opt_skb);
} else {
@@ -1408,11 +1412,19 @@ static int tcp_v6_rcv(struct sk_buff *skb)
th = tcp_hdr(skb);
hdr = ipv6_hdr(skb);
+ /* This is tricky: we move IP6CB into its correct location inside
+ * TCP_SKB_CB(); barrier() makes sure the compiler won't play
+ * aliasing games behind our back.
+ */
+ memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
+ sizeof(struct inet6_skb_parm));
+ barrier();
+
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb->len - th->doff*4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
- TCP_SKB_CB(skb)->when = 0;
+ TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+ TCP_SKB_CB(skb)->tcp_tw_isn = 0;
TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
TCP_SKB_CB(skb)->sacked = 0;
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 01b0ff9a0c2c..c1ab77105b4c 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -15,54 +15,17 @@
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
-static int tcp_v6_gso_send_check(struct sk_buff *skb)
-{
- const struct ipv6hdr *ipv6h;
- struct tcphdr *th;
-
- if (!pskb_may_pull(skb, sizeof(*th)))
- return -EINVAL;
-
- ipv6h = ipv6_hdr(skb);
- th = tcp_hdr(skb);
-
- th->check = 0;
- skb->ip_summed = CHECKSUM_PARTIAL;
- __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
- return 0;
-}
-
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
- const struct ipv6hdr *iph = skb_gro_network_header(skb);
- __wsum wsum;
-
/* Don't bother verifying checksum if we're going to flush anyway. */
- if (NAPI_GRO_CB(skb)->flush)
- goto skip_csum;
-
- wsum = NAPI_GRO_CB(skb)->csum;
-
- switch (skb->ip_summed) {
- case CHECKSUM_NONE:
- wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
- wsum);
-
- /* fall through */
-
- case CHECKSUM_COMPLETE:
- if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
- wsum)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- }
-
+ if (!NAPI_GRO_CB(skb)->flush &&
+ skb_gro_checksum_validate(skb, IPPROTO_TCP,
+ ip6_gro_compute_pseudo)) {
NAPI_GRO_CB(skb)->flush = 1;
return NULL;
}
-skip_csum:
return tcp_gro_receive(head, skb);
}
@@ -78,10 +41,32 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
return tcp_gro_complete(skb);
}
+struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ struct tcphdr *th;
+
+ if (!pskb_may_pull(skb, sizeof(*th)))
+ return ERR_PTR(-EINVAL);
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct tcphdr *th = tcp_hdr(skb);
+
+ /* Set up the pseudo header; the stack is normally expected
+ * to have done this already.
+ */
+
+ th->check = 0;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
+ }
+
+ return tcp_gso_segment(skb, features);
+}
static const struct net_offload tcpv6_offload = {
.callbacks = {
- .gso_send_check = tcp_v6_gso_send_check,
- .gso_segment = tcp_gso_segment,
+ .gso_segment = tcp6_gso_segment,
.gro_receive = tcp6_gro_receive,
.gro_complete = tcp6_gro_complete,
},
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 2c4e4c5c7614..3c758007b327 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -15,7 +15,7 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors Mitsuru KANDA <mk@linux-ipv6.org>
- * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+ * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
*/
#define pr_fmt(fmt) "IPv6: " fmt
@@ -64,7 +64,6 @@ err:
return ret;
}
-
EXPORT_SYMBOL(xfrm6_tunnel_register);
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
@@ -92,7 +91,6 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
return ret;
}
-
EXPORT_SYMBOL(xfrm6_tunnel_deregister);
#define for_each_tunnel_rcu(head, handler) \
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4836af8f582d..f6ba535b6feb 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -243,7 +243,7 @@ begin:
goto exact_match;
} else if (score == badness && reuseport) {
matches++;
- if (((u64)hash * matches) >> 32 == 0)
+ if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
@@ -323,7 +323,7 @@ begin:
}
} else if (score == badness && reuseport) {
matches++;
- if (((u64)hash * matches) >> 32 == 0)
+ if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
@@ -373,8 +373,8 @@ EXPORT_SYMBOL_GPL(udp6_lib_lookup);
/*
- * This should be easy, if there is something there we
- * return it, otherwise we block.
+ * This should be easy, if there is something there we
+ * return it, otherwise we block.
*/
int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
@@ -530,7 +530,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct in6_addr *saddr = &hdr->saddr;
const struct in6_addr *daddr = &hdr->daddr;
- struct udphdr *uh = (struct udphdr*)(skb->data+offset);
+ struct udphdr *uh = (struct udphdr *)(skb->data+offset);
struct sock *sk;
int err;
struct net *net = dev_net(skb->dev);
@@ -596,7 +596,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
static __inline__ void udpv6_err(struct sk_buff *skb,
struct inet6_skb_parm *opt, u8 type,
- u8 code, int offset, __be32 info )
+ u8 code, int offset, __be32 info)
{
__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
@@ -891,6 +891,10 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
goto csum_error;
}
+ if (udp_sk(sk)->convert_csum && uh->check && !IS_UDPLITE(sk))
+ skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+ ip6_compute_pseudo);
+
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
@@ -960,10 +964,10 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
}
/**
- * udp6_hwcsum_outgoing - handle outgoing HW checksumming
- * @sk: socket we are sending on
- * @skb: sk_buff containing the filled-in UDP header
- * (checksum field must be zeroed out)
+ * udp6_hwcsum_outgoing - handle outgoing HW checksumming
+ * @sk: socket we are sending on
+ * @skb: sk_buff containing the filled-in UDP header
+ * (checksum field must be zeroed out)
*/
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
const struct in6_addr *saddr,
@@ -1294,7 +1298,7 @@ do_append_data:
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
- (struct rt6_info*)dst,
+ (struct rt6_info *)dst,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
if (err)
udp_v6_flush_pending_frames(sk);
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 0ae3d98f83e0..6b8f543f6ac6 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -10,34 +10,13 @@
* UDPv6 GSO support
*/
#include <linux/skbuff.h>
+#include <linux/netdevice.h>
#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
-static int udp6_ufo_send_check(struct sk_buff *skb)
-{
- const struct ipv6hdr *ipv6h;
- struct udphdr *uh;
-
- if (!pskb_may_pull(skb, sizeof(*uh)))
- return -EINVAL;
-
- if (likely(!skb->encapsulation)) {
- ipv6h = ipv6_hdr(skb);
- uh = udp_hdr(skb);
-
- uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
- IPPROTO_UDP, 0);
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- skb->ip_summed = CHECKSUM_PARTIAL;
- }
-
- return 0;
-}
-
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
{
@@ -48,7 +27,6 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
u8 *packet_start, *prevhdr;
u8 nexthdr;
u8 frag_hdr_sz = sizeof(struct frag_hdr);
- int offset;
__wsum csum;
int tnl_hlen;
@@ -80,15 +58,29 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
if (skb->encapsulation && skb_shinfo(skb)->gso_type &
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
- segs = skb_udp_tunnel_segment(skb, features);
+ segs = skb_udp_tunnel_segment(skb, features, true);
else {
+ const struct ipv6hdr *ipv6h;
+ struct udphdr *uh;
+
+ if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+ goto out;
+
/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
* do checksum of UDP packets sent as multiple IP fragments.
*/
- offset = skb_checksum_start_offset(skb);
- csum = skb_checksum(skb, offset, skb->len - offset, 0);
- offset += skb->csum_offset;
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+
+ uh = udp_hdr(skb);
+ ipv6h = ipv6_hdr(skb);
+
+ uh->check = 0;
+ csum = skb_checksum(skb, 0, skb->len, 0);
+ uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
+ &ipv6h->daddr, csum);
+
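+ /* A computed checksum of zero must be transmitted as all ones
+ * (RFC 768/2460), since zero means "no checksum" on the wire.
+ */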
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
skb->ip_summed = CHECKSUM_NONE;
/* Check if there is enough headroom to insert fragment header. */
@@ -127,10 +119,52 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
out:
return segs;
}
+
+static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_gro_udphdr(skb);
+
+ if (unlikely(!uh))
+ goto flush;
+
+ /* Don't bother verifying checksum if we're going to flush anyway. */
+ if (NAPI_GRO_CB(skb)->flush)
+ goto skip;
+
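+ /* Validate the checksum before merging so GRO never coalesces a
+ * corrupt segment; on success, try to convert the skb so the
+ * checksum is not verified again further up the stack.
+ */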
+ if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
+ ip6_gro_compute_pseudo))
+ goto flush;
+ else if (uh->check)
+ skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+ ip6_gro_compute_pseudo);
+
+skip:
+ NAPI_GRO_CB(skb)->is_ipv6 = 1;
+ return udp_gro_receive(head, skb, uh);
+
+flush:
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+}
+
+static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+
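+ /* The coalesced packet has a new length; refresh the pseudo-header
+ * checksum so later segmentation/offload sees a consistent value.
+ */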
+ if (uh->check)
+ uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr,
+ &ipv6h->daddr, 0);
+
+ return udp_gro_complete(skb, nhoff);
+}
+
static const struct net_offload udpv6_offload = {
.callbacks = {
- .gso_send_check = udp6_ufo_send_check,
.gso_segment = udp6_ufo_fragment,
+ .gro_receive = udp6_gro_receive,
+ .gro_complete = udp6_gro_complete,
},
};
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index f8c3cf842f53..f48fbe4d16f5 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -3,8 +3,8 @@
*
* Authors:
* Mitsuru KANDA @USAGI
- * Kazunori MIYAZAWA @USAGI
- * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
+ * Kazunori MIYAZAWA @USAGI
+ * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
* YOSHIFUJI Hideaki @USAGI
* IPv6 support
*/
@@ -52,7 +52,6 @@ int xfrm6_rcv(struct sk_buff *skb)
return xfrm6_rcv_spi(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
0);
}
-
EXPORT_SYMBOL(xfrm6_rcv);
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
@@ -142,5 +141,4 @@ int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
drop:
return -1;
}
-
EXPORT_SYMBOL(xfrm6_input_addr);
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 433672d07d0b..ca3f29b98ae5 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -25,7 +25,6 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
{
return ip6_find_1stfragopt(skb, prevhdr);
}
-
EXPORT_SYMBOL(xfrm6_find_1stfragopt);
static int xfrm6_local_dontfrag(struct sk_buff *skb)
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 2a0bbda2c76a..ac49f84fe2c3 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -3,11 +3,11 @@
*
* Authors:
* Mitsuru KANDA @USAGI
- * Kazunori MIYAZAWA @USAGI
- * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
- * IPv6 support
- * YOSHIFUJI Hideaki
- * Split up af-specific portion
+ * Kazunori MIYAZAWA @USAGI
+ * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
+ * IPv6 support
+ * YOSHIFUJI Hideaki
+ * Split up af-specific portion
*
*/
@@ -84,7 +84,7 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
int nfheader_len)
{
if (dst->ops->family == AF_INET6) {
- struct rt6_info *rt = (struct rt6_info*)dst;
+ struct rt6_info *rt = (struct rt6_info *)dst;
if (rt->rt6i_node)
path->path_cookie = rt->rt6i_node->fn_sernum;
}
@@ -97,7 +97,7 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
const struct flowi *fl)
{
- struct rt6_info *rt = (struct rt6_info*)xdst->route;
+ struct rt6_info *rt = (struct rt6_info *)xdst->route;
xdst->u.dst.dev = dev;
dev_hold(dev);
@@ -296,7 +296,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
.family = AF_INET6,
.dst_ops = &xfrm6_dst_ops,
.dst_lookup = xfrm6_dst_lookup,
- .get_saddr = xfrm6_get_saddr,
+ .get_saddr = xfrm6_get_saddr,
.decode_session = _decode_session6,
.get_tos = xfrm6_get_tos,
.init_dst = xfrm6_init_dst,
@@ -319,9 +319,9 @@ static void xfrm6_policy_fini(void)
static struct ctl_table xfrm6_policy_table[] = {
{
.procname = "xfrm6_gc_thresh",
- .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh,
- .maxlen = sizeof(int),
- .mode = 0644,
+ .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 3fc970135fc6..8a1f9c0d2a13 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -3,11 +3,11 @@
*
* Authors:
* Mitsuru KANDA @USAGI
- * Kazunori MIYAZAWA @USAGI
- * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
- * IPv6 support
- * YOSHIFUJI Hideaki @USAGI
- * Split up af-specific portion
+ * Kazunori MIYAZAWA @USAGI
+ * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
+ * IPv6 support
+ * YOSHIFUJI Hideaki @USAGI
+ * Split up af-specific portion
*
*/
@@ -45,10 +45,10 @@ xfrm6_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
const xfrm_address_t *daddr, const xfrm_address_t *saddr)
{
x->id = tmpl->id;
- if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
+ if (ipv6_addr_any((struct in6_addr *)&x->id.daddr))
memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
- if (ipv6_addr_any((struct in6_addr*)&x->props.saddr))
+ if (ipv6_addr_any((struct in6_addr *)&x->props.saddr))
memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
x->props.mode = tmpl->mode;
x->props.reqid = tmpl->reqid;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 1c66465a42dd..5743044cd660 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -15,7 +15,7 @@
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*
* Authors Mitsuru KANDA <mk@linux-ipv6.org>
- * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
+ * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
*
* Based on net/ipv4/xfrm4_tunnel.c
*
@@ -110,7 +110,6 @@ __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
rcu_read_unlock_bh();
return htonl(spi);
}
-
EXPORT_SYMBOL(xfrm6_tunnel_spi_lookup);
static int __xfrm6_tunnel_spi_check(struct net *net, u32 spi)
@@ -187,7 +186,6 @@ __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
return htonl(spi);
}
-
EXPORT_SYMBOL(xfrm6_tunnel_alloc_spi);
static void x6spi_destroy_rcu(struct rcu_head *head)
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 1bc49edf2296..5a2d0a695529 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -98,7 +98,7 @@ static const struct file_operations irlan_fops = {
extern struct proc_dir_entry *proc_irda;
#endif /* CONFIG_PROC_FS */
-static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr);
+static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr);
static void __irlan_close(struct irlan_cb *self);
static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
__u8 value_byte, __u16 value_short,
@@ -196,7 +196,7 @@ static void __exit irlan_cleanup(void)
* Open new instance of a client/provider, we should only register the
 * network device if this instance is meant for a particular client/provider
*/
-static struct irlan_cb *irlan_open(__u32 saddr, __u32 daddr)
+static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr)
{
struct net_device *dev;
struct irlan_cb *self;
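
For context on the hunk above: the __init marker has to appear identically on both the forward declaration and the definition, or gcc/modpost reports a section mismatch. Functions so marked are placed in .init.text, which the kernel discards once initialization is done, so they must never be reachable afterwards. A minimal, hypothetical sketch of the pattern (not taken from this patch):

    #include <linux/init.h>
    #include <linux/printk.h>

    /* Declaration and definition carry the same __init annotation,
     * so the function lives in .init.text and its memory is
     * reclaimed after initialization has finished.
     */
    static int __init example_setup(int instances);

    static int __init example_setup(int instances)
    {
    	pr_info("setting up %d instances\n", instances);
    	return 0;
    }
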
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index da787930df0a..2a6a1fdd62c0 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -493,8 +493,8 @@ static void iucv_declare_cpu(void *data)
err = "Paging or storage error";
break;
}
- pr_warning("Defining an interrupt buffer on CPU %i"
- " failed with 0x%02x (%s)\n", cpu, rc, err);
+ pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n",
+ cpu, rc, err);
return;
}
@@ -1831,7 +1831,7 @@ static void iucv_external_interrupt(struct ext_code ext_code,
BUG_ON(p->iptype < 0x01 || p->iptype > 0x09);
work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
if (!work) {
- pr_warning("iucv_external_interrupt: out of memory\n");
+ pr_warn("iucv_external_interrupt: out of memory\n");
return;
}
memcpy(&work->data, p, sizeof(work->data));
@@ -1974,8 +1974,7 @@ static int iucv_pm_restore(struct device *dev)
printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
#endif
if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table)
- pr_warning("Suspending Linux did not completely close all IUCV "
- "connections\n");
+ pr_warn("Suspending Linux did not completely close all IUCV connections\n");
iucv_pm_state = IUCV_PM_RESTORING;
if (cpumask_empty(&iucv_irq_cpumask)) {
rc = iucv_query_maxconn();
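
The iucv hunks replace the long-deprecated pr_warning() with pr_warn() and join the wrapped format strings, so a user-visible message stays greppable as one literal even past 80 columns. A minimal sketch of the resulting style; the pr_fmt prefix here is hypothetical:

    #define pr_fmt(fmt) "iucv: " fmt	/* hypothetical prefix */

    #include <linux/printk.h>

    static void report_declare_failure(int cpu, int rc, const char *err)
    {
    	/* One unbroken format string: grepping for the message text
    	 * finds this call site directly.
    	 */
    	pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n",
    		cpu, rc, err);
    }
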
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 1109d3bb8dac..895348e44c7d 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -148,7 +148,7 @@ do { \
atomic_read(&_t->ref_count)); \
l2tp_tunnel_inc_refcount_1(_t); \
} while (0)
-#define l2tp_tunnel_dec_refcount(_t)
+#define l2tp_tunnel_dec_refcount(_t) \
do { \
pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
__func__, __LINE__, (_t)->name, \
@@ -1582,19 +1582,17 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
/* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
tunnel->encap = encap;
if (encap == L2TP_ENCAPTYPE_UDP) {
- /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
- udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
- udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
- udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
-#if IS_ENABLED(CONFIG_IPV6)
- if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
- udpv6_encap_enable();
- else
-#endif
- udp_encap_enable();
- }
+ struct udp_tunnel_sock_cfg udp_cfg;
+
+ udp_cfg.sk_user_data = tunnel;
+ udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
+ udp_cfg.encap_rcv = l2tp_udp_encap_recv;
+ udp_cfg.encap_destroy = l2tp_udp_encap_destroy;
- sk->sk_user_data = tunnel;
+ setup_udp_tunnel_sock(net, sock, &udp_cfg);
+ } else {
+ sk->sk_user_data = tunnel;
+ }
/* Hook on the tunnel socket destructor so that we can cleanup
* if the tunnel socket goes away.
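
Instead of writing encap_type/encap_rcv/encap_destroy into the UDP socket by hand and calling udp_encap_enable() or udpv6_encap_enable() itself, l2tp now describes the tunnel in a struct udp_tunnel_sock_cfg and lets setup_udp_tunnel_sock() apply it. A sketch of the calling pattern, with hypothetical my_encap_recv()/my_encap_destroy() callbacks standing in for the l2tp ones:

    #include <net/udp_tunnel.h>

    static int my_encap_recv(struct sock *sk, struct sk_buff *skb);
    static void my_encap_destroy(struct sock *sk);

    static void my_tunnel_attach(struct net *net, struct socket *sock,
    			     void *tunnel)
    {
    	struct udp_tunnel_sock_cfg cfg = {
    		.sk_user_data	= tunnel,
    		.encap_type	= UDP_ENCAP_L2TPINUDP,
    		.encap_rcv	= my_encap_recv,
    		.encap_destroy	= my_encap_destroy,
    	};

    	/* Marks the socket as an encapsulation socket and enables
    	 * UDP encap for the socket's address family internally.
    	 */
    	setup_udp_tunnel_sock(net, sock, &cfg);
    }
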
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index f0e84bc48038..a48bad468880 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -227,7 +227,7 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
void __ieee80211_start_rx_ba_session(struct sta_info *sta,
u8 dialog_token, u16 timeout,
u16 start_seq_num, u16 ba_policy, u16 tid,
- u16 buf_size, bool tx)
+ u16 buf_size, bool tx, bool auto_seq)
{
struct ieee80211_local *local = sta->sdata->local;
struct tid_ampdu_rx *tid_agg_rx;
@@ -326,6 +326,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
tid_agg_rx->buf_size = buf_size;
tid_agg_rx->timeout = timeout;
tid_agg_rx->stored_mpdu_num = 0;
+ tid_agg_rx->auto_seq = auto_seq;
status = WLAN_STATUS_SUCCESS;
/* activate it for RX */
@@ -367,7 +368,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
__ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
start_seq_num, ba_policy, tid,
- buf_size, true);
+ buf_size, true, false);
}
void ieee80211_start_rx_ba_session_offl(struct ieee80211_vif *vif,
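
__ieee80211_start_rx_ba_session() grows an auto_seq flag. In the normal path the peer's ADDBA request carries the starting sequence number, so callers keep passing it with auto_seq = false; in the offloaded path mac80211 never sees the ADDBA exchange, and auto_seq = true asks the reorder buffer to synchronize to the first received frame instead. A sketch of the two calling conventions after this patch, variable names as in the hunks above:

    /* ADDBA request handled by mac80211: the frame supplies the SSN. */
    __ieee80211_start_rx_ba_session(sta, dialog_token, timeout,
    				start_seq_num, ba_policy, tid,
    				buf_size, true, false);

    /* Driver-offloaded session: no ADDBA is visible here, so pass 0
     * and auto_seq = true to pick the SN up from the first MPDU.
     */
    __ieee80211_start_rx_ba_session(sta, 0, 0, 0, 1, tid,
    				IEEE80211_MAX_AMPDU_BUF, false, true);
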
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 927b4ea0128b..fb6a1502b6df 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2,6 +2,7 @@
* mac80211 configuration hooks for cfg80211
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This file is GPLv2 as found in COPYING.
*/
@@ -682,8 +683,19 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (old)
return -EALREADY;
- /* TODO: make hostapd tell us what it wants */
- sdata->smps_mode = IEEE80211_SMPS_OFF;
+ switch (params->smps_mode) {
+ case NL80211_SMPS_OFF:
+ sdata->smps_mode = IEEE80211_SMPS_OFF;
+ break;
+ case NL80211_SMPS_STATIC:
+ sdata->smps_mode = IEEE80211_SMPS_STATIC;
+ break;
+ case NL80211_SMPS_DYNAMIC:
+ sdata->smps_mode = IEEE80211_SMPS_DYNAMIC;
+ break;
+ default:
+ return -EINVAL;
+ }
sdata->needed_rx_chains = sdata->local->rx_chains;
mutex_lock(&local->mtx);
@@ -1011,15 +1023,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
clear_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE);
}
- if (mask & BIT(NL80211_STA_FLAG_WME)) {
- if (set & BIT(NL80211_STA_FLAG_WME)) {
- set_sta_flag(sta, WLAN_STA_WME);
- sta->sta.wme = true;
- } else {
- clear_sta_flag(sta, WLAN_STA_WME);
- sta->sta.wme = false;
- }
- }
+ if (mask & BIT(NL80211_STA_FLAG_WME))
+ sta->sta.wme = set & BIT(NL80211_STA_FLAG_WME);
if (mask & BIT(NL80211_STA_FLAG_MFP)) {
if (set & BIT(NL80211_STA_FLAG_MFP))
@@ -1984,8 +1989,13 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
return err;
}
- if (changed & WIPHY_PARAM_COVERAGE_CLASS) {
- err = drv_set_coverage_class(local, wiphy->coverage_class);
+ if ((changed & WIPHY_PARAM_COVERAGE_CLASS) ||
+ (changed & WIPHY_PARAM_DYN_ACK)) {
+ s16 coverage_class;
+
+ coverage_class = changed & WIPHY_PARAM_COVERAGE_CLASS ?
+ wiphy->coverage_class : -1;
+ err = drv_set_coverage_class(local, coverage_class);
if (err)
return err;
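
drv_set_coverage_class() now takes an s16 so one callback can carry two requests: a value >= 0 is a fixed coverage class (per the spec, each step adds roughly 3 us of air-propagation time to the ACK timeout), while -1 corresponds to WIPHY_PARAM_DYN_ACK and asks the driver to estimate the ACK timeout dynamically. A hypothetical driver-side decode, for illustration only:

    #include <linux/types.h>

    static int my_enable_dynack(void);		/* hypothetical helpers */
    static int my_set_ack_timeout(u8 cov_class);

    static int my_set_coverage_class(s16 value)
    {
    	/* -1 selects dynamic ACK timeout (WIPHY_PARAM_DYN_ACK) */
    	if (value < 0)
    		return my_enable_dynack();

    	/* 0..N keeps the old fixed coverage class behaviour */
    	return my_set_ack_timeout(value);
    }
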
@@ -2358,6 +2368,58 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
return 0;
}
+static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local,
+ struct ieee80211_roc_work *new_roc,
+ struct ieee80211_roc_work *cur_roc)
+{
+ unsigned long j = jiffies;
+ unsigned long cur_roc_end = cur_roc->hw_start_time +
+ msecs_to_jiffies(cur_roc->duration);
+ struct ieee80211_roc_work *next_roc;
+ int new_dur;
+
+ if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun))
+ return false;
+
+ if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end))
+ return false;
+
+ ieee80211_handle_roc_started(new_roc);
+
+ new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j);
+
+ /* cur_roc is long enough - add new_roc to the dependents list. */
+ if (new_dur <= 0) {
+ list_add_tail(&new_roc->list, &cur_roc->dependents);
+ return true;
+ }
+
+ new_roc->duration = new_dur;
+
+ /*
+ * if cur_roc was already coalesced before, we might
+ * want to extend the next roc instead of adding
+ * a new one.
+ */
+ next_roc = list_entry(cur_roc->list.next,
+ struct ieee80211_roc_work, list);
+ if (&next_roc->list != &local->roc_list &&
+ next_roc->chan == new_roc->chan &&
+ next_roc->sdata == new_roc->sdata &&
+ !WARN_ON(next_roc->started)) {
+ list_add_tail(&new_roc->list, &next_roc->dependents);
+ next_roc->duration = max(next_roc->duration,
+ new_roc->duration);
+ next_roc->type = max(next_roc->type, new_roc->type);
+ return true;
+ }
+
+ /* add right after cur_roc */
+ list_add(&new_roc->list, &cur_roc->list);
+
+ return true;
+}
+
static int ieee80211_start_roc_work(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *channel,
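
ieee80211_coalesce_started_roc() extracts the duration arithmetic that used to sit inline in ieee80211_start_roc_work(): a new request may only piggyback on the running remain-on-channel if at least IEEE80211_ROC_MIN_LEFT of it is still ahead, and the time the running ROC covers is credited against the new duration. A rough userspace re-creation of that arithmetic, in plain milliseconds with made-up values:

    #include <stdio.h>

    int main(void)
    {
    	long now = 1000;			/* stand-in for jiffies, in ms */
    	long cur_start = 900, cur_dur = 300;	/* running ROC */
    	long cur_end = cur_start + cur_dur;
    	long min_left = 100;			/* IEEE80211_ROC_MIN_LEFT */
    	long new_dur = 250;			/* requested duration */

    	if (now + min_left > cur_end) {
    		puts("too little time left - queue it separately");
    		return 0;
    	}

    	new_dur -= cur_end - now;		/* credit the overlap */
    	if (new_dur <= 0)
    		puts("fully covered - attach as a dependent");
    	else
    		printf("extend by %ld ms after the current ROC\n", new_dur);
    	return 0;
    }
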
@@ -2463,8 +2525,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
/* If it has already started, it's more difficult ... */
if (local->ops->remain_on_channel) {
- unsigned long j = jiffies;
-
/*
* In the offloaded ROC case, if it hasn't begun, add
* this new one to the dependent list to be handled
@@ -2487,28 +2547,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
break;
}
- if (time_before(j + IEEE80211_ROC_MIN_LEFT,
- tmp->hw_start_time +
- msecs_to_jiffies(tmp->duration))) {
- int new_dur;
-
- ieee80211_handle_roc_started(roc);
-
- new_dur = roc->duration -
- jiffies_to_msecs(tmp->hw_start_time +
- msecs_to_jiffies(
- tmp->duration) -
- j);
-
- if (new_dur > 0) {
- /* add right after tmp */
- list_add(&roc->list, &tmp->list);
- } else {
- list_add_tail(&roc->list,
- &tmp->dependents);
- }
+ if (ieee80211_coalesce_started_roc(local, roc, tmp))
queued = true;
- }
} else if (del_timer_sync(&tmp->work.timer)) {
unsigned long new_end;
@@ -3352,7 +3392,7 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
band = chanctx_conf->def.chan->band;
sta = sta_info_get_bss(sdata, peer);
if (sta) {
- qos = test_sta_flag(sta, WLAN_STA_WME);
+ qos = sta->sta.wme;
} else {
rcu_read_unlock();
return -ENOLINK;
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 399ad82c997f..4c74e8da64b9 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -549,12 +549,12 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local,
compat = cfg80211_chandef_compatible(
&sdata->vif.bss_conf.chandef, compat);
- if (!compat)
+ if (WARN_ON_ONCE(!compat))
break;
}
rcu_read_unlock();
- if (WARN_ON_ONCE(!compat))
+ if (!compat)
return;
ieee80211_change_chanctx(local, ctx, compat);
@@ -639,41 +639,6 @@ out:
return ret;
}
-static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_chanctx_conf *conf;
- struct ieee80211_chanctx *ctx;
- bool use_reserved_switch = false;
-
- lockdep_assert_held(&local->chanctx_mtx);
-
- conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
- lockdep_is_held(&local->chanctx_mtx));
- if (!conf)
- return;
-
- ctx = container_of(conf, struct ieee80211_chanctx, conf);
-
- if (sdata->reserved_chanctx) {
- if (sdata->reserved_chanctx->replace_state ==
- IEEE80211_CHANCTX_REPLACES_OTHER &&
- ieee80211_chanctx_num_reserved(local,
- sdata->reserved_chanctx) > 1)
- use_reserved_switch = true;
-
- ieee80211_vif_unreserve_chanctx(sdata);
- }
-
- ieee80211_assign_vif_chanctx(sdata, NULL);
- if (ieee80211_chanctx_refcount(local, ctx) == 0)
- ieee80211_free_chanctx(local, ctx);
-
- /* Unreserving may ready an in-place reservation. */
- if (use_reserved_switch)
- ieee80211_vif_use_reserved_switch(local);
-}
-
void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
struct ieee80211_chanctx *chanctx)
{
@@ -764,63 +729,6 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
drv_change_chanctx(local, chanctx, IEEE80211_CHANCTX_CHANGE_RX_CHAINS);
}
-int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
- const struct cfg80211_chan_def *chandef,
- enum ieee80211_chanctx_mode mode)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_chanctx *ctx;
- u8 radar_detect_width = 0;
- int ret;
-
- lockdep_assert_held(&local->mtx);
-
- WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
-
- mutex_lock(&local->chanctx_mtx);
-
- ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
- chandef,
- sdata->wdev.iftype);
- if (ret < 0)
- goto out;
- if (ret > 0)
- radar_detect_width = BIT(chandef->width);
-
- sdata->radar_required = ret;
-
- ret = ieee80211_check_combinations(sdata, chandef, mode,
- radar_detect_width);
- if (ret < 0)
- goto out;
-
- __ieee80211_vif_release_channel(sdata);
-
- ctx = ieee80211_find_chanctx(local, chandef, mode);
- if (!ctx)
- ctx = ieee80211_new_chanctx(local, chandef, mode);
- if (IS_ERR(ctx)) {
- ret = PTR_ERR(ctx);
- goto out;
- }
-
- sdata->vif.bss_conf.chandef = *chandef;
-
- ret = ieee80211_assign_vif_chanctx(sdata, ctx);
- if (ret) {
- /* if assign fails refcount stays the same */
- if (ieee80211_chanctx_refcount(local, ctx) == 0)
- ieee80211_free_chanctx(local, ctx);
- goto out;
- }
-
- ieee80211_recalc_smps_chanctx(local, ctx);
- ieee80211_recalc_radar_chanctx(local, ctx);
- out:
- mutex_unlock(&local->chanctx_mtx);
- return ret;
-}
-
static void
__ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
bool clear)
@@ -1269,8 +1177,7 @@ err:
return err;
}
-int
-ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
+static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata, *sdata_tmp;
struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx;
@@ -1522,6 +1429,98 @@ err:
return err;
}
+static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *conf;
+ struct ieee80211_chanctx *ctx;
+ bool use_reserved_switch = false;
+
+ lockdep_assert_held(&local->chanctx_mtx);
+
+ conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ lockdep_is_held(&local->chanctx_mtx));
+ if (!conf)
+ return;
+
+ ctx = container_of(conf, struct ieee80211_chanctx, conf);
+
+ if (sdata->reserved_chanctx) {
+ if (sdata->reserved_chanctx->replace_state ==
+ IEEE80211_CHANCTX_REPLACES_OTHER &&
+ ieee80211_chanctx_num_reserved(local,
+ sdata->reserved_chanctx) > 1)
+ use_reserved_switch = true;
+
+ ieee80211_vif_unreserve_chanctx(sdata);
+ }
+
+ ieee80211_assign_vif_chanctx(sdata, NULL);
+ if (ieee80211_chanctx_refcount(local, ctx) == 0)
+ ieee80211_free_chanctx(local, ctx);
+
+ /* Unreserving may ready an in-place reservation. */
+ if (use_reserved_switch)
+ ieee80211_vif_use_reserved_switch(local);
+}
+
+int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef,
+ enum ieee80211_chanctx_mode mode)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx *ctx;
+ u8 radar_detect_width = 0;
+ int ret;
+
+ lockdep_assert_held(&local->mtx);
+
+ WARN_ON(sdata->dev && netif_carrier_ok(sdata->dev));
+
+ mutex_lock(&local->chanctx_mtx);
+
+ ret = cfg80211_chandef_dfs_required(local->hw.wiphy,
+ chandef,
+ sdata->wdev.iftype);
+ if (ret < 0)
+ goto out;
+ if (ret > 0)
+ radar_detect_width = BIT(chandef->width);
+
+ sdata->radar_required = ret;
+
+ ret = ieee80211_check_combinations(sdata, chandef, mode,
+ radar_detect_width);
+ if (ret < 0)
+ goto out;
+
+ __ieee80211_vif_release_channel(sdata);
+
+ ctx = ieee80211_find_chanctx(local, chandef, mode);
+ if (!ctx)
+ ctx = ieee80211_new_chanctx(local, chandef, mode);
+ if (IS_ERR(ctx)) {
+ ret = PTR_ERR(ctx);
+ goto out;
+ }
+
+ sdata->vif.bss_conf.chandef = *chandef;
+
+ ret = ieee80211_assign_vif_chanctx(sdata, ctx);
+ if (ret) {
+ /* if assign fails refcount stays the same */
+ if (ieee80211_chanctx_refcount(local, ctx) == 0)
+ ieee80211_free_chanctx(local, ctx);
+ goto out;
+ }
+
+ ieee80211_recalc_smps_chanctx(local, ctx);
+ ieee80211_recalc_radar_chanctx(local, ctx);
+ out:
+ mutex_unlock(&local->chanctx_mtx);
+ return ret;
+}
+
int ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 0e963bc1ceac..54a189f0393e 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -3,6 +3,7 @@
* mac80211 debugfs for wireless PHYs
*
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* GPLv2
*
@@ -302,11 +303,6 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
sf += scnprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
- sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
- sf += scnprintf(buf + sf, mxln - sf,
- "SUPPORTS_DYNAMIC_SMPS\n");
if (local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
sf += scnprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index e205ebabfa50..c68896adfa96 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -226,12 +226,12 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
int err;
- if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) &&
+ if (!(local->hw.wiphy->features & NL80211_FEATURE_STATIC_SMPS) &&
smps_mode == IEEE80211_SMPS_STATIC)
return -EINVAL;
/* auto should be dynamic if in PS mode */
- if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) &&
+ if (!(local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS) &&
(smps_mode == IEEE80211_SMPS_DYNAMIC ||
smps_mode == IEEE80211_SMPS_AUTOMATIC))
return -EINVAL;
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 86173c0de40e..bafe48916229 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -2,6 +2,7 @@
* Copyright 2003-2005 Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -77,7 +78,8 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
TEST(PS_DRIVER), TEST(AUTHORIZED),
TEST(SHORT_PREAMBLE),
- TEST(WME), TEST(WDS), TEST(CLEAR_PS_FILT),
+ sta->sta.wme ? "WME\n" : "",
+ TEST(WDS), TEST(CLEAR_PS_FILT),
TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT),
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 11423958116a..196d48c68134 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -450,7 +450,7 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local,
}
static inline int drv_set_coverage_class(struct ieee80211_local *local,
- u8 value)
+ s16 value)
{
int ret = 0;
might_sleep();
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 9713dc54ea4b..56b53571c807 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -6,6 +6,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1038,7 +1039,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
}
if (sta && elems->wmm_info)
- set_sta_flag(sta, WLAN_STA_WME);
+ sta->sta.wme = true;
if (sta && elems->ht_operation && elems->ht_cap_elem &&
sdata->u.ibss.chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ef7a089ac546..c2aaec4dfcf0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -3,6 +3,7 @@
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -354,6 +355,7 @@ enum ieee80211_sta_flags {
IEEE80211_STA_DISABLE_80P80MHZ = BIT(12),
IEEE80211_STA_DISABLE_160MHZ = BIT(13),
IEEE80211_STA_DISABLE_WMM = BIT(14),
+ IEEE80211_STA_ENABLE_RRM = BIT(15),
};
struct ieee80211_mgd_auth_data {
@@ -1367,6 +1369,7 @@ struct ieee802_11_elems {
const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
const u8 *country_elem;
const u8 *pwr_constr_elem;
+ const u8 *cisco_dtpc_elem;
const struct ieee80211_timeout_interval_ie *timeout_int;
const u8 *opmode_notif;
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
@@ -1587,7 +1590,7 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
void __ieee80211_start_rx_ba_session(struct sta_info *sta,
u8 dialog_token, u16 timeout,
u16 start_seq_num, u16 ba_policy, u16 tid,
- u16 buf_size, bool tx);
+ u16 buf_size, bool tx, bool auto_seq);
void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
enum ieee80211_agg_stop_reason reason);
void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
@@ -1869,7 +1872,6 @@ ieee80211_vif_reserve_chanctx(struct ieee80211_sub_if_data *sdata,
int __must_check
ieee80211_vif_use_reserved_context(struct ieee80211_sub_if_data *sdata);
int ieee80211_vif_unreserve_chanctx(struct ieee80211_sub_if_data *sdata);
-int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local);
int __must_check
ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
@@ -1918,7 +1920,7 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
size_t extra_ies_len);
int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, enum nl80211_tdls_operation oper);
-
+void ieee80211_tdls_peer_del_work(struct work_struct *wk);
extern const struct ethtool_ops ieee80211_ethtool_ops;
@@ -1929,4 +1931,3 @@ extern const struct ethtool_ops ieee80211_ethtool_ops;
#endif
#endif /* IEEE80211_I_H */
-void ieee80211_tdls_peer_del_work(struct work_struct *wk);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index f75e5f132c5a..af237223a8cd 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -5,6 +5,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1172,19 +1173,11 @@ static void ieee80211_iface_work(struct work_struct *work)
rx_agg = (void *)&skb->cb;
mutex_lock(&local->sta_mtx);
sta = sta_info_get_bss(sdata, rx_agg->addr);
- if (sta) {
- u16 last_seq;
-
- last_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(
- sta->last_seq_ctrl[rx_agg->tid]));
-
+ if (sta)
__ieee80211_start_rx_ba_session(sta,
- 0, 0,
- ieee80211_sn_inc(last_seq),
- 1, rx_agg->tid,
+ 0, 0, 0, 1, rx_agg->tid,
IEEE80211_MAX_AMPDU_BUF,
- false);
- }
+ false, true);
mutex_unlock(&local->sta_mtx);
} else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_STOP) {
rx_agg = (void *)&skb->cb;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index d808cff80153..4712150dc210 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -3,6 +3,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -130,9 +131,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (!ret) {
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
- if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
+ if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
sdata->crypto_tx_tailroom_needed_cnt--;
WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
@@ -180,9 +179,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sta = key->sta;
sdata = key->sdata;
- if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
+ if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
increment_tailroom_need_count(sdata);
ret = drv_set_key(key->local, DISABLE_KEY, sdata,
@@ -425,7 +422,7 @@ static void ieee80211_key_free_common(struct ieee80211_key *key)
ieee80211_aes_key_free(key->u.ccmp.tfm);
if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
- kfree(key);
+ kzfree(key);
}
static void __ieee80211_key_destroy(struct ieee80211_key *key,
@@ -878,9 +875,7 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
- if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
- (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
+ if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
increment_tailroom_need_count(key->sdata);
}
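
key.c frees keys with kzfree() rather than kfree() from here on, so the key material is overwritten with zeroes before the object goes back to the slab and cannot be recovered from freed memory. The pattern, shown on a hypothetical structure:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct my_key {
    	u8 secret[32];		/* hypothetical key material */
    };

    static void my_key_free(struct my_key *key)
    {
    	/* kzfree(): memset the object to zero, then kfree() it */
    	kzfree(key);
    }
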
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index e0ab4320a078..0de7c93bf62b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -2,6 +2,7 @@
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index cf032a8db9d7..a6699dceae7c 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -729,7 +729,7 @@ void mesh_plink_broken(struct sta_info *sta)
tbl = rcu_dereference(mesh_paths);
for_each_mesh_entry(tbl, node, i) {
mpath = node->mpath;
- if (rcu_dereference(mpath->next_hop) == sta &&
+ if (rcu_access_pointer(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
spin_lock_bh(&mpath->state_lock);
@@ -794,7 +794,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
tbl = resize_dereference_mesh_paths();
for_each_mesh_entry(tbl, node, i) {
mpath = node->mpath;
- if (rcu_dereference(mpath->next_hop) == sta) {
+ if (rcu_access_pointer(mpath->next_hop) == sta) {
spin_lock(&tbl->hashwlock[i]);
__mesh_path_del(tbl, node);
spin_unlock(&tbl->hashwlock[i]);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index c47194d27149..b488e1859b18 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -431,14 +431,12 @@ __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
return NULL;
sta->plink_state = NL80211_PLINK_LISTEN;
+ sta->sta.wme = true;
sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
- set_sta_flag(sta, WLAN_STA_WME);
- sta->sta.wme = true;
-
return sta;
}
@@ -1004,7 +1002,6 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
enum ieee80211_self_protected_actioncode ftype;
u32 changed = 0;
u8 ie_len = elems->peering_len;
- __le16 _plid, _llid;
u16 plid, llid = 0;
if (!elems->peering) {
@@ -1039,13 +1036,10 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata,
/* Note the lines below are correct, the llid in the frame is the plid
* from the point of view of this host.
*/
- memcpy(&_plid, PLINK_GET_LLID(elems->peering), sizeof(__le16));
- plid = le16_to_cpu(_plid);
+ plid = get_unaligned_le16(PLINK_GET_LLID(elems->peering));
if (ftype == WLAN_SP_MESH_PEERING_CONFIRM ||
- (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8)) {
- memcpy(&_llid, PLINK_GET_PLID(elems->peering), sizeof(__le16));
- llid = le16_to_cpu(_llid);
- }
+ (ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len == 8))
+ llid = get_unaligned_le16(PLINK_GET_PLID(elems->peering));
/* WARNING: Only for sta pointer, is dropped & re-acquired */
rcu_read_lock();
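
The memcpy-into-__le16 plus le16_to_cpu() dance becomes a single get_unaligned_le16(), which reads a little-endian 16-bit field straight out of the frame and stays safe on CPUs that fault on unaligned loads. A minimal sketch; the offset is illustrative, not the real peering-IE layout:

    #include <asm/unaligned.h>
    #include <linux/types.h>

    static u16 read_le16_field(const u8 *ie)
    {
    	/* Before: __le16 tmp; memcpy(&tmp, ie + 2, sizeof(tmp));
    	 *         return le16_to_cpu(tmp);
    	 */
    	return get_unaligned_le16(ie + 2);
    }
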
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b82a12a9f0f1..2de88704278b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -5,6 +5,7 @@
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -149,6 +150,7 @@ static u32
ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
+ const struct ieee80211_ht_cap *ht_cap,
const struct ieee80211_ht_operation *ht_oper,
const struct ieee80211_vht_operation *vht_oper,
struct cfg80211_chan_def *chandef, bool tracking)
@@ -162,13 +164,19 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
chandef->center_freq1 = channel->center_freq;
chandef->center_freq2 = 0;
- if (!ht_oper || !sband->ht_cap.ht_supported) {
+ if (!ht_cap || !ht_oper || !sband->ht_cap.ht_supported) {
ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
goto out;
}
chandef->width = NL80211_CHAN_WIDTH_20;
+ if (!(ht_cap->cap_info &
+ cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40))) {
+ ret = IEEE80211_STA_DISABLE_40MHZ;
+ goto out;
+ }
+
ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
channel->band);
/* check that channel matches the right operating channel */
@@ -328,6 +336,7 @@ out:
static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
+ const struct ieee80211_ht_cap *ht_cap,
const struct ieee80211_ht_operation *ht_oper,
const struct ieee80211_vht_operation *vht_oper,
const u8 *bssid, u32 *changed)
@@ -367,8 +376,9 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
sband = local->hw.wiphy->bands[chan->band];
/* calculate new channel (type) based on HT/VHT operation IEs */
- flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
- vht_oper, &chandef, true);
+ flags = ieee80211_determine_chantype(sdata, sband, chan,
+ ht_cap, ht_oper, vht_oper,
+ &chandef, true);
/*
* Downgrade the new channel if we associated with restricted
@@ -663,6 +673,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
(local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
+ if (ifmgd->flags & IEEE80211_STA_ENABLE_RRM)
+ capab |= WLAN_CAPABILITY_RADIO_MEASURE;
+
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
memcpy(mgmt->da, assoc_data->bss->bssid, ETH_ALEN);
@@ -728,16 +741,17 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
}
}
- if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
- /* 1. power capabilities */
+ if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT ||
+ capab & WLAN_CAPABILITY_RADIO_MEASURE) {
pos = skb_put(skb, 4);
*pos++ = WLAN_EID_PWR_CAPABILITY;
*pos++ = 2;
*pos++ = 0; /* min tx power */
/* max tx power */
*pos++ = ieee80211_chandef_max_power(&chanctx_conf->def);
+ }
- /* 2. supported channels */
+ if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
/* TODO: get this in reg domain format */
pos = skb_put(skb, 2 * sband->n_channels + 2);
*pos++ = WLAN_EID_SUPPORTED_CHANNELS;
@@ -1157,19 +1171,21 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval));
}
-static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *channel,
- const u8 *country_ie, u8 country_ie_len,
- const u8 *pwr_constr_elem)
+static bool
+ieee80211_find_80211h_pwr_constr(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *channel,
+ const u8 *country_ie, u8 country_ie_len,
+ const u8 *pwr_constr_elem,
+ int *chan_pwr, int *pwr_reduction)
{
struct ieee80211_country_ie_triplet *triplet;
int chan = ieee80211_frequency_to_channel(channel->center_freq);
- int i, chan_pwr, chan_increment, new_ap_level;
+ int i, chan_increment;
bool have_chan_pwr = false;
/* Invalid IE */
if (country_ie_len % 2 || country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
- return 0;
+ return false;
triplet = (void *)(country_ie + 3);
country_ie_len -= 3;
@@ -1197,7 +1213,7 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
for (i = 0; i < triplet->chans.num_channels; i++) {
if (first_channel + i * chan_increment == chan) {
have_chan_pwr = true;
- chan_pwr = triplet->chans.max_power;
+ *chan_pwr = triplet->chans.max_power;
break;
}
}
@@ -1209,18 +1225,76 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
country_ie_len -= 3;
}
- if (!have_chan_pwr)
+ if (have_chan_pwr)
+ *pwr_reduction = *pwr_constr_elem;
+ return have_chan_pwr;
+}
+
+static void ieee80211_find_cisco_dtpc(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *channel,
+ const u8 *cisco_dtpc_ie,
+ int *pwr_level)
+{
+ /* From practical testing, the first data byte of the DTPC element
+ * seems to contain the requested dBm level, and the CLI on Cisco
+ * APs clearly state the range is -127 to 127 dBm, which indicates
+ * a signed byte, although it seemingly never actually goes negative.
+ * The other byte seems to always be zero.
+ */
+ *pwr_level = (__s8)cisco_dtpc_ie[4];
+}
+
+static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *channel,
+ struct ieee80211_mgmt *mgmt,
+ const u8 *country_ie, u8 country_ie_len,
+ const u8 *pwr_constr_ie,
+ const u8 *cisco_dtpc_ie)
+{
+ bool has_80211h_pwr = false, has_cisco_pwr = false;
+ int chan_pwr = 0, pwr_reduction_80211h = 0;
+ int pwr_level_cisco, pwr_level_80211h;
+ int new_ap_level;
+
+ if (country_ie && pwr_constr_ie &&
+ mgmt->u.probe_resp.capab_info &
+ cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT)) {
+ has_80211h_pwr = ieee80211_find_80211h_pwr_constr(
+ sdata, channel, country_ie, country_ie_len,
+ pwr_constr_ie, &chan_pwr, &pwr_reduction_80211h);
+ pwr_level_80211h =
+ max_t(int, 0, chan_pwr - pwr_reduction_80211h);
+ }
+
+ if (cisco_dtpc_ie) {
+ ieee80211_find_cisco_dtpc(
+ sdata, channel, cisco_dtpc_ie, &pwr_level_cisco);
+ has_cisco_pwr = true;
+ }
+
+ if (!has_80211h_pwr && !has_cisco_pwr)
return 0;
- new_ap_level = max_t(int, 0, chan_pwr - *pwr_constr_elem);
+ /* If we have both 802.11h and Cisco DTPC, apply both limits
+ * by picking the smallest of the two power levels advertised.
+ */
+ if (has_80211h_pwr &&
+ (!has_cisco_pwr || pwr_level_80211h <= pwr_level_cisco)) {
+ sdata_info(sdata,
+ "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
+ pwr_level_80211h, chan_pwr, pwr_reduction_80211h,
+ sdata->u.mgd.bssid);
+ new_ap_level = pwr_level_80211h;
+ } else { /* has_cisco_pwr is always true here. */
+ sdata_info(sdata,
+ "Limiting TX power to %d dBm as advertised by %pM\n",
+ pwr_level_cisco, sdata->u.mgd.bssid);
+ new_ap_level = pwr_level_cisco;
+ }
if (sdata->ap_power_level == new_ap_level)
return 0;
- sdata_info(sdata,
- "Limiting TX power to %d (%d - %d) dBm as advertised by %pM\n",
- new_ap_level, chan_pwr, *pwr_constr_elem,
- sdata->u.mgd.bssid);
sdata->ap_power_level = new_ap_level;
if (__ieee80211_recalc_txpower(sdata))
return BSS_CHANGED_TXPOWER;
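
With two possible sources of a TX power limit - the 802.11h country and power-constraint IEs, and Cisco's DTPC element - the handler computes each advertised level and obeys the stricter one. The 802.11h level is max(0, channel power - constraint); DTPC supplies an absolute dBm value. A runnable arithmetic sketch with invented numbers:

    #include <stdio.h>

    int main(void)
    {
    	int chan_pwr = 20, pwr_reduction = 3;	/* country + constraint IEs */
    	int lvl_80211h = chan_pwr - pwr_reduction;
    	int lvl_cisco = 15;			/* DTPC element, dBm */
    	int new_level;

    	if (lvl_80211h < 0)
    		lvl_80211h = 0;			/* the max_t(int, 0, ...) step */

    	/* Both limits present: pick the smaller advertised level. */
    	new_level = lvl_80211h <= lvl_cisco ? lvl_80211h : lvl_cisco;
    	printf("limiting TX power to %d dBm\n", new_level);
    	return 0;
    }
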
@@ -2677,8 +2751,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_MFP_ENABLED)
set_sta_flag(sta, WLAN_STA_MFP);
- if (elems.wmm_param)
- set_sta_flag(sta, WLAN_STA_WME);
+ sta->sta.wme = elems.wmm_param;
err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
@@ -2744,6 +2817,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
u16 capab_info, status_code, aid;
struct ieee802_11_elems elems;
+ int ac, uapsd_queues = -1;
u8 *pos;
bool reassoc;
struct cfg80211_bss *bss;
@@ -2813,9 +2887,15 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
* is set can cause the interface to go idle
*/
ieee80211_destroy_assoc_data(sdata, true);
+
+ /* get uapsd queues configuration */
+ uapsd_queues = 0;
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ if (sdata->tx_conf[ac].uapsd)
+ uapsd_queues |= BIT(ac);
}
- cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len);
+ cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len, uapsd_queues);
}
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -2885,7 +2965,9 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
/*
* This is the canonical list of information elements we care about,
* the filter code also gives us all changes to the Microsoft OUI
- * (00:50:F2) vendor IE which is used for WMM which we need to track.
+ * (00:50:F2) vendor IE which is used for WMM which we need to track,
+ * as well as the DTPC IE (part of the Cisco OUI) used for signaling
+ * changes to requested client power.
*
* We implement beacon filtering in software since that means we can
* avoid processing the frame here and in cfg80211, and userspace
@@ -3174,7 +3256,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, bssid);
- if (ieee80211_config_bw(sdata, sta, elems.ht_operation,
+ if (ieee80211_config_bw(sdata, sta,
+ elems.ht_cap_elem, elems.ht_operation,
elems.vht_operation, bssid, &changed)) {
mutex_unlock(&local->sta_mtx);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
@@ -3190,13 +3273,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
rx_status->band, true);
mutex_unlock(&local->sta_mtx);
- if (elems.country_elem && elems.pwr_constr_elem &&
- mgmt->u.probe_resp.capab_info &
- cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT))
- changed |= ieee80211_handle_pwr_constr(sdata, chan,
- elems.country_elem,
- elems.country_elem_len,
- elems.pwr_constr_elem);
+ changed |= ieee80211_handle_pwr_constr(sdata, chan, mgmt,
+ elems.country_elem,
+ elems.country_elem_len,
+ elems.pwr_constr_elem,
+ elems.cisco_dtpc_elem);
ieee80211_bss_info_change_notify(sdata, changed);
}
@@ -3724,7 +3805,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
ifmgd->uapsd_max_sp_len = sdata->local->hw.uapsd_max_sp_len;
ifmgd->p2p_noa_index = -1;
- if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
+ if (sdata->local->hw.wiphy->features & NL80211_FEATURE_DYNAMIC_SMPS)
ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
else
ifmgd->req_smps = IEEE80211_SMPS_OFF;
@@ -3808,6 +3889,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ const struct ieee80211_ht_cap *ht_cap = NULL;
const struct ieee80211_ht_operation *ht_oper = NULL;
const struct ieee80211_vht_operation *vht_oper = NULL;
struct ieee80211_supported_band *sband;
@@ -3824,14 +3906,17 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
sband->ht_cap.ht_supported) {
- const u8 *ht_oper_ie, *ht_cap;
+ const u8 *ht_oper_ie, *ht_cap_ie;
ht_oper_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_OPERATION);
if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper))
ht_oper = (void *)(ht_oper_ie + 2);
- ht_cap = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY);
- if (!ht_cap || ht_cap[1] < sizeof(struct ieee80211_ht_cap)) {
+ ht_cap_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY);
+ if (ht_cap_ie && ht_cap_ie[1] >= sizeof(*ht_cap))
+ ht_cap = (void *)(ht_cap_ie + 2);
+
+ if (!ht_cap) {
ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
ht_oper = NULL;
}
@@ -3862,7 +3947,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
cbss->channel,
- ht_oper, vht_oper,
+ ht_cap, ht_oper, vht_oper,
&chandef, false);
sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
@@ -4395,6 +4480,11 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
ifmgd->flags &= ~IEEE80211_STA_MFP_ENABLED;
}
+ if (req->flags & ASSOC_REQ_USE_RRM)
+ ifmgd->flags |= IEEE80211_STA_ENABLE_RRM;
+ else
+ ifmgd->flags &= ~IEEE80211_STA_ENABLE_RRM;
+
if (req->crypto.control_port)
ifmgd->flags |= IEEE80211_STA_CONTROL_PORT;
else
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 1c1469c36dca..2baa7ed8789d 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -75,7 +75,7 @@ minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
{
int j = MAX_THR_RATES;
- while (j > 0 && mi->r[i].cur_tp > mi->r[tp_list[j - 1]].cur_tp)
+ while (j > 0 && mi->r[i].stats.cur_tp > mi->r[tp_list[j - 1]].stats.cur_tp)
j--;
if (j < MAX_THR_RATES - 1)
memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
@@ -92,7 +92,7 @@ minstrel_set_rate(struct minstrel_sta_info *mi, struct ieee80211_sta_rates *rate
ratetbl->rate[offset].idx = r->rix;
ratetbl->rate[offset].count = r->adjusted_retry_count;
ratetbl->rate[offset].count_cts = r->retry_count_cts;
- ratetbl->rate[offset].count_rts = r->retry_count_rtscts;
+ ratetbl->rate[offset].count_rts = r->stats.retry_count_rtscts;
}
static void
@@ -140,44 +140,46 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
+ struct minstrel_rate_stats *mrs = &mi->r[i].stats;
usecs = mr->perfect_tx_time;
if (!usecs)
usecs = 1000000;
- if (unlikely(mr->attempts > 0)) {
- mr->sample_skipped = 0;
- mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
- mr->succ_hist += mr->success;
- mr->att_hist += mr->attempts;
- mr->probability = minstrel_ewma(mr->probability,
- mr->cur_prob,
- EWMA_LEVEL);
+ if (unlikely(mrs->attempts > 0)) {
+ mrs->sample_skipped = 0;
+ mrs->cur_prob = MINSTREL_FRAC(mrs->success,
+ mrs->attempts);
+ mrs->succ_hist += mrs->success;
+ mrs->att_hist += mrs->attempts;
+ mrs->probability = minstrel_ewma(mrs->probability,
+ mrs->cur_prob,
+ EWMA_LEVEL);
} else
- mr->sample_skipped++;
+ mrs->sample_skipped++;
- mr->last_success = mr->success;
- mr->last_attempts = mr->attempts;
- mr->success = 0;
- mr->attempts = 0;
+ mrs->last_success = mrs->success;
+ mrs->last_attempts = mrs->attempts;
+ mrs->success = 0;
+ mrs->attempts = 0;
/* Update throughput per rate, reset thr. below 10% success */
- if (mr->probability < MINSTREL_FRAC(10, 100))
- mr->cur_tp = 0;
+ if (mrs->probability < MINSTREL_FRAC(10, 100))
+ mrs->cur_tp = 0;
else
- mr->cur_tp = mr->probability * (1000000 / usecs);
+ mrs->cur_tp = mrs->probability * (1000000 / usecs);
/* Sample less often below the 10% chance of success.
* Sample less often above the 95% chance of success. */
- if (mr->probability > MINSTREL_FRAC(95, 100) ||
- mr->probability < MINSTREL_FRAC(10, 100)) {
- mr->adjusted_retry_count = mr->retry_count >> 1;
+ if (mrs->probability > MINSTREL_FRAC(95, 100) ||
+ mrs->probability < MINSTREL_FRAC(10, 100)) {
+ mr->adjusted_retry_count = mrs->retry_count >> 1;
if (mr->adjusted_retry_count > 2)
mr->adjusted_retry_count = 2;
mr->sample_limit = 4;
} else {
mr->sample_limit = -1;
- mr->adjusted_retry_count = mr->retry_count;
+ mr->adjusted_retry_count = mrs->retry_count;
}
if (!mr->adjusted_retry_count)
mr->adjusted_retry_count = 2;
@@ -190,11 +192,11 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
* choose the maximum throughput rate as max_prob_rate
* (2) if all success probabilities < 95%, the rate with
 * highest success probability is chosen as max_prob_rate */
- if (mr->probability >= MINSTREL_FRAC(95, 100)) {
- if (mr->cur_tp >= mi->r[tmp_prob_rate].cur_tp)
+ if (mrs->probability >= MINSTREL_FRAC(95, 100)) {
+ if (mrs->cur_tp >= mi->r[tmp_prob_rate].stats.cur_tp)
tmp_prob_rate = i;
} else {
- if (mr->probability >= mi->r[tmp_prob_rate].probability)
+ if (mrs->probability >= mi->r[tmp_prob_rate].stats.probability)
tmp_prob_rate = i;
}
}
@@ -240,14 +242,14 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (ndx < 0)
continue;
- mi->r[ndx].attempts += ar[i].count;
+ mi->r[ndx].stats.attempts += ar[i].count;
if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0))
- mi->r[ndx].success += success;
+ mi->r[ndx].stats.success += success;
}
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0))
- mi->sample_count++;
+ mi->sample_packets++;
if (mi->sample_deferred > 0)
mi->sample_deferred--;
@@ -265,7 +267,7 @@ minstrel_get_retry_count(struct minstrel_rate *mr,
unsigned int retry = mr->adjusted_retry_count;
if (info->control.use_rts)
- retry = max(2U, min(mr->retry_count_rtscts, retry));
+ retry = max(2U, min(mr->stats.retry_count_rtscts, retry));
else if (info->control.use_cts_prot)
retry = max(2U, min(mr->retry_count_cts, retry));
return retry;
@@ -317,15 +319,15 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
sampling_ratio = mp->lookaround_rate;
/* increase sum packet counter */
- mi->packet_count++;
+ mi->total_packets++;
#ifdef CONFIG_MAC80211_DEBUGFS
if (mp->fixed_rate_idx != -1)
return;
#endif
- delta = (mi->packet_count * sampling_ratio / 100) -
- (mi->sample_count + mi->sample_deferred / 2);
+ delta = (mi->total_packets * sampling_ratio / 100) -
+ (mi->sample_packets + mi->sample_deferred / 2);
/* delta < 0: no sampling required */
prev_sample = mi->prev_sample;
@@ -333,10 +335,10 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
if (delta < 0 || (!mrr_capable && prev_sample))
return;
- if (mi->packet_count >= 10000) {
+ if (mi->total_packets >= 10000) {
mi->sample_deferred = 0;
- mi->sample_count = 0;
- mi->packet_count = 0;
+ mi->sample_packets = 0;
+ mi->total_packets = 0;
} else if (delta > mi->n_rates * 2) {
/* With multi-rate retry, not every planned sample
* attempt actually gets used, due to the way the retry
@@ -347,7 +349,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
* starts getting worse, minstrel would start bursting
* out lots of sampling frames, which would result
* in a large throughput loss. */
- mi->sample_count += (delta - mi->n_rates * 2);
+ mi->sample_packets += (delta - mi->n_rates * 2);
}
/* get next random rate sample */
@@ -361,7 +363,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
*/
if (mrr_capable &&
msr->perfect_tx_time > mr->perfect_tx_time &&
- msr->sample_skipped < 20) {
+ msr->stats.sample_skipped < 20) {
/* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
* packets that have the sampling rate deferred to the
* second MRR stage. Increase the sample counter only
@@ -375,7 +377,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
		if (!msr->sample_limit)
return;
- mi->sample_count++;
+ mi->sample_packets++;
if (msr->sample_limit > 0)
msr->sample_limit--;
}
@@ -384,7 +386,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
* has a probability of >95%, we shouldn't be attempting
* to use it, as this only wastes precious airtime */
if (!mrr_capable &&
- (mi->r[ndx].probability > MINSTREL_FRAC(95, 100)))
+ (mi->r[ndx].stats.probability > MINSTREL_FRAC(95, 100)))
return;
mi->prev_sample = true;
@@ -459,6 +461,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
for (i = 0; i < sband->n_bitrates; i++) {
struct minstrel_rate *mr = &mi->r[n];
+ struct minstrel_rate_stats *mrs = &mi->r[n].stats;
unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0;
unsigned int tx_time_single;
unsigned int cw = mp->cw_min;
@@ -471,6 +474,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
n++;
memset(mr, 0, sizeof(*mr));
+ memset(mrs, 0, sizeof(*mrs));
mr->rix = i;
shift = ieee80211_chandef_get_shift(chandef);
@@ -482,9 +486,9 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
/* calculate maximum number of retransmissions before
* fallback (based on maximum segment size) */
mr->sample_limit = -1;
- mr->retry_count = 1;
+ mrs->retry_count = 1;
mr->retry_count_cts = 1;
- mr->retry_count_rtscts = 1;
+ mrs->retry_count_rtscts = 1;
tx_time = mr->perfect_tx_time + mi->sp_ack_dur;
do {
/* add one retransmission */
@@ -501,13 +505,13 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
(mr->retry_count_cts < mp->max_retry))
mr->retry_count_cts++;
if ((tx_time_rtscts < mp->segment_size) &&
- (mr->retry_count_rtscts < mp->max_retry))
- mr->retry_count_rtscts++;
+ (mrs->retry_count_rtscts < mp->max_retry))
+ mrs->retry_count_rtscts++;
} while ((tx_time < mp->segment_size) &&
- (++mr->retry_count < mp->max_retry));
- mr->adjusted_retry_count = mr->retry_count;
+ (++mr->stats.retry_count < mp->max_retry));
+ mr->adjusted_retry_count = mrs->retry_count;
if (!(sband->bitrates[i].flags & IEEE80211_RATE_ERP_G))
- mr->retry_count_cts = mr->retry_count;
+ mr->retry_count_cts = mrs->retry_count;
}
for (i = n; i < sband->n_bitrates; i++) {
@@ -665,7 +669,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
/* convert pkt per sec in kbps (1200 is the average pkt size used for
* computing cur_tp
*/
- return MINSTREL_TRUNC(mi->r[idx].cur_tp) * 1200 * 8 / 1024;
+ return MINSTREL_TRUNC(mi->r[idx].stats.cur_tp) * 1200 * 8 / 1024;
}
const struct rate_control_ops mac80211_minstrel = {
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 046d1bd598a8..97eca86a4af0 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -31,6 +31,27 @@ minstrel_ewma(int old, int new, int weight)
return (new * (EWMA_DIV - weight) + old * weight) / EWMA_DIV;
}
+struct minstrel_rate_stats {
+ /* current / last sampling period attempts/success counters */
+ unsigned int attempts, last_attempts;
+ unsigned int success, last_success;
+
+ /* total attempts/success counters */
+ u64 att_hist, succ_hist;
+
+ /* current throughput */
+ unsigned int cur_tp;
+
+ /* packet delivery probabilities */
+ unsigned int cur_prob, probability;
+
+ /* maximum retry counts */
+ unsigned int retry_count;
+ unsigned int retry_count_rtscts;
+
+ u8 sample_skipped;
+ bool retry_updated;
+};
struct minstrel_rate {
int bitrate;
@@ -40,26 +61,10 @@ struct minstrel_rate {
unsigned int ack_time;
int sample_limit;
- unsigned int retry_count;
unsigned int retry_count_cts;
- unsigned int retry_count_rtscts;
unsigned int adjusted_retry_count;
- u32 success;
- u32 attempts;
- u32 last_attempts;
- u32 last_success;
- u8 sample_skipped;
-
- /* parts per thousand */
- u32 cur_prob;
- u32 probability;
-
- /* per-rate throughput */
- u32 cur_tp;
-
- u64 succ_hist;
- u64 att_hist;
+ struct minstrel_rate_stats stats;
};
struct minstrel_sta_info {
@@ -73,8 +78,8 @@ struct minstrel_sta_info {
u8 max_tp_rate[MAX_THR_RATES];
u8 max_prob_rate;
- unsigned int packet_count;
- unsigned int sample_count;
+ unsigned int total_packets;
+ unsigned int sample_packets;
int sample_deferred;
unsigned int sample_row;
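
The counters that moved into struct minstrel_rate_stats are still smoothed by the minstrel_ewma() helper visible at the top of this header; with EWMA_LEVEL = 96 out of EWMA_DIV = 128 (the values in this version), every update keeps 75% of the history. One update step, reproduced as runnable userspace C with probabilities in parts per thousand:

    #include <stdio.h>

    #define EWMA_LEVEL 96	/* weight of the history, out of EWMA_DIV */
    #define EWMA_DIV 128

    static int minstrel_ewma(int old, int new, int weight)
    {
    	return (new * (EWMA_DIV - weight) + old * weight) / EWMA_DIV;
    }

    int main(void)
    {
    	int probability = 900;	/* smoothed estimate, 90.0% */
    	int cur_prob = 500;	/* this period measured 50.0% */

    	probability = minstrel_ewma(probability, cur_prob, EWMA_LEVEL);
    	printf("new smoothed probability: %d/1000\n", probability); /* 800 */
    	return 0;
    }
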
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index fd0b9ca1570e..edde723f9f00 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -72,6 +72,7 @@ minstrel_stats_open(struct inode *inode, struct file *file)
"this succ/attempt success attempts\n");
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
+ struct minstrel_rate_stats *mrs = &mi->r[i].stats;
*(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' ';
*(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' ';
@@ -81,24 +82,24 @@ minstrel_stats_open(struct inode *inode, struct file *file)
p += sprintf(p, "%3u%s", mr->bitrate / 2,
(mr->bitrate & 1 ? ".5" : " "));
- tp = MINSTREL_TRUNC(mr->cur_tp / 10);
- prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
- eprob = MINSTREL_TRUNC(mr->probability * 1000);
+ tp = MINSTREL_TRUNC(mrs->cur_tp / 10);
+ prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
+ eprob = MINSTREL_TRUNC(mrs->probability * 1000);
p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u "
" %3u(%3u) %8llu %8llu\n",
tp / 10, tp % 10,
eprob / 10, eprob % 10,
prob / 10, prob % 10,
- mr->last_success,
- mr->last_attempts,
- (unsigned long long)mr->succ_hist,
- (unsigned long long)mr->att_hist);
+ mrs->last_success,
+ mrs->last_attempts,
+ (unsigned long long)mrs->succ_hist,
+ (unsigned long long)mrs->att_hist);
}
p += sprintf(p, "\nTotal packet count:: ideal %d "
"lookaround %d\n\n",
- mi->packet_count - mi->sample_count,
- mi->sample_count);
+ mi->total_packets - mi->sample_packets,
+ mi->sample_packets);
ms->len = p - ms->buf;
return 0;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 85c1e74b7714..df90ce2db00c 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -135,7 +135,7 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
- return GROUP_IDX((rate->idx / 8) + 1,
+ return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
!!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
!!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}
@@ -233,12 +233,151 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
}
/*
+ * Find & sort topmost throughput rates
+ *
+ * If multiple rates provide equal throughput, the sorting is based on their
+ * current success probability. Higher success probability is preferred among
+ * MCS groups; CCK rates do not allow aggregation and therefore sort last.
+ */
+static void
+minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index,
+ u8 *tp_list)
+{
+ int cur_group, cur_idx, cur_thr, cur_prob;
+ int tmp_group, tmp_idx, tmp_thr, tmp_prob;
+ int j = MAX_THR_RATES;
+
+ cur_group = index / MCS_GROUP_RATES;
+ cur_idx = index % MCS_GROUP_RATES;
+ cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp;
+ cur_prob = mi->groups[cur_group].rates[cur_idx].probability;
+
+ tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
+ tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
+ tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
+
+ while (j > 0 && (cur_thr > tmp_thr ||
+ (cur_thr == tmp_thr && cur_prob > tmp_prob))) {
+ j--;
+ tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
+ tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
+ tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
+ }
+
+ if (j < MAX_THR_RATES - 1) {
+ memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
+ (MAX_THR_RATES - (j + 1))));
+ }
+ if (j < MAX_THR_RATES)
+ tp_list[j] = index;
+}
+
+/*
+ * Find and set the topmost probability rate per sta and per group
+ */
+static void
+minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u8 index)
+{
+ struct minstrel_mcs_group_data *mg;
+ struct minstrel_rate_stats *mr;
+ int tmp_group, tmp_idx, tmp_tp, tmp_prob, max_tp_group;
+
+ mg = &mi->groups[index / MCS_GROUP_RATES];
+ mr = &mg->rates[index % MCS_GROUP_RATES];
+
+ tmp_group = mi->max_prob_rate / MCS_GROUP_RATES;
+ tmp_idx = mi->max_prob_rate % MCS_GROUP_RATES;
+ tmp_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+ tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
+
+	/* If max_tp_rate[0] is from an MCS group, select max_prob_rate from
+	 * the MCS groups as well, since CCK_GROUP rates do not allow aggregation */
+ max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES;
+	if ((index / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) &&
+ (max_tp_group != MINSTREL_CCK_GROUP))
+ return;
+
+ if (mr->probability > MINSTREL_FRAC(75, 100)) {
+ if (mr->cur_tp > tmp_tp)
+ mi->max_prob_rate = index;
+ if (mr->cur_tp > mg->rates[mg->max_group_prob_rate].cur_tp)
+ mg->max_group_prob_rate = index;
+ } else {
+ if (mr->probability > tmp_prob)
+ mi->max_prob_rate = index;
+ if (mr->probability > mg->rates[mg->max_group_prob_rate].probability)
+ mg->max_group_prob_rate = index;
+ }
+}
+
+
+/*
+ * Assign the new rate set per sta and use CCK rates only if the fastest
+ * rate (max_tp_rate[0]) is from the CCK group. This prevents sorted rate
+ * sets in which MCS and CCK rates are mixed, because CCK rates cannot
+ * use aggregation.
+ */
+static void
+minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
+ u8 tmp_mcs_tp_rate[MAX_THR_RATES],
+ u8 tmp_cck_tp_rate[MAX_THR_RATES])
+{
+ unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp;
+ int i;
+
+ tmp_group = tmp_cck_tp_rate[0] / MCS_GROUP_RATES;
+ tmp_idx = tmp_cck_tp_rate[0] % MCS_GROUP_RATES;
+ tmp_cck_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+
+ tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
+ tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
+ tmp_mcs_tp = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
+
+ if (tmp_cck_tp > tmp_mcs_tp) {
+ for (i = 0; i < MAX_THR_RATES; i++) {
+ minstrel_ht_sort_best_tp_rates(mi, tmp_cck_tp_rate[i],
+ tmp_mcs_tp_rate);
+ }
+ }
+}
+
+/*
+ * Try to increase robustness of the max_prob rate by decreasing the
+ * number of streams if possible.
+ */
+static inline void
+minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
+{
+ struct minstrel_mcs_group_data *mg;
+ struct minstrel_rate_stats *mr;
+ int tmp_max_streams, group;
+ int tmp_tp = 0;
+
+ tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
+ MCS_GROUP_RATES].streams;
+ for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
+ mg = &mi->groups[group];
+ if (!mg->supported || group == MINSTREL_CCK_GROUP)
+ continue;
+ mr = minstrel_get_ratestats(mi, mg->max_group_prob_rate);
+ if (tmp_tp < mr->cur_tp &&
+ (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
+ mi->max_prob_rate = mg->max_group_prob_rate;
+ tmp_tp = mr->cur_tp;
+ }
+ }
+}
+
+/*
* Update rate statistics and select new primary rates
*
* Rules for rate selection:
* - max_prob_rate must use only one stream, as a tradeoff between delivery
* probability and throughput during strong fluctuations
- * - as long as the max prob rate has a probability of more than 3/4, pick
+ * - as long as the max prob rate has a probability of more than 75%, pick
 * higher throughput rates, even if the probability is a bit lower
*/
static void
@@ -246,9 +385,9 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
struct minstrel_mcs_group_data *mg;
struct minstrel_rate_stats *mr;
- int cur_prob, cur_prob_tp, cur_tp, cur_tp2;
- int group, i, index;
- bool mi_rates_valid = false;
+ int group, i, j;
+ u8 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
+ u8 tmp_cck_tp_rate[MAX_THR_RATES], index;
if (mi->ampdu_packets > 0) {
mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
@@ -260,13 +399,14 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
mi->sample_slow = 0;
mi->sample_count = 0;
- for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
- bool mg_rates_valid = false;
+ /* Initialize global rate indexes */
+ for (j = 0; j < MAX_THR_RATES; j++) {
+ tmp_mcs_tp_rate[j] = 0;
+ tmp_cck_tp_rate[j] = 0;
+ }
- cur_prob = 0;
- cur_prob_tp = 0;
- cur_tp = 0;
- cur_tp2 = 0;
+ /* Find best rate sets within all MCS groups */
+ for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
mg = &mi->groups[group];
if (!mg->supported)
@@ -274,24 +414,16 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
mi->sample_count++;
+ /* (Re)initialize group rate indexes */
+ for (j = 0; j < MAX_THR_RATES; j++)
+ tmp_group_tp_rate[j] = group;
+
for (i = 0; i < MCS_GROUP_RATES; i++) {
if (!(mg->supported & BIT(i)))
continue;
index = MCS_GROUP_RATES * group + i;
- /* initialize rates selections starting indexes */
- if (!mg_rates_valid) {
- mg->max_tp_rate = mg->max_tp_rate2 =
- mg->max_prob_rate = i;
- if (!mi_rates_valid) {
- mi->max_tp_rate = mi->max_tp_rate2 =
- mi->max_prob_rate = index;
- mi_rates_valid = true;
- }
- mg_rates_valid = true;
- }
-
mr = &mg->rates[i];
mr->retry_updated = false;
minstrel_calc_rate_ewma(mr);
@@ -300,82 +432,47 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
if (!mr->cur_tp)
continue;
- if ((mr->cur_tp > cur_prob_tp && mr->probability >
- MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) {
- mg->max_prob_rate = index;
- cur_prob = mr->probability;
- cur_prob_tp = mr->cur_tp;
- }
-
- if (mr->cur_tp > cur_tp) {
- swap(index, mg->max_tp_rate);
- cur_tp = mr->cur_tp;
- mr = minstrel_get_ratestats(mi, index);
- }
-
- if (index >= mg->max_tp_rate)
- continue;
-
- if (mr->cur_tp > cur_tp2) {
- mg->max_tp_rate2 = index;
- cur_tp2 = mr->cur_tp;
+ /* Find max throughput rate set */
+ if (group != MINSTREL_CCK_GROUP) {
+ minstrel_ht_sort_best_tp_rates(mi, index,
+ tmp_mcs_tp_rate);
+ } else {
+ minstrel_ht_sort_best_tp_rates(mi, index,
+ tmp_cck_tp_rate);
}
- }
- }
- /* try to sample all available rates during each interval */
- mi->sample_count *= 8;
+ /* Find max throughput rate set within a group */
+ minstrel_ht_sort_best_tp_rates(mi, index,
+ tmp_group_tp_rate);
- cur_prob = 0;
- cur_prob_tp = 0;
- cur_tp = 0;
- cur_tp2 = 0;
- for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
- mg = &mi->groups[group];
- if (!mg->supported)
- continue;
-
- mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
- if (cur_tp < mr->cur_tp) {
- mi->max_tp_rate2 = mi->max_tp_rate;
- cur_tp2 = cur_tp;
- mi->max_tp_rate = mg->max_tp_rate;
- cur_tp = mr->cur_tp;
- mi->max_prob_streams = minstrel_mcs_groups[group].streams - 1;
+ /* Find max probability rate per group and global */
+ minstrel_ht_set_best_prob_rate(mi, index);
}
- mr = minstrel_get_ratestats(mi, mg->max_tp_rate2);
- if (cur_tp2 < mr->cur_tp) {
- mi->max_tp_rate2 = mg->max_tp_rate2;
- cur_tp2 = mr->cur_tp;
- }
+ memcpy(mg->max_group_tp_rate, tmp_group_tp_rate,
+ sizeof(mg->max_group_tp_rate));
}
- if (mi->max_prob_streams < 1)
- mi->max_prob_streams = 1;
+ /* Assign new rate set per sta */
+ minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate, tmp_cck_tp_rate);
+ memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate));
- for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
- mg = &mi->groups[group];
- if (!mg->supported)
- continue;
- mr = minstrel_get_ratestats(mi, mg->max_prob_rate);
- if (cur_prob_tp < mr->cur_tp &&
- minstrel_mcs_groups[group].streams <= mi->max_prob_streams) {
- mi->max_prob_rate = mg->max_prob_rate;
- cur_prob = mr->cur_prob;
- cur_prob_tp = mr->cur_tp;
- }
- }
+ /* Try to increase robustness of max_prob_rate */
+ minstrel_ht_prob_rate_reduce_streams(mi);
+
+ /* try to sample all available rates during each interval */
+ mi->sample_count *= 8;
#ifdef CONFIG_MAC80211_DEBUGFS
/* use fixed index if set */
if (mp->fixed_rate_idx != -1) {
- mi->max_tp_rate = mp->fixed_rate_idx;
- mi->max_tp_rate2 = mp->fixed_rate_idx;
+ for (i = 0; i < MAX_THR_RATES; i++)
+ mi->max_tp_rate[i] = mp->fixed_rate_idx;
mi->max_prob_rate = mp->fixed_rate_idx;
}
#endif
+ /* Reset update timer */
mi->stats_update = jiffies;
}
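
Condensed, the rewritten update path above does the following (comment-only outline of the code in this hunk):

    /*
     * minstrel_ht_update_stats(), after this patch:
     *   1. reset tmp_mcs_tp_rate[] and tmp_cck_tp_rate[] to rate index 0
     *   2. for each supported group: update per-rate EWMA statistics,
     *      sort each rate into the MCS or CCK top list and into the
     *      per-group list, and track the best-probability rate
     *   3. merge the CCK and MCS lists and copy the winner into
     *      mi->max_tp_rate[]
     *   4. try to make max_prob_rate more robust by reducing streams
     */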
@@ -420,8 +517,7 @@ minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
}
static void
-minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
- bool primary)
+minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u8 *idx, bool primary)
{
int group, orig_group;
@@ -437,9 +533,9 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
continue;
if (primary)
- *idx = mi->groups[group].max_tp_rate;
+ *idx = mi->groups[group].max_group_tp_rate[0];
else
- *idx = mi->groups[group].max_tp_rate2;
+ *idx = mi->groups[group].max_group_tp_rate[1];
break;
}
}
@@ -524,19 +620,19 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
* check for sudden death of spatial multiplexing,
* downgrade to a lower number of streams if necessary.
*/
- rate = minstrel_get_ratestats(mi, mi->max_tp_rate);
+ rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
if (rate->attempts > 30 &&
MINSTREL_FRAC(rate->success, rate->attempts) <
MINSTREL_FRAC(20, 100)) {
- minstrel_downgrade_rate(mi, &mi->max_tp_rate, true);
+ minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
update = true;
}
- rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2);
+ rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
if (rate2->attempts > 30 &&
MINSTREL_FRAC(rate2->success, rate2->attempts) <
MINSTREL_FRAC(20, 100)) {
- minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false);
+ minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
update = true;
}
@@ -661,12 +757,12 @@ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
if (!rates)
return;
- /* Start with max_tp_rate */
- minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate);
+ /* Start with max_tp_rate[0] */
+ minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);
if (mp->hw->max_rates >= 3) {
- /* At least 3 tx rates supported, use max_tp_rate2 next */
- minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate2);
+ /* At least 3 tx rates supported, use max_tp_rate[1] next */
+ minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[1]);
}
if (mp->hw->max_rates >= 2) {
@@ -691,7 +787,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
struct minstrel_rate_stats *mr;
struct minstrel_mcs_group_data *mg;
- unsigned int sample_dur, sample_group;
+ unsigned int sample_dur, sample_group, cur_max_tp_streams;
int sample_idx = 0;
if (mi->sample_wait > 0) {
@@ -718,8 +814,8 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
* to the frame. Hence, don't use sampling for the currently
* used rates.
*/
- if (sample_idx == mi->max_tp_rate ||
- sample_idx == mi->max_tp_rate2 ||
+ if (sample_idx == mi->max_tp_rate[0] ||
+ sample_idx == mi->max_tp_rate[1] ||
sample_idx == mi->max_prob_rate)
return -1;
@@ -734,9 +830,12 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
* Make sure that lower rates get sampled only occasionally,
* if the link is working perfectly.
*/
+
+ cur_max_tp_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
+ MCS_GROUP_RATES].streams;
sample_dur = minstrel_get_duration(sample_idx);
- if (sample_dur >= minstrel_get_duration(mi->max_tp_rate2) &&
- (mi->max_prob_streams <
+ if (sample_dur >= minstrel_get_duration(mi->max_tp_rate[1]) &&
+ (cur_max_tp_streams - 1 <
minstrel_mcs_groups[sample_group].streams ||
sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
if (mr->sample_skipped < 20)
@@ -1041,8 +1140,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
if (!msp->is_ht)
return mac80211_minstrel.get_expected_throughput(priv_sta);
- i = mi->max_tp_rate / MCS_GROUP_RATES;
- j = mi->max_tp_rate % MCS_GROUP_RATES;
+ i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
+ j = mi->max_tp_rate[0] % MCS_GROUP_RATES;
 /* convert cur_tp from packets per second to kbps */
return mi->groups[i].rates[j].cur_tp * AVG_PKT_SIZE * 8 / 1024;
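
The return statement above is plain unit conversion: cur_tp is in packets per second, so multiplying by the average packet size in bytes and by 8 gives bits per second, and dividing by 1024 gives kbps. A worked sketch (assuming an average packet size of 1200 bytes, the value minstrel_ht uses for AVG_PKT_SIZE):

    /* e.g. 100 pkt/s * 1200 B/pkt * 8 bit/B / 1024 = 937 kbps */
    unsigned int tp_kbps = cur_tp * AVG_PKT_SIZE * 8 / 1024;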
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index d655586773ac..01570e0e014b 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -26,28 +26,6 @@ struct mcs_group {
extern const struct mcs_group minstrel_mcs_groups[];
-struct minstrel_rate_stats {
- /* current / last sampling period attempts/success counters */
- unsigned int attempts, last_attempts;
- unsigned int success, last_success;
-
- /* total attempts/success counters */
- u64 att_hist, succ_hist;
-
- /* current throughput */
- unsigned int cur_tp;
-
- /* packet delivery probabilities */
- unsigned int cur_prob, probability;
-
- /* maximum retry counts */
- unsigned int retry_count;
- unsigned int retry_count_rtscts;
-
- bool retry_updated;
- u8 sample_skipped;
-};
-
struct minstrel_mcs_group_data {
u8 index;
u8 column;
@@ -55,10 +33,9 @@ struct minstrel_mcs_group_data {
/* bitfield of supported MCS rates of this group */
u8 supported;
- /* selected primary rates */
- unsigned int max_tp_rate;
- unsigned int max_tp_rate2;
- unsigned int max_prob_rate;
+ /* sorted rate set within a MCS group*/
+ u8 max_group_tp_rate[MAX_THR_RATES];
+ u8 max_group_prob_rate;
/* MCS rate statistics */
struct minstrel_rate_stats rates[MCS_GROUP_RATES];
@@ -74,15 +51,9 @@ struct minstrel_ht_sta {
/* ampdu length (EWMA) */
unsigned int avg_ampdu_len;
- /* best throughput rate */
- unsigned int max_tp_rate;
-
- /* second best throughput rate */
- unsigned int max_tp_rate2;
-
- /* best probability rate */
- unsigned int max_prob_rate;
- unsigned int max_prob_streams;
+ /* overall sorted rate set */
+ u8 max_tp_rate[MAX_THR_RATES];
+ u8 max_prob_rate;
/* time of last status update */
unsigned long stats_update;
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 3e7d793de0c3..a72ad46f2a04 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -46,8 +46,10 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
else
p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);
- *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
- *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
+ *(p++) = (idx == mi->max_tp_rate[0]) ? 'A' : ' ';
+ *(p++) = (idx == mi->max_tp_rate[1]) ? 'B' : ' ';
+ *(p++) = (idx == mi->max_tp_rate[2]) ? 'C' : ' ';
+ *(p++) = (idx == mi->max_tp_rate[3]) ? 'D' : ' ';
*(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
if (i == max_mcs) {
@@ -100,8 +102,8 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
file->private_data = ms;
p = ms->buf;
- p += sprintf(p, "type rate throughput ewma prob this prob "
- "retry this succ/attempt success attempts\n");
+ p += sprintf(p, "type rate throughput ewma prob "
+ "this prob retry this succ/attempt success attempts\n");
p = minstrel_ht_stats_dump(mi, max_mcs, p);
for (i = 0; i < max_mcs; i++)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index bd2c9b22c945..b04ca4049c95 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3,6 +3,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -835,6 +836,16 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
spin_lock(&tid_agg_rx->reorder_lock);
+ /*
+ * Offloaded BA sessions have no known starting sequence number, so pick
+ * one from the first frame received for this TID after the BA session
+ * was started.
+ */
+ if (unlikely(tid_agg_rx->auto_seq)) {
+ tid_agg_rx->auto_seq = false;
+ tid_agg_rx->ssn = mpdu_seq_num;
+ tid_agg_rx->head_seq_num = mpdu_seq_num;
+ }
+
buf_size = tid_agg_rx->buf_size;
head_seq_num = tid_agg_rx->head_seq_num;
@@ -2725,7 +2736,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
sig = status->signal;
if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
- rx->skb->data, rx->skb->len, 0, GFP_ATOMIC)) {
+ rx->skb->data, rx->skb->len, 0)) {
if (rx->sta)
rx->sta->rx_packets++;
dev_kfree_skb(rx->skb);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index a0a938145dcc..af0d094b2f2f 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -6,6 +6,7 @@
* Copyright 2005, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1094,7 +1095,7 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
if (rcu_access_pointer(local->sched_scan_sdata)) {
ret = drv_sched_scan_stop(local, sdata);
if (!ret)
- rcu_assign_pointer(local->sched_scan_sdata, NULL);
+ RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
}
out:
mutex_unlock(&local->mtx);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index a1e433b88c66..de494df3bab8 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1,6 +1,7 @@
/*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1182,7 +1183,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb;
int size = sizeof(*nullfunc);
__le16 fc;
- bool qos = test_sta_flag(sta, WLAN_STA_WME);
+ bool qos = sta->sta.wme;
struct ieee80211_tx_info *info;
struct ieee80211_chanctx_conf *chanctx_conf;
@@ -1837,7 +1838,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_AUTHORIZED);
if (test_sta_flag(sta, WLAN_STA_SHORT_PREAMBLE))
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_SHORT_PREAMBLE);
- if (test_sta_flag(sta, WLAN_STA_WME))
+ if (sta->sta.wme)
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_WME);
if (test_sta_flag(sta, WLAN_STA_MFP))
sinfo->sta_flags.set |= BIT(NL80211_STA_FLAG_MFP);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index d411bcc8ef08..42f68cb8957e 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -1,5 +1,6 @@
/*
* Copyright 2002-2005, Devicescape Software, Inc.
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -31,7 +32,6 @@
* when virtual port control is not in use.
* @WLAN_STA_SHORT_PREAMBLE: Station is capable of receiving short-preamble
* frames.
- * @WLAN_STA_WME: Station is a QoS-STA.
* @WLAN_STA_WDS: Station is one of our WDS peers.
* @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
* IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
@@ -69,7 +69,6 @@ enum ieee80211_sta_info_flags {
WLAN_STA_PS_STA,
WLAN_STA_AUTHORIZED,
WLAN_STA_SHORT_PREAMBLE,
- WLAN_STA_WME,
WLAN_STA_WDS,
WLAN_STA_CLEAR_PS_FILT,
WLAN_STA_MFP,
@@ -169,6 +168,8 @@ struct tid_ampdu_tx {
* @dialog_token: dialog token for aggregation session
* @rcu_head: RCU head used for freeing this struct
* @reorder_lock: serializes access to reorder buffer, see below.
+ * @auto_seq: used for offloaded BA sessions to automatically pick
+ * head_seq_num and ssn.
*
* This structure's lifetime is managed by RCU, assignments to
* the array holding it must hold the aggregation mutex.
@@ -192,6 +193,7 @@ struct tid_ampdu_rx {
u16 buf_size;
u16 timeout;
u8 dialog_token;
+ bool auto_seq;
};
/**
@@ -448,6 +450,9 @@ struct sta_info {
enum ieee80211_smps_mode known_smps_mode;
const struct ieee80211_cipher_scheme *cipher_scheme;
+ /* TDLS timeout data */
+ unsigned long last_tdls_pkt_time;
+
/* keep last! */
struct ieee80211_sta sta;
};
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index aa06dcad336e..89290e33dafe 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -3,6 +3,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2008-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -537,6 +538,8 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
* - current throughput (higher value for higher tpt)?
*/
#define STA_LOST_PKT_THRESHOLD 50
+#define STA_LOST_TDLS_PKT_THRESHOLD 10
+#define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */
static void ieee80211_lost_packet(struct sta_info *sta, struct sk_buff *skb)
{
@@ -547,7 +550,20 @@ static void ieee80211_lost_packet(struct sta_info *sta, struct sk_buff *skb)
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
- if (++sta->lost_packets < STA_LOST_PKT_THRESHOLD)
+ sta->lost_packets++;
+ if (!sta->sta.tdls && sta->lost_packets < STA_LOST_PKT_THRESHOLD)
+ return;
+
+ /*
+ * If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD
+ * of the last packets were lost, and that no ACK was received within
+ * the last STA_LOST_TDLS_PKT_TIME (ten seconds), before triggering
+ * the CQM packet-loss mechanism.
+ */
+ if (sta->sta.tdls &&
+ (sta->lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
+ time_before(jiffies,
+ sta->last_tdls_pkt_time + STA_LOST_TDLS_PKT_TIME)))
return;
cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
@@ -694,6 +710,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_STAT_ACK) {
if (sta->lost_packets)
sta->lost_packets = 0;
+
+ /* Track when last TDLS packet was ACKed */
+ if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
+ sta->last_tdls_pkt_time = jiffies;
} else {
ieee80211_lost_packet(sta, skb);
}
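
Because last_tdls_pkt_time is sampled from jiffies, the staleness check above uses the kernel's wrap-safe time comparison helpers rather than a raw subtraction. A minimal sketch of the same test (hypothetical variable names):

    #include <linux/jiffies.h>

    /* report loss only if no ACK was seen for at least 10 seconds */
    if (time_before(jiffies, last_ack + 10 * HZ))
            return; /* an ACK arrived recently, hold off */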
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 1b21050be174..4ea25dec0698 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -3,6 +3,7 @@
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2014, Intel Corporation
+ * Copyright 2014 Intel Mobile Communications GmbH
*
* This file is GPLv2 as found in COPYING.
*/
@@ -316,8 +317,7 @@ ieee80211_tdls_add_setup_cfm_ies(struct ieee80211_sub_if_data *sdata,
}
/* add the QoS param IE if both the peer and we support it */
- if (local->hw.queues >= IEEE80211_NUM_ACS &&
- test_sta_flag(sta, WLAN_STA_WME))
+ if (local->hw.queues >= IEEE80211_NUM_ACS && sta->sta.wme)
ieee80211_tdls_add_wmm_param_ie(sdata, skb);
/* add any custom IEs that go before HT operation */
@@ -412,6 +412,9 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
tf->ether_type = cpu_to_be16(ETH_P_TDLS);
tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
+ /* network header is after the ethernet header */
+ skb_set_network_header(skb, ETH_HLEN);
+
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
tf->category = WLAN_CATEGORY_TDLS;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 02ac535d1274..38fae7ebe984 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -672,13 +672,13 @@ DEFINE_EVENT(local_u32_evt, drv_set_rts_threshold,
);
TRACE_EVENT(drv_set_coverage_class,
- TP_PROTO(struct ieee80211_local *local, u8 value),
+ TP_PROTO(struct ieee80211_local *local, s16 value),
TP_ARGS(local, value),
TP_STRUCT__entry(
LOCAL_ENTRY
- __field(u8, value)
+ __field(s16, value)
),
TP_fast_assign(
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 464106c023d8..900632a250ec 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3,6 +3,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1478,7 +1479,10 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
tail_need = max_t(int, tail_need, 0);
}
- if (skb_cloned(skb))
+ if (skb_cloned(skb) &&
+ (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CLONED_SKBS) ||
+ !skb_clone_writable(skb, ETH_HLEN) ||
+ sdata->crypto_tx_tailroom_needed_cnt))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
else if (head_need || tail_need)
I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -1785,9 +1789,8 @@ static void ieee80211_tx_latency_start_msrmnt(struct ieee80211_local *local,
* @skb: packet to be sent
* @dev: incoming interface
*
- * Returns: 0 on success (and frees skb in this case) or 1 on failure (skb will
- * not be freed, and caller is responsible for either retrying later or freeing
- * skb).
+ * Returns: NETDEV_TX_OK both on success and on failure. On failure skb will
+ * be freed.
*
* This function takes in an Ethernet header and encapsulates it with suitable
* IEEE 802.11 header based on which interface the packet is coming in. The
@@ -1844,7 +1847,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
- wme_sta = test_sta_flag(sta, WLAN_STA_WME);
+ wme_sta = sta->sta.wme;
}
ap_sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
u.ap);
@@ -1957,7 +1960,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (sta) {
authorized = test_sta_flag(sta,
WLAN_STA_AUTHORIZED);
- wme_sta = test_sta_flag(sta, WLAN_STA_WME);
+ wme_sta = sta->sta.wme;
tdls_peer = test_sta_flag(sta,
WLAN_STA_TDLS_PEER);
tdls_auth = test_sta_flag(sta,
@@ -2035,7 +2038,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
sta = sta_info_get(sdata, hdr.addr1);
if (sta) {
authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
- wme_sta = test_sta_flag(sta, WLAN_STA_WME);
+ wme_sta = sta->sta.wme;
}
}
@@ -2069,30 +2072,23 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (unlikely(!multicast && skb->sk &&
skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
- struct sk_buff *orig_skb = skb;
+ struct sk_buff *ack_skb = skb_clone_sk(skb);
- skb = skb_clone(skb, GFP_ATOMIC);
- if (skb) {
+ if (ack_skb) {
unsigned long flags;
int id;
spin_lock_irqsave(&local->ack_status_lock, flags);
- id = idr_alloc(&local->ack_status_frames, orig_skb,
+ id = idr_alloc(&local->ack_status_frames, ack_skb,
1, 0x10000, GFP_ATOMIC);
spin_unlock_irqrestore(&local->ack_status_lock, flags);
if (id >= 0) {
info_id = id;
info_flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- } else if (skb_shared(skb)) {
- kfree_skb(orig_skb);
} else {
- kfree_skb(skb);
- skb = orig_skb;
+ kfree_skb(ack_skb);
}
- } else {
- /* couldn't clone -- lose tx status ... */
- skb = orig_skb;
}
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 725af7a468d2..3c61060a4d2b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3,6 +3,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1014,6 +1015,31 @@ u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
}
elems->pwr_constr_elem = pos;
break;
+ case WLAN_EID_CISCO_VENDOR_SPECIFIC:
+ /* Lots of different options exist, but we only care
+ * about the Dynamic Transmit Power Control element.
+ * First check for the Cisco OUI, then for the DTPC
+ * tag (0x00).
+ */
+ if (elen < 4) {
+ elem_parse_failed = true;
+ break;
+ }
+
+ if (pos[0] != 0x00 || pos[1] != 0x40 ||
+ pos[2] != 0x96 || pos[3] != 0x00)
+ break;
+
+ if (elen != 6) {
+ elem_parse_failed = true;
+ break;
+ }
+
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+
+ elems->cisco_dtpc_elem = pos;
+ break;
case WLAN_EID_TIMEOUT_INTERVAL:
if (elen >= sizeof(struct ieee80211_timeout_interval_ie))
elems->timeout_int = (void *)pos;
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index d51422c778de..3b873989992c 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -1,5 +1,6 @@
/*
* Copyright 2004, Instant802 Networks, Inc.
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -118,7 +119,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP_VLAN:
sta = rcu_dereference(sdata->u.vlan.sta);
if (sta) {
- qos = test_sta_flag(sta, WLAN_STA_WME);
+ qos = sta->sta.wme;
break;
}
case NL80211_IFTYPE_AP:
@@ -145,7 +146,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
if (!sta && ra && !is_multicast_ether_addr(ra)) {
sta = sta_info_get(sdata, ra);
if (sta)
- qos = test_sta_flag(sta, WLAN_STA_WME);
+ qos = sta->sta.wme;
}
rcu_read_unlock();
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index f7d4ca4c46e0..983527a4c1ab 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -64,8 +64,11 @@ ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx)
if (!info->control.hw_key)
tail += IEEE80211_TKIP_ICV_LEN;
- if (WARN_ON(skb_tailroom(skb) < tail ||
- skb_headroom(skb) < IEEE80211_TKIP_IV_LEN))
+ if (WARN(skb_tailroom(skb) < tail ||
+ skb_headroom(skb) < IEEE80211_TKIP_IV_LEN,
+ "mmic: not enough head/tail (%d/%d,%d/%d)\n",
+ skb_headroom(skb), IEEE80211_TKIP_IV_LEN,
+ skb_tailroom(skb), tail))
return TX_DROP;
key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY];
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 7f820a108a9c..a14cf9ede171 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -86,9 +86,8 @@ fail:
static void mac802154_rx_worker(struct work_struct *work)
{
struct rx_work *rw = container_of(work, struct rx_work, work);
- struct sk_buff *skb = rw->skb;
- mac802154_subif_rx(rw->dev, skb, rw->lqi);
+ mac802154_subif_rx(rw->dev, rw->skb, rw->lqi);
kfree(rw);
}
@@ -101,7 +100,7 @@ ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, u8 lqi)
if (!skb)
return;
- work = kzalloc(sizeof(struct rx_work), GFP_ATOMIC);
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return;
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 8124353646ae..fdf4c0e67259 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -89,8 +89,7 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
if (!(priv->phy->channels_supported[page] & (1 << chan))) {
WARN_ON(1);
- kfree_skb(skb);
- return NETDEV_TX_OK;
+ goto err_tx;
}
mac802154_monitors_rx(mac802154_to_priv(&priv->hw), skb);
@@ -103,12 +102,10 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
data[1] = crc >> 8;
}
- if (skb_cow_head(skb, priv->hw.extra_tx_headroom)) {
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
+ if (skb_cow_head(skb, priv->hw.extra_tx_headroom))
+ goto err_tx;
- work = kzalloc(sizeof(struct xmit_work), GFP_ATOMIC);
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
kfree_skb(skb);
return NETDEV_TX_BUSY;
@@ -129,4 +126,8 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
queue_work(priv->dev_workqueue, &work->work);
return NETDEV_TX_OK;
+
+err_tx:
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
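
The two early kfree_skb()/return pairs are folded behind one err_tx label here, the usual kernel single-exit error path. In outline (condition names are illustrative, not the driver's):

    /* sketch of the single-exit pattern introduced above */
    if (!channel_supported)
            goto err_tx;
    if (skb_cow_head(skb, headroom))
            goto err_tx;
    /* ... normal transmit path ... */
    return NETDEV_TX_OK;

    err_tx:
            kfree_skb(skb);
            return NETDEV_TX_OK;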
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
index 547838822d5e..b7961129ce4d 100644
--- a/net/mac802154/wpan.c
+++ b/net/mac802154/wpan.c
@@ -475,8 +475,7 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb,
rc = mac802154_llsec_decrypt(&sdata->sec, skb);
if (rc) {
pr_debug("decryption failed: %i\n", rc);
- kfree_skb(skb);
- return NET_RX_DROP;
+ goto fail;
}
sdata->dev->stats.rx_packets++;
@@ -488,9 +487,12 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb,
default:
pr_warn("ieee802154: bad frame received (type = %d)\n",
mac_cb(skb)->type);
- kfree_skb(skb);
- return NET_RX_DROP;
+ goto fail;
}
+
+fail:
+ kfree_skb(skb);
+ return NET_RX_DROP;
}
static void mac802154_print_addr(const char *name,
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index 6b38d083e1c9..e28ed2ef5b06 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -65,15 +65,9 @@ out:
return segs;
}
-static int mpls_gso_send_check(struct sk_buff *skb)
-{
- return 0;
-}
-
static struct packet_offload mpls_mc_offload = {
.type = cpu_to_be16(ETH_P_MPLS_MC),
.callbacks = {
- .gso_send_check = mpls_gso_send_check,
.gso_segment = mpls_gso_segment,
},
};
@@ -81,7 +75,6 @@ static struct packet_offload mpls_mc_offload = {
static struct packet_offload mpls_uc_offload = {
.type = cpu_to_be16(ETH_P_MPLS_UC),
.callbacks = {
- .gso_send_check = mpls_gso_send_check,
.gso_segment = mpls_gso_segment,
},
};
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 6d77cce481d5..ae5096ab65eb 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -496,6 +496,15 @@ config NFT_LIMIT
This option adds the "limit" expression that you can use to
ratelimit rule matchings.
+config NFT_MASQ
+ depends on NF_TABLES
+ depends on NF_CONNTRACK
+ depends on NF_NAT
+ tristate "Netfilter nf_tables masquerade support"
+ help
+ This option adds the "masquerade" expression that you can use
+ to perform NAT in the masquerade flavour.
+
config NFT_NAT
depends on NF_TABLES
depends on NF_CONNTRACK
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index fad5fdba34e5..a9571be3f791 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -87,6 +87,7 @@ obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o
obj-$(CONFIG_NFT_HASH) += nft_hash.o
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
obj-$(CONFIG_NFT_LOG) += nft_log.o
+obj-$(CONFIG_NFT_MASQ) += nft_masq.o
# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
index 2f7f5c32c6f9..234a8ec82076 100644
--- a/net/netfilter/ipset/Kconfig
+++ b/net/netfilter/ipset/Kconfig
@@ -99,6 +99,15 @@ config IP_SET_HASH_IPPORTNET
To compile it as a module, choose M here. If unsure, say N.
+config IP_SET_HASH_MAC
+ tristate "hash:mac set support"
+ depends on IP_SET
+ help
+ This option adds the hash:mac set type support, by which
+ one can store MAC (Ethernet address) elements in a set.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP_SET_HASH_NETPORTNET
tristate "hash:net,port,net set support"
depends on IP_SET
diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile
index 231f10196cb9..3dbd5e958489 100644
--- a/net/netfilter/ipset/Makefile
+++ b/net/netfilter/ipset/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_IP_SET_HASH_IPMARK) += ip_set_hash_ipmark.o
obj-$(CONFIG_IP_SET_HASH_IPPORT) += ip_set_hash_ipport.o
obj-$(CONFIG_IP_SET_HASH_IPPORTIP) += ip_set_hash_ipportip.o
obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
+obj-$(CONFIG_IP_SET_HASH_MAC) += ip_set_hash_mac.o
obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
obj-$(CONFIG_IP_SET_HASH_NETIFACE) += ip_set_hash_netiface.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index f2c7d83dc23f..6f024a8a1534 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -128,6 +128,8 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
return 0;
if (SET_WITH_COUNTER(set))
ip_set_update_counter(ext_counter(x, set), ext, mext, flags);
+ if (SET_WITH_SKBINFO(set))
+ ip_set_get_skbinfo(ext_skbinfo(x, set), ext, mext, flags);
return 1;
}
@@ -161,6 +163,8 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
ip_set_init_counter(ext_counter(x, set), ext);
if (SET_WITH_COMMENT(set))
ip_set_init_comment(ext_comment(x, set), ext);
+ if (SET_WITH_SKBINFO(set))
+ ip_set_init_skbinfo(ext_skbinfo(x, set), ext);
return 0;
}
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 6f1f9f494808..55b083ec587a 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -27,7 +27,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 1 Counter support added */
-#define IPSET_TYPE_REV_MAX 2 /* Comment support added */
+/* 2 Comment support added */
+#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -112,7 +113,7 @@ bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
{
struct bitmap_ip *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct bitmap_ip_adt_elem e = { };
+ struct bitmap_ip_adt_elem e = { .id = 0 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
u32 ip;
@@ -132,14 +133,17 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
struct bitmap_ip *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
u32 ip = 0, ip_to = 0;
- struct bitmap_ip_adt_elem e = { };
+ struct bitmap_ip_adt_elem e = { .id = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret = 0;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -357,6 +361,9 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 740eabededd9..86104744b00f 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -27,7 +27,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 1 Counter support added */
-#define IPSET_TYPE_REV_MAX 2 /* Comment support added */
+/* 2 Comment support added */
+#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -203,7 +204,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
{
struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct bitmap_ipmac_adt_elem e = {};
+ struct bitmap_ipmac_adt_elem e = { .id = 0 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
u32 ip;
@@ -232,7 +233,7 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct bitmap_ipmac *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct bitmap_ipmac_adt_elem e = {};
+ struct bitmap_ipmac_adt_elem e = { .id = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0;
int ret = 0;
@@ -240,7 +241,10 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -394,6 +398,9 @@ static struct ip_set_type bitmap_ipmac_type = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index cf99676e69f8..005dd36444c3 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -22,7 +22,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 1 Counter support added */
-#define IPSET_TYPE_REV_MAX 2 /* Comment support added */
+/* 2 Comment support added */
+#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -104,7 +105,7 @@ bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
{
struct bitmap_port *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct bitmap_port_adt_elem e = {};
+ struct bitmap_port_adt_elem e = { .id = 0 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
__be16 __port;
u16 port = 0;
@@ -129,7 +130,7 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
{
struct bitmap_port *map = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct bitmap_port_adt_elem e = {};
+ struct bitmap_port_adt_elem e = { .id = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port; /* wraparound */
u16 port_to;
@@ -139,7 +140,10 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -291,6 +295,9 @@ static struct ip_set_type bitmap_port_type = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index ec8114fae50b..26c795e6b57f 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -101,7 +101,7 @@ load_settype(const char *name)
nfnl_unlock(NFNL_SUBSYS_IPSET);
pr_debug("try to load ip_set_%s\n", name);
if (request_module("ip_set_%s", name) < 0) {
- pr_warning("Can't find ip_set type %s\n", name);
+ pr_warn("Can't find ip_set type %s\n", name);
nfnl_lock(NFNL_SUBSYS_IPSET);
return false;
}
@@ -195,20 +195,19 @@ ip_set_type_register(struct ip_set_type *type)
int ret = 0;
if (type->protocol != IPSET_PROTOCOL) {
- pr_warning("ip_set type %s, family %s, revision %u:%u uses "
- "wrong protocol version %u (want %u)\n",
- type->name, family_name(type->family),
- type->revision_min, type->revision_max,
- type->protocol, IPSET_PROTOCOL);
+ pr_warn("ip_set type %s, family %s, revision %u:%u uses wrong protocol version %u (want %u)\n",
+ type->name, family_name(type->family),
+ type->revision_min, type->revision_max,
+ type->protocol, IPSET_PROTOCOL);
return -EINVAL;
}
ip_set_type_lock();
if (find_set_type(type->name, type->family, type->revision_min)) {
/* Duplicate! */
- pr_warning("ip_set type %s, family %s with revision min %u "
- "already registered!\n", type->name,
- family_name(type->family), type->revision_min);
+ pr_warn("ip_set type %s, family %s with revision min %u already registered!\n",
+ type->name, family_name(type->family),
+ type->revision_min);
ret = -EINVAL;
goto unlock;
}
@@ -228,9 +227,9 @@ ip_set_type_unregister(struct ip_set_type *type)
{
ip_set_type_lock();
if (!find_set_type(type->name, type->family, type->revision_min)) {
- pr_warning("ip_set type %s, family %s with revision min %u "
- "not registered\n", type->name,
- family_name(type->family), type->revision_min);
+ pr_warn("ip_set type %s, family %s with revision min %u not registered\n",
+ type->name, family_name(type->family),
+ type->revision_min);
goto unlock;
}
list_del_rcu(&type->list);
@@ -338,6 +337,12 @@ const struct ip_set_ext_type ip_set_extensions[] = {
.len = sizeof(unsigned long),
.align = __alignof__(unsigned long),
},
+ [IPSET_EXT_ID_SKBINFO] = {
+ .type = IPSET_EXT_SKBINFO,
+ .flag = IPSET_FLAG_WITH_SKBINFO,
+ .len = sizeof(struct ip_set_skbinfo),
+ .align = __alignof__(struct ip_set_skbinfo),
+ },
[IPSET_EXT_ID_COMMENT] = {
.type = IPSET_EXT_COMMENT | IPSET_EXT_DESTROY,
.flag = IPSET_FLAG_WITH_COMMENT,
@@ -383,6 +388,7 @@ int
ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
struct ip_set_ext *ext)
{
+ u64 fullmark;
if (tb[IPSET_ATTR_TIMEOUT]) {
if (!(set->extensions & IPSET_EXT_TIMEOUT))
return -IPSET_ERR_TIMEOUT;
@@ -403,7 +409,25 @@ ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
return -IPSET_ERR_COMMENT;
ext->comment = ip_set_comment_uget(tb[IPSET_ATTR_COMMENT]);
}
-
+ if (tb[IPSET_ATTR_SKBMARK]) {
+ if (!(set->extensions & IPSET_EXT_SKBINFO))
+ return -IPSET_ERR_SKBINFO;
+ fullmark = be64_to_cpu(nla_get_be64(tb[IPSET_ATTR_SKBMARK]));
+ ext->skbmark = fullmark >> 32;
+ ext->skbmarkmask = fullmark & 0xffffffff;
+ }
+ if (tb[IPSET_ATTR_SKBPRIO]) {
+ if (!(set->extensions & IPSET_EXT_SKBINFO))
+ return -IPSET_ERR_SKBINFO;
+ ext->skbprio = be32_to_cpu(nla_get_be32(
+ tb[IPSET_ATTR_SKBPRIO]));
+ }
+ if (tb[IPSET_ATTR_SKBQUEUE]) {
+ if (!(set->extensions & IPSET_EXT_SKBINFO))
+ return -IPSET_ERR_SKBINFO;
+ ext->skbqueue = be16_to_cpu(nla_get_be16(
+ tb[IPSET_ATTR_SKBQUEUE]));
+ }
return 0;
}
EXPORT_SYMBOL_GPL(ip_set_get_extensions);
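
The SKBMARK attribute carries both the mark and its mask in one 64-bit netlink value; the unpacking above (>> 32 for the mark, & 0xffffffff for the mask) implies the following packing on the sending side. A minimal sketch (hypothetical helper, mirroring the parse logic):

    #include <stdint.h>

    /* pack mark into the high 32 bits and the mask into the low 32 */
    static uint64_t pack_skbmark(uint32_t mark, uint32_t mask)
    {
            return ((uint64_t)mark << 32) | mask;
    }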
@@ -1398,7 +1422,8 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
struct nlmsghdr *rep, *nlh = nlmsg_hdr(skb);
struct sk_buff *skb2;
struct nlmsgerr *errmsg;
- size_t payload = sizeof(*errmsg) + nlmsg_len(nlh);
+ size_t payload = min(SIZE_MAX,
+ sizeof(*errmsg) + nlmsg_len(nlh));
int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
struct nlattr *cmdattr;
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 61c7fb052802..fee7c64e4dd1 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -565,8 +565,8 @@ retry:
set->name, orig->htable_bits, htable_bits, orig);
if (!htable_bits) {
/* In case we have plenty of memory :-) */
- pr_warning("Cannot increase the hashsize of set %s further\n",
- set->name);
+ pr_warn("Cannot increase the hashsize of set %s further\n",
+ set->name);
return -IPSET_ERR_HASH_FULL;
}
t = ip_set_alloc(sizeof(*t)
@@ -651,8 +651,8 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
if (h->elements >= h->maxelem) {
if (net_ratelimit())
- pr_warning("Set %s is full, maxelem %u reached\n",
- set->name, h->maxelem);
+ pr_warn("Set %s is full, maxelem %u reached\n",
+ set->name, h->maxelem);
return -IPSET_ERR_HASH_FULL;
}
@@ -720,6 +720,8 @@ reuse_slot:
ip_set_init_counter(ext_counter(data, set), ext);
if (SET_WITH_COMMENT(set))
ip_set_init_comment(ext_comment(data, set), ext);
+ if (SET_WITH_SKBINFO(set))
+ ip_set_init_skbinfo(ext_skbinfo(data, set), ext);
out:
rcu_read_unlock_bh();
@@ -797,6 +799,9 @@ mtype_data_match(struct mtype_elem *data, const struct ip_set_ext *ext,
if (SET_WITH_COUNTER(set))
ip_set_update_counter(ext_counter(data, set),
ext, mext, flags);
+ if (SET_WITH_SKBINFO(set))
+ ip_set_get_skbinfo(ext_skbinfo(data, set),
+ ext, mext, flags);
return mtype_do_data_match(data);
}
@@ -998,8 +1003,8 @@ mtype_list(const struct ip_set *set,
nla_put_failure:
nlmsg_trim(skb, incomplete);
if (unlikely(first == cb->args[IPSET_CB_ARG0])) {
- pr_warning("Can't list set %s: one bucket does not fit into "
- "a message. Please report it!\n", set->name);
+ pr_warn("Can't list set %s: one bucket does not fit into a message. Please report it!\n",
+ set->name);
cb->args[IPSET_CB_ARG0] = 0;
return -EMSGSIZE;
}
@@ -1049,8 +1054,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
struct HTYPE *h;
struct htable *t;
+#ifndef IP_SET_PROTO_UNDEF
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
+#endif
#ifdef IP_SET_HASH_WITH_MARKMASK
markmask = 0xffffffff;
@@ -1093,7 +1100,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
if (tb[IPSET_ATTR_MARKMASK]) {
markmask = ntohl(nla_get_u32(tb[IPSET_ATTR_MARKMASK]));
- if ((markmask > 4294967295u) || markmask == 0)
+ if (markmask == 0)
return -IPSET_ERR_INVALID_MARKMASK;
}
#endif
@@ -1132,25 +1139,32 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
rcu_assign_pointer(h->table, t);
set->data = h;
+#ifndef IP_SET_PROTO_UNDEF
if (set->family == NFPROTO_IPV4) {
+#endif
set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)));
+#ifndef IP_SET_PROTO_UNDEF
} else {
set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
set->dsize = ip_set_elem_len(set, tb,
sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)));
}
+#endif
if (tb[IPSET_ATTR_TIMEOUT]) {
set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+#ifndef IP_SET_PROTO_UNDEF
if (set->family == NFPROTO_IPV4)
+#endif
IPSET_TOKEN(HTYPE, 4_gc_init)(set,
IPSET_TOKEN(HTYPE, 4_gc));
+#ifndef IP_SET_PROTO_UNDEF
else
IPSET_TOKEN(HTYPE, 6_gc_init)(set,
IPSET_TOKEN(HTYPE, 6_gc));
+#endif
}
-
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
set->name, jhash_size(t->htable_bits),
t->htable_bits, h->maxelem, set->data, t);
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index dd40607f878e..76959d79e9d1 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -26,7 +26,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 1 Counters support */
/* 2 Comments support */
-#define IPSET_TYPE_REV_MAX 3 /* Forceadd support */
+/* 3 Forceadd support */
+#define IPSET_TYPE_REV_MAX 4 /* skbinfo support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -84,7 +85,7 @@ hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
{
const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ip4_elem e = {};
+ struct hash_ip4_elem e = { 0 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
__be32 ip;
@@ -103,7 +104,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ip4_elem e = {};
+ struct hash_ip4_elem e = { 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip = 0, ip_to = 0, hosts;
int ret = 0;
@@ -111,7 +112,10 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -222,7 +226,7 @@ hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
{
const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ip6_elem e = {};
+ struct hash_ip6_elem e = { { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6);
@@ -239,7 +243,7 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_ip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ip6_elem e = {};
+ struct hash_ip6_elem e = { { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
int ret;
@@ -247,6 +251,9 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL;
@@ -295,6 +302,9 @@ static struct ip_set_type hash_ip_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c
index 4eff0a297254..7abf9788cfa8 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmark.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmark.c
@@ -25,7 +25,8 @@
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
-#define IPSET_TYPE_REV_MAX 1 /* Forceadd support */
+/* 1 Forceadd support */
+#define IPSET_TYPE_REV_MAX 2 /* skbinfo support */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vytas Dauksa <vytas.dauksa@smoothwall.net>");
@@ -113,7 +114,10 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_attr_netorder(tb, IPSET_ATTR_MARK) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -244,6 +248,9 @@ hash_ipmark6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL;
@@ -301,6 +308,9 @@ static struct ip_set_type hash_ipmark_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 7597b82a8b03..dcbcceb9a52f 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -28,7 +28,8 @@
/* 1 SCTP and UDPLITE support added */
/* 2 Counters support added */
/* 3 Comments support added */
-#define IPSET_TYPE_REV_MAX 4 /* Forceadd support added */
+/* 4 Forceadd support added */
+#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -94,7 +95,7 @@ hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipport4_elem e = { };
+ struct hash_ipport4_elem e = { .ip = 0 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
@@ -111,7 +112,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_ipport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipport4_elem e = { };
+ struct hash_ipport4_elem e = { .ip = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip, ip_to = 0, p = 0, port, port_to;
bool with_ports = false;
@@ -122,7 +123,10 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -258,7 +262,7 @@ hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipport6_elem e = { };
+ struct hash_ipport6_elem e = { .ip = { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
@@ -275,7 +279,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_ipport *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipport6_elem e = { };
+ struct hash_ipport6_elem e = { .ip = { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
@@ -287,6 +291,9 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL;
@@ -370,6 +377,9 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
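Note also the initializer change above: struct hash_ipport4_elem e = { } becomes { .ip = 0 } (and { .ip = { .all = { 0 } } } for v6). Both forms zero the element; the designated form is presumably there to placate compilers that warn about empty or partial initializers when the first member is a union. A standalone sketch of the two spellings, with a toy layout standing in for the hash elements:

	/* Toy element: first member is an anonymous union, as above. */
	struct elem {
		union {
			unsigned int ip;
			unsigned char raw[4];
		};
		unsigned short port;
	};

	struct elem a = { };		/* zeroed; some gcc versions warn */
	struct elem b = { .ip = 0 };	/* zeroed; names the union member */
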
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 672655ffd573..7ef93fc887a1 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -28,7 +28,8 @@
/* 1 SCTP and UDPLITE support added */
/* 2 Counters support added */
/* 3 Comments support added */
-#define IPSET_TYPE_REV_MAX 4 /* Forceadd support added */
+/* 4 Forceadd support added */
+#define IPSET_TYPE_REV_MAX 5 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -95,7 +96,7 @@ hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipportip4_elem e = { };
+ struct hash_ipportip4_elem e = { .ip = 0 };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
@@ -113,7 +114,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_ipportip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipportip4_elem e = { };
+ struct hash_ipportip4_elem e = { .ip = 0 };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 ip, ip_to = 0, p = 0, port, port_to;
bool with_ports = false;
@@ -124,7 +125,10 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -265,7 +269,7 @@ hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipportip6_elem e = { };
+ struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
@@ -283,7 +287,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct hash_ipportip *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipportip6_elem e = { };
+ struct hash_ipportip6_elem e = { .ip = { .all = { 0 } } };
struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
u32 port, port_to;
bool with_ports = false;
@@ -295,6 +299,9 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL;
@@ -382,6 +389,9 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 7308d84f9277..b6012ad92781 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -30,7 +30,8 @@
/* 3 nomatch flag support added */
/* 4 Counters support added */
/* 5 Comments support added */
-#define IPSET_TYPE_REV_MAX 6 /* Forceadd support added */
+/* 6 Forceadd support added */
+#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -179,7 +180,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -432,6 +436,9 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL;
@@ -541,6 +548,9 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_mac.c b/net/netfilter/ipset/ip_set_hash_mac.c
new file mode 100644
index 000000000000..65690b52a4d5
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_mac.c
@@ -0,0 +1,173 @@
+/* Copyright (C) 2014 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:mac type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+#define IPSET_TYPE_REV_MIN 0
+#define IPSET_TYPE_REV_MAX 0
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+IP_SET_MODULE_DESC("hash:mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
+MODULE_ALIAS("ip_set_hash:mac");
+
+/* Type specific function prefix */
+#define HTYPE hash_mac
+
+/* Member elements */
+struct hash_mac4_elem {
+ /* Zero valued MAC addresses cannot be stored */
+ union {
+ unsigned char ether[ETH_ALEN];
+ __be32 foo[2];
+ };
+};
+
+/* Common functions */
+
+static inline bool
+hash_mac4_data_equal(const struct hash_mac4_elem *e1,
+ const struct hash_mac4_elem *e2,
+ u32 *multi)
+{
+ return ether_addr_equal(e1->ether, e2->ether);
+}
+
+static inline bool
+hash_mac4_data_list(struct sk_buff *skb, const struct hash_mac4_elem *e)
+{
+ return nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether);
+}
+
+static inline void
+hash_mac4_data_next(struct hash_mac4_elem *next,
+ const struct hash_mac4_elem *e)
+{
+}
+
+#define MTYPE hash_mac4
+#define PF 4
+#define HOST_MASK 32
+#define IP_SET_EMIT_CREATE
+#define IP_SET_PROTO_UNDEF
+#include "ip_set_hash_gen.h"
+
+/* Zero valued element is not supported */
+static const unsigned char invalid_ether[ETH_ALEN] = { 0 };
+
+static int
+hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+ enum ipset_adt adt, struct ip_set_adt_opt *opt)
+{
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
+ struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
+
+ /* MAC can be src only */
+ if (!(opt->flags & IPSET_DIM_ONE_SRC))
+ return 0;
+
+ if (skb_mac_header(skb) < skb->head ||
+ (skb_mac_header(skb) + ETH_HLEN) > skb->data)
+ return -EINVAL;
+
+ memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
+ if (memcmp(e.ether, invalid_ether, ETH_ALEN) == 0)
+ return -EINVAL;
+ return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
+}
+
+static int
+hash_mac4_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+ ipset_adtfn adtfn = set->variant->adt[adt];
+ struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } };
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ int ret;
+
+ if (unlikely(!tb[IPSET_ATTR_ETHER] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
+ return -IPSET_ERR_PROTOCOL;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+ ret = ip_set_get_extensions(set, tb, &ext);
+ if (ret)
+ return ret;
+ memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
+ if (memcmp(e.ether, invalid_ether, ETH_ALEN) == 0)
+ return -IPSET_ERR_HASH_ELEM;
+
+ return adtfn(set, &e, &ext, &ext, flags);
+}
+
+static struct ip_set_type hash_mac_type __read_mostly = {
+ .name = "hash:mac",
+ .protocol = IPSET_PROTOCOL,
+ .features = IPSET_TYPE_MAC,
+ .dimension = IPSET_DIM_ONE,
+ .family = NFPROTO_UNSPEC,
+ .revision_min = IPSET_TYPE_REV_MIN,
+ .revision_max = IPSET_TYPE_REV_MAX,
+ .create = hash_mac_create,
+ .create_policy = {
+ [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
+ [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 },
+ [IPSET_ATTR_PROBES] = { .type = NLA_U8 },
+ [IPSET_ATTR_RESIZE] = { .type = NLA_U8 },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
+ },
+ .adt_policy = {
+ [IPSET_ATTR_ETHER] = { .type = NLA_BINARY,
+ .len = ETH_ALEN },
+ [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ [IPSET_ATTR_BYTES] = { .type = NLA_U64 },
+ [IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
+ [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
+ },
+ .me = THIS_MODULE,
+};
+
+static int __init
+hash_mac_init(void)
+{
+ return ip_set_type_register(&hash_mac_type);
+}
+
+static void __exit
+hash_mac_fini(void)
+{
+ ip_set_type_unregister(&hash_mac_type);
+}
+
+module_init(hash_mac_init);
+module_exit(hash_mac_fini);
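hash_mac4_kadt() only ever matches the source MAC, and it bails out both when the mac header is not fully contained in the linear skb head and when the address is all zeroes, since a zero-valued element cannot be stored in the hash. The same two checks in isolation, assuming the is_zero_ether_addr() helper from <linux/etherdevice.h>:

	/* Sketch: is the source MAC safe to feed into the set? */
	static bool mac_src_usable(const struct sk_buff *skb)
	{
		if (skb_mac_header(skb) < skb->head ||
		    skb_mac_header(skb) + ETH_HLEN > skb->data)
			return false;	/* no complete ethernet header */
		return !is_zero_ether_addr(eth_hdr(skb)->h_source);
	}
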
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 4c7d495783a3..6b3ac10ac2f1 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -27,7 +27,8 @@
/* 2 nomatch flag support added */
/* 3 Counters support added */
/* 4 Comments support added */
-#define IPSET_TYPE_REV_MAX 5 /* Forceadd support added */
+/* 5 Forceadd support added */
+#define IPSET_TYPE_REV_MAX 6 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -150,7 +151,10 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -318,7 +322,10 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -377,6 +384,9 @@ static struct ip_set_type hash_net_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index db2606805b35..35dd35873442 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -28,7 +28,8 @@
/* 2 /0 support added */
/* 3 Counters support added */
/* 4 Comments support added */
-#define IPSET_TYPE_REV_MAX 5 /* Forceadd support added */
+/* 5 Forceadd support added */
+#define IPSET_TYPE_REV_MAX 6 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -236,7 +237,7 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
#define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC)
if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
const struct nf_bridge_info *nf_bridge = skb->nf_bridge;
if (!nf_bridge)
@@ -281,7 +282,10 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -470,7 +474,7 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
ip6_netmask(&e.ip, e.cidr);
if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
const struct nf_bridge_info *nf_bridge = skb->nf_bridge;
if (!nf_bridge)
@@ -514,7 +518,10 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -590,6 +597,9 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
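The physdev branches above switch from #ifdef CONFIG_BRIDGE_NETFILTER to #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER). The plain #ifdef only matches a built-in (=y) configuration, while IS_ENABLED() also evaluates to 1 for =m builds, which matters now that bridge netfilter can be built modular. Because the macro is a compile-time 0 or 1 it works in ordinary C conditions too; a sketch, where handle_physdev() is a hypothetical stand-in for the skipped logic:

	#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		/* reached for CONFIG_BRIDGE_NETFILTER=y and =m alike */
	#endif

		if (IS_ENABLED(CONFIG_BRIDGE_NETFILTER))
			handle_physdev();	/* compiled out as dead code otherwise */
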
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index 3e99987e4bf2..da00284b3571 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -24,7 +24,8 @@
#include <linux/netfilter/ipset/ip_set_hash.h>
#define IPSET_TYPE_REV_MIN 0
-#define IPSET_TYPE_REV_MAX 1 /* Forceadd support added */
+/* 1 Forceadd support added */
+#define IPSET_TYPE_REV_MAX 2 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
@@ -171,7 +172,10 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -203,7 +207,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
flags |= (IPSET_FLAG_NOMATCH << 16);
}
- if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] &&
+ if (adt == IPSET_TEST || !(tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_IP2_TO])) {
e.ip[0] = htonl(ip & ip_set_hostmask(e.cidr[0]));
e.ip[1] = htonl(ip2_from & ip_set_hostmask(e.cidr[1]));
@@ -219,9 +223,10 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
if (ip_to < ip)
swap(ip, ip_to);
- if (ip + UINT_MAX == ip_to)
+ if (unlikely(ip + UINT_MAX == ip_to))
return -IPSET_ERR_HASH_RANGE;
- }
+ } else
+ ip_set_mask_from_to(ip, ip_to, e.cidr[0]);
ip2_to = ip2_from;
if (tb[IPSET_ATTR_IP2_TO]) {
@@ -230,10 +235,10 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
if (ip2_to < ip2_from)
swap(ip2_from, ip2_to);
- if (ip2_from + UINT_MAX == ip2_to)
+ if (unlikely(ip2_from + UINT_MAX == ip2_to))
return -IPSET_ERR_HASH_RANGE;
-
- }
+ } else
+ ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
if (retried)
ip = ntohl(h->next.ip[0]);
@@ -393,7 +398,10 @@ hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -461,6 +469,9 @@ static struct ip_set_type hash_netnet_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
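Besides the attribute plumbing, two behavioural fixes hide in the netnet hunks: the single-element fast path now requires that neither IPSET_ATTR_IP_TO nor IPSET_ATTR_IP2_TO is present (the && under the negation became ||), and when a range attribute is absent the from/to pair is derived from the element's CIDR instead of staying a single address. A sketch of that derivation, mirroring what ip_set_mask_from_to()/ip_set_hostmask() compute on host-order addresses:

	#include <stdint.h>

	/* Sketch: expand from/cidr into the [from, to] block it covers. */
	static void mask_from_to(uint32_t *from, uint32_t *to, uint8_t cidr)
	{
		uint32_t hostmask = cidr ? 0xffffffffU << (32 - cidr) : 0;

		*from &= hostmask;		/* network part only */
		*to = *from | ~hostmask;	/* last address in the block */
	}
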
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 1c645fbd09c7..c0ddb58d19dc 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -29,7 +29,8 @@
/* 3 nomatch flag support added */
/* 4 Counters support added */
/* 5 Comments support added */
-#define IPSET_TYPE_REV_MAX 6 /* Forceadd support added */
+/* 6 Forceadd support added */
+#define IPSET_TYPE_REV_MAX 7 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -172,7 +173,10 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -389,7 +393,10 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -489,6 +496,9 @@ static struct ip_set_type hash_netport_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index c0d2ba73f8b2..b8053d675fc3 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -26,7 +26,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 0 Comments support added */
-#define IPSET_TYPE_REV_MAX 1 /* Forceadd support added */
+/* 1 Forceadd support added */
+#define IPSET_TYPE_REV_MAX 2 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Smith <oliver@8.c.9.b.0.7.4.0.1.0.0.2.ip6.arpa>");
@@ -189,7 +190,10 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -257,7 +261,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(ip, ip_to);
if (unlikely(ip + UINT_MAX == ip_to))
return -IPSET_ERR_HASH_RANGE;
- }
+ } else
+ ip_set_mask_from_to(ip, ip_to, e.cidr[0]);
port_to = port = ntohs(e.port);
if (tb[IPSET_ATTR_PORT_TO]) {
@@ -275,7 +280,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
swap(ip2_from, ip2_to);
if (unlikely(ip2_from + UINT_MAX == ip2_to))
return -IPSET_ERR_HASH_RANGE;
- }
+ } else
+ ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
if (retried)
ip = ntohl(h->next.ip[0]);
@@ -458,7 +464,10 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_IP2_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -567,6 +576,9 @@ static struct ip_set_type hash_netportnet_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 3e2317f3cf68..f8f682806e36 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -17,7 +17,8 @@
#define IPSET_TYPE_REV_MIN 0
/* 1 Counters support added */
-#define IPSET_TYPE_REV_MAX 2 /* Comments support added */
+/* 2 Comments support added */
+#define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -73,6 +74,10 @@ list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
ip_set_update_counter(ext_counter(e, set),
ext, &opt->ext,
cmdflags);
+ if (SET_WITH_SKBINFO(set))
+ ip_set_get_skbinfo(ext_skbinfo(e, set),
+ ext, &opt->ext,
+ cmdflags);
return ret;
}
}
@@ -197,6 +202,8 @@ list_set_add(struct ip_set *set, u32 i, struct set_adt_elem *d,
ip_set_init_counter(ext_counter(e, set), ext);
if (SET_WITH_COMMENT(set))
ip_set_init_comment(ext_comment(e, set), ext);
+ if (SET_WITH_SKBINFO(set))
+ ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
return 0;
}
@@ -307,6 +314,8 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
ip_set_init_counter(ext_counter(e, set), ext);
if (SET_WITH_COMMENT(set))
ip_set_init_comment(ext_comment(e, set), ext);
+ if (SET_WITH_SKBINFO(set))
+ ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
/* Set is already added to the list */
ip_set_put_byindex(map->net, d->id);
return 0;
@@ -378,7 +387,10 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -597,7 +609,9 @@ init_list_set(struct net *net, struct ip_set *set, u32 size)
struct set_elem *e;
u32 i;
- map = kzalloc(sizeof(*map) + size * set->dsize, GFP_KERNEL);
+ map = kzalloc(sizeof(*map) +
+ min_t(u32, size, IP_SET_LIST_MAX_SIZE) * set->dsize,
+ GFP_KERNEL);
if (!map)
return false;
@@ -665,6 +679,9 @@ static struct ip_set_type list_set_type __read_mostly = {
[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING },
+ [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
+ [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
+ [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
},
.me = THIS_MODULE,
};
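init_list_set() now clamps the userspace-supplied size with min_t() against IP_SET_LIST_MAX_SIZE before multiplying by set->dsize, so a hostile size can neither overflow the allocation arithmetic nor demand an absurd kzalloc. The guard in isolation, with the limit as a stand-in constant since its real value lives in the ipset headers:

	#include <stdint.h>
	#include <stddef.h>

	#define LIST_MAX_SIZE	(1U << 20)	/* stand-in for IP_SET_LIST_MAX_SIZE */

	/* Sketch: bounded allocation size for a size-element list. */
	static size_t list_alloc_size(size_t hdr, uint32_t size, size_t dsize)
	{
		uint32_t n = size < LIST_MAX_SIZE ? size : LIST_MAX_SIZE;

		return hdr + (size_t)n * dsize;
	}
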
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 0c3b1670b0d1..3b6929dec748 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -152,6 +152,16 @@ config IP_VS_WLC
If you want to compile it in kernel, say Y. To compile it as a
module, choose M here. If unsure, say N.
+config IP_VS_FO
+ tristate "weighted failover scheduling"
+ ---help---
+ The weighted failover scheduling algorithm directs network
+ connections to the server with the highest weight that is
+ currently available.
+
+ If you want to compile it in kernel, say Y. To compile it as a
+ module, choose M here. If unsure, say N.
+
config IP_VS_LBLC
tristate "locality-based least-connection scheduling"
---help---
diff --git a/net/netfilter/ipvs/Makefile b/net/netfilter/ipvs/Makefile
index 34ee602ddb66..38b2723b2e3d 100644
--- a/net/netfilter/ipvs/Makefile
+++ b/net/netfilter/ipvs/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_IP_VS_RR) += ip_vs_rr.o
obj-$(CONFIG_IP_VS_WRR) += ip_vs_wrr.o
obj-$(CONFIG_IP_VS_LC) += ip_vs_lc.o
obj-$(CONFIG_IP_VS_WLC) += ip_vs_wlc.o
+obj-$(CONFIG_IP_VS_FO) += ip_vs_fo.o
obj-$(CONFIG_IP_VS_LBLC) += ip_vs_lblc.o
obj-$(CONFIG_IP_VS_LBLCR) += ip_vs_lblcr.o
obj-$(CONFIG_IP_VS_DH) += ip_vs_dh.o
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 610e19c0e13f..b0f7b626b56d 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/in.h>
+#include <linux/inet.h>
#include <linux/net.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -77,6 +78,13 @@ static unsigned int ip_vs_conn_rnd __read_mostly;
#define CT_LOCKARRAY_SIZE (1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK (CT_LOCKARRAY_SIZE-1)
+/* We need an addrstrlen that works with or without v6 */
+#ifdef CONFIG_IP_VS_IPV6
+#define IP_VS_ADDRSTRLEN INET6_ADDRSTRLEN
+#else
+#define IP_VS_ADDRSTRLEN (8+1)
+#endif
+
struct ip_vs_aligned_lock
{
spinlock_t l;
@@ -488,7 +496,12 @@ static inline void ip_vs_bind_xmit(struct ip_vs_conn *cp)
break;
case IP_VS_CONN_F_TUNNEL:
- cp->packet_xmit = ip_vs_tunnel_xmit;
+#ifdef CONFIG_IP_VS_IPV6
+ if (cp->daf == AF_INET6)
+ cp->packet_xmit = ip_vs_tunnel_xmit_v6;
+ else
+#endif
+ cp->packet_xmit = ip_vs_tunnel_xmit;
break;
case IP_VS_CONN_F_DROUTE:
@@ -514,7 +527,10 @@ static inline void ip_vs_bind_xmit_v6(struct ip_vs_conn *cp)
break;
case IP_VS_CONN_F_TUNNEL:
- cp->packet_xmit = ip_vs_tunnel_xmit_v6;
+ if (cp->daf == AF_INET6)
+ cp->packet_xmit = ip_vs_tunnel_xmit_v6;
+ else
+ cp->packet_xmit = ip_vs_tunnel_xmit;
break;
case IP_VS_CONN_F_DROUTE:
@@ -580,7 +596,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
ip_vs_proto_name(cp->protocol),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
- IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
+ IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
ip_vs_fwd_tag(cp), cp->state,
cp->flags, atomic_read(&cp->refcnt),
atomic_read(&dest->refcnt));
@@ -616,7 +632,13 @@ void ip_vs_try_bind_dest(struct ip_vs_conn *cp)
struct ip_vs_dest *dest;
rcu_read_lock();
- dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
+
+ /* This function is only invoked by the synchronization code. We do
+ * not currently support heterogeneous pools with synchronization,
+ * so we can make the assumption that the svc_af is the same as the
+ * dest_af
+ */
+ dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, cp->af, &cp->daddr,
cp->dport, &cp->vaddr, cp->vport,
cp->protocol, cp->fwmark, cp->flags);
if (dest) {
@@ -671,7 +693,7 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
ip_vs_proto_name(cp->protocol),
IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
- IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
+ IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
ip_vs_fwd_tag(cp), cp->state,
cp->flags, atomic_read(&cp->refcnt),
atomic_read(&dest->refcnt));
@@ -740,7 +762,7 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
ntohs(ct->cport),
IP_VS_DBG_ADDR(ct->af, &ct->vaddr),
ntohs(ct->vport),
- IP_VS_DBG_ADDR(ct->af, &ct->daddr),
+ IP_VS_DBG_ADDR(ct->daf, &ct->daddr),
ntohs(ct->dport));
/*
@@ -848,7 +870,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
* Create a new connection entry and hash it into the ip_vs_conn_tab
*/
struct ip_vs_conn *
-ip_vs_conn_new(const struct ip_vs_conn_param *p,
+ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
struct ip_vs_dest *dest, __u32 fwmark)
{
@@ -867,6 +889,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
ip_vs_conn_net_set(cp, p->net);
cp->af = p->af;
+ cp->daf = dest_af;
cp->protocol = p->protocol;
ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
cp->cport = p->cport;
@@ -874,7 +897,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
&cp->vaddr, p->vaddr);
cp->vport = p->vport;
- ip_vs_addr_set(p->af, &cp->daddr, daddr);
+ ip_vs_addr_set(cp->daf, &cp->daddr, daddr);
cp->dport = dport;
cp->flags = flags;
cp->fwmark = fwmark;
@@ -1036,6 +1059,7 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
struct net *net = seq_file_net(seq);
char pe_data[IP_VS_PENAME_MAXLEN + IP_VS_PEDATA_MAXLEN + 3];
size_t len = 0;
+ char dbuf[IP_VS_ADDRSTRLEN];
if (!ip_vs_conn_net_eq(cp, net))
return 0;
@@ -1050,24 +1074,32 @@ static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
pe_data[len] = '\0';
#ifdef CONFIG_IP_VS_IPV6
+ if (cp->daf == AF_INET6)
+ snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
+ else
+#endif
+ snprintf(dbuf, sizeof(dbuf), "%08X",
+ ntohl(cp->daddr.ip));
+
+#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
- "%pI6 %04X %-11s %7lu%s\n",
+ "%s %04X %-11s %7lu%s\n",
ip_vs_proto_name(cp->protocol),
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
- &cp->daddr.in6, ntohs(cp->dport),
+ dbuf, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
(cp->timer.expires-jiffies)/HZ, pe_data);
else
#endif
seq_printf(seq,
"%-3s %08X %04X %08X %04X"
- " %08X %04X %-11s %7lu%s\n",
+ " %s %04X %-11s %7lu%s\n",
ip_vs_proto_name(cp->protocol),
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
- ntohl(cp->daddr.ip), ntohs(cp->dport),
+ dbuf, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
(cp->timer.expires-jiffies)/HZ, pe_data);
}
@@ -1105,6 +1137,7 @@ static const char *ip_vs_origin_name(unsigned int flags)
static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
{
+ char dbuf[IP_VS_ADDRSTRLEN];
if (v == SEQ_START_TOKEN)
seq_puts(seq,
@@ -1117,12 +1150,21 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
return 0;
#ifdef CONFIG_IP_VS_IPV6
+ if (cp->daf == AF_INET6)
+ snprintf(dbuf, sizeof(dbuf), "%pI6", &cp->daddr.in6);
+ else
+#endif
+ snprintf(dbuf, sizeof(dbuf), "%08X",
+ ntohl(cp->daddr.ip));
+
+#ifdef CONFIG_IP_VS_IPV6
if (cp->af == AF_INET6)
- seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X %pI6 %04X %-11s %-6s %7lu\n",
+ seq_printf(seq, "%-3s %pI6 %04X %pI6 %04X "
+ "%s %04X %-11s %-6s %7lu\n",
ip_vs_proto_name(cp->protocol),
&cp->caddr.in6, ntohs(cp->cport),
&cp->vaddr.in6, ntohs(cp->vport),
- &cp->daddr.in6, ntohs(cp->dport),
+ dbuf, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
ip_vs_origin_name(cp->flags),
(cp->timer.expires-jiffies)/HZ);
@@ -1130,11 +1172,11 @@ static int ip_vs_conn_sync_seq_show(struct seq_file *seq, void *v)
#endif
seq_printf(seq,
"%-3s %08X %04X %08X %04X "
- "%08X %04X %-11s %-6s %7lu\n",
+ "%s %04X %-11s %-6s %7lu\n",
ip_vs_proto_name(cp->protocol),
ntohl(cp->caddr.ip), ntohs(cp->cport),
ntohl(cp->vaddr.ip), ntohs(cp->vport),
- ntohl(cp->daddr.ip), ntohs(cp->dport),
+ dbuf, ntohs(cp->dport),
ip_vs_state_name(cp->protocol, cp->state),
ip_vs_origin_name(cp->flags),
(cp->timer.expires-jiffies)/HZ);
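The connection entry now records two address families: cp->af for the client/virtual side and cp->daf for the destination, so a v4 service can point at a v6 real server and vice versa. Every place that prints or compares the destination must key on cp->daf, which is why both seq_show functions render the destination into dbuf first and emit it as a plain string. A userspace-flavoured sketch of that per-family formatting, using inet_ntop() in place of the kernel's %pI6 and "%08X" conventions:

	#include <arpa/inet.h>

	/* Sketch: format an address by its own family, not its peer's. */
	static void format_dest(int daf, const void *daddr,
				char *dbuf, socklen_t len)
	{
		inet_ntop(daf == AF_INET6 ? AF_INET6 : AF_INET,
			  daddr, dbuf, len);
	}
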
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5c34e8d42e01..990decba1fe4 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -328,7 +328,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
* This adds param.pe_data to the template,
* and thus param.pe_data will be destroyed
* when the template expires */
- ct = ip_vs_conn_new(&param, &dest->addr, dport,
+ ct = ip_vs_conn_new(&param, dest->af, &dest->addr, dport,
IP_VS_CONN_F_TEMPLATE, dest, skb->mark);
if (ct == NULL) {
kfree(param.pe_data);
@@ -357,7 +357,8 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr,
src_port, &iph->daddr, dst_port, &param);
- cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark);
+ cp = ip_vs_conn_new(&param, dest->af, &dest->addr, dport, flags, dest,
+ skb->mark);
if (cp == NULL) {
ip_vs_conn_put(ct);
*ignored = -1;
@@ -479,7 +480,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
&iph->saddr, pptr[0], &iph->daddr,
pptr[1], &p);
- cp = ip_vs_conn_new(&p, &dest->addr,
+ cp = ip_vs_conn_new(&p, dest->af, &dest->addr,
dest->port ? dest->port : pptr[1],
flags, dest, skb->mark);
if (!cp) {
@@ -491,9 +492,9 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
IP_VS_DBG_BUF(6, "Schedule fwd:%c c:%s:%u v:%s:%u "
"d:%s:%u conn->flags:%X conn->refcnt:%d\n",
ip_vs_fwd_tag(cp),
- IP_VS_DBG_ADDR(svc->af, &cp->caddr), ntohs(cp->cport),
- IP_VS_DBG_ADDR(svc->af, &cp->vaddr), ntohs(cp->vport),
- IP_VS_DBG_ADDR(svc->af, &cp->daddr), ntohs(cp->dport),
+ IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
+ IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
+ IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
cp->flags, atomic_read(&cp->refcnt));
ip_vs_conn_stats(cp, svc);
@@ -550,7 +551,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol,
&iph->saddr, pptr[0],
&iph->daddr, pptr[1], &p);
- cp = ip_vs_conn_new(&p, &daddr, 0,
+ cp = ip_vs_conn_new(&p, svc->af, &daddr, 0,
IP_VS_CONN_F_BYPASS | flags,
NULL, skb->mark);
if (!cp)
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index fd3f444a4f96..ac7ba689efe7 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -574,8 +574,8 @@ bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
* Called under RCU lock.
*/
static struct ip_vs_dest *
-ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
- __be16 dport)
+ip_vs_lookup_dest(struct ip_vs_service *svc, int dest_af,
+ const union nf_inet_addr *daddr, __be16 dport)
{
struct ip_vs_dest *dest;
@@ -583,9 +583,9 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
* Find the destination for the given service
*/
list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
- if ((dest->af == svc->af)
- && ip_vs_addr_equal(svc->af, &dest->addr, daddr)
- && (dest->port == dport)) {
+ if ((dest->af == dest_af) &&
+ ip_vs_addr_equal(dest_af, &dest->addr, daddr) &&
+ (dest->port == dport)) {
/* HIT */
return dest;
}
@@ -602,7 +602,7 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
* on the backup.
* Called under RCU lock, no refcnt is returned.
*/
-struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
+struct ip_vs_dest *ip_vs_find_dest(struct net *net, int svc_af, int dest_af,
const union nf_inet_addr *daddr,
__be16 dport,
const union nf_inet_addr *vaddr,
@@ -613,14 +613,14 @@ struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
struct ip_vs_service *svc;
__be16 port = dport;
- svc = ip_vs_service_find(net, af, fwmark, protocol, vaddr, vport);
+ svc = ip_vs_service_find(net, svc_af, fwmark, protocol, vaddr, vport);
if (!svc)
return NULL;
if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
port = 0;
- dest = ip_vs_lookup_dest(svc, daddr, port);
+ dest = ip_vs_lookup_dest(svc, dest_af, daddr, port);
if (!dest)
- dest = ip_vs_lookup_dest(svc, daddr, port ^ dport);
+ dest = ip_vs_lookup_dest(svc, dest_af, daddr, port ^ dport);
return dest;
}
@@ -657,8 +657,8 @@ static void __ip_vs_dst_cache_reset(struct ip_vs_dest *dest)
* scheduling.
*/
static struct ip_vs_dest *
-ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
- __be16 dport)
+ip_vs_trash_get_dest(struct ip_vs_service *svc, int dest_af,
+ const union nf_inet_addr *daddr, __be16 dport)
{
struct ip_vs_dest *dest;
struct netns_ipvs *ipvs = net_ipvs(svc->net);
@@ -671,11 +671,11 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
"dest->refcnt=%d\n",
dest->vfwmark,
- IP_VS_DBG_ADDR(svc->af, &dest->addr),
+ IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port),
atomic_read(&dest->refcnt));
- if (dest->af == svc->af &&
- ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
+ if (dest->af == dest_af &&
+ ip_vs_addr_equal(dest_af, &dest->addr, daddr) &&
dest->port == dport &&
dest->vfwmark == svc->fwmark &&
dest->protocol == svc->protocol &&
@@ -779,6 +779,12 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
struct ip_vs_scheduler *sched;
int conn_flags;
+ /* We cannot modify an address and change the address family */
+ BUG_ON(!add && udest->af != dest->af);
+
+ if (add && udest->af != svc->af)
+ ipvs->mixed_address_family_dests++;
+
/* set the weight and the flags */
atomic_set(&dest->weight, udest->weight);
conn_flags = udest->conn_flags & IP_VS_CONN_F_DEST_MASK;
@@ -816,6 +822,8 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
dest->u_threshold = udest->u_threshold;
dest->l_threshold = udest->l_threshold;
+ dest->af = udest->af;
+
spin_lock_bh(&dest->dst_lock);
__ip_vs_dst_cache_reset(dest);
spin_unlock_bh(&dest->dst_lock);
@@ -847,7 +855,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
EnterFunction(2);
#ifdef CONFIG_IP_VS_IPV6
- if (svc->af == AF_INET6) {
+ if (udest->af == AF_INET6) {
atype = ipv6_addr_type(&udest->addr.in6);
if ((!(atype & IPV6_ADDR_UNICAST) ||
atype & IPV6_ADDR_LINKLOCAL) &&
@@ -875,12 +883,12 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
u64_stats_init(&ip_vs_dest_stats->syncp);
}
- dest->af = svc->af;
+ dest->af = udest->af;
dest->protocol = svc->protocol;
dest->vaddr = svc->addr;
dest->vport = svc->port;
dest->vfwmark = svc->fwmark;
- ip_vs_addr_copy(svc->af, &dest->addr, &udest->addr);
+ ip_vs_addr_copy(udest->af, &dest->addr, &udest->addr);
dest->port = udest->port;
atomic_set(&dest->activeconns, 0);
@@ -928,11 +936,11 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
return -ERANGE;
}
- ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
+ ip_vs_addr_copy(udest->af, &daddr, &udest->addr);
/* We use function that requires RCU lock */
rcu_read_lock();
- dest = ip_vs_lookup_dest(svc, &daddr, dport);
+ dest = ip_vs_lookup_dest(svc, udest->af, &daddr, dport);
rcu_read_unlock();
if (dest != NULL) {
@@ -944,12 +952,12 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
* Check if the dest already exists in the trash and
* is from the same service
*/
- dest = ip_vs_trash_get_dest(svc, &daddr, dport);
+ dest = ip_vs_trash_get_dest(svc, udest->af, &daddr, dport);
if (dest != NULL) {
IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
"dest->refcnt=%d, service %u/%s:%u\n",
- IP_VS_DBG_ADDR(svc->af, &daddr), ntohs(dport),
+ IP_VS_DBG_ADDR(udest->af, &daddr), ntohs(dport),
atomic_read(&dest->refcnt),
dest->vfwmark,
IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
@@ -992,11 +1000,11 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
return -ERANGE;
}
- ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
+ ip_vs_addr_copy(udest->af, &daddr, &udest->addr);
/* We use function that requires RCU lock */
rcu_read_lock();
- dest = ip_vs_lookup_dest(svc, &daddr, dport);
+ dest = ip_vs_lookup_dest(svc, udest->af, &daddr, dport);
rcu_read_unlock();
if (dest == NULL) {
@@ -1055,6 +1063,9 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
list_del_rcu(&dest->n_list);
svc->num_dests--;
+ if (dest->af != svc->af)
+ net_ipvs(svc->net)->mixed_address_family_dests--;
+
if (svcupd) {
struct ip_vs_scheduler *sched;
@@ -1078,7 +1089,7 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
/* We use function that requires RCU lock */
rcu_read_lock();
- dest = ip_vs_lookup_dest(svc, &udest->addr, dport);
+ dest = ip_vs_lookup_dest(svc, udest->af, &udest->addr, dport);
rcu_read_unlock();
if (dest == NULL) {
@@ -2179,29 +2190,41 @@ static int ip_vs_set_timeout(struct net *net, struct ip_vs_timeout_user *u)
return 0;
}
+#define CMDID(cmd) (cmd - IP_VS_BASE_CTL)
+
+struct ip_vs_svcdest_user {
+ struct ip_vs_service_user s;
+ struct ip_vs_dest_user d;
+};
+
+static const unsigned char set_arglen[CMDID(IP_VS_SO_SET_MAX) + 1] = {
+ [CMDID(IP_VS_SO_SET_ADD)] = sizeof(struct ip_vs_service_user),
+ [CMDID(IP_VS_SO_SET_EDIT)] = sizeof(struct ip_vs_service_user),
+ [CMDID(IP_VS_SO_SET_DEL)] = sizeof(struct ip_vs_service_user),
+ [CMDID(IP_VS_SO_SET_ADDDEST)] = sizeof(struct ip_vs_svcdest_user),
+ [CMDID(IP_VS_SO_SET_DELDEST)] = sizeof(struct ip_vs_svcdest_user),
+ [CMDID(IP_VS_SO_SET_EDITDEST)] = sizeof(struct ip_vs_svcdest_user),
+ [CMDID(IP_VS_SO_SET_TIMEOUT)] = sizeof(struct ip_vs_timeout_user),
+ [CMDID(IP_VS_SO_SET_STARTDAEMON)] = sizeof(struct ip_vs_daemon_user),
+ [CMDID(IP_VS_SO_SET_STOPDAEMON)] = sizeof(struct ip_vs_daemon_user),
+ [CMDID(IP_VS_SO_SET_ZERO)] = sizeof(struct ip_vs_service_user),
+};
-#define SET_CMDID(cmd) (cmd - IP_VS_BASE_CTL)
-#define SERVICE_ARG_LEN (sizeof(struct ip_vs_service_user))
-#define SVCDEST_ARG_LEN (sizeof(struct ip_vs_service_user) + \
- sizeof(struct ip_vs_dest_user))
-#define TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user))
-#define DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user))
-#define MAX_ARG_LEN SVCDEST_ARG_LEN
-
-static const unsigned char set_arglen[SET_CMDID(IP_VS_SO_SET_MAX)+1] = {
- [SET_CMDID(IP_VS_SO_SET_ADD)] = SERVICE_ARG_LEN,
- [SET_CMDID(IP_VS_SO_SET_EDIT)] = SERVICE_ARG_LEN,
- [SET_CMDID(IP_VS_SO_SET_DEL)] = SERVICE_ARG_LEN,
- [SET_CMDID(IP_VS_SO_SET_FLUSH)] = 0,
- [SET_CMDID(IP_VS_SO_SET_ADDDEST)] = SVCDEST_ARG_LEN,
- [SET_CMDID(IP_VS_SO_SET_DELDEST)] = SVCDEST_ARG_LEN,
- [SET_CMDID(IP_VS_SO_SET_EDITDEST)] = SVCDEST_ARG_LEN,
- [SET_CMDID(IP_VS_SO_SET_TIMEOUT)] = TIMEOUT_ARG_LEN,
- [SET_CMDID(IP_VS_SO_SET_STARTDAEMON)] = DAEMON_ARG_LEN,
- [SET_CMDID(IP_VS_SO_SET_STOPDAEMON)] = DAEMON_ARG_LEN,
- [SET_CMDID(IP_VS_SO_SET_ZERO)] = SERVICE_ARG_LEN,
+union ip_vs_set_arglen {
+ struct ip_vs_service_user field_IP_VS_SO_SET_ADD;
+ struct ip_vs_service_user field_IP_VS_SO_SET_EDIT;
+ struct ip_vs_service_user field_IP_VS_SO_SET_DEL;
+ struct ip_vs_svcdest_user field_IP_VS_SO_SET_ADDDEST;
+ struct ip_vs_svcdest_user field_IP_VS_SO_SET_DELDEST;
+ struct ip_vs_svcdest_user field_IP_VS_SO_SET_EDITDEST;
+ struct ip_vs_timeout_user field_IP_VS_SO_SET_TIMEOUT;
+ struct ip_vs_daemon_user field_IP_VS_SO_SET_STARTDAEMON;
+ struct ip_vs_daemon_user field_IP_VS_SO_SET_STOPDAEMON;
+ struct ip_vs_service_user field_IP_VS_SO_SET_ZERO;
};
+#define MAX_SET_ARGLEN sizeof(union ip_vs_set_arglen)
+
static void ip_vs_copy_usvc_compat(struct ip_vs_service_user_kern *usvc,
struct ip_vs_service_user *usvc_compat)
{
@@ -2232,6 +2255,7 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
udest->weight = udest_compat->weight;
udest->u_threshold = udest_compat->u_threshold;
udest->l_threshold = udest_compat->l_threshold;
+ udest->af = AF_INET;
}
static int
@@ -2239,7 +2263,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
struct net *net = sock_net(sk);
int ret;
- unsigned char arg[MAX_ARG_LEN];
+ unsigned char arg[MAX_SET_ARGLEN];
struct ip_vs_service_user *usvc_compat;
struct ip_vs_service_user_kern usvc;
struct ip_vs_service *svc;
@@ -2247,16 +2271,15 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
struct ip_vs_dest_user_kern udest;
struct netns_ipvs *ipvs = net_ipvs(net);
+ BUILD_BUG_ON(sizeof(arg) > 255);
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
return -EINVAL;
- if (len < 0 || len > MAX_ARG_LEN)
- return -EINVAL;
- if (len != set_arglen[SET_CMDID(cmd)]) {
- pr_err("set_ctl: len %u != %u\n",
- len, set_arglen[SET_CMDID(cmd)]);
+ if (len != set_arglen[CMDID(cmd)]) {
+ IP_VS_DBG(1, "set_ctl: len %u != %u\n",
+ len, set_arglen[CMDID(cmd)]);
return -EINVAL;
}
@@ -2469,6 +2492,12 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
if (count >= get->num_dests)
break;
+ /* Cannot expose heterogeneous members via sockopt
+ * interface
+ */
+ if (dest->af != svc->af)
+ continue;
+
entry.addr = dest->addr.ip;
entry.port = dest->port;
entry.conn_flags = atomic_read(&dest->conn_flags);
@@ -2512,51 +2541,51 @@ __ip_vs_get_timeouts(struct net *net, struct ip_vs_timeout_user *u)
#endif
}
+static const unsigned char get_arglen[CMDID(IP_VS_SO_GET_MAX) + 1] = {
+ [CMDID(IP_VS_SO_GET_VERSION)] = 64,
+ [CMDID(IP_VS_SO_GET_INFO)] = sizeof(struct ip_vs_getinfo),
+ [CMDID(IP_VS_SO_GET_SERVICES)] = sizeof(struct ip_vs_get_services),
+ [CMDID(IP_VS_SO_GET_SERVICE)] = sizeof(struct ip_vs_service_entry),
+ [CMDID(IP_VS_SO_GET_DESTS)] = sizeof(struct ip_vs_get_dests),
+ [CMDID(IP_VS_SO_GET_TIMEOUT)] = sizeof(struct ip_vs_timeout_user),
+ [CMDID(IP_VS_SO_GET_DAEMON)] = 2 * sizeof(struct ip_vs_daemon_user),
+};
-#define GET_CMDID(cmd) (cmd - IP_VS_BASE_CTL)
-#define GET_INFO_ARG_LEN (sizeof(struct ip_vs_getinfo))
-#define GET_SERVICES_ARG_LEN (sizeof(struct ip_vs_get_services))
-#define GET_SERVICE_ARG_LEN (sizeof(struct ip_vs_service_entry))
-#define GET_DESTS_ARG_LEN (sizeof(struct ip_vs_get_dests))
-#define GET_TIMEOUT_ARG_LEN (sizeof(struct ip_vs_timeout_user))
-#define GET_DAEMON_ARG_LEN (sizeof(struct ip_vs_daemon_user) * 2)
-
-static const unsigned char get_arglen[GET_CMDID(IP_VS_SO_GET_MAX)+1] = {
- [GET_CMDID(IP_VS_SO_GET_VERSION)] = 64,
- [GET_CMDID(IP_VS_SO_GET_INFO)] = GET_INFO_ARG_LEN,
- [GET_CMDID(IP_VS_SO_GET_SERVICES)] = GET_SERVICES_ARG_LEN,
- [GET_CMDID(IP_VS_SO_GET_SERVICE)] = GET_SERVICE_ARG_LEN,
- [GET_CMDID(IP_VS_SO_GET_DESTS)] = GET_DESTS_ARG_LEN,
- [GET_CMDID(IP_VS_SO_GET_TIMEOUT)] = GET_TIMEOUT_ARG_LEN,
- [GET_CMDID(IP_VS_SO_GET_DAEMON)] = GET_DAEMON_ARG_LEN,
+union ip_vs_get_arglen {
+ char field_IP_VS_SO_GET_VERSION[64];
+ struct ip_vs_getinfo field_IP_VS_SO_GET_INFO;
+ struct ip_vs_get_services field_IP_VS_SO_GET_SERVICES;
+ struct ip_vs_service_entry field_IP_VS_SO_GET_SERVICE;
+ struct ip_vs_get_dests field_IP_VS_SO_GET_DESTS;
+ struct ip_vs_timeout_user field_IP_VS_SO_GET_TIMEOUT;
+ struct ip_vs_daemon_user field_IP_VS_SO_GET_DAEMON[2];
};
+#define MAX_GET_ARGLEN sizeof(union ip_vs_get_arglen)
+
static int
do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
- unsigned char arg[128];
+ unsigned char arg[MAX_GET_ARGLEN];
int ret = 0;
unsigned int copylen;
struct net *net = sock_net(sk);
struct netns_ipvs *ipvs = net_ipvs(net);
BUG_ON(!net);
+ BUILD_BUG_ON(sizeof(arg) > 255);
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
return -EINVAL;
- if (*len < get_arglen[GET_CMDID(cmd)]) {
- pr_err("get_ctl: len %u < %u\n",
- *len, get_arglen[GET_CMDID(cmd)]);
+ copylen = get_arglen[CMDID(cmd)];
+ if (*len < (int) copylen) {
+ IP_VS_DBG(1, "get_ctl: len %d < %u\n", *len, copylen);
return -EINVAL;
}
- copylen = get_arglen[GET_CMDID(cmd)];
- if (copylen > 128)
- return -EINVAL;
-
if (copy_from_user(arg, user, copylen) != 0)
return -EFAULT;
/*
@@ -2766,6 +2795,7 @@ static const struct nla_policy ip_vs_dest_policy[IPVS_DEST_ATTR_MAX + 1] = {
[IPVS_DEST_ATTR_INACT_CONNS] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_PERSIST_CONNS] = { .type = NLA_U32 },
[IPVS_DEST_ATTR_STATS] = { .type = NLA_NESTED },
+ [IPVS_DEST_ATTR_ADDR_FAMILY] = { .type = NLA_U16 },
};
static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
@@ -3021,7 +3051,8 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS,
atomic_read(&dest->inactconns)) ||
nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
- atomic_read(&dest->persistconns)))
+ atomic_read(&dest->persistconns)) ||
+ nla_put_u16(skb, IPVS_DEST_ATTR_ADDR_FAMILY, dest->af))
goto nla_put_failure;
if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
goto nla_put_failure;
@@ -3102,6 +3133,7 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
{
struct nlattr *attrs[IPVS_DEST_ATTR_MAX + 1];
struct nlattr *nla_addr, *nla_port;
+ struct nlattr *nla_addr_family;
/* Parse mandatory identifying destination fields first */
if (nla == NULL ||
@@ -3110,6 +3142,7 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
nla_addr = attrs[IPVS_DEST_ATTR_ADDR];
nla_port = attrs[IPVS_DEST_ATTR_PORT];
+ nla_addr_family = attrs[IPVS_DEST_ATTR_ADDR_FAMILY];
if (!(nla_addr && nla_port))
return -EINVAL;
@@ -3119,6 +3152,11 @@ static int ip_vs_genl_parse_dest(struct ip_vs_dest_user_kern *udest,
nla_memcpy(&udest->addr, nla_addr, sizeof(udest->addr));
udest->port = nla_get_be16(nla_port);
+ if (nla_addr_family)
+ udest->af = nla_get_u16(nla_addr_family);
+ else
+ udest->af = 0;
+
/* If a full entry was requested, check for the additional fields */
if (full_entry) {
struct nlattr *nla_fwd, *nla_weight, *nla_u_thresh,
@@ -3223,6 +3261,12 @@ static int ip_vs_genl_new_daemon(struct net *net, struct nlattr **attrs)
attrs[IPVS_DAEMON_ATTR_SYNC_ID]))
return -EINVAL;
+ /* The synchronization protocol is incompatible with mixed family
+ * services
+ */
+ if (net_ipvs(net)->mixed_address_family_dests > 0)
+ return -EINVAL;
+
return start_sync_thread(net,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]),
nla_data(attrs[IPVS_DAEMON_ATTR_MCAST_IFN]),
@@ -3346,6 +3390,35 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
need_full_dest);
if (ret)
goto out;
+
+ /* Old protocols did not allow the user to specify address
+ * family, so we set it to zero instead. We also didn't
+ * allow heterogeneous pools in the old code, so it's safe
+ * to assume that this will have the same address family as
+ * the service.
+ */
+ if (udest.af == 0)
+ udest.af = svc->af;
+
+ if (udest.af != svc->af) {
+ /* The synchronization protocol is incompatible
+ * with mixed family services
+ */
+ if (net_ipvs(net)->sync_state) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Which connection types do we support? */
+ switch (udest.conn_flags) {
+ case IP_VS_CONN_F_TUNNEL:
+ /* We are able to forward this */
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ }
}
switch (cmd) {
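The sockopt argument tables are rebuilt around a trick worth noting: instead of hand-maintained *_ARG_LEN macros plus a separate MAX_ARG_LEN, every command's payload struct becomes a member of a union, and sizeof that union is by construction the largest payload, so the on-stack arg buffer always fits and BUILD_BUG_ON(sizeof(arg) > 255) keeps the unsigned char length table honest. The pattern in miniature, with toy payload types standing in for the IPVS structs:

	#include <assert.h>

	struct cmd_a { char buf[16]; };	/* toy payloads */
	struct cmd_b { char buf[40]; };

	union arglen {
		struct cmd_a field_a;
		struct cmd_b field_b;
	};
	#define MAX_ARGLEN sizeof(union arglen)	/* 40 here: the widest member */

	static_assert(MAX_ARGLEN <= 255, "length must fit an unsigned char table");
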
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index c3b84546ea9e..6be5c538b71e 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -234,7 +234,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
IP_VS_DBG_BUF(6, "DH: destination IP address %s --> server %s:%d\n",
IP_VS_DBG_ADDR(svc->af, &iph->daddr),
- IP_VS_DBG_ADDR(svc->af, &dest->addr),
+ IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port));
return dest;
diff --git a/net/netfilter/ipvs/ip_vs_fo.c b/net/netfilter/ipvs/ip_vs_fo.c
new file mode 100644
index 000000000000..e09874d02938
--- /dev/null
+++ b/net/netfilter/ipvs/ip_vs_fo.c
@@ -0,0 +1,79 @@
+/*
+ * IPVS: Weighted Fail Over module
+ *
+ * Authors: Kenny Mathis <kmathis@chokepoint.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Changes:
+ * Kenny Mathis : added initial functionality based on weight
+ *
+ */
+
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <net/ip_vs.h>
+
+/* Weighted Fail Over Module */
+static struct ip_vs_dest *
+ip_vs_fo_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
+ struct ip_vs_iphdr *iph)
+{
+ struct ip_vs_dest *dest, *hweight = NULL;
+ int hw = 0; /* Track highest weight */
+
+ IP_VS_DBG(6, "ip_vs_fo_schedule(): Scheduling...\n");
+
+ /* Basic failover functionality
+ * Find the real server with the highest weight and send it traffic
+ */
+ list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
+ if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
+ atomic_read(&dest->weight) > hw) {
+ hweight = dest;
+ hw = atomic_read(&dest->weight);
+ }
+ }
+
+ if (hweight) {
+ IP_VS_DBG_BUF(6, "FO: server %s:%u activeconns %d weight %d\n",
+ IP_VS_DBG_ADDR(hweight->af, &hweight->addr),
+ ntohs(hweight->port),
+ atomic_read(&hweight->activeconns),
+ atomic_read(&hweight->weight));
+ return hweight;
+ }
+
+ ip_vs_scheduler_err(svc, "no destination available");
+ return NULL;
+}
+
+static struct ip_vs_scheduler ip_vs_fo_scheduler = {
+ .name = "fo",
+ .refcnt = ATOMIC_INIT(0),
+ .module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_fo_scheduler.n_list),
+ .schedule = ip_vs_fo_schedule,
+};
+
+static int __init ip_vs_fo_init(void)
+{
+ return register_ip_vs_scheduler(&ip_vs_fo_scheduler);
+}
+
+static void __exit ip_vs_fo_cleanup(void)
+{
+ unregister_ip_vs_scheduler(&ip_vs_fo_scheduler);
+ synchronize_rcu();
+}
+
+module_init(ip_vs_fo_init);
+module_exit(ip_vs_fo_cleanup);
+MODULE_LICENSE("GPL");
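
Usage note: once loaded, the scheduler is selected by its .name above. Assuming a standard ipvsadm build, configuration would look something like:

	ipvsadm -A -t 192.0.2.1:80 -s fo
	ipvsadm -a -t 192.0.2.1:80 -r 10.0.0.10:80 -w 100
	ipvsadm -a -t 192.0.2.1:80 -r 10.0.0.11:80 -w 50

All traffic then goes to the highest-weight destination (10.0.0.10 here) until it is flagged IP_VS_DEST_F_OVERLOAD or its weight is lowered, at which point the next-highest weight takes over.
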
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 77c173282f38..a64fa15790e5 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -233,7 +233,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
ip_vs_conn_fill_param(ip_vs_conn_net(cp),
AF_INET, IPPROTO_TCP, &cp->caddr,
0, &cp->vaddr, port, &p);
- n_cp = ip_vs_conn_new(&p, &from, port,
+ /* As above, this is ipv4 only */
+ n_cp = ip_vs_conn_new(&p, AF_INET, &from, port,
IP_VS_CONN_F_NO_CPORT |
IP_VS_CONN_F_NFCT,
cp->dest, skb->mark);
@@ -396,7 +397,8 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
htons(ntohs(cp->vport)-1), &p);
n_cp = ip_vs_conn_in_get(&p);
if (!n_cp) {
- n_cp = ip_vs_conn_new(&p, &cp->daddr,
+ /* This is ipv4 only */
+ n_cp = ip_vs_conn_new(&p, AF_INET, &cp->daddr,
htons(ntohs(cp->dport)-1),
IP_VS_CONN_F_NFCT, cp->dest,
skb->mark);
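
Both call sites pin AF_INET explicitly because ip_vs_conn_new() grew a destination address family parameter in this series, and the FTP helper only knows how to mangle IPv4 payloads. The updated prototype is presumably along these lines (sketch inferred from the call sites, not copied from the header):

	struct ip_vs_conn *
	ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
		       const union nf_inet_addr *daddr, __be16 dport,
		       unsigned int flags, struct ip_vs_dest *dest,
		       __u32 fwmark);
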
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 547ff33c1efd..127f14046c51 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -199,11 +199,11 @@ ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
*/
static inline struct ip_vs_lblc_entry *
ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
- struct ip_vs_dest *dest)
+ u16 af, struct ip_vs_dest *dest)
{
struct ip_vs_lblc_entry *en;
- en = ip_vs_lblc_get(dest->af, tbl, daddr);
+ en = ip_vs_lblc_get(af, tbl, daddr);
if (en) {
if (en->dest == dest)
return en;
@@ -213,8 +213,8 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
if (!en)
return NULL;
- en->af = dest->af;
- ip_vs_addr_copy(dest->af, &en->addr, daddr);
+ en->af = af;
+ ip_vs_addr_copy(af, &en->addr, daddr);
en->lastuse = jiffies;
ip_vs_dest_hold(dest);
@@ -521,13 +521,13 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
/* If we fail to create a cache entry, we'll just use the valid dest */
spin_lock_bh(&svc->sched_lock);
if (!tbl->dead)
- ip_vs_lblc_new(tbl, &iph->daddr, dest);
+ ip_vs_lblc_new(tbl, &iph->daddr, svc->af, dest);
spin_unlock_bh(&svc->sched_lock);
out:
IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
IP_VS_DBG_ADDR(svc->af, &iph->daddr),
- IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));
+ IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));
return dest;
}
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 3f21a2f47de1..2229d2d8bbe0 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -362,18 +362,18 @@ ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
*/
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
- struct ip_vs_dest *dest)
+ u16 af, struct ip_vs_dest *dest)
{
struct ip_vs_lblcr_entry *en;
- en = ip_vs_lblcr_get(dest->af, tbl, daddr);
+ en = ip_vs_lblcr_get(af, tbl, daddr);
if (!en) {
en = kmalloc(sizeof(*en), GFP_ATOMIC);
if (!en)
return NULL;
- en->af = dest->af;
- ip_vs_addr_copy(dest->af, &en->addr, daddr);
+ en->af = af;
+ ip_vs_addr_copy(af, &en->addr, daddr);
en->lastuse = jiffies;
/* initialize its dest set */
@@ -706,13 +706,13 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
/* If we fail to create a cache entry, we'll just use the valid dest */
spin_lock_bh(&svc->sched_lock);
if (!tbl->dead)
- ip_vs_lblcr_new(tbl, &iph->daddr, dest);
+ ip_vs_lblcr_new(tbl, &iph->daddr, svc->af, dest);
spin_unlock_bh(&svc->sched_lock);
out:
IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
IP_VS_DBG_ADDR(svc->af, &iph->daddr),
- IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));
+ IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));
return dest;
}
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c
index 2bdcb1cf2127..19a0769a989a 100644
--- a/net/netfilter/ipvs/ip_vs_lc.c
+++ b/net/netfilter/ipvs/ip_vs_lc.c
@@ -59,7 +59,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
else
IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
"inactconns %d\n",
- IP_VS_DBG_ADDR(svc->af, &least->addr),
+ IP_VS_DBG_ADDR(least->af, &least->addr),
ntohs(least->port),
atomic_read(&least->activeconns),
atomic_read(&least->inactconns));
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index 961a6de9bb29..a8b63401e773 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -107,7 +107,8 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
out:
IP_VS_DBG_BUF(6, "NQ: server %s:%u "
"activeconns %d refcnt %d weight %d overhead %d\n",
- IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
+ IP_VS_DBG_ADDR(least->af, &least->addr),
+ ntohs(least->port),
atomic_read(&least->activeconns),
atomic_read(&least->refcnt),
atomic_read(&least->weight), loh);
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 2f7ea7564044..5b84c0b56642 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -432,7 +432,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
pd->pp->name,
((direction == IP_VS_DIR_OUTPUT) ?
"output " : "input "),
- IP_VS_DBG_ADDR(cp->af, &cp->daddr),
+ IP_VS_DBG_ADDR(cp->daf, &cp->daddr),
ntohs(cp->dport),
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
ntohs(cp->cport),
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index e3a697234a98..8e92beb0cca9 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -510,7 +510,7 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
th->fin ? 'F' : '.',
th->ack ? 'A' : '.',
th->rst ? 'R' : '.',
- IP_VS_DBG_ADDR(cp->af, &cp->daddr),
+ IP_VS_DBG_ADDR(cp->daf, &cp->daddr),
ntohs(cp->dport),
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
ntohs(cp->cport),
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index 176b87c35e34..58bacfc461ee 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -95,7 +95,7 @@ stop:
spin_unlock_bh(&svc->sched_lock);
IP_VS_DBG_BUF(6, "RR: server %s:%u "
"activeconns %d refcnt %d weight %d\n",
- IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
+ IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
atomic_read(&dest->activeconns),
atomic_read(&dest->refcnt), atomic_read(&dest->weight));
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index e446b9fa7424..f8e2d00f528b 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -108,7 +108,8 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
IP_VS_DBG_BUF(6, "SED: server %s:%u "
"activeconns %d refcnt %d weight %d overhead %d\n",
- IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
+ IP_VS_DBG_ADDR(least->af, &least->addr),
+ ntohs(least->port),
atomic_read(&least->activeconns),
atomic_read(&least->refcnt),
atomic_read(&least->weight), loh);
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index cc65b2f42cd4..98a13433b68c 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -138,7 +138,7 @@ ip_vs_sh_get_fallback(struct ip_vs_service *svc, struct ip_vs_sh_state *s,
return dest;
IP_VS_DBG_BUF(6, "SH: selected unavailable server %s:%d, reselecting",
- IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));
+ IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port));
/* if the original dest is unavailable, loop around the table
* starting from ihash to find a new dest
@@ -153,7 +153,7 @@ ip_vs_sh_get_fallback(struct ip_vs_service *svc, struct ip_vs_sh_state *s,
return dest;
IP_VS_DBG_BUF(6, "SH: selected unavailable "
"server %s:%d (offset %d), reselecting",
- IP_VS_DBG_ADDR(svc->af, &dest->addr),
+ IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port), roffset);
}
@@ -192,7 +192,7 @@ ip_vs_sh_reassign(struct ip_vs_sh_state *s, struct ip_vs_service *svc)
RCU_INIT_POINTER(b->dest, dest);
IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n",
- i, IP_VS_DBG_ADDR(svc->af, &dest->addr),
+ i, IP_VS_DBG_ADDR(dest->af, &dest->addr),
atomic_read(&dest->weight));
/* Don't move to next dest until filling weight */
@@ -342,7 +342,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
IP_VS_DBG_BUF(6, "SH: source IP address %s --> server %s:%d\n",
IP_VS_DBG_ADDR(svc->af, &iph->saddr),
- IP_VS_DBG_ADDR(svc->af, &dest->addr),
+ IP_VS_DBG_ADDR(dest->af, &dest->addr),
ntohs(dest->port));
return dest;
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index eadffb29dec0..7162c86fd50d 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -880,10 +880,17 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
* but still handled.
*/
rcu_read_lock();
- dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
- param->vport, protocol, fwmark, flags);
+ /* This function is only invoked by the synchronization
+ * code. We do not currently support heterogeneous pools
+ * with synchronization, so we can make the assumption that
+ * the svc_af is the same as the dest_af
+ */
+ dest = ip_vs_find_dest(net, type, type, daddr, dport,
+ param->vaddr, param->vport, protocol,
+ fwmark, flags);
- cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
+ cp = ip_vs_conn_new(param, type, daddr, dport, flags, dest,
+ fwmark);
rcu_read_unlock();
if (!cp) {
kfree(param->pe_data);
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index b5b4650d50a9..6b366fd90554 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -80,7 +80,8 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
IP_VS_DBG_BUF(6, "WLC: server %s:%u "
"activeconns %d refcnt %d weight %d overhead %d\n",
- IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
+ IP_VS_DBG_ADDR(least->af, &least->addr),
+ ntohs(least->port),
atomic_read(&least->activeconns),
atomic_read(&least->refcnt),
atomic_read(&least->weight), loh);
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 0546cd572d6b..17e6d4406ca7 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -216,7 +216,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
found:
IP_VS_DBG_BUF(6, "WRR: server %s:%u "
"activeconns %d refcnt %d weight %d\n",
- IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
+ IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
atomic_read(&dest->activeconns),
atomic_read(&dest->refcnt),
atomic_read(&dest->weight));
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 56896a412bce..91f17c1eb8a2 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -157,18 +157,113 @@ retry:
return rt;
}
+#ifdef CONFIG_IP_VS_IPV6
+static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
+{
+ return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
+}
+#endif
+
+static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
+ int rt_mode,
+ bool new_rt_is_local)
+{
+ bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL);
+	bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_NON_LOCAL);
+ bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR);
+ bool source_is_loopback;
+ bool old_rt_is_local;
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (skb_af == AF_INET6) {
+ int addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
+
+ source_is_loopback =
+ (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
+ (addr_type & IPV6_ADDR_LOOPBACK);
+ old_rt_is_local = __ip_vs_is_local_route6(
+ (struct rt6_info *)skb_dst(skb));
+ } else
+#endif
+ {
+ source_is_loopback = ipv4_is_loopback(ip_hdr(skb)->saddr);
+ old_rt_is_local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
+ }
+
+ if (unlikely(new_rt_is_local)) {
+ if (!rt_mode_allow_local)
+ return true;
+ if (!rt_mode_allow_redirect && !old_rt_is_local)
+ return true;
+ } else {
+ if (!rt_mode_allow_non_local)
+ return true;
+ if (source_is_loopback)
+ return true;
+ }
+ return false;
+}
+
+static inline void maybe_update_pmtu(int skb_af, struct sk_buff *skb, int mtu)
+{
+ struct sock *sk = skb->sk;
+ struct rtable *ort = skb_rtable(skb);
+
+ if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT)
+ ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
+}
+
+static inline bool ensure_mtu_is_adequate(int skb_af, int rt_mode,
+ struct ip_vs_iphdr *ipvsh,
+ struct sk_buff *skb, int mtu)
+{
+#ifdef CONFIG_IP_VS_IPV6
+ if (skb_af == AF_INET6) {
+ struct net *net = dev_net(skb_dst(skb)->dev);
+
+ if (unlikely(__mtu_check_toobig_v6(skb, mtu))) {
+ if (!skb->dev)
+ skb->dev = net->loopback_dev;
+ /* only send ICMP too big on first fragment */
+ if (!ipvsh->fragoffs)
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ IP_VS_DBG(1, "frag needed for %pI6c\n",
+ &ipv6_hdr(skb)->saddr);
+ return false;
+ }
+ } else
+#endif
+ {
+ struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
+
+ /* If we're going to tunnel the packet and pmtu discovery
+ * is disabled, we'll just fragment it anyway
+ */
+ if ((rt_mode & IP_VS_RT_MODE_TUNNEL) && !sysctl_pmtu_disc(ipvs))
+ return true;
+
+ if (unlikely(ip_hdr(skb)->frag_off & htons(IP_DF) &&
+ skb->len > mtu && !skb_is_gso(skb))) {
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
+ IP_VS_DBG(1, "frag needed for %pI4\n",
+ &ip_hdr(skb)->saddr);
+ return false;
+ }
+ }
+
+ return true;
+}
+
/* Get route to destination or remote server */
static int
-__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
- __be32 daddr, int rt_mode, __be32 *ret_saddr)
+__ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
+ __be32 daddr, int rt_mode, __be32 *ret_saddr,
+ struct ip_vs_iphdr *ipvsh)
{
struct net *net = dev_net(skb_dst(skb)->dev);
- struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_dest_dst *dest_dst;
struct rtable *rt; /* Route to the other host */
- struct rtable *ort; /* Original route */
- struct iphdr *iph;
- __be16 df;
int mtu;
int local, noref = 1;
@@ -218,30 +313,14 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
}
local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0;
- if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
- rt_mode)) {
- IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
- (rt->rt_flags & RTCF_LOCAL) ?
- "local":"non-local", &daddr);
+ if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
+ local))) {
+ IP_VS_DBG_RL("We are crossing local and non-local addresses"
+ " daddr=%pI4\n", &dest->addr.ip);
goto err_put;
}
- iph = ip_hdr(skb);
- if (likely(!local)) {
- if (unlikely(ipv4_is_loopback(iph->saddr))) {
- IP_VS_DBG_RL("Stopping traffic from loopback address "
- "%pI4 to non-local address, dest: %pI4\n",
- &iph->saddr, &daddr);
- goto err_put;
- }
- } else {
- ort = skb_rtable(skb);
- if (!(rt_mode & IP_VS_RT_MODE_RDR) &&
- !(ort->rt_flags & RTCF_LOCAL)) {
- IP_VS_DBG_RL("Redirect from non-local address %pI4 to "
- "local requires NAT method, dest: %pI4\n",
- &iph->daddr, &daddr);
- goto err_put;
- }
+
+ if (unlikely(local)) {
/* skb to local stack, preserve old route */
if (!noref)
ip_rt_put(rt);
@@ -250,28 +329,17 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL))) {
mtu = dst_mtu(&rt->dst);
- df = iph->frag_off & htons(IP_DF);
} else {
- struct sock *sk = skb->sk;
-
mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
if (mtu < 68) {
IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
goto err_put;
}
- ort = skb_rtable(skb);
- if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT)
- ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
- /* MTU check allowed? */
- df = sysctl_pmtu_disc(ipvs) ? iph->frag_off & htons(IP_DF) : 0;
+ maybe_update_pmtu(skb_af, skb, mtu);
}
- /* MTU checking */
- if (unlikely(df && skb->len > mtu && !skb_is_gso(skb))) {
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
- IP_VS_DBG(1, "frag needed for %pI4\n", &iph->saddr);
+ if (!ensure_mtu_is_adequate(skb_af, rt_mode, ipvsh, skb, mtu))
goto err_put;
- }
skb_dst_drop(skb);
if (noref) {
@@ -295,12 +363,6 @@ err_unreach:
}
#ifdef CONFIG_IP_VS_IPV6
-
-static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
-{
- return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
-}
-
static struct dst_entry *
__ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
struct in6_addr *ret_saddr, int do_xfrm)
@@ -339,14 +401,13 @@ out_err:
* Get route to destination or remote server
*/
static int
-__ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
+__ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
struct in6_addr *daddr, struct in6_addr *ret_saddr,
struct ip_vs_iphdr *ipvsh, int do_xfrm, int rt_mode)
{
struct net *net = dev_net(skb_dst(skb)->dev);
struct ip_vs_dest_dst *dest_dst;
struct rt6_info *rt; /* Route to the other host */
- struct rt6_info *ort; /* Original route */
struct dst_entry *dst;
int mtu;
int local, noref = 1;
@@ -393,32 +454,15 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
}
local = __ip_vs_is_local_route6(rt);
- if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
- rt_mode)) {
- IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6c\n",
- local ? "local":"non-local", daddr);
+
+ if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
+ local))) {
+ IP_VS_DBG_RL("We are crossing local and non-local addresses"
+ " daddr=%pI6\n", &dest->addr.in6);
goto err_put;
}
- if (likely(!local)) {
- if (unlikely((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
- ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
- IPV6_ADDR_LOOPBACK)) {
- IP_VS_DBG_RL("Stopping traffic from loopback address "
- "%pI6c to non-local address, "
- "dest: %pI6c\n",
- &ipv6_hdr(skb)->saddr, daddr);
- goto err_put;
- }
- } else {
- ort = (struct rt6_info *) skb_dst(skb);
- if (!(rt_mode & IP_VS_RT_MODE_RDR) &&
- !__ip_vs_is_local_route6(ort)) {
- IP_VS_DBG_RL("Redirect from non-local address %pI6c "
- "to local requires NAT method, "
- "dest: %pI6c\n",
- &ipv6_hdr(skb)->daddr, daddr);
- goto err_put;
- }
+
+ if (unlikely(local)) {
/* skb to local stack, preserve old route */
if (!noref)
dst_release(&rt->dst);
@@ -429,28 +473,17 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL)))
mtu = dst_mtu(&rt->dst);
else {
- struct sock *sk = skb->sk;
-
mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
if (mtu < IPV6_MIN_MTU) {
IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
IPV6_MIN_MTU);
goto err_put;
}
- ort = (struct rt6_info *) skb_dst(skb);
- if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT)
- ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
+ maybe_update_pmtu(skb_af, skb, mtu);
}
- if (unlikely(__mtu_check_toobig_v6(skb, mtu))) {
- if (!skb->dev)
- skb->dev = net->loopback_dev;
- /* only send ICMP too big on first fragment */
- if (!ipvsh->fragoffs)
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
- IP_VS_DBG(1, "frag needed for %pI6c\n", &ipv6_hdr(skb)->saddr);
+ if (!ensure_mtu_is_adequate(skb_af, rt_mode, ipvsh, skb, mtu))
goto err_put;
- }
skb_dst_drop(skb);
if (noref) {
@@ -556,8 +589,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
rcu_read_lock();
- if (__ip_vs_get_out_rt(skb, NULL, iph->daddr, IP_VS_RT_MODE_NON_LOCAL,
- NULL) < 0)
+ if (__ip_vs_get_out_rt(cp->af, skb, NULL, iph->daddr,
+ IP_VS_RT_MODE_NON_LOCAL, NULL, ipvsh) < 0)
goto tx_error;
ip_send_check(iph);
@@ -586,7 +619,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
rcu_read_lock();
- if (__ip_vs_get_out_rt_v6(skb, NULL, &ipvsh->daddr.in6, NULL,
+ if (__ip_vs_get_out_rt_v6(cp->af, skb, NULL, &ipvsh->daddr.in6, NULL,
ipvsh, 0, IP_VS_RT_MODE_NON_LOCAL) < 0)
goto tx_error;
@@ -633,10 +666,10 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
}
was_input = rt_is_input_route(skb_rtable(skb));
- local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
+ local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
- IP_VS_RT_MODE_RDR, NULL);
+ IP_VS_RT_MODE_RDR, NULL, ipvsh);
if (local < 0)
goto tx_error;
rt = skb_rtable(skb);
@@ -721,8 +754,8 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
}
- local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
- ipvsh, 0,
+ local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
+ NULL, ipvsh, 0,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_RDR);
@@ -791,6 +824,81 @@ tx_error:
}
#endif
+/* When forwarding a packet, we must ensure that we've got enough headroom
+ * for the encapsulation packet in the skb. This also gives us an
+ * opportunity to figure out what the payload_len, dsfield, ttl, and df
+ * values should be, so that we won't need to look at the old ip header
+ * again
+ */
+static struct sk_buff *
+ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
+ unsigned int max_headroom, __u8 *next_protocol,
+ __u32 *payload_len, __u8 *dsfield, __u8 *ttl,
+ __be16 *df)
+{
+ struct sk_buff *new_skb = NULL;
+ struct iphdr *old_iph = NULL;
+#ifdef CONFIG_IP_VS_IPV6
+ struct ipv6hdr *old_ipv6h = NULL;
+#endif
+
+ if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
+ new_skb = skb_realloc_headroom(skb, max_headroom);
+ if (!new_skb)
+ goto error;
+ consume_skb(skb);
+ skb = new_skb;
+ }
+
+#ifdef CONFIG_IP_VS_IPV6
+ if (skb_af == AF_INET6) {
+ old_ipv6h = ipv6_hdr(skb);
+ *next_protocol = IPPROTO_IPV6;
+ if (payload_len)
+ *payload_len =
+ ntohs(old_ipv6h->payload_len) +
+ sizeof(*old_ipv6h);
+ *dsfield = ipv6_get_dsfield(old_ipv6h);
+ *ttl = old_ipv6h->hop_limit;
+ if (df)
+ *df = 0;
+ } else
+#endif
+ {
+ old_iph = ip_hdr(skb);
+ /* Copy DF, reset fragment offset and MF */
+ if (df)
+ *df = (old_iph->frag_off & htons(IP_DF));
+ *next_protocol = IPPROTO_IPIP;
+
+ /* fix old IP header checksum */
+ ip_send_check(old_iph);
+ *dsfield = ipv4_get_dsfield(old_iph);
+ *ttl = old_iph->ttl;
+ if (payload_len)
+ *payload_len = ntohs(old_iph->tot_len);
+ }
+
+ return skb;
+error:
+ kfree_skb(skb);
+ return ERR_PTR(-ENOMEM);
+}
+
+static inline int __tun_gso_type_mask(int encaps_af, int orig_af)
+{
+ if (encaps_af == AF_INET) {
+ if (orig_af == AF_INET)
+ return SKB_GSO_IPIP;
+
+ return SKB_GSO_SIT;
+ }
+
+ /* GSO: we need to provide proper SKB_GSO_ value for IPv6:
+ * SKB_GSO_SIT/IPV6
+ */
+ return 0;
+}
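+
+/* For reference, the mapping implemented above is:
+ *   outer AF_INET,  inner AF_INET  -> SKB_GSO_IPIP (plain ipip)
+ *   outer AF_INET,  inner AF_INET6 -> SKB_GSO_SIT  (IPv6-in-IPv4)
+ *   outer AF_INET6, inner either   -> 0 (no suitable SKB_GSO_ flag yet)
+ */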
/*
* IP Tunneling transmitter
@@ -819,9 +927,11 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
struct rtable *rt; /* Route to the other host */
__be32 saddr; /* Source for tunnel */
struct net_device *tdev; /* Device to other host */
- struct iphdr *old_iph = ip_hdr(skb);
- u8 tos = old_iph->tos;
- __be16 df;
+ __u8 next_protocol = 0;
+ __u8 dsfield = 0;
+ __u8 ttl = 0;
+ __be16 df = 0;
+ __be16 *dfp = NULL;
struct iphdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
int ret, local;
@@ -829,11 +939,11 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
rcu_read_lock();
- local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
+ local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_CONNECT |
- IP_VS_RT_MODE_TUNNEL, &saddr);
+ IP_VS_RT_MODE_TUNNEL, &saddr, ipvsh);
if (local < 0)
goto tx_error;
if (local) {
@@ -844,29 +954,21 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
rt = skb_rtable(skb);
tdev = rt->dst.dev;
- /* Copy DF, reset fragment offset and MF */
- df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;
-
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
- if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
- struct sk_buff *new_skb =
- skb_realloc_headroom(skb, max_headroom);
-
- if (!new_skb)
- goto tx_error;
- consume_skb(skb);
- skb = new_skb;
- old_iph = ip_hdr(skb);
- }
-
- /* fix old IP header checksum */
- ip_send_check(old_iph);
+ /* We only care about the df field if sysctl_pmtu_disc(ipvs) is set */
+ dfp = sysctl_pmtu_disc(ipvs) ? &df : NULL;
+ skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
+ &next_protocol, NULL, &dsfield,
+ &ttl, dfp);
+ if (IS_ERR(skb))
+ goto tx_error;
- skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP);
+ skb = iptunnel_handle_offloads(
+ skb, false, __tun_gso_type_mask(AF_INET, cp->af));
if (IS_ERR(skb))
goto tx_error;
@@ -883,11 +985,11 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
iph->version = 4;
iph->ihl = sizeof(struct iphdr)>>2;
iph->frag_off = df;
- iph->protocol = IPPROTO_IPIP;
- iph->tos = tos;
+ iph->protocol = next_protocol;
+ iph->tos = dsfield;
iph->daddr = cp->daddr.ip;
iph->saddr = saddr;
- iph->ttl = old_iph->ttl;
+ iph->ttl = ttl;
ip_select_ident(skb, NULL);
/* Another hack: avoid icmp_send in ip_fragment */
@@ -920,7 +1022,10 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
struct rt6_info *rt; /* Route to the other host */
struct in6_addr saddr; /* Source for tunnel */
struct net_device *tdev; /* Device to other host */
- struct ipv6hdr *old_iph = ipv6_hdr(skb);
+ __u8 next_protocol = 0;
+ __u32 payload_len = 0;
+ __u8 dsfield = 0;
+ __u8 ttl = 0;
struct ipv6hdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
int ret, local;
@@ -928,7 +1033,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
rcu_read_lock();
- local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
+ local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
&saddr, ipvsh, 1,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
@@ -948,19 +1053,14 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
*/
max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
- if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
- struct sk_buff *new_skb =
- skb_realloc_headroom(skb, max_headroom);
-
- if (!new_skb)
- goto tx_error;
- consume_skb(skb);
- skb = new_skb;
- old_iph = ipv6_hdr(skb);
- }
+ skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
+ &next_protocol, &payload_len,
+ &dsfield, &ttl, NULL);
+ if (IS_ERR(skb))
+ goto tx_error;
- /* GSO: we need to provide proper SKB_GSO_ value for IPv6 */
- skb = iptunnel_handle_offloads(skb, false, 0); /* SKB_GSO_SIT/IPV6 */
+ skb = iptunnel_handle_offloads(
+ skb, false, __tun_gso_type_mask(AF_INET6, cp->af));
if (IS_ERR(skb))
goto tx_error;
@@ -975,14 +1075,13 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
*/
iph = ipv6_hdr(skb);
iph->version = 6;
- iph->nexthdr = IPPROTO_IPV6;
- iph->payload_len = old_iph->payload_len;
- be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
+ iph->nexthdr = next_protocol;
+ iph->payload_len = htons(payload_len);
memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
- ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph));
+ ipv6_change_dsfield(iph, 0, dsfield);
iph->daddr = cp->daddr.in6;
iph->saddr = saddr;
- iph->hop_limit = old_iph->hop_limit;
+ iph->hop_limit = ttl;
/* Another hack: avoid icmp_send in ip_fragment */
skb->ignore_df = 1;
@@ -1021,10 +1120,10 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
rcu_read_lock();
- local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
+ local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL |
- IP_VS_RT_MODE_KNOWN_NH, NULL);
+ IP_VS_RT_MODE_KNOWN_NH, NULL, ipvsh);
if (local < 0)
goto tx_error;
if (local) {
@@ -1060,8 +1159,8 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
rcu_read_lock();
- local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
- ipvsh, 0,
+ local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
+ NULL, ipvsh, 0,
IP_VS_RT_MODE_LOCAL |
IP_VS_RT_MODE_NON_LOCAL);
if (local < 0)
@@ -1128,7 +1227,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
rcu_read_lock();
- local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, rt_mode, NULL);
+ local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip, rt_mode,
+ NULL, iph);
if (local < 0)
goto tx_error;
rt = skb_rtable(skb);
@@ -1219,8 +1319,8 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
rcu_read_lock();
- local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
- ipvsh, 0, rt_mode);
+ local = __ip_vs_get_out_rt_v6(cp->af, skb, cp->dest, &cp->daddr.in6,
+ NULL, ipvsh, 0, rt_mode);
if (local < 0)
goto tx_error;
rt = (struct rt6_info *) skb_dst(skb);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index de88c4ab5146..5016a6929085 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -142,7 +142,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, u16 zone)
static u32 __hash_bucket(u32 hash, unsigned int size)
{
- return ((u64)hash * size) >> 32;
+ return reciprocal_scale(hash, size);
}
static u32 hash_bucket(u32 hash, const struct net *net)
@@ -358,7 +358,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
tstamp = nf_conn_tstamp_find(ct);
if (tstamp && tstamp->stop == 0)
- tstamp->stop = ktime_to_ns(ktime_get_real());
+ tstamp->stop = ktime_get_real_ns();
if (nf_ct_is_dying(ct))
goto delete;
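
reciprocal_scale() is the helper from include/linux/kernel.h that these hunks (and the nf_conntrack_expect and nf_nat_core ones below) switch to. Its definition should be essentially the expression being replaced:

	static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
	{
		return (u32)(((u64) val * ep_ro) >> 32);
	}

i.e. it maps a uniformly distributed 32-bit value into [0, ep_ro) with a multiply and shift instead of a modulo, so the conversions are pure cleanups with no behavioral change.
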
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index f87e8f68ad45..91a1837acd0e 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -83,7 +83,8 @@ static unsigned int nf_ct_expect_dst_hash(const struct nf_conntrack_tuple *tuple
hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
(((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
(__force __u16)tuple->dst.u.all) ^ nf_conntrack_hash_rnd);
- return ((u64)hash * nf_ct_expect_hsize) >> 32;
+
+ return reciprocal_scale(hash, nf_ct_expect_hsize);
}
struct nf_conntrack_expect *
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 355a5c4ef763..1bd9ed9e62f6 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1737,7 +1737,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
}
tstamp = nf_conn_tstamp_find(ct);
if (tstamp)
- tstamp->start = ktime_to_ns(ktime_get_real());
+ tstamp->start = ktime_get_real_ns();
err = nf_conntrack_hash_check_insert(ct);
if (err < 0)
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index d25f29377648..957c1db66652 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -14,6 +14,30 @@
static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+static bool nf_generic_should_process(u8 proto)
+{
+ switch (proto) {
+#ifdef CONFIG_NF_CT_PROTO_SCTP_MODULE
+ case IPPROTO_SCTP:
+ return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_DCCP_MODULE
+ case IPPROTO_DCCP:
+ return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_GRE_MODULE
+ case IPPROTO_GRE:
+ return false;
+#endif
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE_MODULE
+ case IPPROTO_UDPLITE:
+ return false;
+#endif
+ default:
+ return true;
+ }
+}
+
static inline struct nf_generic_net *generic_pernet(struct net *net)
{
return &net->ct.nf_ct_proto.generic;
@@ -67,7 +91,7 @@ static int generic_packet(struct nf_conn *ct,
static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, unsigned int *timeouts)
{
- return true;
+ return nf_generic_should_process(nf_ct_protonum(ct));
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index f641751dba9d..cf65a1e040dd 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -101,7 +101,7 @@ static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ct_iter_state *st = seq->private;
- st->time_now = ktime_to_ns(ktime_get_real());
+ st->time_now = ktime_get_real_ns();
rcu_read_lock();
return ct_get_idx(seq, *pos);
}
diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
index eeb8ef4ff1a3..a2233e77cf39 100644
--- a/net/netfilter/nf_log_common.c
+++ b/net/netfilter/nf_log_common.c
@@ -158,7 +158,7 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
'0' + loginfo->u.log.level, prefix,
in ? in->name : "",
out ? out->name : "");
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (skb->nf_bridge) {
const struct net_device *physindev;
const struct net_device *physoutdev;
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 552f97cd9fde..4e0b47831d43 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -126,7 +126,8 @@ hash_by_src(const struct net *net, u16 zone,
/* Original src, to ensure we map it consistently if poss. */
hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
tuple->dst.protonum ^ zone ^ nf_conntrack_hash_rnd);
- return ((u64)hash * net->ct.nat_htable_size) >> 32;
+
+ return reciprocal_scale(hash, net->ct.nat_htable_size);
}
/* Is this tuple already taken? (not by us) */
@@ -274,7 +275,7 @@ find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
}
var_ipp->all[i] = (__force __u32)
- htonl(minip + (((u64)j * dist) >> 32));
+ htonl(minip + reciprocal_scale(j, dist));
if (var_ipp->all[i] != range->max_addr.all[i])
full_range = true;
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5d24b1fdb593..4c8b68e5fa16 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -52,7 +52,7 @@ void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
dev_put(entry->indev);
if (entry->outdev)
dev_put(entry->outdev);
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (entry->skb->nf_bridge) {
struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
@@ -77,7 +77,7 @@ bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
dev_hold(entry->indev);
if (entry->outdev)
dev_hold(entry->outdev);
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (entry->skb->nf_bridge) {
struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;
struct net_device *physdev;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index deeb95fb7028..556a0dfa4abc 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -127,6 +127,204 @@ static void nft_trans_destroy(struct nft_trans *trans)
kfree(trans);
}
+static void nf_tables_unregister_hooks(const struct nft_table *table,
+ const struct nft_chain *chain,
+ unsigned int hook_nops)
+{
+ if (!(table->flags & NFT_TABLE_F_DORMANT) &&
+ chain->flags & NFT_BASE_CHAIN)
+ nf_unregister_hooks(nft_base_chain(chain)->ops, hook_nops);
+}
+
+/* Internal table flags */
+#define NFT_TABLE_INACTIVE (1 << 15)
+
+static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
+{
+ struct nft_trans *trans;
+
+ trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table));
+ if (trans == NULL)
+ return -ENOMEM;
+
+ if (msg_type == NFT_MSG_NEWTABLE)
+ ctx->table->flags |= NFT_TABLE_INACTIVE;
+
+ list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ return 0;
+}
+
+static int nft_deltable(struct nft_ctx *ctx)
+{
+ int err;
+
+ err = nft_trans_table_add(ctx, NFT_MSG_DELTABLE);
+ if (err < 0)
+ return err;
+
+ list_del_rcu(&ctx->table->list);
+ return err;
+}
+
+static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
+{
+ struct nft_trans *trans;
+
+ trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
+ if (trans == NULL)
+ return -ENOMEM;
+
+ if (msg_type == NFT_MSG_NEWCHAIN)
+ ctx->chain->flags |= NFT_CHAIN_INACTIVE;
+
+ list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+ return 0;
+}
+
+static int nft_delchain(struct nft_ctx *ctx)
+{
+ int err;
+
+ err = nft_trans_chain_add(ctx, NFT_MSG_DELCHAIN);
+ if (err < 0)
+ return err;
+
+ ctx->table->use--;
+ list_del_rcu(&ctx->chain->list);
+
+ return err;
+}
+
+static inline bool
+nft_rule_is_active(struct net *net, const struct nft_rule *rule)
+{
+ return (rule->genmask & (1 << net->nft.gencursor)) == 0;
+}
+
+static inline int gencursor_next(struct net *net)
+{
+ return net->nft.gencursor+1 == 1 ? 1 : 0;
+}
+
+static inline int
+nft_rule_is_active_next(struct net *net, const struct nft_rule *rule)
+{
+ return (rule->genmask & (1 << gencursor_next(net))) == 0;
+}
+
+static inline void
+nft_rule_activate_next(struct net *net, struct nft_rule *rule)
+{
+ /* Now inactive, will be active in the future */
+ rule->genmask = (1 << net->nft.gencursor);
+}
+
+static inline void
+nft_rule_deactivate_next(struct net *net, struct nft_rule *rule)
+{
+ rule->genmask = (1 << gencursor_next(net));
+}
+
+static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
+{
+ rule->genmask = 0;
+}
+
+static int
+nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
+{
+ /* You cannot delete the same rule twice */
+ if (nft_rule_is_active_next(ctx->net, rule)) {
+ nft_rule_deactivate_next(ctx->net, rule);
+ ctx->chain->use--;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
+ struct nft_rule *rule)
+{
+ struct nft_trans *trans;
+
+ trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule));
+ if (trans == NULL)
+ return NULL;
+
+ nft_trans_rule(trans) = rule;
+ list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+
+ return trans;
+}
+
+static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
+{
+ struct nft_trans *trans;
+ int err;
+
+ trans = nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule);
+ if (trans == NULL)
+ return -ENOMEM;
+
+ err = nf_tables_delrule_deactivate(ctx, rule);
+ if (err < 0) {
+ nft_trans_destroy(trans);
+ return err;
+ }
+
+ return 0;
+}
+
+static int nft_delrule_by_chain(struct nft_ctx *ctx)
+{
+ struct nft_rule *rule;
+ int err;
+
+ list_for_each_entry(rule, &ctx->chain->rules, list) {
+ err = nft_delrule(ctx, rule);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+/* Internal set flag */
+#define NFT_SET_INACTIVE (1 << 15)
+
+static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
+ struct nft_set *set)
+{
+ struct nft_trans *trans;
+
+ trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set));
+ if (trans == NULL)
+ return -ENOMEM;
+
+ if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) {
+ nft_trans_set_id(trans) =
+ ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
+ set->flags |= NFT_SET_INACTIVE;
+ }
+ nft_trans_set(trans) = set;
+ list_add_tail(&trans->list, &ctx->net->nft.commit_list);
+
+ return 0;
+}
+
+static int nft_delset(struct nft_ctx *ctx, struct nft_set *set)
+{
+ int err;
+
+ err = nft_trans_set_add(ctx, NFT_MSG_DELSET, set);
+ if (err < 0)
+ return err;
+
+ list_del_rcu(&set->list);
+ ctx->table->use--;
+
+ return err;
+}
+
/*
* Tables
*/
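
A compact way to read the generation helpers above (illustration, not part of the patch): nf_tables keeps two rule generations, 0 and 1, and a clear genmask bit means "active in that generation":

	genmask == 0                     active now and after commit
	genmask == 1 << gencursor        new rule: hidden now, live next
	genmask == 1 << gencursor_next   deleted rule: live now, gone next

Commit flips gencursor, making pending additions visible and pending deletions disappear in one step.
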
@@ -207,9 +405,9 @@ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
[NFTA_TABLE_FLAGS] = { .type = NLA_U32 },
};
-static int nf_tables_fill_table_info(struct sk_buff *skb, u32 portid, u32 seq,
- int event, u32 flags, int family,
- const struct nft_table *table)
+static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
+ u32 portid, u32 seq, int event, u32 flags,
+ int family, const struct nft_table *table)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
@@ -222,7 +420,7 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, u32 portid, u32 seq,
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = family;
nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
+ nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) ||
nla_put_be32(skb, NFTA_TABLE_FLAGS, htonl(table->flags)) ||
@@ -250,8 +448,8 @@ static int nf_tables_table_notify(const struct nft_ctx *ctx, int event)
if (skb == NULL)
goto err;
- err = nf_tables_fill_table_info(skb, ctx->portid, ctx->seq, event, 0,
- ctx->afi->family, ctx->table);
+ err = nf_tables_fill_table_info(skb, ctx->net, ctx->portid, ctx->seq,
+ event, 0, ctx->afi->family, ctx->table);
if (err < 0) {
kfree_skb(skb);
goto err;
@@ -290,7 +488,7 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
if (idx > s_idx)
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
- if (nf_tables_fill_table_info(skb,
+ if (nf_tables_fill_table_info(skb, net,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWTABLE,
@@ -309,9 +507,6 @@ done:
return skb->len;
}
-/* Internal table flags */
-#define NFT_TABLE_INACTIVE (1 << 15)
-
static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
@@ -345,7 +540,7 @@ static int nf_tables_gettable(struct sock *nlsk, struct sk_buff *skb,
if (!skb2)
return -ENOMEM;
- err = nf_tables_fill_table_info(skb2, NETLINK_CB(skb).portid,
+ err = nf_tables_fill_table_info(skb2, net, NETLINK_CB(skb).portid,
nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
family, table);
if (err < 0)
@@ -443,21 +638,6 @@ err:
return ret;
}
-static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
-{
- struct nft_trans *trans;
-
- trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_table));
- if (trans == NULL)
- return -ENOMEM;
-
- if (msg_type == NFT_MSG_NEWTABLE)
- ctx->table->flags |= NFT_TABLE_INACTIVE;
-
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
- return 0;
-}
-
static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
@@ -527,6 +707,67 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
return 0;
}
+static int nft_flush_table(struct nft_ctx *ctx)
+{
+ int err;
+ struct nft_chain *chain, *nc;
+ struct nft_set *set, *ns;
+
+ list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+ ctx->chain = chain;
+
+ err = nft_delrule_by_chain(ctx);
+ if (err < 0)
+ goto out;
+
+ err = nft_delchain(ctx);
+ if (err < 0)
+ goto out;
+ }
+
+ list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
+ if (set->flags & NFT_SET_ANONYMOUS &&
+ !list_empty(&set->bindings))
+ continue;
+
+ err = nft_delset(ctx, set);
+ if (err < 0)
+ goto out;
+ }
+
+ err = nft_deltable(ctx);
+out:
+ return err;
+}
+
+static int nft_flush(struct nft_ctx *ctx, int family)
+{
+ struct nft_af_info *afi;
+ struct nft_table *table, *nt;
+ const struct nlattr * const *nla = ctx->nla;
+ int err = 0;
+
+ list_for_each_entry(afi, &ctx->net->nft.af_info, list) {
+ if (family != AF_UNSPEC && afi->family != family)
+ continue;
+
+ ctx->afi = afi;
+ list_for_each_entry_safe(table, nt, &afi->tables, list) {
+ if (nla[NFTA_TABLE_NAME] &&
+ nla_strcmp(nla[NFTA_TABLE_NAME], table->name) != 0)
+ continue;
+
+ ctx->table = table;
+
+ err = nft_flush_table(ctx);
+ if (err < 0)
+ goto out;
+ }
+ }
+out:
+ return err;
+}
+
static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
@@ -535,9 +776,13 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
struct nft_af_info *afi;
struct nft_table *table;
struct net *net = sock_net(skb->sk);
- int family = nfmsg->nfgen_family, err;
+ int family = nfmsg->nfgen_family;
struct nft_ctx ctx;
+ nft_ctx_init(&ctx, skb, nlh, NULL, NULL, NULL, nla);
+ if (family == AF_UNSPEC || nla[NFTA_TABLE_NAME] == NULL)
+ return nft_flush(&ctx, family);
+
afi = nf_tables_afinfo_lookup(net, family, false);
if (IS_ERR(afi))
return PTR_ERR(afi);
@@ -547,16 +792,11 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
return PTR_ERR(table);
if (table->flags & NFT_TABLE_INACTIVE)
return -ENOENT;
- if (table->use > 0)
- return -EBUSY;
- nft_ctx_init(&ctx, skb, nlh, afi, table, NULL, nla);
- err = nft_trans_table_add(&ctx, NFT_MSG_DELTABLE);
- if (err < 0)
- return err;
+ ctx.afi = afi;
+ ctx.table = table;
- list_del_rcu(&table->list);
- return 0;
+ return nft_flush_table(&ctx);
}
static void nf_tables_table_destroy(struct nft_ctx *ctx)
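
The net effect: deleting a populated table no longer fails with -EBUSY. nft_flush_table() tears it down recursively (rules, then chains, then sets other than still-bound anonymous ones, then the table), and a delete request with no table name, or with AF_UNSPEC, walks all matching tables via nft_flush(). Assuming the standard nft CLI, this is what lets a command such as

	nft delete table ip filter

succeed on a non-empty table within a single transaction.
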
@@ -674,9 +914,9 @@ nla_put_failure:
return -ENOSPC;
}
-static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq,
- int event, u32 flags, int family,
- const struct nft_table *table,
+static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
+ u32 portid, u32 seq, int event, u32 flags,
+ int family, const struct nft_table *table,
const struct nft_chain *chain)
{
struct nlmsghdr *nlh;
@@ -690,7 +930,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, u32 portid, u32 seq,
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = family;
nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
+ nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name))
goto nla_put_failure;
@@ -748,8 +988,8 @@ static int nf_tables_chain_notify(const struct nft_ctx *ctx, int event)
if (skb == NULL)
goto err;
- err = nf_tables_fill_chain_info(skb, ctx->portid, ctx->seq, event, 0,
- ctx->afi->family, ctx->table,
+ err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq,
+ event, 0, ctx->afi->family, ctx->table,
ctx->chain);
if (err < 0) {
kfree_skb(skb);
@@ -791,7 +1031,8 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
if (idx > s_idx)
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
- if (nf_tables_fill_chain_info(skb, NETLINK_CB(cb->skb).portid,
+ if (nf_tables_fill_chain_info(skb, net,
+ NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWCHAIN,
NLM_F_MULTI,
@@ -850,7 +1091,7 @@ static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
if (!skb2)
return -ENOMEM;
- err = nf_tables_fill_chain_info(skb2, NETLINK_CB(skb).portid,
+ err = nf_tables_fill_chain_info(skb2, net, NETLINK_CB(skb).portid,
nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
family, table, chain);
if (err < 0)
@@ -913,21 +1154,6 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
rcu_assign_pointer(chain->stats, newstats);
}
-static int nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
-{
- struct nft_trans *trans;
-
- trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain));
- if (trans == NULL)
- return -ENOMEM;
-
- if (msg_type == NFT_MSG_NEWCHAIN)
- ctx->chain->flags |= NFT_CHAIN_INACTIVE;
-
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
- return 0;
-}
-
static void nf_tables_chain_destroy(struct nft_chain *chain)
{
BUG_ON(chain->use > 0);
@@ -1157,11 +1383,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
list_add_tail_rcu(&chain->list, &table->chains);
return 0;
err2:
- if (!(table->flags & NFT_TABLE_F_DORMANT) &&
- chain->flags & NFT_BASE_CHAIN) {
- nf_unregister_hooks(nft_base_chain(chain)->ops,
- afi->nops);
- }
+ nf_tables_unregister_hooks(table, chain, afi->nops);
err1:
nf_tables_chain_destroy(chain);
return err;
@@ -1178,7 +1400,6 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
struct net *net = sock_net(skb->sk);
int family = nfmsg->nfgen_family;
struct nft_ctx ctx;
- int err;
afi = nf_tables_afinfo_lookup(net, family, false);
if (IS_ERR(afi))
@@ -1199,13 +1420,8 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
return -EBUSY;
nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
- err = nft_trans_chain_add(&ctx, NFT_MSG_DELCHAIN);
- if (err < 0)
- return err;
- table->use--;
- list_del_rcu(&chain->list);
- return 0;
+ return nft_delchain(&ctx);
}
/*
@@ -1432,8 +1648,9 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
.len = NFT_USERDATA_MAXLEN },
};
-static int nf_tables_fill_rule_info(struct sk_buff *skb, u32 portid, u32 seq,
- int event, u32 flags, int family,
+static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
+ u32 portid, u32 seq, int event,
+ u32 flags, int family,
const struct nft_table *table,
const struct nft_chain *chain,
const struct nft_rule *rule)
@@ -1453,7 +1670,7 @@ static int nf_tables_fill_rule_info(struct sk_buff *skb, u32 portid, u32 seq,
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = family;
nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
+ nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
if (nla_put_string(skb, NFTA_RULE_TABLE, table->name))
goto nla_put_failure;
@@ -1509,8 +1726,8 @@ static int nf_tables_rule_notify(const struct nft_ctx *ctx,
if (skb == NULL)
goto err;
- err = nf_tables_fill_rule_info(skb, ctx->portid, ctx->seq, event, 0,
- ctx->afi->family, ctx->table,
+ err = nf_tables_fill_rule_info(skb, ctx->net, ctx->portid, ctx->seq,
+ event, 0, ctx->afi->family, ctx->table,
ctx->chain, rule);
if (err < 0) {
kfree_skb(skb);
@@ -1527,41 +1744,6 @@ err:
return err;
}
-static inline bool
-nft_rule_is_active(struct net *net, const struct nft_rule *rule)
-{
- return (rule->genmask & (1 << net->nft.gencursor)) == 0;
-}
-
-static inline int gencursor_next(struct net *net)
-{
- return net->nft.gencursor+1 == 1 ? 1 : 0;
-}
-
-static inline int
-nft_rule_is_active_next(struct net *net, const struct nft_rule *rule)
-{
- return (rule->genmask & (1 << gencursor_next(net))) == 0;
-}
-
-static inline void
-nft_rule_activate_next(struct net *net, struct nft_rule *rule)
-{
- /* Now inactive, will be active in the future */
- rule->genmask = (1 << net->nft.gencursor);
-}
-
-static inline void
-nft_rule_disactivate_next(struct net *net, struct nft_rule *rule)
-{
- rule->genmask = (1 << gencursor_next(net));
-}
-
-static inline void nft_rule_clear(struct net *net, struct nft_rule *rule)
-{
- rule->genmask = 0;
-}
-
static int nf_tables_dump_rules(struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -1591,7 +1773,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
if (idx > s_idx)
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
- if (nf_tables_fill_rule_info(skb, NETLINK_CB(cb->skb).portid,
+ if (nf_tables_fill_rule_info(skb, net, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFT_MSG_NEWRULE,
NLM_F_MULTI | NLM_F_APPEND,
@@ -1657,7 +1839,7 @@ static int nf_tables_getrule(struct sock *nlsk, struct sk_buff *skb,
if (!skb2)
return -ENOMEM;
- err = nf_tables_fill_rule_info(skb2, NETLINK_CB(skb).portid,
+ err = nf_tables_fill_rule_info(skb2, net, NETLINK_CB(skb).portid,
nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
family, table, chain, rule);
if (err < 0)
@@ -1687,21 +1869,6 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
kfree(rule);
}
-static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type,
- struct nft_rule *rule)
-{
- struct nft_trans *trans;
-
- trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_rule));
- if (trans == NULL)
- return NULL;
-
- nft_trans_rule(trans) = rule;
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
-
- return trans;
-}
-
#define NFT_RULE_MAXEXPRS 128
static struct nft_expr_info *info;
@@ -1823,7 +1990,7 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
err = -ENOMEM;
goto err2;
}
- nft_rule_disactivate_next(net, old_rule);
+ nft_rule_deactivate_next(net, old_rule);
chain->use--;
list_add_tail_rcu(&rule->list, &old_rule->list);
} else {
@@ -1867,33 +2034,6 @@ err1:
return err;
}
-static int
-nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
-{
- /* You cannot delete the same rule twice */
- if (nft_rule_is_active_next(ctx->net, rule)) {
- if (nft_trans_rule_add(ctx, NFT_MSG_DELRULE, rule) == NULL)
- return -ENOMEM;
- nft_rule_disactivate_next(ctx->net, rule);
- ctx->chain->use--;
- return 0;
- }
- return -ENOENT;
-}
-
-static int nf_table_delrule_by_chain(struct nft_ctx *ctx)
-{
- struct nft_rule *rule;
- int err;
-
- list_for_each_entry(rule, &ctx->chain->rules, list) {
- err = nf_tables_delrule_one(ctx, rule);
- if (err < 0)
- return err;
- }
- return 0;
-}
-
static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
@@ -1932,14 +2072,14 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
if (IS_ERR(rule))
return PTR_ERR(rule);
- err = nf_tables_delrule_one(&ctx, rule);
+ err = nft_delrule(&ctx, rule);
} else {
- err = nf_table_delrule_by_chain(&ctx);
+ err = nft_delrule_by_chain(&ctx);
}
} else {
list_for_each_entry(chain, &table->chains, list) {
ctx.chain = chain;
- err = nf_table_delrule_by_chain(&ctx);
+ err = nft_delrule_by_chain(&ctx);
if (err < 0)
break;
}
@@ -2183,7 +2323,7 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = ctx->afi->family;
nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
+ nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff);
if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
goto nla_put_failure;
@@ -2204,6 +2344,11 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
goto nla_put_failure;
}
+ if (set->policy != NFT_SET_POL_PERFORMANCE) {
+ if (nla_put_be32(skb, NFTA_SET_POLICY, htonl(set->policy)))
+ goto nla_put_failure;
+ }
+
desc = nla_nest_start(skb, NFTA_SET_DESC);
if (desc == NULL)
goto nla_put_failure;
@@ -2322,8 +2467,6 @@ static int nf_tables_dump_sets_done(struct netlink_callback *cb)
return 0;
}
-#define NFT_SET_INACTIVE (1 << 15) /* Internal set flag */
-
static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
@@ -2398,26 +2541,6 @@ static int nf_tables_set_desc_parse(const struct nft_ctx *ctx,
return 0;
}
-static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type,
- struct nft_set *set)
-{
- struct nft_trans *trans;
-
- trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set));
- if (trans == NULL)
- return -ENOMEM;
-
- if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) {
- nft_trans_set_id(trans) =
- ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
- set->flags |= NFT_SET_INACTIVE;
- }
- nft_trans_set(trans) = set;
- list_add_tail(&trans->list, &ctx->net->nft.commit_list);
-
- return 0;
-}
-
static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
@@ -2551,6 +2674,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
set->dlen = desc.dlen;
set->flags = flags;
set->size = desc.size;
+ set->policy = policy;
err = ops->init(set, &desc, nla);
if (err < 0)
@@ -2611,13 +2735,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
if (!list_empty(&set->bindings))
return -EBUSY;
- err = nft_trans_set_add(&ctx, NFT_MSG_DELSET, set);
- if (err < 0)
- return err;
-
- list_del_rcu(&set->list);
- ctx.table->use--;
- return 0;
+ return nft_delset(&ctx, set);
}
static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
@@ -2815,7 +2933,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = ctx.afi->family;
nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
+ nfmsg->res_id = htons(ctx.net->nft.base_seq & 0xffff);
if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name))
goto nla_put_failure;
@@ -2896,7 +3014,7 @@ static int nf_tables_fill_setelem_info(struct sk_buff *skb,
nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = ctx->afi->family;
nfmsg->version = NFNETLINK_V0;
- nfmsg->res_id = 0;
+ nfmsg->res_id = htons(ctx->net->nft.base_seq & 0xffff);
if (nla_put_string(skb, NFTA_SET_TABLE, ctx->table->name))
goto nla_put_failure;
@@ -3183,6 +3301,87 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
return err;
}
+static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
+ u32 portid, u32 seq)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ int event = (NFNL_SUBSYS_NFTABLES << 8) | NFT_MSG_NEWGEN;
+
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), 0);
+ if (nlh == NULL)
+ goto nla_put_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
+
+ if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq)))
+ goto nla_put_failure;
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_trim(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int nf_tables_gen_notify(struct net *net, struct sk_buff *skb, int event)
+{
+ struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ struct sk_buff *skb2;
+ int err;
+
+ if (nlmsg_report(nlh) &&
+ !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
+ return 0;
+
+ err = -ENOBUFS;
+ skb2 = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ goto err;
+
+ err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
+ nlh->nlmsg_seq);
+ if (err < 0) {
+ kfree_skb(skb2);
+ goto err;
+ }
+
+ err = nfnetlink_send(skb2, net, NETLINK_CB(skb).portid,
+ NFNLGRP_NFTABLES, nlmsg_report(nlh), GFP_KERNEL);
+err:
+ if (err < 0) {
+ nfnetlink_set_err(net, NETLINK_CB(skb).portid, NFNLGRP_NFTABLES,
+ err);
+ }
+ return err;
+}
+
+static int nf_tables_getgen(struct sock *nlsk, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nla[])
+{
+ struct net *net = sock_net(skb->sk);
+ struct sk_buff *skb2;
+ int err;
+
+ skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
+ nlh->nlmsg_seq);
+ if (err < 0)
+ goto err;
+
+ return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+err:
+ kfree_skb(skb2);
+ return err;
+}
+
static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
[NFT_MSG_NEWTABLE] = {
.call_batch = nf_tables_newtable,
@@ -3259,6 +3458,9 @@ static const struct nfnl_callback nf_tables_cb[NFT_MSG_MAX] = {
.attr_count = NFTA_SET_ELEM_LIST_MAX,
.policy = nft_set_elem_list_policy,
},
+ [NFT_MSG_GETGEN] = {
+ .call = nf_tables_getgen,
+ },
};
static void nft_chain_commit_update(struct nft_trans *trans)
@@ -3352,11 +3554,9 @@ static int nf_tables_commit(struct sk_buff *skb)
break;
case NFT_MSG_DELCHAIN:
nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN);
- if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
- trans->ctx.chain->flags & NFT_BASE_CHAIN) {
- nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
- trans->ctx.afi->nops);
- }
+ nf_tables_unregister_hooks(trans->ctx.table,
+ trans->ctx.chain,
+ trans->ctx.afi->nops);
break;
case NFT_MSG_NEWRULE:
nft_rule_clear(trans->ctx.net, nft_trans_rule(trans));
@@ -3418,6 +3618,8 @@ static int nf_tables_commit(struct sk_buff *skb)
call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu);
}
+ nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
+
return 0;
}
@@ -3479,11 +3681,9 @@ static int nf_tables_abort(struct sk_buff *skb)
} else {
trans->ctx.table->use--;
list_del_rcu(&trans->ctx.chain->list);
- if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
- trans->ctx.chain->flags & NFT_BASE_CHAIN) {
- nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
- trans->ctx.afi->nops);
- }
+ nf_tables_unregister_hooks(trans->ctx.table,
+ trans->ctx.chain,
+ trans->ctx.afi->nops);
}
break;
case NFT_MSG_DELCHAIN:
@@ -3963,6 +4163,7 @@ static void __exit nf_tables_module_exit(void)
{
unregister_pernet_subsys(&nf_tables_net_ops);
nfnetlink_subsys_unregister(&nf_tables_subsys);
+ rcu_barrier();
nf_tables_core_module_exit();
kfree(info);
}
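
Note: with res_id now carrying the lower 16 bits of the ruleset generation counter (base_seq) in every dump header, and NFT_MSG_GETGEN/NEWGEN exposing the full value, userspace can detect that a transaction was committed while a multi-part dump was in flight. A minimal userspace-side sketch of that check, assuming the caller records the cookie from the first dump part (helper name hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>
    #include <linux/netfilter/nfnetlink.h>

    /* Compare the 16-bit generation cookie in each nfgenmsg header
     * against the one captured at the start of the dump; a mismatch
     * means a commit ran in between, so the dump should be restarted
     * to obtain a consistent snapshot. */
    static bool gen_changed(const struct nfgenmsg *nfmsg, uint16_t start_gen)
    {
        return ntohs(nfmsg->res_id) != start_gen;
    }
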
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index f37f0716a9fc..6c5a915cfa75 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -381,7 +381,7 @@ replay:
*/
if (err == -EAGAIN) {
nfnl_err_reset(&err_list);
- ss->abort(skb);
+ ss->abort(oskb);
nfnl_unlock(subsys_id);
kfree_skb(nskb);
goto replay;
@@ -418,9 +418,9 @@ ack:
}
done:
if (success && done)
- ss->commit(skb);
+ ss->commit(oskb);
else
- ss->abort(skb);
+ ss->abort(oskb);
nfnl_err_deliver(&err_list, oskb);
nfnl_unlock(subsys_id);
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 3ea0eacbd970..c18af2f63eef 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -40,6 +40,11 @@ struct nf_acct {
char data[0];
};
+struct nfacct_filter {
+ u32 value;
+ u32 mask;
+};
+
#define NFACCT_F_QUOTA (NFACCT_F_QUOTA_PKTS | NFACCT_F_QUOTA_BYTES)
#define NFACCT_OVERQUOTA_BIT 2 /* NFACCT_F_OVERQUOTA */
@@ -181,6 +186,7 @@ static int
nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct nf_acct *cur, *last;
+ const struct nfacct_filter *filter = cb->data;
if (cb->args[2])
return 0;
@@ -197,6 +203,10 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
last = NULL;
}
+
+ if (filter && (cur->flags & filter->mask) != filter->value)
+ continue;
+
if (nfnl_acct_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
@@ -211,6 +221,38 @@ nfnl_acct_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
+static int nfnl_acct_done(struct netlink_callback *cb)
+{
+ kfree(cb->data);
+ return 0;
+}
+
+static const struct nla_policy filter_policy[NFACCT_FILTER_MAX + 1] = {
+ [NFACCT_FILTER_MASK] = { .type = NLA_U32 },
+ [NFACCT_FILTER_VALUE] = { .type = NLA_U32 },
+};
+
+static struct nfacct_filter *
+nfacct_filter_alloc(const struct nlattr * const attr)
+{
+ struct nfacct_filter *filter;
+ struct nlattr *tb[NFACCT_FILTER_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL);
+ if (!filter)
+ return ERR_PTR(-ENOMEM);
+
+ filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK]));
+ filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE]));
+
+ return filter;
+}
+
static int
nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
const struct nlmsghdr *nlh, const struct nlattr * const tb[])
@@ -222,7 +264,18 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
.dump = nfnl_acct_dump,
+ .done = nfnl_acct_done,
};
+
+ if (tb[NFACCT_FILTER]) {
+ struct nfacct_filter *filter;
+
+ filter = nfacct_filter_alloc(tb[NFACCT_FILTER]);
+ if (IS_ERR(filter))
+ return PTR_ERR(filter);
+
+ c.data = filter;
+ }
return netlink_dump_start(nfnl, skb, nlh, &c);
}
@@ -314,6 +367,7 @@ static const struct nla_policy nfnl_acct_policy[NFACCT_MAX+1] = {
[NFACCT_PKTS] = { .type = NLA_U64 },
[NFACCT_FLAGS] = { .type = NLA_U32 },
[NFACCT_QUOTA] = { .type = NLA_U64 },
+ [NFACCT_FILTER] = {.type = NLA_NESTED },
};
static const struct nfnl_callback nfnl_acct_cb[NFNL_MSG_ACCT_MAX] = {
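
Note: the dump-side test added above, (cur->flags & filter->mask) != filter->value, is a plain mask/value match over the object's flag bits. As a standalone predicate, e.g. mask = value = NFACCT_F_QUOTA would select only quota-carrying counters:

    #include <stdbool.h>
    #include <stdint.h>

    /* An object is reported iff its masked flag bits equal the
     * requested value. */
    static bool nfacct_filter_match(uint32_t flags, uint32_t mask,
                                    uint32_t value)
    {
        return (flags & mask) == value;
    }
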
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index a11c5ff2f720..b1e3a0579416 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -36,7 +36,7 @@
#include <linux/atomic.h>
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif
@@ -429,7 +429,7 @@ __build_packet_message(struct nfnl_log_net *log,
goto nla_put_failure;
if (indev) {
-#ifndef CONFIG_BRIDGE_NETFILTER
+#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
htonl(indev->ifindex)))
goto nla_put_failure;
@@ -460,7 +460,7 @@ __build_packet_message(struct nfnl_log_net *log,
}
if (outdev) {
-#ifndef CONFIG_BRIDGE_NETFILTER
+#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
htonl(outdev->ifindex)))
goto nla_put_failure;
@@ -640,7 +640,7 @@ nfulnl_log_packet(struct net *net,
+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#endif
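
Note: the #ifdef -> IS_ENABLED() conversions in this file (and in nfnetlink_queue_core.c below) matter because bridge netfilter can now be built modular; IS_ENABLED(CONFIG_FOO) is true for both =y and =m, while #ifdef CONFIG_FOO only catches the built-in case. Schematically:

    #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)        /* =y or =m */
    /* bridge-aware code */
    #endif

    /* ...is equivalent to the older, more verbose form: */
    #if defined(CONFIG_BRIDGE_NETFILTER) || \
        defined(CONFIG_BRIDGE_NETFILTER_MODULE)
    /* bridge-aware code */
    #endif
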
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 108120f216b1..a82077d9f59b 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -36,7 +36,7 @@
#include <linux/atomic.h>
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif
@@ -302,7 +302,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
+ nla_total_size(sizeof(u_int32_t)) /* ifindex */
#endif
@@ -380,7 +380,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
indev = entry->indev;
if (indev) {
-#ifndef CONFIG_BRIDGE_NETFILTER
+#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
goto nla_put_failure;
#else
@@ -410,7 +410,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
}
if (outdev) {
-#ifndef CONFIG_BRIDGE_NETFILTER
+#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
goto nla_put_failure;
#else
@@ -569,7 +569,7 @@ nf_queue_entry_dup(struct nf_queue_entry *e)
return NULL;
}
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* When called from bridge netfilter, skb->data must point to MAC header
* before calling skb_gso_segment(). Else, original MAC header is lost
* and segmented skbs will be sent to wrong destination.
@@ -763,7 +763,7 @@ dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
if (entry->outdev)
if (entry->outdev->ifindex == ifindex)
return 1;
-#ifdef CONFIG_BRIDGE_NETFILTER
+#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
if (entry->skb->nf_bridge) {
if (entry->skb->nf_bridge->physindev &&
entry->skb->nf_bridge->physindev->ifindex == ifindex)
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 1840989092ed..7e2683c8a44a 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -101,26 +101,12 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
static void target_compat_from_user(struct xt_target *t, void *in, void *out)
{
-#ifdef CONFIG_COMPAT
- if (t->compat_from_user) {
- int pad;
-
- t->compat_from_user(out, in);
- pad = XT_ALIGN(t->targetsize) - t->targetsize;
- if (pad > 0)
- memset(out + t->targetsize, 0, pad);
- } else
-#endif
- memcpy(out, in, XT_ALIGN(t->targetsize));
-}
+ int pad;
-static inline int nft_compat_target_offset(struct xt_target *target)
-{
-#ifdef CONFIG_COMPAT
- return xt_compat_target_offset(target);
-#else
- return 0;
-#endif
+ memcpy(out, in, t->targetsize);
+ pad = XT_ALIGN(t->targetsize) - t->targetsize;
+ if (pad > 0)
+ memset(out + t->targetsize, 0, pad);
}
static const struct nla_policy nft_rule_compat_policy[NFTA_RULE_COMPAT_MAX + 1] = {
@@ -208,34 +194,6 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
module_put(target->me);
}
-static int
-target_dump_info(struct sk_buff *skb, const struct xt_target *t, const void *in)
-{
- int ret;
-
-#ifdef CONFIG_COMPAT
- if (t->compat_to_user) {
- mm_segment_t old_fs;
- void *out;
-
- out = kmalloc(XT_ALIGN(t->targetsize), GFP_ATOMIC);
- if (out == NULL)
- return -ENOMEM;
-
- /* We want to reuse existing compat_to_user */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- t->compat_to_user(out, in);
- set_fs(old_fs);
- ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), out);
- kfree(out);
- } else
-#endif
- ret = nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(t->targetsize), in);
-
- return ret;
-}
-
static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct xt_target *target = expr->ops->data;
@@ -243,7 +201,7 @@ static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
if (nla_put_string(skb, NFTA_TARGET_NAME, target->name) ||
nla_put_be32(skb, NFTA_TARGET_REV, htonl(target->revision)) ||
- target_dump_info(skb, target, info))
+ nla_put(skb, NFTA_TARGET_INFO, XT_ALIGN(target->targetsize), info))
goto nla_put_failure;
return 0;
@@ -341,17 +299,12 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
static void match_compat_from_user(struct xt_match *m, void *in, void *out)
{
-#ifdef CONFIG_COMPAT
- if (m->compat_from_user) {
- int pad;
-
- m->compat_from_user(out, in);
- pad = XT_ALIGN(m->matchsize) - m->matchsize;
- if (pad > 0)
- memset(out + m->matchsize, 0, pad);
- } else
-#endif
- memcpy(out, in, XT_ALIGN(m->matchsize));
+ int pad;
+
+ memcpy(out, in, m->matchsize);
+ pad = XT_ALIGN(m->matchsize) - m->matchsize;
+ if (pad > 0)
+ memset(out + m->matchsize, 0, pad);
}
static int
@@ -404,43 +357,6 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
module_put(match->me);
}
-static int
-match_dump_info(struct sk_buff *skb, const struct xt_match *m, const void *in)
-{
- int ret;
-
-#ifdef CONFIG_COMPAT
- if (m->compat_to_user) {
- mm_segment_t old_fs;
- void *out;
-
- out = kmalloc(XT_ALIGN(m->matchsize), GFP_ATOMIC);
- if (out == NULL)
- return -ENOMEM;
-
- /* We want to reuse existing compat_to_user */
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- m->compat_to_user(out, in);
- set_fs(old_fs);
- ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), out);
- kfree(out);
- } else
-#endif
- ret = nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(m->matchsize), in);
-
- return ret;
-}
-
-static inline int nft_compat_match_offset(struct xt_match *match)
-{
-#ifdef CONFIG_COMPAT
- return xt_compat_match_offset(match);
-#else
- return 0;
-#endif
-}
-
static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
void *info = nft_expr_priv(expr);
@@ -448,7 +364,7 @@ static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
nla_put_be32(skb, NFTA_MATCH_REV, htonl(match->revision)) ||
- match_dump_info(skb, match, info))
+ nla_put(skb, NFTA_MATCH_INFO, XT_ALIGN(match->matchsize), info))
goto nla_put_failure;
return 0;
@@ -643,8 +559,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
return ERR_PTR(-ENOMEM);
nft_match->ops.type = &nft_match_type;
- nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize) +
- nft_compat_match_offset(match));
+ nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
nft_match->ops.eval = nft_match_eval;
nft_match->ops.init = nft_match_init;
nft_match->ops.destroy = nft_match_destroy;
@@ -714,8 +629,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
return ERR_PTR(-ENOMEM);
nft_target->ops.type = &nft_target_type;
- nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize) +
- nft_compat_target_offset(target));
+ nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
nft_target->ops.eval = nft_target_eval;
nft_target->ops.init = nft_target_init;
nft_target->ops.destroy = nft_target_destroy;
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
new file mode 100644
index 000000000000..6637bab00567
--- /dev/null
+++ b/net/netfilter/nft_masq.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014 Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nft_masq.h>
+
+const struct nla_policy nft_masq_policy[NFTA_MASQ_MAX + 1] = {
+ [NFTA_MASQ_FLAGS] = { .type = NLA_U32 },
+};
+EXPORT_SYMBOL_GPL(nft_masq_policy);
+
+int nft_masq_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_masq *priv = nft_expr_priv(expr);
+
+ if (tb[NFTA_MASQ_FLAGS] == NULL)
+ return 0;
+
+ priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS]));
+ if (priv->flags & ~NF_NAT_RANGE_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_masq_init);
+
+int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_masq *priv = nft_expr_priv(expr);
+
+ if (priv->flags == 0)
+ return 0;
+
+ if (nla_put_be32(skb, NFTA_MASQ_FLAGS, htonl(priv->flags)))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+EXPORT_SYMBOL_GPL(nft_masq_dump);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index 852b178c6ae7..1e7c076ca63a 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -14,6 +14,10 @@
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/smp.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/tcp_states.h> /* for TCP_TIME_WAIT */
@@ -124,6 +128,43 @@ void nft_meta_get_eval(const struct nft_expr *expr,
dest->data[0] = skb->secmark;
break;
#endif
+ case NFT_META_PKTTYPE:
+ if (skb->pkt_type != PACKET_LOOPBACK) {
+ dest->data[0] = skb->pkt_type;
+ break;
+ }
+
+ switch (pkt->ops->pf) {
+ case NFPROTO_IPV4:
+ if (ipv4_is_multicast(ip_hdr(skb)->daddr))
+ dest->data[0] = PACKET_MULTICAST;
+ else
+ dest->data[0] = PACKET_BROADCAST;
+ break;
+ case NFPROTO_IPV6:
+ if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF)
+ dest->data[0] = PACKET_MULTICAST;
+ else
+ dest->data[0] = PACKET_BROADCAST;
+ break;
+ default:
+ WARN_ON(1);
+ goto err;
+ }
+ break;
+ case NFT_META_CPU:
+ dest->data[0] = smp_processor_id();
+ break;
+ case NFT_META_IIFGROUP:
+ if (in == NULL)
+ goto err;
+ dest->data[0] = in->group;
+ break;
+ case NFT_META_OIFGROUP:
+ if (out == NULL)
+ goto err;
+ dest->data[0] = out->group;
+ break;
default:
WARN_ON(1);
goto err;
@@ -195,6 +236,10 @@ int nft_meta_get_init(const struct nft_ctx *ctx,
#ifdef CONFIG_NETWORK_SECMARK
case NFT_META_SECMARK:
#endif
+ case NFT_META_PKTTYPE:
+ case NFT_META_CPU:
+ case NFT_META_IIFGROUP:
+ case NFT_META_OIFGROUP:
break;
default:
return -EOPNOTSUPP;
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 79ff58cd36dc..799550b476fb 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -33,6 +33,7 @@ struct nft_nat {
enum nft_registers sreg_proto_max:8;
enum nf_nat_manip_type type:8;
u8 family;
+ u16 flags;
};
static void nft_nat_eval(const struct nft_expr *expr,
@@ -71,6 +72,8 @@ static void nft_nat_eval(const struct nft_expr *expr,
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
+ range.flags |= priv->flags;
+
data[NFT_REG_VERDICT].verdict =
nf_nat_setup_info(ct, &range, priv->type);
}
@@ -82,6 +85,7 @@ static const struct nla_policy nft_nat_policy[NFTA_NAT_MAX + 1] = {
[NFTA_NAT_REG_ADDR_MAX] = { .type = NLA_U32 },
[NFTA_NAT_REG_PROTO_MIN] = { .type = NLA_U32 },
[NFTA_NAT_REG_PROTO_MAX] = { .type = NLA_U32 },
+ [NFTA_NAT_FLAGS] = { .type = NLA_U32 },
};
static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
@@ -149,6 +153,12 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
} else
priv->sreg_proto_max = priv->sreg_proto_min;
+ if (tb[NFTA_NAT_FLAGS]) {
+ priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS]));
+ if (priv->flags & ~NF_NAT_RANGE_MASK)
+ return -EINVAL;
+ }
+
return 0;
}
@@ -183,6 +193,12 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
htonl(priv->sreg_proto_max)))
goto nla_put_failure;
}
+
+ if (priv->flags != 0) {
+ if (nla_put_be32(skb, NFTA_NAT_FLAGS, htonl(priv->flags)))
+ goto nla_put_failure;
+ }
+
return 0;
nla_put_failure:
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index f3448c296446..ec8a456092a7 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -17,6 +17,8 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
[NFTA_REJECT_TYPE] = { .type = NLA_U32 },
@@ -70,5 +72,40 @@ nla_put_failure:
}
EXPORT_SYMBOL_GPL(nft_reject_dump);
+static u8 icmp_code_v4[NFT_REJECT_ICMPX_MAX + 1] = {
+ [NFT_REJECT_ICMPX_NO_ROUTE] = ICMP_NET_UNREACH,
+ [NFT_REJECT_ICMPX_PORT_UNREACH] = ICMP_PORT_UNREACH,
+ [NFT_REJECT_ICMPX_HOST_UNREACH] = ICMP_HOST_UNREACH,
+ [NFT_REJECT_ICMPX_ADMIN_PROHIBITED] = ICMP_PKT_FILTERED,
+};
+
+int nft_reject_icmp_code(u8 code)
+{
+ if (code > NFT_REJECT_ICMPX_MAX)
+ return -EINVAL;
+
+ return icmp_code_v4[code];
+}
+EXPORT_SYMBOL_GPL(nft_reject_icmp_code);
+
+static u8 icmp_code_v6[NFT_REJECT_ICMPX_MAX + 1] = {
+ [NFT_REJECT_ICMPX_NO_ROUTE] = ICMPV6_NOROUTE,
+ [NFT_REJECT_ICMPX_PORT_UNREACH] = ICMPV6_PORT_UNREACH,
+ [NFT_REJECT_ICMPX_HOST_UNREACH] = ICMPV6_ADDR_UNREACH,
+ [NFT_REJECT_ICMPX_ADMIN_PROHIBITED] = ICMPV6_ADM_PROHIBITED,
+};
+
+int nft_reject_icmpv6_code(u8 code)
+{
+ if (code > NFT_REJECT_ICMPX_MAX)
+ return -EINVAL;
+
+ return icmp_code_v6[code];
+}
+
+EXPORT_SYMBOL_GPL(nft_reject_icmpv6_code);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index b718a52a4654..7b5f9d58680a 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -14,17 +14,103 @@
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nft_reject.h>
+#include <net/netfilter/ipv4/nf_reject.h>
+#include <net/netfilter/ipv6/nf_reject.h>
static void nft_reject_inet_eval(const struct nft_expr *expr,
struct nft_data data[NFT_REG_MAX + 1],
const struct nft_pktinfo *pkt)
{
+ struct nft_reject *priv = nft_expr_priv(expr);
+ struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
+
switch (pkt->ops->pf) {
case NFPROTO_IPV4:
- return nft_reject_ipv4_eval(expr, data, pkt);
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nf_send_unreach(pkt->skb, priv->icmp_code);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nf_send_reset(pkt->skb, pkt->ops->hooknum);
+ break;
+ case NFT_REJECT_ICMPX_UNREACH:
+ nf_send_unreach(pkt->skb,
+ nft_reject_icmp_code(priv->icmp_code));
+ break;
+ }
+ break;
case NFPROTO_IPV6:
- return nft_reject_ipv6_eval(expr, data, pkt);
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nf_send_unreach6(net, pkt->skb, priv->icmp_code,
+ pkt->ops->hooknum);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+ break;
+ case NFT_REJECT_ICMPX_UNREACH:
+ nf_send_unreach6(net, pkt->skb,
+ nft_reject_icmpv6_code(priv->icmp_code),
+ pkt->ops->hooknum);
+ break;
+ }
+ break;
+ }
+ data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+
+static int nft_reject_inet_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_reject *priv = nft_expr_priv(expr);
+ int icmp_code;
+
+ if (tb[NFTA_REJECT_TYPE] == NULL)
+ return -EINVAL;
+
+ priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ case NFT_REJECT_ICMPX_UNREACH:
+ if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
+ return -EINVAL;
+
+ icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
+ if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
+ icmp_code > NFT_REJECT_ICMPX_MAX)
+ return -EINVAL;
+
+ priv->icmp_code = icmp_code;
+ break;
+ case NFT_REJECT_TCP_RST:
+ break;
+ default:
+ return -EINVAL;
}
+ return 0;
+}
+
+static int nft_reject_inet_dump(struct sk_buff *skb,
+ const struct nft_expr *expr)
+{
+ const struct nft_reject *priv = nft_expr_priv(expr);
+
+ if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
+ goto nla_put_failure;
+
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ case NFT_REJECT_ICMPX_UNREACH:
+ if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
+ goto nla_put_failure;
+ break;
+ }
+
+ return 0;
+
+nla_put_failure:
+ return -1;
}
static struct nft_expr_type nft_reject_inet_type;
@@ -32,8 +118,8 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
.type = &nft_reject_inet_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
.eval = nft_reject_inet_eval,
- .init = nft_reject_init,
- .dump = nft_reject_dump,
+ .init = nft_reject_inet_init,
+ .dump = nft_reject_inet_dump,
};
static struct nft_expr_type nft_reject_inet_type __read_mostly = {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 272ae4d6fdf4..133eb4772f12 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1101,22 +1101,11 @@ static const struct seq_operations xt_match_seq_ops = {
static int xt_match_open(struct inode *inode, struct file *file)
{
- struct seq_file *seq;
struct nf_mttg_trav *trav;
- int ret;
-
- trav = kmalloc(sizeof(*trav), GFP_KERNEL);
- if (trav == NULL)
+ trav = __seq_open_private(file, &xt_match_seq_ops, sizeof(*trav));
+ if (!trav)
return -ENOMEM;
- ret = seq_open(file, &xt_match_seq_ops);
- if (ret < 0) {
- kfree(trav);
- return ret;
- }
-
- seq = file->private_data;
- seq->private = trav;
trav->nfproto = (unsigned long)PDE_DATA(inode);
return 0;
}
@@ -1165,22 +1154,11 @@ static const struct seq_operations xt_target_seq_ops = {
static int xt_target_open(struct inode *inode, struct file *file)
{
- struct seq_file *seq;
struct nf_mttg_trav *trav;
- int ret;
-
- trav = kmalloc(sizeof(*trav), GFP_KERNEL);
- if (trav == NULL)
+ trav = __seq_open_private(file, &xt_target_seq_ops, sizeof(*trav));
+ if (!trav)
return -ENOMEM;
- ret = seq_open(file, &xt_target_seq_ops);
- if (ret < 0) {
- kfree(trav);
- return ret;
- }
-
- seq = file->private_data;
- seq->private = trav;
trav->nfproto = (unsigned long)PDE_DATA(inode);
return 0;
}
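
Note: __seq_open_private() bundles the three steps the removed open-coded sequence performed: allocate a zeroed private area, seq_open() the file, and attach the area to seq->private, unwinding on failure. A condensed sketch of its behaviour (the real helper lives in fs/seq_file.c):

    static void *seq_open_private_sketch(struct file *f,
                                         const struct seq_operations *ops,
                                         int psize)
    {
        void *priv = kzalloc(psize, GFP_KERNEL);

        if (!priv)
            return NULL;
        if (seq_open(f, ops) < 0) {
            kfree(priv);
            return NULL;
        }
        ((struct seq_file *)f->private_data)->private = priv;
        return priv;    /* caller fills in its fields, e.g. trav->nfproto */
    }
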
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index 73b73f687c58..02afaf48a729 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -126,7 +126,7 @@ hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
hash = hash ^ (t->proto & info->proto_mask);
- return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
+ return reciprocal_scale(hash, info->hmodulus) + info->hoffset;
}
static void
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 370adf622cef..604df6fae6fc 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -136,7 +136,7 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
cfg.est.interval = info->interval;
cfg.est.ewma_log = info->ewma_log;
- ret = gen_new_estimator(&est->bstats, &est->rstats,
+ ret = gen_new_estimator(&est->bstats, NULL, &est->rstats,
&est->lock, &cfg.opt);
if (ret < 0)
goto err2;
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index f4af1bfafb1c..96fa26b20b67 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -55,7 +55,8 @@ xt_cluster_hash(const struct nf_conn *ct,
WARN_ON(1);
break;
}
- return (((u64)hash * info->total_nodes) >> 32);
+
+ return reciprocal_scale(hash, info->total_nodes);
}
static inline bool
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 1e634615ab9d..d4bec261e74e 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -120,7 +120,7 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par)
* accounting is enabled, so complain in the hope that someone notices.
*/
if (!nf_ct_acct_enabled(par->net)) {
- pr_warning("Forcing CT accounting to be enabled\n");
+ pr_warn("Forcing CT accounting to be enabled\n");
nf_ct_set_acct(par->net, true);
}
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 47dc6836830a..05fbc2a0be46 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -135,7 +135,7 @@ hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
* give results between [0 and cfg.size-1] and same hash distribution,
* but using a multiply, less expensive than a divide
*/
- return ((u64)hash * ht->cfg.size) >> 32;
+ return reciprocal_scale(hash, ht->cfg.size);
}
static struct dsthash_ent *
@@ -943,7 +943,7 @@ static int __init hashlimit_mt_init(void)
sizeof(struct dsthash_ent), 0, 0,
NULL);
if (!hashlimit_cachep) {
- pr_warning("unable to create slab cache\n");
+ pr_warn("unable to create slab cache\n");
goto err2;
}
return 0;
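
Note: the conversions in xt_HMARK, xt_cluster and xt_hashlimit all replace the same open-coded idiom with the common helper, which maps a 32-bit hash onto the interval [0, ep_ro) using a multiply and shift instead of a modulus. Its definition in include/linux/kernel.h is exactly the expression being removed:

    static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
    {
        return (u32)(((u64) val * ep_ro) >> 32);
    }
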
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index d7ca16b8b8df..f440f57a452f 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -13,6 +13,7 @@
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/xt_physdev.h>
#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/br_netfilter.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
@@ -87,6 +88,8 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
{
const struct xt_physdev_info *info = par->matchinfo;
+ br_netfilter_enable();
+
if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
info->bitmask & ~XT_PHYSDEV_OP_MASK)
return -EINVAL;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 80c2e2d603e0..5732cd64acc0 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -84,13 +84,12 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warning("Cannot find set identified by id %u to match\n",
- info->match_set.index);
+ pr_warn("Cannot find set identified by id %u to match\n",
+ info->match_set.index);
return -ENOENT;
}
if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) {
- pr_warning("Protocol error: set match dimension "
- "is over the limit!\n");
+ pr_warn("Protocol error: set match dimension is over the limit!\n");
ip_set_nfnl_put(par->net, info->match_set.index);
return -ERANGE;
}
@@ -134,13 +133,12 @@ set_match_v1_checkentry(const struct xt_mtchk_param *par)
index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warning("Cannot find set identified by id %u to match\n",
- info->match_set.index);
+ pr_warn("Cannot find set identified by id %u to match\n",
+ info->match_set.index);
return -ENOENT;
}
if (info->match_set.dim > IPSET_DIM_MAX) {
- pr_warning("Protocol error: set match dimension "
- "is over the limit!\n");
+ pr_warn("Protocol error: set match dimension is over the limit!\n");
ip_set_nfnl_put(par->net, info->match_set.index);
return -ERANGE;
}
@@ -230,8 +228,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
if (info->add_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warning("Cannot find add_set index %u as target\n",
- info->add_set.index);
+ pr_warn("Cannot find add_set index %u as target\n",
+ info->add_set.index);
return -ENOENT;
}
}
@@ -239,8 +237,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
if (info->del_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warning("Cannot find del_set index %u as target\n",
- info->del_set.index);
+ pr_warn("Cannot find del_set index %u as target\n",
+ info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
return -ENOENT;
@@ -248,8 +246,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
}
if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 ||
info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) {
- pr_warning("Protocol error: SET target dimension "
- "is over the limit!\n");
+ pr_warn("Protocol error: SET target dimension is over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
@@ -303,8 +300,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
if (info->add_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warning("Cannot find add_set index %u as target\n",
- info->add_set.index);
+ pr_warn("Cannot find add_set index %u as target\n",
+ info->add_set.index);
return -ENOENT;
}
}
@@ -312,8 +309,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
if (info->del_set.index != IPSET_INVALID_ID) {
index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
if (index == IPSET_INVALID_ID) {
- pr_warning("Cannot find del_set index %u as target\n",
- info->del_set.index);
+ pr_warn("Cannot find del_set index %u as target\n",
+ info->del_set.index);
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
return -ENOENT;
@@ -321,8 +318,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
}
if (info->add_set.dim > IPSET_DIM_MAX ||
info->del_set.dim > IPSET_DIM_MAX) {
- pr_warning("Protocol error: SET target dimension "
- "is over the limit!\n");
+ pr_warn("Protocol error: SET target dimension is over the limit!\n");
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_nfnl_put(par->net, info->add_set.index);
if (info->del_set.index != IPSET_INVALID_ID)
@@ -370,6 +366,140 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
#define set_target_v2_checkentry set_target_v1_checkentry
#define set_target_v2_destroy set_target_v1_destroy
+/* Revision 3 target */
+
+static unsigned int
+set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_set_info_target_v3 *info = par->targinfo;
+ ADT_OPT(add_opt, par->family, info->add_set.dim,
+ info->add_set.flags, info->flags, info->timeout);
+ ADT_OPT(del_opt, par->family, info->del_set.dim,
+ info->del_set.flags, 0, UINT_MAX);
+ ADT_OPT(map_opt, par->family, info->map_set.dim,
+ info->map_set.flags, 0, UINT_MAX);
+
+ int ret;
+
+ /* Normalize to fit into jiffies */
+ if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
+ add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC)
+ add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC;
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_add(info->add_set.index, skb, par, &add_opt);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_del(info->del_set.index, skb, par, &del_opt);
+ if (info->map_set.index != IPSET_INVALID_ID) {
+ map_opt.cmdflags |= info->flags & (IPSET_FLAG_MAP_SKBMARK |
+ IPSET_FLAG_MAP_SKBPRIO |
+ IPSET_FLAG_MAP_SKBQUEUE);
+ ret = match_set(info->map_set.index, skb, par, &map_opt,
+ info->map_set.flags & IPSET_INV_MATCH);
+ if (!ret)
+ return XT_CONTINUE;
+ if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBMARK)
+ skb->mark = (skb->mark & ~(map_opt.ext.skbmarkmask))
+ ^ (map_opt.ext.skbmark);
+ if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBPRIO)
+ skb->priority = map_opt.ext.skbprio;
+ if ((map_opt.cmdflags & IPSET_FLAG_MAP_SKBQUEUE) &&
+ skb->dev &&
+ skb->dev->real_num_tx_queues > map_opt.ext.skbqueue)
+ skb_set_queue_mapping(skb, map_opt.ext.skbqueue);
+ }
+ return XT_CONTINUE;
+}
+
+static int
+set_target_v3_checkentry(const struct xt_tgchk_param *par)
+{
+ const struct xt_set_info_target_v3 *info = par->targinfo;
+ ip_set_id_t index;
+
+ if (info->add_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(par->net,
+ info->add_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warn("Cannot find add_set index %u as target\n",
+ info->add_set.index);
+ return -ENOENT;
+ }
+ }
+
+ if (info->del_set.index != IPSET_INVALID_ID) {
+ index = ip_set_nfnl_get_byindex(par->net,
+ info->del_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warn("Cannot find del_set index %u as target\n",
+ info->del_set.index);
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(par->net,
+ info->add_set.index);
+ return -ENOENT;
+ }
+ }
+
+ if (info->map_set.index != IPSET_INVALID_ID) {
+ if (strncmp(par->table, "mangle", 7)) {
+ pr_warn("--map-set only usable from mangle table\n");
+ return -EINVAL;
+ }
+ if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
+ (info->flags & IPSET_FLAG_MAP_SKBQUEUE)) &&
+ !(par->hook_mask & (1 << NF_INET_FORWARD |
+ 1 << NF_INET_LOCAL_OUT |
+ 1 << NF_INET_POST_ROUTING))) {
+ pr_warn("mapping of prio and/or queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
+ return -EINVAL;
+ }
+ index = ip_set_nfnl_get_byindex(par->net,
+ info->map_set.index);
+ if (index == IPSET_INVALID_ID) {
+ pr_warn("Cannot find map_set index %u as target\n",
+ info->map_set.index);
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(par->net,
+ info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(par->net,
+ info->del_set.index);
+ return -ENOENT;
+ }
+ }
+
+ if (info->add_set.dim > IPSET_DIM_MAX ||
+ info->del_set.dim > IPSET_DIM_MAX ||
+ info->map_set.dim > IPSET_DIM_MAX) {
+ pr_warn("Protocol error: SET target dimension is over the limit!\n");
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(par->net, info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(par->net, info->del_set.index);
+ if (info->map_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(par->net, info->map_set.index);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static void
+set_target_v3_destroy(const struct xt_tgdtor_param *par)
+{
+ const struct xt_set_info_target_v3 *info = par->targinfo;
+
+ if (info->add_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(par->net, info->add_set.index);
+ if (info->del_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(par->net, info->del_set.index);
+ if (info->map_set.index != IPSET_INVALID_ID)
+ ip_set_nfnl_put(par->net, info->map_set.index);
+}
+
static struct xt_match set_matches[] __read_mostly = {
{
.name = "set",
@@ -497,6 +627,27 @@ static struct xt_target set_targets[] __read_mostly = {
.destroy = set_target_v2_destroy,
.me = THIS_MODULE
},
+ /* --map-set support */
+ {
+ .name = "SET",
+ .revision = 3,
+ .family = NFPROTO_IPV4,
+ .target = set_target_v3,
+ .targetsize = sizeof(struct xt_set_info_target_v3),
+ .checkentry = set_target_v3_checkentry,
+ .destroy = set_target_v3_destroy,
+ .me = THIS_MODULE
+ },
+ {
+ .name = "SET",
+ .revision = 3,
+ .family = NFPROTO_IPV6,
+ .target = set_target_v3,
+ .targetsize = sizeof(struct xt_set_info_target_v3),
+ .checkentry = set_target_v3_checkentry,
+ .destroy = set_target_v3_destroy,
+ .me = THIS_MODULE
+ },
};
static int __init xt_set_init(void)
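
Note: the --map-set path in set_target_v3() applies the stored skbmark with the usual mark/mask idiom: the bits covered by skbmarkmask are cleared first, then the stored value is XORed in. As a standalone helper with a worked example:

    #include <stdint.h>

    /* old = 0x0000abcd, mask = 0x0000ff00, mark = 0x00001200
     * -> (0x0000abcd & ~0x0000ff00) ^ 0x00001200 = 0x000012cd */
    static inline uint32_t map_skbmark(uint32_t old, uint32_t mark,
                                       uint32_t mask)
    {
        return (old & ~mask) ^ mark;
    }
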
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
index d3c48b14ab94..5699adb97652 100644
--- a/net/netfilter/xt_string.c
+++ b/net/netfilter/xt_string.c
@@ -29,7 +29,6 @@ string_mt(const struct sk_buff *skb, struct xt_action_param *par)
struct ts_state state;
bool invert;
- memset(&state, 0, sizeof(struct ts_state));
invert = conf->u.v1.flags & XT_STRING_FLAG_INVERT;
return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
diff --git a/net/netlabel/netlabel_user.c b/net/netlabel/netlabel_user.c
index 1e779bb7fa43..adf8b7900da2 100644
--- a/net/netlabel/netlabel_user.c
+++ b/net/netlabel/netlabel_user.c
@@ -71,11 +71,7 @@ int __init netlbl_netlink_init(void)
if (ret_val != 0)
return ret_val;
- ret_val = netlbl_unlabel_genl_init();
- if (ret_val != 0)
- return ret_val;
-
- return 0;
+ return netlbl_unlabel_genl_init();
}
/*
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
index e1638dab076d..b60aa35c074f 100644
--- a/net/nfc/digital_dep.c
+++ b/net/nfc/digital_dep.c
@@ -33,6 +33,8 @@
#define DIGITAL_ATR_REQ_MAX_SIZE 64
#define DIGITAL_LR_BITS_PAYLOAD_SIZE_254B 0x30
+#define DIGITAL_FSL_BITS_PAYLOAD_SIZE_254B \
+ (DIGITAL_LR_BITS_PAYLOAD_SIZE_254B >> 4)
#define DIGITAL_GB_BIT 0x02
#define DIGITAL_NFC_DEP_PFB_TYPE(pfb) ((pfb) & 0xE0)
@@ -127,6 +129,98 @@ static int digital_skb_pull_dep_sod(struct nfc_digital_dev *ddev,
return 0;
}
+static void digital_in_recv_psl_res(struct nfc_digital_dev *ddev, void *arg,
+ struct sk_buff *resp)
+{
+ struct nfc_target *target = arg;
+ struct digital_psl_res *psl_res;
+ int rc;
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+ resp = NULL;
+ goto exit;
+ }
+
+ rc = ddev->skb_check_crc(resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.6");
+ goto exit;
+ }
+
+ rc = digital_skb_pull_dep_sod(ddev, resp);
+ if (rc) {
+ PROTOCOL_ERR("14.4.1.2");
+ goto exit;
+ }
+
+ psl_res = (struct digital_psl_res *)resp->data;
+
+ if ((resp->len != sizeof(*psl_res)) ||
+ (psl_res->dir != DIGITAL_NFC_DEP_FRAME_DIR_IN) ||
+ (psl_res->cmd != DIGITAL_CMD_PSL_RES)) {
+ rc = -EIO;
+ goto exit;
+ }
+
+ rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH,
+ NFC_DIGITAL_RF_TECH_424F);
+ if (rc)
+ goto exit;
+
+ rc = digital_in_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING,
+ NFC_DIGITAL_FRAMING_NFCF_NFC_DEP);
+ if (rc)
+ goto exit;
+
+ if (!DIGITAL_DRV_CAPS_IN_CRC(ddev) &&
+ (ddev->curr_rf_tech == NFC_DIGITAL_RF_TECH_106A)) {
+ ddev->skb_add_crc = digital_skb_add_crc_f;
+ ddev->skb_check_crc = digital_skb_check_crc_f;
+ }
+
+ ddev->curr_rf_tech = NFC_DIGITAL_RF_TECH_424F;
+
+ nfc_dep_link_is_up(ddev->nfc_dev, target->idx, NFC_COMM_ACTIVE,
+ NFC_RF_INITIATOR);
+
+ ddev->curr_nfc_dep_pni = 0;
+
+exit:
+ dev_kfree_skb(resp);
+
+ if (rc)
+ ddev->curr_protocol = 0;
+}
+
+static int digital_in_send_psl_req(struct nfc_digital_dev *ddev,
+ struct nfc_target *target)
+{
+ struct sk_buff *skb;
+ struct digital_psl_req *psl_req;
+
+ skb = digital_skb_alloc(ddev, sizeof(*psl_req));
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(*psl_req));
+
+ psl_req = (struct digital_psl_req *)skb->data;
+
+ psl_req->dir = DIGITAL_NFC_DEP_FRAME_DIR_OUT;
+ psl_req->cmd = DIGITAL_CMD_PSL_REQ;
+ psl_req->did = 0;
+ psl_req->brs = (0x2 << 3) | 0x2; /* 424F both directions */
+ psl_req->fsl = DIGITAL_FSL_BITS_PAYLOAD_SIZE_254B;
+
+ digital_skb_push_dep_sod(ddev, skb);
+
+ ddev->skb_add_crc(skb);
+
+ return digital_in_send_cmd(ddev, skb, 500, digital_in_recv_psl_res,
+ target);
+}
+
static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg,
struct sk_buff *resp)
{
@@ -166,6 +260,13 @@ static void digital_in_recv_atr_res(struct nfc_digital_dev *ddev, void *arg,
if (rc)
goto exit;
+ if ((ddev->protocols & NFC_PROTO_FELICA_MASK) &&
+ (ddev->curr_rf_tech != NFC_DIGITAL_RF_TECH_424F)) {
+ rc = digital_in_send_psl_req(ddev, target);
+ if (!rc)
+ goto exit;
+ }
+
rc = nfc_dep_link_is_up(ddev->nfc_dev, target->idx, NFC_COMM_ACTIVE,
NFC_RF_INITIATOR);
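
Note: the BRS byte built in digital_in_send_psl_req() encodes the NFC-DEP bit rates as DSI (bits 5-3, initiator-to-target) and DRI (bits 2-0, target-to-initiator), with the value 0x2 selecting 424 kbit/s; hence (0x2 << 3) | 0x2 requests 424F in both directions. Spelled out with a hypothetical helper macro:

    #define DIGITAL_PSL_BRS(dsi, dri)   (((dsi) << 3) | (dri))

    psl_req->brs = DIGITAL_PSL_BRS(0x2, 0x2);   /* 424 kbit/s both ways */
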
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 2b400e1a8695..90b16cb40058 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -231,6 +231,14 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
cmd.num_disc_configs++;
}
+ if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
+ (protocols & NFC_PROTO_ISO15693_MASK)) {
+ cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
+ NCI_NFC_V_PASSIVE_POLL_MODE;
+ cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
+ cmd.num_disc_configs++;
+ }
+
nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
(1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
&cmd);
@@ -751,10 +759,6 @@ int nci_register_device(struct nci_dev *ndev)
struct device *dev = &ndev->nfc_dev->dev;
char name[32];
- rc = nfc_register_device(ndev->nfc_dev);
- if (rc)
- goto exit;
-
ndev->flags = 0;
INIT_WORK(&ndev->cmd_work, nci_cmd_work);
@@ -762,7 +766,7 @@ int nci_register_device(struct nci_dev *ndev)
ndev->cmd_wq = create_singlethread_workqueue(name);
if (!ndev->cmd_wq) {
rc = -ENOMEM;
- goto unreg_exit;
+ goto exit;
}
INIT_WORK(&ndev->rx_work, nci_rx_work);
@@ -792,6 +796,10 @@ int nci_register_device(struct nci_dev *ndev)
mutex_init(&ndev->req_lock);
+ rc = nfc_register_device(ndev->nfc_dev);
+ if (rc)
+ goto destroy_rx_wq_exit;
+
goto exit;
destroy_rx_wq_exit:
@@ -800,9 +808,6 @@ destroy_rx_wq_exit:
destroy_cmd_wq_exit:
destroy_workqueue(ndev->cmd_wq);
-unreg_exit:
- nfc_unregister_device(ndev->nfc_dev);
-
exit:
return rc;
}
diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
index 6c3aef852876..427ef2c7ab68 100644
--- a/net/nfc/nci/data.c
+++ b/net/nfc/nci/data.c
@@ -241,9 +241,12 @@ void nci_rx_data_packet(struct nci_dev *ndev, struct sk_buff *skb)
/* strip the nci data header */
skb_pull(skb, NCI_DATA_HDR_SIZE);
- if (ndev->target_active_prot == NFC_PROTO_MIFARE) {
+ if (ndev->target_active_prot == NFC_PROTO_MIFARE ||
+ ndev->target_active_prot == NFC_PROTO_JEWEL ||
+ ndev->target_active_prot == NFC_PROTO_FELICA ||
+ ndev->target_active_prot == NFC_PROTO_ISO15693) {
/* frame I/F => remove the status byte */
- pr_debug("NFC_PROTO_MIFARE => remove the status byte\n");
+ pr_debug("frame I/F => remove the status byte\n");
skb_trim(skb, (skb->len - 1));
}
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index df91bb95b12a..205b35f666db 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -2,6 +2,7 @@
* The NFC Controller Interface is the communication protocol between an
* NFC Controller (NFCC) and a Device Host (DH).
*
+ * Copyright (C) 2014 Marvell International Ltd.
* Copyright (C) 2011 Texas Instruments, Inc.
*
* Written by Ilan Elias <ilane@ti.com>
@@ -155,6 +156,24 @@ static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
return data;
}
+static __u8 *nci_extract_rf_params_nfcv_passive_poll(struct nci_dev *ndev,
+ struct rf_tech_specific_params_nfcv_poll *nfcv_poll,
+ __u8 *data)
+{
+ ++data;
+ nfcv_poll->dsfid = *data++;
+ memcpy(nfcv_poll->uid, data, NFC_ISO15693_UID_MAXSIZE);
+ data += NFC_ISO15693_UID_MAXSIZE;
+ return data;
+}
+
+__u32 nci_get_prop_rf_protocol(struct nci_dev *ndev, __u8 rf_protocol)
+{
+ if (ndev->ops->get_rfprotocol)
+ return ndev->ops->get_rfprotocol(ndev, rf_protocol);
+ return 0;
+}
+
static int nci_add_new_protocol(struct nci_dev *ndev,
struct nfc_target *target,
__u8 rf_protocol,
@@ -164,6 +183,7 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
struct rf_tech_specific_params_nfca_poll *nfca_poll;
struct rf_tech_specific_params_nfcb_poll *nfcb_poll;
struct rf_tech_specific_params_nfcf_poll *nfcf_poll;
+ struct rf_tech_specific_params_nfcv_poll *nfcv_poll;
__u32 protocol;
if (rf_protocol == NCI_RF_PROTOCOL_T1T)
@@ -179,8 +199,10 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
protocol = NFC_PROTO_FELICA_MASK;
else if (rf_protocol == NCI_RF_PROTOCOL_NFC_DEP)
protocol = NFC_PROTO_NFC_DEP_MASK;
+ else if (rf_protocol == NCI_RF_PROTOCOL_T5T)
+ protocol = NFC_PROTO_ISO15693_MASK;
else
- protocol = 0;
+ protocol = nci_get_prop_rf_protocol(ndev, rf_protocol);
if (!(protocol & ndev->poll_prots)) {
pr_err("the target found does not have the desired protocol\n");
@@ -213,6 +235,12 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
memcpy(target->sensf_res, nfcf_poll->sensf_res,
target->sensf_res_len);
}
+ } else if (rf_tech_and_mode == NCI_NFC_V_PASSIVE_POLL_MODE) {
+ nfcv_poll = (struct rf_tech_specific_params_nfcv_poll *)params;
+
+ target->is_iso15693 = 1;
+ target->iso15693_dsfid = nfcv_poll->dsfid;
+ memcpy(target->iso15693_uid, nfcv_poll->uid, NFC_ISO15693_UID_MAXSIZE);
} else {
pr_err("unsupported rf_tech_and_mode 0x%x\n", rf_tech_and_mode);
return -EPROTO;
@@ -305,6 +333,11 @@ static void nci_rf_discover_ntf_packet(struct nci_dev *ndev,
&(ntf.rf_tech_specific_params.nfcf_poll), data);
break;
+ case NCI_NFC_V_PASSIVE_POLL_MODE:
+ data = nci_extract_rf_params_nfcv_passive_poll(ndev,
+ &(ntf.rf_tech_specific_params.nfcv_poll), data);
+ break;
+
default:
pr_err("unsupported rf_tech_and_mode 0x%x\n",
ntf.rf_tech_and_mode);
@@ -455,6 +488,11 @@ static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
&(ntf.rf_tech_specific_params.nfcf_poll), data);
break;
+ case NCI_NFC_V_PASSIVE_POLL_MODE:
+ data = nci_extract_rf_params_nfcv_passive_poll(ndev,
+ &(ntf.rf_tech_specific_params.nfcv_poll), data);
+ break;
+
default:
pr_err("unsupported activation_rf_tech_and_mode 0x%x\n",
ntf.activation_rf_tech_and_mode);
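
Note: nci_extract_rf_params_nfcv_passive_poll() skips one byte before reading DSFID because the notification carries the NFC-V poll parameters as RES_FLAG (1 byte), DSFID (1 byte), then the 8-byte UID. Sketched as a packed wire-format struct (illustrative only; the parser above walks the raw buffer instead):

    struct nfcv_poll_params_wire {
        __u8 res_flag;                          /* skipped by ++data */
        __u8 dsfid;
        __u8 uid[NFC_ISO15693_UID_MAXSIZE];     /* 8 bytes */
    } __packed;
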
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 6ecf491ad509..ba3bb8203b99 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -54,3 +54,14 @@ config OPENVSWITCH_VXLAN
Say N to exclude this support and reduce the binary size.
If unsure, say Y.
+
+config OPENVSWITCH_GENEVE
+ bool "Open vSwitch Geneve tunneling support"
+ depends on INET
+ depends on OPENVSWITCH
+ depends on GENEVE && !(OPENVSWITCH=y && GENEVE=m)
+ default y
+ ---help---
+ If you say Y here, then Open vSwitch will be able to create geneve vports.
+
+ Say N to exclude this support and reduce the binary size.
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 3591cb5dae91..9a33a273c375 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -15,6 +15,10 @@ openvswitch-y := \
vport-internal_dev.o \
vport-netdev.o
+ifneq ($(CONFIG_OPENVSWITCH_GENEVE),)
+openvswitch-y += vport-geneve.o
+endif
+
ifneq ($(CONFIG_OPENVSWITCH_VXLAN),)
openvswitch-y += vport-vxlan.o
endif
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 5231652a95d9..006886dbee36 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -35,11 +35,78 @@
#include <net/sctp/checksum.h>
#include "datapath.h"
+#include "flow.h"
#include "vport.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key,
const struct nlattr *attr, int len);
+struct deferred_action {
+ struct sk_buff *skb;
+ const struct nlattr *actions;
+
+ /* Store pkt_key clone when creating deferred action. */
+ struct sw_flow_key pkt_key;
+};
+
+#define DEFERRED_ACTION_FIFO_SIZE 10
+struct action_fifo {
+ int head;
+ int tail;
+ /* Deferred action fifo queue storage. */
+ struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
+};
+
+static struct action_fifo __percpu *action_fifos;
+static DEFINE_PER_CPU(int, exec_actions_level);
+
+static void action_fifo_init(struct action_fifo *fifo)
+{
+ fifo->head = 0;
+ fifo->tail = 0;
+}
+
+static bool action_fifo_is_empty(struct action_fifo *fifo)
+{
+ return (fifo->head == fifo->tail);
+}
+
+static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
+{
+ if (action_fifo_is_empty(fifo))
+ return NULL;
+
+ return &fifo->fifo[fifo->tail++];
+}
+
+static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
+{
+ if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
+ return NULL;
+
+ return &fifo->fifo[fifo->head++];
+}
+
+/* Return a pointer to the reserved fifo slot, or NULL if the fifo is full. */
+static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
+ struct sw_flow_key *key,
+ const struct nlattr *attr)
+{
+ struct action_fifo *fifo;
+ struct deferred_action *da;
+
+ fifo = this_cpu_ptr(action_fifos);
+ da = action_fifo_put(fifo);
+ if (da) {
+ da->skb = skb;
+ da->actions = attr;
+ da->pkt_key = *key;
+ }
+
+ return da;
+}
+
static int make_writable(struct sk_buff *skb, int write_len)
{
if (!pskb_may_pull(skb, write_len))
@@ -410,16 +477,14 @@ static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
- const struct nlattr *attr)
+ struct sw_flow_key *key, const struct nlattr *attr)
{
struct dp_upcall_info upcall;
const struct nlattr *a;
int rem;
- BUG_ON(!OVS_CB(skb)->pkt_key);
-
upcall.cmd = OVS_PACKET_CMD_ACTION;
- upcall.key = OVS_CB(skb)->pkt_key;
+ upcall.key = key;
upcall.userdata = NULL;
upcall.portid = 0;
@@ -445,11 +510,10 @@ static bool last_action(const struct nlattr *a, int rem)
}
static int sample(struct datapath *dp, struct sk_buff *skb,
- const struct nlattr *attr)
+ struct sw_flow_key *key, const struct nlattr *attr)
{
const struct nlattr *acts_list = NULL;
const struct nlattr *a;
- struct sk_buff *sample_skb;
int rem;
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
@@ -469,31 +533,47 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
rem = nla_len(acts_list);
a = nla_data(acts_list);
- /* Actions list is either empty or only contains a single user-space
- * action, the latter being a special case as it is the only known
- * usage of the sample action.
- * In these special cases don't clone the skb as there are no
- * side-effects in the nested actions.
- * Otherwise, clone in case the nested actions have side effects.
+ /* Actions list is empty, do nothing */
+ if (unlikely(!rem))
+ return 0;
+
+ /* The only known usage of sample action is having a single user-space
+ * action. Treat this usage as a special case.
+ * The output_userspace() should clone the skb to be sent to the
+ * user space. This skb will be consumed by its caller.
*/
- if (likely(rem == 0 || (nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
- last_action(a, rem)))) {
- sample_skb = skb;
- skb_get(skb);
- } else {
- sample_skb = skb_clone(skb, GFP_ATOMIC);
- if (!sample_skb) /* Skip sample action when out of memory. */
- return 0;
+ if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
+ last_action(a, rem)))
+ return output_userspace(dp, skb, key, a);
+
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb)
+ /* Skip the sample action when out of memory. */
+ return 0;
+
+ if (!add_deferred_actions(skb, key, a)) {
+ if (net_ratelimit())
+ pr_warn("%s: deferred actions limit reached, dropping sample action\n",
+ ovs_dp_name(dp));
+
+ kfree_skb(skb);
}
+ return 0;
+}
- /* Note that do_execute_actions() never consumes skb.
- * In the case where skb has been cloned above it is the clone that
- * is consumed. Otherwise the skb_get(skb) call prevents
- * consumption by do_execute_actions(). Thus, it is safe to simply
- * return the error code and let the caller (also
- * do_execute_actions()) free skb on error.
- */
- return do_execute_actions(dp, sample_skb, a, rem);
+static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct nlattr *attr)
+{
+ struct ovs_action_hash *hash_act = nla_data(attr);
+ u32 hash = 0;
+
+ /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
+ hash = skb_get_hash(skb);
+ hash = jhash_1word(hash, hash_act->hash_basis);
+ if (!hash)
+ hash = 0x1;
+
+ key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb,
@@ -510,8 +590,8 @@ static int execute_set_action(struct sk_buff *skb,
skb->mark = nla_get_u32(nested_attr);
break;
- case OVS_KEY_ATTR_IPV4_TUNNEL:
- OVS_CB(skb)->tun_key = nla_data(nested_attr);
+ case OVS_KEY_ATTR_TUNNEL_INFO:
+ OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
break;
case OVS_KEY_ATTR_ETHERNET:
@@ -542,8 +622,47 @@ static int execute_set_action(struct sk_buff *skb,
return err;
}
+static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key,
+ const struct nlattr *a, int rem)
+{
+ struct deferred_action *da;
+ int err;
+
+ err = ovs_flow_key_update(skb, key);
+ if (err)
+ return err;
+
+ if (!last_action(a, rem)) {
+ /* Recirc action is not the last action
+ * of the action list, need to clone the skb.
+ */
+ skb = skb_clone(skb, GFP_ATOMIC);
+
+ /* Skip the recirc action when out of memory, but
+ * continue on with the rest of the action list.
+ */
+ if (!skb)
+ return 0;
+ }
+
+ da = add_deferred_actions(skb, key, NULL);
+ if (da) {
+ da->pkt_key.recirc_id = nla_get_u32(a);
+ } else {
+ kfree_skb(skb);
+
+ if (net_ratelimit())
+ pr_warn("%s: deferred actions limit reached, dropping recirc action\n",
+ ovs_dp_name(dp));
+ }
+
+ return 0;
+}
+
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key,
const struct nlattr *attr, int len)
{
/* Every output action needs a separate clone of 'skb', but the common
@@ -569,7 +688,11 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
break;
case OVS_ACTION_ATTR_USERSPACE:
- output_userspace(dp, skb, a);
+ output_userspace(dp, skb, key, a);
+ break;
+
+ case OVS_ACTION_ATTR_HASH:
+ execute_hash(skb, key, a);
break;
case OVS_ACTION_ATTR_PUSH_VLAN:
@@ -582,12 +705,23 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
err = pop_vlan(skb);
break;
+ case OVS_ACTION_ATTR_RECIRC:
+ err = execute_recirc(dp, skb, key, a, rem);
+ if (last_action(a, rem)) {
+ /* If this is the last action, the skb has
+ * been consumed or freed.
+ * Return immediately.
+ */
+ return err;
+ }
+ break;
+
case OVS_ACTION_ATTR_SET:
err = execute_set_action(skb, nla_data(a));
break;
case OVS_ACTION_ATTR_SAMPLE:
- err = sample(dp, skb, a);
+ err = sample(dp, skb, key, a);
if (unlikely(err)) /* skb already freed. */
return err;
break;
@@ -607,11 +741,64 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
return 0;
}
+static void process_deferred_actions(struct datapath *dp)
+{
+ struct action_fifo *fifo = this_cpu_ptr(action_fifos);
+
+	/* Do not touch the FIFO if there are no deferred actions. */
+ if (action_fifo_is_empty(fifo))
+ return;
+
+	/* Finish executing all deferred actions. */
+ do {
+ struct deferred_action *da = action_fifo_get(fifo);
+ struct sk_buff *skb = da->skb;
+ struct sw_flow_key *key = &da->pkt_key;
+ const struct nlattr *actions = da->actions;
+
+ if (actions)
+ do_execute_actions(dp, skb, key, actions,
+ nla_len(actions));
+ else
+ ovs_dp_process_packet(skb, key);
+ } while (!action_fifo_is_empty(fifo));
+
+ /* Reset FIFO for the next packet. */
+ action_fifo_init(fifo);
+}
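process_deferred_actions() is the draining half of the recursion-flattening scheme: execute_recirc() and sample() push follow-up work into a fixed-size per-CPU FIFO instead of calling back into do_execute_actions(), and only the outermost ovs_execute_actions() invocation (nesting level 0) drains it. A single-threaded sketch of that bounded-FIFO pattern, with invented names and an int standing in for the (skb, key, actions) tuple:

	#include <stdio.h>

	#define FIFO_SIZE 10	/* mirrors the bounded deferral budget */

	struct fifo {
		int head, tail;
		int items[FIFO_SIZE];
	};

	static int fifo_put(struct fifo *f, int item)
	{
		if (f->head >= FIFO_SIZE)
			return -1;	/* limit hit: the caller logs and drops */
		f->items[f->head++] = item;
		return 0;
	}

	static void drain(struct fifo *f)
	{
		while (f->tail < f->head)
			printf("deferred item %d\n", f->items[f->tail++]);
		f->head = f->tail = 0;	/* reset for the next packet */
	}

	int main(void)
	{
		struct fifo f = { 0 };

		fifo_put(&f, 1);
		fifo_put(&f, 2);	/* queued while "executing" item 1 */
		drain(&f);		/* only the outermost caller drains */
		return 0;
	}

The payoff is a hard bound on kernel stack usage no matter how deeply recirc and sample actions nest.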
+
/* Execute a list of actions against 'skb'. */
-int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key)
{
- struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+ int level = this_cpu_read(exec_actions_level);
+ struct sw_flow_actions *acts;
+ int err;
+
+ acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+
+ this_cpu_inc(exec_actions_level);
+ OVS_CB(skb)->egress_tun_info = NULL;
+ err = do_execute_actions(dp, skb, key,
+ acts->actions, acts->actions_len);
- OVS_CB(skb)->tun_key = NULL;
- return do_execute_actions(dp, skb, acts->actions, acts->actions_len);
+ if (!level)
+ process_deferred_actions(dp);
+
+ this_cpu_dec(exec_actions_level);
+ return err;
+}
+
+int action_fifos_init(void)
+{
+ action_fifos = alloc_percpu(struct action_fifo);
+ if (!action_fifos)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void action_fifos_exit(void)
+{
+ free_percpu(action_fifos);
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 64dc864a417f..2e31d9e7f4dc 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -157,7 +157,7 @@ static struct datapath *get_dp(struct net *net, int dp_ifindex)
}
/* Must be called with rcu_read_lock or ovs_mutex. */
-static const char *ovs_dp_name(const struct datapath *dp)
+const char *ovs_dp_name(const struct datapath *dp)
{
struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
return vport->ops->get_name(vport);
@@ -238,32 +238,25 @@ void ovs_dp_detach_port(struct vport *p)
}
/* Must be called with rcu_read_lock. */
-void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
+void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
+ const struct vport *p = OVS_CB(skb)->input_vport;
struct datapath *dp = p->dp;
struct sw_flow *flow;
struct dp_stats_percpu *stats;
- struct sw_flow_key key;
u64 *stats_counter;
u32 n_mask_hit;
- int error;
stats = this_cpu_ptr(dp->stats_percpu);
- /* Extract flow from 'skb' into 'key'. */
- error = ovs_flow_extract(skb, p->port_no, &key);
- if (unlikely(error)) {
- kfree_skb(skb);
- return;
- }
-
/* Look up flow. */
- flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
+ flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
+ int error;
upcall.cmd = OVS_PACKET_CMD_MISS;
- upcall.key = &key;
+ upcall.key = key;
upcall.userdata = NULL;
upcall.portid = ovs_vport_find_upcall_portid(p, skb);
error = ovs_dp_upcall(dp, skb, &upcall);
@@ -276,10 +269,9 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
}
OVS_CB(skb)->flow = flow;
- OVS_CB(skb)->pkt_key = &key;
- ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
- ovs_execute_actions(dp, skb);
+ ovs_flow_stats_update(OVS_CB(skb)->flow, key->tp.flags, skb);
+ ovs_execute_actions(dp, skb, key);
stats_counter = &stats->n_hit;
out:
@@ -377,6 +369,8 @@ static size_t key_attr_size(void)
+ nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
+ nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */
+ + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */
+ + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */
+ nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */
+ nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */
+ nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */
@@ -516,6 +510,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
struct sw_flow *flow;
struct datapath *dp;
struct ethhdr *eth;
+ struct vport *input_vport;
int len;
int err;
@@ -550,13 +545,11 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(flow))
goto err_kfree_skb;
- err = ovs_flow_extract(packet, -1, &flow->key);
+ err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
+ &flow->key);
if (err)
goto err_flow_free;
- err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
- if (err)
- goto err_flow_free;
acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
err = PTR_ERR(acts);
if (IS_ERR(acts))
@@ -564,12 +557,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
&flow->key, 0, &acts);
- rcu_assign_pointer(flow->sf_acts, acts);
if (err)
goto err_flow_free;
+ rcu_assign_pointer(flow->sf_acts, acts);
+
+ OVS_CB(packet)->egress_tun_info = NULL;
OVS_CB(packet)->flow = flow;
- OVS_CB(packet)->pkt_key = &flow->key;
packet->priority = flow->key.phy.priority;
packet->mark = flow->key.phy.skb_mark;
@@ -579,8 +573,17 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
if (!dp)
goto err_unlock;
+ input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
+ if (!input_vport)
+ input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);
+
+ if (!input_vport)
+ goto err_unlock;
+
+ OVS_CB(packet)->input_vport = input_vport;
+
local_bh_disable();
- err = ovs_execute_actions(dp, packet);
+ err = ovs_execute_actions(dp, packet, &flow->key);
local_bh_enable();
rcu_read_unlock();
@@ -933,11 +936,34 @@ error:
return error;
}
+static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
+ const struct sw_flow_key *key,
+ const struct sw_flow_mask *mask)
+{
+ struct sw_flow_actions *acts;
+ struct sw_flow_key masked_key;
+ int error;
+
+ acts = ovs_nla_alloc_flow_actions(nla_len(a));
+ if (IS_ERR(acts))
+ return acts;
+
+ ovs_flow_mask_key(&masked_key, key, mask);
+ error = ovs_nla_copy_actions(a, &masked_key, 0, &acts);
+ if (error) {
+ OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
+ kfree(acts);
+ return ERR_PTR(error);
+ }
+
+ return acts;
+}
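get_flow_actions() validates the copied actions against the masked key rather than the raw key, so the verdict holds for every packet the (key, mask) pair can match, not just the packet that happened to trigger the update. A toy illustration of why the masking matters, assuming a one-field key:

	#include <stdint.h>
	#include <stdio.h>

	struct toy_key { uint32_t ip_proto; };

	static void mask_key(struct toy_key *dst, const struct toy_key *key,
			     const struct toy_key *mask)
	{
		dst->ip_proto = key->ip_proto & mask->ip_proto;
	}

	/* A "set TCP port" action is only safe if the mask pins ip_proto. */
	static int validate_set_tcp_port(const struct toy_key *masked)
	{
		return masked->ip_proto == 6 ? 0 : -1;
	}

	int main(void)
	{
		struct toy_key key = { 6 }, wildcard = { 0 }, masked;

		mask_key(&masked, &key, &wildcard);	/* proto wildcarded */
		printf("safe: %s\n", validate_set_tcp_port(&masked) ?
		       "no (flow can match non-TCP)" : "yes");
		return 0;
	}

With a wildcard mask the masked proto is 0, so validation rightly refuses an action that assumes TCP even though the triggering packet was TCP.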
+
static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
- struct sw_flow_key key, masked_key;
+ struct sw_flow_key key;
struct sw_flow *flow;
struct sw_flow_mask mask;
struct sk_buff *reply = NULL;
@@ -959,17 +985,10 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
/* Validate actions. */
if (a[OVS_FLOW_ATTR_ACTIONS]) {
- acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
- error = PTR_ERR(acts);
- if (IS_ERR(acts))
+ acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask);
+ if (IS_ERR(acts)) {
+ error = PTR_ERR(acts);
goto error;
-
- ovs_flow_mask_key(&masked_key, &key, &mask);
- error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
- &masked_key, 0, &acts);
- if (error) {
- OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
- goto err_kfree_acts;
}
}
@@ -2067,10 +2086,14 @@ static int __init dp_init(void)
pr_info("Open vSwitch switching datapath\n");
- err = ovs_internal_dev_rtnl_link_register();
+ err = action_fifos_init();
if (err)
goto error;
+ err = ovs_internal_dev_rtnl_link_register();
+ if (err)
+ goto error_action_fifos_exit;
+
err = ovs_flow_init();
if (err)
goto error_unreg_rtnl_link;
@@ -2103,6 +2126,8 @@ error_flow_exit:
ovs_flow_exit();
error_unreg_rtnl_link:
ovs_internal_dev_rtnl_link_unregister();
+error_action_fifos_exit:
+ action_fifos_exit();
error:
return err;
}
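The dp_init() changes keep the standard goto-unwind discipline: subsystems initialize in order and the error labels tear them down in exactly the reverse order, which is why the new action_fifos_init() call comes first and its error_action_fifos_exit label sits last. A compact stand-alone rendering of the pattern, with stubs in place of the real subsystems:

	#include <stdio.h>

	static int init_fifos(void)  { puts("fifos up");   return 0; }
	static void exit_fifos(void) { puts("fifos down"); }
	static int init_rtnl(void)   { puts("rtnl fails"); return -1; }

	static int dp_init_demo(void)
	{
		int err = init_fifos();

		if (err)
			goto error;
		err = init_rtnl();
		if (err)
			goto error_fifos_exit;	/* unwind in reverse order */
		return 0;

	error_fifos_exit:
		exit_fifos();
	error:
		return err;
	}

	int main(void)
	{
		return dp_init_demo() ? 1 : 0;
	}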
@@ -2116,6 +2141,7 @@ static void dp_cleanup(void)
ovs_vport_exit();
ovs_flow_exit();
ovs_internal_dev_rtnl_link_unregister();
+ action_fifos_exit();
}
module_init(dp_init);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 701b5738c38a..974135439c5c 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2012 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -95,14 +95,15 @@ struct datapath {
/**
* struct ovs_skb_cb - OVS data in skb CB
* @flow: The flow associated with this packet. May be %NULL if no flow.
- * @pkt_key: The flow information extracted from the packet. Must be nonnull.
- * @tun_key: Key for the tunnel that encapsulated this packet. NULL if the
- * packet is not being tunneled.
+ * @egress_tun_key: Tunnel information about this packet on egress path.
+ * NULL if the packet is not being tunneled.
+ * @input_vport: The original vport the packet came in on. This value is cached
+ * when a packet is received by OVS.
*/
struct ovs_skb_cb {
struct sw_flow *flow;
- struct sw_flow_key *pkt_key;
- struct ovs_key_ipv4_tunnel *tun_key;
+ struct ovs_tunnel_info *egress_tun_info;
+ struct vport *input_vport;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
@@ -183,17 +184,23 @@ static inline struct vport *ovs_vport_ovsl(const struct datapath *dp, int port_n
extern struct notifier_block ovs_dp_device_notifier;
extern struct genl_family dp_vport_genl_family;
-void ovs_dp_process_received_packet(struct vport *, struct sk_buff *);
+void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key);
void ovs_dp_detach_port(struct vport *);
int ovs_dp_upcall(struct datapath *, struct sk_buff *,
const struct dp_upcall_info *);
+const char *ovs_dp_name(const struct datapath *dp);
struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
u8 cmd);
-int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *);
+
void ovs_dp_notify_wq(struct work_struct *work);
+int action_fifos_init(void);
+void action_fifos_exit(void);
+
#define OVS_NLERR(fmt, ...) \
do { \
if (net_ratelimit()) \
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index d07ab538fc9d..62db02ba36bc 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -16,8 +16,6 @@
* 02110-1301, USA
*/
-#include "flow.h"
-#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -46,6 +44,10 @@
#include <net/ipv6.h>
#include <net/ndisc.h>
+#include "datapath.h"
+#include "flow.h"
+#include "flow_netlink.h"
+
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
struct timespec cur_ts;
@@ -89,7 +91,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
* allocated stats as we have already locked them.
*/
if (likely(flow->stats_last_writer != NUMA_NO_NODE)
- && likely(!rcu_dereference(flow->stats[node]))) {
+ && likely(!rcu_access_pointer(flow->stats[node]))) {
/* Try to allocate node-specific stats. */
struct flow_stats *new_stats;
@@ -420,10 +422,9 @@ invalid:
}
/**
- * ovs_flow_extract - extracts a flow key from an Ethernet frame.
+ * key_extract - extracts a flow key from an Ethernet frame.
* @skb: sk_buff that contains the frame, with skb->data pointing to the
* Ethernet header
- * @in_port: port number on which @skb was received.
* @key: output flow key
*
* The caller must ensure that skb->len >= ETH_HLEN.
@@ -442,18 +443,13 @@ invalid:
* of a correct length, otherwise the same as skb->network_header.
* For other key->eth.type values it is left untouched.
*/
-int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
+static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
int error;
struct ethhdr *eth;
- memset(key, 0, sizeof(*key));
-
- key->phy.priority = skb->priority;
- if (OVS_CB(skb)->tun_key)
- memcpy(&key->tun_key, OVS_CB(skb)->tun_key, sizeof(key->tun_key));
- key->phy.in_port = in_port;
- key->phy.skb_mark = skb->mark;
+ /* Flags are always used as part of stats */
+ key->tp.flags = 0;
skb_reset_mac_header(skb);
@@ -469,6 +465,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
* update skb->csum here.
*/
+ key->eth.tci = 0;
if (vlan_tx_tag_present(skb))
key->eth.tci = htons(skb->vlan_tci);
else if (eth->h_proto == htons(ETH_P_8021Q))
@@ -489,6 +486,8 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
error = check_iphdr(skb);
if (unlikely(error)) {
+ memset(&key->ip, 0, sizeof(key->ip));
+ memset(&key->ipv4, 0, sizeof(key->ipv4));
if (error == -EINVAL) {
skb->transport_header = skb->network_header;
error = 0;
@@ -510,8 +509,10 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
return 0;
}
if (nh->frag_off & htons(IP_MF) ||
- skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
+ else
+ key->ip.frag = OVS_FRAG_TYPE_NONE;
/* Transport layer. */
if (key->ip.proto == IPPROTO_TCP) {
@@ -520,18 +521,25 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
key->tp.src = tcp->source;
key->tp.dst = tcp->dest;
key->tp.flags = TCP_FLAGS_BE16(tcp);
+ } else {
+ memset(&key->tp, 0, sizeof(key->tp));
}
+
} else if (key->ip.proto == IPPROTO_UDP) {
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->tp.src = udp->source;
key->tp.dst = udp->dest;
+ } else {
+ memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == IPPROTO_SCTP) {
if (sctphdr_ok(skb)) {
struct sctphdr *sctp = sctp_hdr(skb);
key->tp.src = sctp->source;
key->tp.dst = sctp->dest;
+ } else {
+ memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == IPPROTO_ICMP) {
if (icmphdr_ok(skb)) {
@@ -541,33 +549,44 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
* them in 16-bit network byte order. */
key->tp.src = htons(icmp->type);
key->tp.dst = htons(icmp->code);
+ } else {
+ memset(&key->tp, 0, sizeof(key->tp));
}
}
- } else if ((key->eth.type == htons(ETH_P_ARP) ||
- key->eth.type == htons(ETH_P_RARP)) && arphdr_ok(skb)) {
+ } else if (key->eth.type == htons(ETH_P_ARP) ||
+ key->eth.type == htons(ETH_P_RARP)) {
struct arp_eth_header *arp;
arp = (struct arp_eth_header *)skb_network_header(skb);
- if (arp->ar_hrd == htons(ARPHRD_ETHER)
- && arp->ar_pro == htons(ETH_P_IP)
- && arp->ar_hln == ETH_ALEN
- && arp->ar_pln == 4) {
+ if (arphdr_ok(skb) &&
+ arp->ar_hrd == htons(ARPHRD_ETHER) &&
+ arp->ar_pro == htons(ETH_P_IP) &&
+ arp->ar_hln == ETH_ALEN &&
+ arp->ar_pln == 4) {
/* We only match on the lower 8 bits of the opcode. */
if (ntohs(arp->ar_op) <= 0xff)
key->ip.proto = ntohs(arp->ar_op);
+ else
+ key->ip.proto = 0;
+
memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
+ } else {
+ memset(&key->ip, 0, sizeof(key->ip));
+ memset(&key->ipv4, 0, sizeof(key->ipv4));
}
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
nh_len = parse_ipv6hdr(skb, key);
if (unlikely(nh_len < 0)) {
+ memset(&key->ip, 0, sizeof(key->ip));
+ memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
if (nh_len == -EINVAL) {
skb->transport_header = skb->network_header;
error = 0;
@@ -589,27 +608,87 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
key->tp.src = tcp->source;
key->tp.dst = tcp->dest;
key->tp.flags = TCP_FLAGS_BE16(tcp);
+ } else {
+ memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == NEXTHDR_UDP) {
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->tp.src = udp->source;
key->tp.dst = udp->dest;
+ } else {
+ memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == NEXTHDR_SCTP) {
if (sctphdr_ok(skb)) {
struct sctphdr *sctp = sctp_hdr(skb);
key->tp.src = sctp->source;
key->tp.dst = sctp->dest;
+ } else {
+ memset(&key->tp, 0, sizeof(key->tp));
}
} else if (key->ip.proto == NEXTHDR_ICMP) {
if (icmp6hdr_ok(skb)) {
error = parse_icmpv6(skb, key, nh_len);
if (error)
return error;
+ } else {
+ memset(&key->tp, 0, sizeof(key->tp));
}
}
}
-
return 0;
}
+
+int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
+{
+ return key_extract(skb, key);
+}
+
+int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info,
+ struct sk_buff *skb, struct sw_flow_key *key)
+{
+ /* Extract metadata from packet. */
+ if (tun_info) {
+ memcpy(&key->tun_key, &tun_info->tunnel, sizeof(key->tun_key));
+
+ if (tun_info->options) {
+			BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) *
+					    8)) - 1 >
+				     sizeof(key->tun_opts));
+ memcpy(GENEVE_OPTS(key, tun_info->options_len),
+ tun_info->options, tun_info->options_len);
+ key->tun_opts_len = tun_info->options_len;
+ } else {
+ key->tun_opts_len = 0;
+ }
+ } else {
+ key->tun_opts_len = 0;
+ memset(&key->tun_key, 0, sizeof(key->tun_key));
+ }
+
+ key->phy.priority = skb->priority;
+ key->phy.in_port = OVS_CB(skb)->input_vport->port_no;
+ key->phy.skb_mark = skb->mark;
+ key->ovs_flow_hash = 0;
+ key->recirc_id = 0;
+
+ /* Flags are always used as part of stats */
+ key->tp.flags = 0;
+
+ return key_extract(skb, key);
+}
+
+int ovs_flow_key_extract_userspace(const struct nlattr *attr,
+ struct sk_buff *skb,
+ struct sw_flow_key *key)
+{
+ int err;
+
+ /* Extract metadata from netlink attributes. */
+ err = ovs_nla_get_flow_metadata(attr, key);
+ if (err)
+ return err;
+
+ return key_extract(skb, key);
+}
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 5e5aaed3a85b..71813318c8c7 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -49,29 +49,53 @@ struct ovs_key_ipv4_tunnel {
u8 ipv4_ttl;
} __packed __aligned(4); /* Minimize padding. */
-static inline void ovs_flow_tun_key_init(struct ovs_key_ipv4_tunnel *tun_key,
- const struct iphdr *iph, __be64 tun_id,
- __be16 tun_flags)
+struct ovs_tunnel_info {
+ struct ovs_key_ipv4_tunnel tunnel;
+ struct geneve_opt *options;
+ u8 options_len;
+};
+
+/* Store options at the end of the array if they are less than the
+ * maximum size. This allows us to get the benefits of variable length
+ * matching for small options.
+ */
+#define GENEVE_OPTS(flow_key, opt_len) \
+ ((struct geneve_opt *)((flow_key)->tun_opts + \
+ FIELD_SIZEOF(struct sw_flow_key, tun_opts) - \
+ opt_len))
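GENEVE_OPTS() resolves to the tail of the fixed 255-byte tun_opts[] array: an opt_len-byte blob starts at offset 255 - opt_len, so options of every length share a common suffix and a flow mask can cover exactly the bytes that are present. A userspace sketch of the same offset arithmetic:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define TUN_OPTS_MAX 255

	/* Mirror of the GENEVE_OPTS() layout: store at the buffer's tail. */
	static uint8_t *opts_at_tail(uint8_t buf[TUN_OPTS_MAX], size_t opt_len)
	{
		return buf + TUN_OPTS_MAX - opt_len;
	}

	int main(void)
	{
		uint8_t buf[TUN_OPTS_MAX] = { 0 };
		uint8_t opt[8] = { 0xde, 0xad };

		memcpy(opts_at_tail(buf, sizeof(opt)), opt, sizeof(opt));
		printf("offset = %zu\n",	/* prints 247 = 255 - 8 */
		       (size_t)(opts_at_tail(buf, sizeof(opt)) - buf));
		return 0;
	}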
+
+static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info,
+ const struct iphdr *iph,
+ __be64 tun_id, __be16 tun_flags,
+ struct geneve_opt *opts,
+ u8 opts_len)
{
- tun_key->tun_id = tun_id;
- tun_key->ipv4_src = iph->saddr;
- tun_key->ipv4_dst = iph->daddr;
- tun_key->ipv4_tos = iph->tos;
- tun_key->ipv4_ttl = iph->ttl;
- tun_key->tun_flags = tun_flags;
+ tun_info->tunnel.tun_id = tun_id;
+ tun_info->tunnel.ipv4_src = iph->saddr;
+ tun_info->tunnel.ipv4_dst = iph->daddr;
+ tun_info->tunnel.ipv4_tos = iph->tos;
+ tun_info->tunnel.ipv4_ttl = iph->ttl;
+ tun_info->tunnel.tun_flags = tun_flags;
/* clear struct padding. */
- memset((unsigned char *) tun_key + OVS_TUNNEL_KEY_SIZE, 0,
- sizeof(*tun_key) - OVS_TUNNEL_KEY_SIZE);
+ memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE, 0,
+ sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE);
+
+ tun_info->options = opts;
+ tun_info->options_len = opts_len;
}
struct sw_flow_key {
+ u8 tun_opts[255];
+ u8 tun_opts_len;
struct ovs_key_ipv4_tunnel tun_key; /* Encapsulating tunnel key. */
struct {
u32 priority; /* Packet QoS priority. */
u32 skb_mark; /* SKB mark. */
u16 in_port; /* Input switch port (or DP_MAX_PORTS). */
} __packed phy; /* Safe when right after 'tun_key'. */
+ u32 ovs_flow_hash; /* Datapath computed hash value. */
+ u32 recirc_id; /* Recirculation ID. */
struct {
u8 src[ETH_ALEN]; /* Ethernet source address. */
u8 dst[ETH_ALEN]; /* Ethernet destination address. */
@@ -187,6 +211,12 @@ void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
void ovs_flow_stats_clear(struct sw_flow *);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
-int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
+int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
+int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info, struct sk_buff *skb,
+ struct sw_flow_key *key);
+/* Extract key from packet coming from userspace. */
+int ovs_flow_key_extract_userspace(const struct nlattr *attr,
+ struct sk_buff *skb,
+ struct sw_flow_key *key);
#endif /* flow.h */
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index d757848da89c..368f23307911 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -42,6 +42,7 @@
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
+#include <net/geneve.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
@@ -88,18 +89,20 @@ static void update_range__(struct sw_flow_match *match,
} \
} while (0)
-#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
- do { \
- update_range__(match, offsetof(struct sw_flow_key, field), \
- len, is_mask); \
- if (is_mask) { \
- if ((match)->mask) \
- memcpy(&(match)->mask->key.field, value_p, len);\
- } else { \
- memcpy(&(match)->key->field, value_p, len); \
- } \
+#define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \
+ do { \
+ update_range__(match, offset, len, is_mask); \
+ if (is_mask) \
+ memcpy((u8 *)&(match)->mask->key + offset, value_p, \
+ len); \
+ else \
+ memcpy((u8 *)(match)->key + offset, value_p, len); \
} while (0)
+#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
+ SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \
+ value_p, len, is_mask)
+
static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
return range->end - range->start;
@@ -251,6 +254,8 @@ static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
+ [OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32),
+ [OVS_KEY_ATTR_DP_HASH] = sizeof(u32),
[OVS_KEY_ATTR_TUNNEL] = -1,
};
@@ -333,6 +338,7 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
int rem;
bool ttl = false;
__be16 tun_flags = 0;
+ unsigned long opt_key_offset;
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
@@ -344,6 +350,8 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
[OVS_TUNNEL_KEY_ATTR_TTL] = 1,
[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
+ [OVS_TUNNEL_KEY_ATTR_OAM] = 0,
+ [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = -1,
};
if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
@@ -352,7 +360,8 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
return -EINVAL;
}
- if (ovs_tunnel_key_lens[type] != nla_len(a)) {
+ if (ovs_tunnel_key_lens[type] != nla_len(a) &&
+ ovs_tunnel_key_lens[type] != -1) {
OVS_NLERR("IPv4 tunnel attribute type has unexpected "
" length (type=%d, length=%d, expected=%d).\n",
type, nla_len(a), ovs_tunnel_key_lens[type]);
@@ -388,7 +397,63 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
case OVS_TUNNEL_KEY_ATTR_CSUM:
tun_flags |= TUNNEL_CSUM;
break;
+ case OVS_TUNNEL_KEY_ATTR_OAM:
+ tun_flags |= TUNNEL_OAM;
+ break;
+ case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS:
+ tun_flags |= TUNNEL_OPTIONS_PRESENT;
+ if (nla_len(a) > sizeof(match->key->tun_opts)) {
+ OVS_NLERR("Geneve option length exceeds maximum size (len %d, max %zu).\n",
+ nla_len(a),
+ sizeof(match->key->tun_opts));
+ return -EINVAL;
+ }
+
+ if (nla_len(a) % 4 != 0) {
+ OVS_NLERR("Geneve option length is not a multiple of 4 (len %d).\n",
+ nla_len(a));
+ return -EINVAL;
+ }
+
+ /* We need to record the length of the options passed
+ * down, otherwise packets with the same format but
+ * additional options will be silently matched.
+ */
+ if (!is_mask) {
+ SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a),
+ false);
+ } else {
+ /* This is somewhat unusual because it looks at
+ * both the key and mask while parsing the
+ * attributes (and by extension assumes the key
+ * is parsed first). Normally, we would verify
+ * that each is the correct length and that the
+ * attributes line up in the validate function.
+ * However, that is difficult because this is
+ * variable length and we won't have the
+ * information later.
+ */
+ if (match->key->tun_opts_len != nla_len(a)) {
+ OVS_NLERR("Geneve option key length (%d) is different from mask length (%d).",
+ match->key->tun_opts_len,
+ nla_len(a));
+ return -EINVAL;
+ }
+
+ SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff,
+ true);
+ }
+
+ opt_key_offset = (unsigned long)GENEVE_OPTS(
+ (struct sw_flow_key *)0,
+ nla_len(a));
+ SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset,
+ nla_data(a), nla_len(a),
+ is_mask);
+ break;
default:
+ OVS_NLERR("Unknown IPv4 tunnel attribute (%d).\n",
+ type);
return -EINVAL;
}
}
@@ -415,45 +480,80 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr,
return 0;
}
-static int ipv4_tun_to_nlattr(struct sk_buff *skb,
- const struct ovs_key_ipv4_tunnel *tun_key,
- const struct ovs_key_ipv4_tunnel *output)
+static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
+ const struct ovs_key_ipv4_tunnel *output,
+ const struct geneve_opt *tun_opts,
+ int swkey_tun_opts_len)
{
- struct nlattr *nla;
-
- nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
- if (!nla)
- return -EMSGSIZE;
-
if (output->tun_flags & TUNNEL_KEY &&
nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
return -EMSGSIZE;
if (output->ipv4_src &&
- nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
+ nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
return -EMSGSIZE;
if (output->ipv4_dst &&
- nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
+ nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
return -EMSGSIZE;
if (output->ipv4_tos &&
- nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
+ nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
return -EMSGSIZE;
if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
- nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
+ nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
return -EMSGSIZE;
if ((output->tun_flags & TUNNEL_CSUM) &&
- nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
+ nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
+ return -EMSGSIZE;
+ if ((output->tun_flags & TUNNEL_OAM) &&
+ nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM))
+ return -EMSGSIZE;
+ if (tun_opts &&
+ nla_put(skb, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
+ swkey_tun_opts_len, tun_opts))
return -EMSGSIZE;
- nla_nest_end(skb, nla);
return 0;
}
+static int ipv4_tun_to_nlattr(struct sk_buff *skb,
+ const struct ovs_key_ipv4_tunnel *output,
+ const struct geneve_opt *tun_opts,
+ int swkey_tun_opts_len)
+{
+ struct nlattr *nla;
+ int err;
+
+ nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
+ if (!nla)
+ return -EMSGSIZE;
+
+ err = __ipv4_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len);
+ if (err)
+ return err;
+
+ nla_nest_end(skb, nla);
+ return 0;
+}
+
static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
const struct nlattr **a, bool is_mask)
{
+ if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) {
+ u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]);
+
+ SW_FLOW_KEY_PUT(match, ovs_flow_hash, hash_val, is_mask);
+ *attrs &= ~(1 << OVS_KEY_ATTR_DP_HASH);
+ }
+
+ if (*attrs & (1 << OVS_KEY_ATTR_RECIRC_ID)) {
+ u32 recirc_id = nla_get_u32(a[OVS_KEY_ATTR_RECIRC_ID]);
+
+ SW_FLOW_KEY_PUT(match, recirc_id, recirc_id, is_mask);
+ *attrs &= ~(1 << OVS_KEY_ATTR_RECIRC_ID);
+ }
+
if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
SW_FLOW_KEY_PUT(match, phy.priority,
nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
@@ -836,7 +936,7 @@ int ovs_nla_get_match(struct sw_flow_match *match,
/**
* ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
- * @flow: Receives extracted in_port, priority, tun_key and skb_mark.
+ * @key: Receives extracted in_port, priority, tun_key and skb_mark.
* @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* sequence.
*
@@ -846,32 +946,24 @@ int ovs_nla_get_match(struct sw_flow_match *match,
* extracted from the packet itself.
*/
-int ovs_nla_get_flow_metadata(struct sw_flow *flow,
- const struct nlattr *attr)
+int ovs_nla_get_flow_metadata(const struct nlattr *attr,
+ struct sw_flow_key *key)
{
- struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+ struct sw_flow_match match;
u64 attrs = 0;
int err;
- struct sw_flow_match match;
-
- flow->key.phy.in_port = DP_MAX_PORTS;
- flow->key.phy.priority = 0;
- flow->key.phy.skb_mark = 0;
- memset(tun_key, 0, sizeof(flow->key.tun_key));
err = parse_flow_nlattrs(attr, a, &attrs);
if (err)
return -EINVAL;
memset(&match, 0, sizeof(match));
- match.key = &flow->key;
+ match.key = key;
- err = metadata_from_nlattrs(&match, &attrs, a, false);
- if (err)
- return err;
+ key->phy.in_port = DP_MAX_PORTS;
- return 0;
+ return metadata_from_nlattrs(&match, &attrs, a, false);
}
int ovs_nla_put_flow(const struct sw_flow_key *swkey,
@@ -881,13 +973,26 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey,
struct nlattr *nla, *encap;
bool is_mask = (swkey != output);
- if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
+ if (nla_put_u32(skb, OVS_KEY_ATTR_RECIRC_ID, output->recirc_id))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, OVS_KEY_ATTR_DP_HASH, output->ovs_flow_hash))
goto nla_put_failure;
- if ((swkey->tun_key.ipv4_dst || is_mask) &&
- ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
+ if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
goto nla_put_failure;
+ if ((swkey->tun_key.ipv4_dst || is_mask)) {
+ const struct geneve_opt *opts = NULL;
+
+ if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
+ opts = GENEVE_OPTS(output, swkey->tun_opts_len);
+
+ if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts,
+ swkey->tun_opts_len))
+ goto nla_put_failure;
+ }
+
if (swkey->phy.in_port == DP_MAX_PORTS) {
if (is_mask && (output->phy.in_port == 0xffff))
if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
@@ -1127,13 +1232,14 @@ out:
return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}
-static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
+static struct nlattr *__add_action(struct sw_flow_actions **sfa,
+ int attrtype, void *data, int len)
{
struct nlattr *a;
a = reserve_sfa_size(sfa, nla_attr_size(len));
if (IS_ERR(a))
- return PTR_ERR(a);
+ return a;
a->nla_type = attrtype;
a->nla_len = nla_attr_size(len);
@@ -1142,6 +1248,18 @@ static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, in
memcpy(nla_data(a), data, len);
memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));
+ return a;
+}
+
+static int add_action(struct sw_flow_actions **sfa, int attrtype,
+ void *data, int len)
+{
+ struct nlattr *a;
+
+ a = __add_action(sfa, attrtype, data, len);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
return 0;
}
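The __add_action()/add_action() split follows the kernel's ERR_PTR convention: on success return the pointer to the reserved attribute so the caller can fill it in place, as validate_and_copy_set_tun() now does for OVS_KEY_ATTR_TUNNEL_INFO; on failure return the errno encoded into the pointer. A freestanding sketch of that encoding (the real helpers live in <linux/err.h>; the buffer and names here are invented):

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_ERRNO 4095

	static inline void *ERR_PTR(long error) { return (void *)error; }
	static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
	static inline int IS_ERR(const void *ptr)
	{
		return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
	}

	static char sfa_buf[64];
	static size_t sfa_used;

	/* Like __add_action(): hand back the reserved space, not a status. */
	static void *reserve_attr(size_t len)
	{
		void *p;

		if (sfa_used + len > sizeof(sfa_buf))
			return ERR_PTR(-ENOMEM);
		p = sfa_buf + sfa_used;
		sfa_used += len;
		return p;
	}

	int main(void)
	{
		void *a = reserve_attr(16);

		if (IS_ERR(a))
			return (int)-PTR_ERR(a);
		printf("reserved at offset %zu\n",
		       (size_t)((char *)a - sfa_buf));
		return 0;
	}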
@@ -1247,6 +1365,8 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
{
struct sw_flow_match match;
struct sw_flow_key key;
+ struct ovs_tunnel_info *tun_info;
+ struct nlattr *a;
int err, start;
ovs_match_init(&match, &key, NULL);
@@ -1254,12 +1374,56 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
if (err)
return err;
+ if (key.tun_opts_len) {
+ struct geneve_opt *option = GENEVE_OPTS(&key,
+ key.tun_opts_len);
+ int opts_len = key.tun_opts_len;
+ bool crit_opt = false;
+
+ while (opts_len > 0) {
+ int len;
+
+ if (opts_len < sizeof(*option))
+ return -EINVAL;
+
+ len = sizeof(*option) + option->length * 4;
+ if (len > opts_len)
+ return -EINVAL;
+
+ crit_opt |= !!(option->type & GENEVE_CRIT_OPT_TYPE);
+
+ option = (struct geneve_opt *)((u8 *)option + len);
+ opts_len -= len;
+		}
+
+ key.tun_key.tun_flags |= crit_opt ? TUNNEL_CRIT_OPT : 0;
+	}
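The loop above is a classic TLV walk: each Geneve option is a fixed 4-byte header whose length field counts 4-byte payload words, so the cursor advances by sizeof(header) + length * 4 and truncated input is rejected at either step. A self-contained sketch with a simplified header (the real struct geneve_opt uses bitfields; 0x80 stands in for GENEVE_CRIT_OPT_TYPE):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct opt_hdr {
		uint16_t opt_class;
		uint8_t type;		/* top bit set => critical option */
		uint8_t length;		/* payload length in 4-byte words */
	};

	static int walk_opts(const uint8_t *p, size_t len, int *crit)
	{
		*crit = 0;
		while (len > 0) {
			const struct opt_hdr *o = (const void *)p;
			size_t step;

			if (len < sizeof(*o))
				return -1;	/* truncated header */
			step = sizeof(*o) + (size_t)o->length * 4;
			if (step > len)
				return -1;	/* truncated payload */
			*crit |= !!(o->type & 0x80);
			p += step;
			len -= step;
		}
		return 0;
	}

	int main(void)
	{
		const uint8_t opts[8] = { 0, 1, 0x80, 1,
					  0xde, 0xad, 0xbe, 0xef };
		int crit;
		int ok = walk_opts(opts, sizeof(opts), &crit);

		printf("ok=%d crit=%d\n", ok, crit);
		return 0;
	}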
+
start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
if (start < 0)
return start;
- err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
- sizeof(match.key->tun_key));
+ a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL,
+ sizeof(*tun_info) + key.tun_opts_len);
+ if (IS_ERR(a))
+ return PTR_ERR(a);
+
+ tun_info = nla_data(a);
+ tun_info->tunnel = key.tun_key;
+ tun_info->options_len = key.tun_opts_len;
+
+ if (tun_info->options_len) {
+ /* We need to store the options in the action itself since
+ * everything else will go away after flow setup. We can append
+		 * them to tun_info and then point there.
+ */
+ memcpy((tun_info + 1), GENEVE_OPTS(&key, key.tun_opts_len),
+ key.tun_opts_len);
+ tun_info->options = (struct geneve_opt *)(tun_info + 1);
+ } else {
+ tun_info->options = NULL;
+ }
+
add_nested_action_end(*sfa, start);
return err;
@@ -1409,11 +1573,13 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
/* Expected argument lengths, (u32)-1 for variable length. */
static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
[OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
+ [OVS_ACTION_ATTR_RECIRC] = sizeof(u32),
[OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
[OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
[OVS_ACTION_ATTR_POP_VLAN] = 0,
[OVS_ACTION_ATTR_SET] = (u32)-1,
- [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
+ [OVS_ACTION_ATTR_SAMPLE] = (u32)-1,
+ [OVS_ACTION_ATTR_HASH] = sizeof(struct ovs_action_hash)
};
const struct ovs_action_push_vlan *vlan;
int type = nla_type(a);
@@ -1440,6 +1606,18 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
return -EINVAL;
break;
+ case OVS_ACTION_ATTR_HASH: {
+ const struct ovs_action_hash *act_hash = nla_data(a);
+
+ switch (act_hash->hash_alg) {
+ case OVS_HASH_ALG_L4:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ }
case OVS_ACTION_ATTR_POP_VLAN:
break;
@@ -1452,6 +1630,9 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
return -EINVAL;
break;
+ case OVS_ACTION_ATTR_RECIRC:
+ break;
+
case OVS_ACTION_ATTR_SET:
err = validate_set(a, key, sfa, &skip_copy);
if (err)
@@ -1525,17 +1706,22 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
int err;
switch (key_type) {
- case OVS_KEY_ATTR_IPV4_TUNNEL:
+ case OVS_KEY_ATTR_TUNNEL_INFO: {
+ struct ovs_tunnel_info *tun_info = nla_data(ovs_key);
+
start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
if (!start)
return -EMSGSIZE;
- err = ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
- nla_data(ovs_key));
+ err = ipv4_tun_to_nlattr(skb, &tun_info->tunnel,
+ tun_info->options_len ?
+ tun_info->options : NULL,
+ tun_info->options_len);
if (err)
return err;
nla_nest_end(skb, start);
break;
+ }
default:
if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
return -EMSGSIZE;
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 440151045d39..206e45add888 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -42,8 +42,8 @@ void ovs_match_init(struct sw_flow_match *match,
int ovs_nla_put_flow(const struct sw_flow_key *,
const struct sw_flow_key *, struct sk_buff *);
-int ovs_nla_get_flow_metadata(struct sw_flow *flow,
- const struct nlattr *attr);
+int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *);
+
int ovs_nla_get_match(struct sw_flow_match *match,
const struct nlattr *,
const struct nlattr *);
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
new file mode 100644
index 000000000000..910b3ef2c0d5
--- /dev/null
+++ b/net/openvswitch/vport-geneve.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2014 Nicira, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/version.h>
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/net.h>
+#include <linux/rculist.h>
+#include <linux/udp.h>
+#include <linux/if_vlan.h>
+
+#include <net/geneve.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/udp.h>
+#include <net/xfrm.h>
+
+#include "datapath.h"
+#include "vport.h"
+
+/**
+ * struct geneve_port - Keeps track of open UDP ports
+ * @gs: The Geneve socket created for this port number.
+ * @name: vport name.
+ */
+struct geneve_port {
+ struct geneve_sock *gs;
+ char name[IFNAMSIZ];
+};
+
+static LIST_HEAD(geneve_ports);
+
+static inline struct geneve_port *geneve_vport(const struct vport *vport)
+{
+ return vport_priv(vport);
+}
+
+static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
+{
+ return (struct genevehdr *)(udp_hdr(skb) + 1);
+}
+
+/* Convert 64 bit tunnel ID to 24 bit VNI. */
+static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+ vni[0] = (__force __u8)(tun_id >> 16);
+ vni[1] = (__force __u8)(tun_id >> 8);
+ vni[2] = (__force __u8)tun_id;
+#else
+ vni[0] = (__force __u8)((__force u64)tun_id >> 40);
+ vni[1] = (__force __u8)((__force u64)tun_id >> 48);
+ vni[2] = (__force __u8)((__force u64)tun_id >> 56);
+#endif
+}
+
+/* Convert 24 bit VNI to 64 bit tunnel ID. */
+static __be64 vni_to_tunnel_id(__u8 *vni)
+{
+#ifdef __BIG_ENDIAN
+ return (vni[0] << 16) | (vni[1] << 8) | vni[2];
+#else
+ return (__force __be64)(((__force u64)vni[0] << 40) |
+ ((__force u64)vni[1] << 48) |
+ ((__force u64)vni[2] << 56));
+#endif
+}
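Stripped of the __be64 byte-order juggling, the two helpers above just place the 24-bit VNI in the low three bytes of the tunnel ID, most significant byte first. On plain host-order values the round trip looks like this:

	#include <stdint.h>
	#include <stdio.h>

	static void tunnel_id_to_vni(uint64_t tun_id, uint8_t vni[3])
	{
		vni[0] = tun_id >> 16;
		vni[1] = tun_id >> 8;
		vni[2] = tun_id;
	}

	static uint64_t vni_to_tunnel_id(const uint8_t vni[3])
	{
		return ((uint64_t)vni[0] << 16) |
		       ((uint64_t)vni[1] << 8) |
			(uint64_t)vni[2];
	}

	int main(void)
	{
		uint8_t vni[3];

		tunnel_id_to_vni(0xabcdef, vni);	/* ab cd ef */
		printf("%02x %02x %02x -> %#llx\n", vni[0], vni[1], vni[2],
		       (unsigned long long)vni_to_tunnel_id(vni));
		return 0;
	}

The kernel variant needs the #ifdef only because it shifts the raw __be64 representation rather than the logical value.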
+
+static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb)
+{
+ struct vport *vport = gs->rcv_data;
+ struct genevehdr *geneveh = geneve_hdr(skb);
+ int opts_len;
+ struct ovs_tunnel_info tun_info;
+ __be64 key;
+ __be16 flags;
+
+ opts_len = geneveh->opt_len * 4;
+
+ flags = TUNNEL_KEY | TUNNEL_OPTIONS_PRESENT |
+ (udp_hdr(skb)->check != 0 ? TUNNEL_CSUM : 0) |
+ (geneveh->oam ? TUNNEL_OAM : 0) |
+ (geneveh->critical ? TUNNEL_CRIT_OPT : 0);
+
+ key = vni_to_tunnel_id(geneveh->vni);
+
+ ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key, flags,
+ geneveh->options, opts_len);
+
+ ovs_vport_receive(vport, skb, &tun_info);
+}
+
+static int geneve_get_options(const struct vport *vport,
+ struct sk_buff *skb)
+{
+ struct geneve_port *geneve_port = geneve_vport(vport);
+ struct inet_sock *sk = inet_sk(geneve_port->gs->sock->sk);
+
+ if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(sk->inet_sport)))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static void geneve_tnl_destroy(struct vport *vport)
+{
+ struct geneve_port *geneve_port = geneve_vport(vport);
+
+ geneve_sock_release(geneve_port->gs);
+
+ ovs_vport_deferred_free(vport);
+}
+
+static struct vport *geneve_tnl_create(const struct vport_parms *parms)
+{
+ struct net *net = ovs_dp_get_net(parms->dp);
+ struct nlattr *options = parms->options;
+ struct geneve_port *geneve_port;
+ struct geneve_sock *gs;
+ struct vport *vport;
+ struct nlattr *a;
+ int err;
+ u16 dst_port;
+
+ if (!options) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
+ if (a && nla_len(a) == sizeof(u16)) {
+ dst_port = nla_get_u16(a);
+ } else {
+ /* Require destination port from userspace. */
+ err = -EINVAL;
+ goto error;
+ }
+
+ vport = ovs_vport_alloc(sizeof(struct geneve_port),
+ &ovs_geneve_vport_ops, parms);
+ if (IS_ERR(vport))
+ return vport;
+
+ geneve_port = geneve_vport(vport);
+ strncpy(geneve_port->name, parms->name, IFNAMSIZ);
+
+ gs = geneve_sock_add(net, htons(dst_port), geneve_rcv, vport, true, 0);
+ if (IS_ERR(gs)) {
+ ovs_vport_free(vport);
+ return (void *)gs;
+ }
+ geneve_port->gs = gs;
+
+ return vport;
+error:
+ return ERR_PTR(err);
+}
+
+static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
+{
+ struct ovs_key_ipv4_tunnel *tun_key;
+ struct ovs_tunnel_info *tun_info;
+ struct net *net = ovs_dp_get_net(vport->dp);
+ struct geneve_port *geneve_port = geneve_vport(vport);
+ __be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport;
+ __be16 sport;
+ struct rtable *rt;
+ struct flowi4 fl;
+ u8 vni[3];
+ __be16 df;
+ int err;
+
+ tun_info = OVS_CB(skb)->egress_tun_info;
+ if (unlikely(!tun_info)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ tun_key = &tun_info->tunnel;
+
+ /* Route lookup */
+ memset(&fl, 0, sizeof(fl));
+ fl.daddr = tun_key->ipv4_dst;
+ fl.saddr = tun_key->ipv4_src;
+ fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
+ fl.flowi4_mark = skb->mark;
+ fl.flowi4_proto = IPPROTO_UDP;
+
+ rt = ip_route_output_key(net, &fl);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto error;
+ }
+
+ df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true);
+ tunnel_id_to_vni(tun_key->tun_id, vni);
+ skb->ignore_df = 1;
+
+ err = geneve_xmit_skb(geneve_port->gs, rt, skb, fl.saddr,
+ tun_key->ipv4_dst, tun_key->ipv4_tos,
+ tun_key->ipv4_ttl, df, sport, dport,
+ tun_key->tun_flags, vni,
+ tun_info->options_len, (u8 *)tun_info->options,
+ false);
+ if (err < 0)
+ ip_rt_put(rt);
+error:
+ return err;
+}
+
+static const char *geneve_get_name(const struct vport *vport)
+{
+ struct geneve_port *geneve_port = geneve_vport(vport);
+
+ return geneve_port->name;
+}
+
+const struct vport_ops ovs_geneve_vport_ops = {
+ .type = OVS_VPORT_TYPE_GENEVE,
+ .create = geneve_tnl_create,
+ .destroy = geneve_tnl_destroy,
+ .get_name = geneve_get_name,
+ .get_options = geneve_get_options,
+ .send = geneve_tnl_send,
+};
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index f49148a07da2..108b82da2fd9 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -63,8 +63,10 @@ static __be16 filter_tnl_flags(__be16 flags)
static struct sk_buff *__build_header(struct sk_buff *skb,
int tunnel_hlen)
{
- const struct ovs_key_ipv4_tunnel *tun_key = OVS_CB(skb)->tun_key;
struct tnl_ptk_info tpi;
+ const struct ovs_key_ipv4_tunnel *tun_key;
+
+ tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
if (IS_ERR(skb))
@@ -92,7 +94,7 @@ static __be64 key_to_tunnel_id(__be32 key, __be32 seq)
static int gre_rcv(struct sk_buff *skb,
const struct tnl_ptk_info *tpi)
{
- struct ovs_key_ipv4_tunnel tun_key;
+ struct ovs_tunnel_info tun_info;
struct ovs_net *ovs_net;
struct vport *vport;
__be64 key;
@@ -103,10 +105,10 @@ static int gre_rcv(struct sk_buff *skb,
return PACKET_REJECT;
key = key_to_tunnel_id(tpi->key, tpi->seq);
- ovs_flow_tun_key_init(&tun_key, ip_hdr(skb), key,
- filter_tnl_flags(tpi->flags));
+ ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key,
+ filter_tnl_flags(tpi->flags), NULL, 0);
- ovs_vport_receive(vport, skb, &tun_key);
+ ovs_vport_receive(vport, skb, &tun_info);
return PACKET_RCVD;
}
@@ -129,6 +131,7 @@ static int gre_err(struct sk_buff *skb, u32 info,
static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
{
struct net *net = ovs_dp_get_net(vport->dp);
+ struct ovs_key_ipv4_tunnel *tun_key;
struct flowi4 fl;
struct rtable *rt;
int min_headroom;
@@ -136,16 +139,17 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
__be16 df;
int err;
- if (unlikely(!OVS_CB(skb)->tun_key)) {
+ if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
err = -EINVAL;
goto error;
}
+ tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
/* Route lookup */
memset(&fl, 0, sizeof(fl));
- fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst;
- fl.saddr = OVS_CB(skb)->tun_key->ipv4_src;
- fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
+ fl.daddr = tun_key->ipv4_dst;
+ fl.saddr = tun_key->ipv4_src;
+ fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
fl.flowi4_mark = skb->mark;
fl.flowi4_proto = IPPROTO_GRE;
@@ -153,7 +157,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
if (IS_ERR(rt))
return PTR_ERR(rt);
- tunnel_hlen = ip_gre_calc_hlen(OVS_CB(skb)->tun_key->tun_flags);
+ tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ tunnel_hlen + sizeof(struct iphdr)
@@ -185,15 +189,14 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
goto err_free_rt;
}
- df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
+ df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
htons(IP_DF) : 0;
skb->ignore_df = 1;
return iptunnel_xmit(skb->sk, rt, skb, fl.saddr,
- OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
- OVS_CB(skb)->tun_key->ipv4_tos,
- OVS_CB(skb)->tun_key->ipv4_ttl, df, false);
+ tun_key->ipv4_dst, IPPROTO_GRE,
+ tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
err_free_rt:
ip_rt_put(rt);
error:
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index d8b7e247bebf..2735e01dca73 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Nicira, Inc.
+ * Copyright (c) 2014 Nicira, Inc.
* Copyright (c) 2013 Cisco Systems, Inc.
*
* This program is free software; you can redistribute it and/or
@@ -58,7 +58,7 @@ static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
/* Called with rcu_read_lock and BH disabled. */
static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
{
- struct ovs_key_ipv4_tunnel tun_key;
+ struct ovs_tunnel_info tun_info;
struct vport *vport = vs->data;
struct iphdr *iph;
__be64 key;
@@ -66,9 +66,9 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
/* Save outer tunnel values */
iph = ip_hdr(skb);
key = cpu_to_be64(ntohl(vx_vni) >> 8);
- ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY);
+ ovs_flow_tun_info_init(&tun_info, iph, key, TUNNEL_KEY, NULL, 0);
- ovs_vport_receive(vport, skb, &tun_key);
+ ovs_vport_receive(vport, skb, &tun_info);
}
static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
@@ -140,22 +140,24 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
struct net *net = ovs_dp_get_net(vport->dp);
struct vxlan_port *vxlan_port = vxlan_vport(vport);
__be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+ struct ovs_key_ipv4_tunnel *tun_key;
struct rtable *rt;
struct flowi4 fl;
__be16 src_port;
__be16 df;
int err;
- if (unlikely(!OVS_CB(skb)->tun_key)) {
+ if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
err = -EINVAL;
goto error;
}
+ tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
/* Route lookup */
memset(&fl, 0, sizeof(fl));
- fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst;
- fl.saddr = OVS_CB(skb)->tun_key->ipv4_src;
- fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
+ fl.daddr = tun_key->ipv4_dst;
+ fl.saddr = tun_key->ipv4_src;
+ fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos);
fl.flowi4_mark = skb->mark;
fl.flowi4_proto = IPPROTO_UDP;
@@ -165,7 +167,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
goto error;
}
- df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
+ df = tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
htons(IP_DF) : 0;
skb->ignore_df = 1;
@@ -173,11 +175,10 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
src_port = udp_flow_src_port(net, skb, 0, 0, true);
err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
- fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst,
- OVS_CB(skb)->tun_key->ipv4_tos,
- OVS_CB(skb)->tun_key->ipv4_ttl, df,
+ fl.saddr, tun_key->ipv4_dst,
+ tun_key->ipv4_tos, tun_key->ipv4_ttl, df,
src_port, dst_port,
- htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8),
+ htonl(be64_to_cpu(tun_key->tun_id) << 8),
false);
if (err < 0)
ip_rt_put(rt);
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6d8f2ec481d9..53001b020ca7 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2012 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -48,6 +48,9 @@ static const struct vport_ops *vport_ops_list[] = {
#ifdef CONFIG_OPENVSWITCH_VXLAN
&ovs_vxlan_vport_ops,
#endif
+#ifdef CONFIG_OPENVSWITCH_GENEVE
+ &ovs_geneve_vport_ops,
+#endif
};
/* Protected by RCU read lock for reading, ovs_mutex for writing. */
@@ -148,8 +151,6 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
return ERR_PTR(-ENOMEM);
}
- spin_lock_init(&vport->stats_lock);
-
return vport;
}
@@ -268,14 +269,10 @@ void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
* netdev-stats can be directly read over netlink-ioctl.
*/
- spin_lock_bh(&vport->stats_lock);
-
- stats->rx_errors = vport->err_stats.rx_errors;
- stats->tx_errors = vport->err_stats.tx_errors;
- stats->tx_dropped = vport->err_stats.tx_dropped;
- stats->rx_dropped = vport->err_stats.rx_dropped;
-
- spin_unlock_bh(&vport->stats_lock);
+ stats->rx_errors = atomic_long_read(&vport->err_stats.rx_errors);
+ stats->tx_errors = atomic_long_read(&vport->err_stats.tx_errors);
+ stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped);
+ stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped);
for_each_possible_cpu(i) {
const struct pcpu_sw_netstats *percpu_stats;
@@ -438,9 +435,11 @@ u32 ovs_vport_find_upcall_portid(const struct vport *p, struct sk_buff *skb)
* skb->data should point to the Ethernet header.
*/
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
- struct ovs_key_ipv4_tunnel *tun_key)
+ struct ovs_tunnel_info *tun_info)
{
struct pcpu_sw_netstats *stats;
+ struct sw_flow_key key;
+ int error;
stats = this_cpu_ptr(vport->percpu_stats);
u64_stats_update_begin(&stats->syncp);
@@ -448,8 +447,15 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
stats->rx_bytes += skb->len;
u64_stats_update_end(&stats->syncp);
- OVS_CB(skb)->tun_key = tun_key;
- ovs_dp_process_received_packet(vport, skb);
+ OVS_CB(skb)->input_vport = vport;
+ OVS_CB(skb)->egress_tun_info = NULL;
+ /* Extract flow from 'skb' into 'key'. */
+ error = ovs_flow_key_extract(tun_info, skb, &key);
+ if (unlikely(error)) {
+ kfree_skb(skb);
+ return;
+ }
+ ovs_dp_process_packet(skb, &key);
}
/**
@@ -495,27 +501,24 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
static void ovs_vport_record_error(struct vport *vport,
enum vport_err_type err_type)
{
- spin_lock(&vport->stats_lock);
-
switch (err_type) {
case VPORT_E_RX_DROPPED:
- vport->err_stats.rx_dropped++;
+ atomic_long_inc(&vport->err_stats.rx_dropped);
break;
case VPORT_E_RX_ERROR:
- vport->err_stats.rx_errors++;
+ atomic_long_inc(&vport->err_stats.rx_errors);
break;
case VPORT_E_TX_DROPPED:
- vport->err_stats.tx_dropped++;
+ atomic_long_inc(&vport->err_stats.tx_dropped);
break;
case VPORT_E_TX_ERROR:
- vport->err_stats.tx_errors++;
+ atomic_long_inc(&vport->err_stats.tx_errors);
break;
}
- spin_unlock(&vport->stats_lock);
}
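With ovs_vport_record_error() reduced to single atomic increments and ovs_vport_get_stats() to plain reads, the stats_lock can go away entirely: each counter is independently consistent, and the old lock never gave readers cross-counter atomicity anyway. A C11-atomics sketch of the same pattern (standard <stdatomic.h> types, not the kernel's atomic_long_t):

	#include <stdatomic.h>
	#include <stdio.h>

	struct err_stats {
		atomic_long rx_dropped;
		atomic_long tx_errors;
	};

	static void record_rx_drop(struct err_stats *s)
	{
		atomic_fetch_add_explicit(&s->rx_dropped, 1,
					  memory_order_relaxed);
	}

	static long read_rx_drops(struct err_stats *s)
	{
		return atomic_load_explicit(&s->rx_dropped,
					    memory_order_relaxed);
	}

	int main(void)
	{
		struct err_stats s = { 0 };

		record_rx_drop(&s);
		record_rx_drop(&s);
		printf("rx_dropped = %ld\n", read_rx_drops(&s));
		return 0;
	}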
static void free_vport_rcu(struct rcu_head *rcu)
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 35f89d84b45e..8942125de3a6 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -35,7 +35,6 @@ struct vport_parms;
/* The following definitions are for users of the vport subsytem: */
-/* The following definitions are for users of the vport subsytem: */
struct vport_net {
struct vport __rcu *gre_vport;
};
@@ -62,10 +61,10 @@ int ovs_vport_send(struct vport *, struct sk_buff *);
/* The following definitions are for implementers of vport devices: */
struct vport_err_stats {
- u64 rx_dropped;
- u64 rx_errors;
- u64 tx_dropped;
- u64 tx_errors;
+ atomic_long_t rx_dropped;
+ atomic_long_t rx_errors;
+ atomic_long_t tx_dropped;
+ atomic_long_t tx_errors;
};
/**
* struct vport_portids - array of netlink portids of a vport.
@@ -93,7 +92,6 @@ struct vport_portids {
* @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
* @ops: Class structure.
* @percpu_stats: Points to per-CPU statistics used and maintained by vport
- * @stats_lock: Protects @err_stats;
* @err_stats: Points to error statistics used and maintained by vport
*/
struct vport {
@@ -108,7 +106,6 @@ struct vport {
struct pcpu_sw_netstats __percpu *percpu_stats;
- spinlock_t stats_lock;
struct vport_err_stats err_stats;
};
@@ -210,7 +207,7 @@ static inline struct vport *vport_from_priv(void *priv)
}
void ovs_vport_receive(struct vport *, struct sk_buff *,
- struct ovs_key_ipv4_tunnel *);
+ struct ovs_tunnel_info *);
/* List of statically compiled vport implementations. Don't forget to also
* add yours to the list at the top of vport.c. */
@@ -218,6 +215,7 @@ extern const struct vport_ops ovs_netdev_vport_ops;
extern const struct vport_ops ovs_internal_vport_ops;
extern const struct vport_ops ovs_gre_vport_ops;
extern const struct vport_ops ovs_vxlan_vport_ops;
+extern const struct vport_ops ovs_geneve_vport_ops;
static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 93896d2092f6..87d20f48ff06 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -240,11 +240,9 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
static int packet_direct_xmit(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
- const struct net_device_ops *ops = dev->netdev_ops;
netdev_features_t features;
struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
- u16 queue_map;
if (unlikely(!netif_running(dev) ||
!netif_carrier_ok(dev)))
@@ -255,17 +253,13 @@ static int packet_direct_xmit(struct sk_buff *skb)
__skb_linearize(skb))
goto drop;
- queue_map = skb_get_queue_mapping(skb);
- txq = netdev_get_tx_queue(dev, queue_map);
+ txq = skb_get_tx_queue(dev, skb);
local_bh_disable();
HARD_TX_LOCK(dev, txq, smp_processor_id());
- if (!netif_xmit_frozen_or_drv_stopped(txq)) {
- ret = ops->ndo_start_xmit(skb, dev);
- if (ret == NETDEV_TX_OK)
- txq_trans_update(txq);
- }
+ if (!netif_xmit_frozen_or_drv_stopped(txq))
+ ret = netdev_start_xmit(skb, dev, txq, false);
HARD_TX_UNLOCK(dev, txq);
local_bh_enable();
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 56a6146ac94b..a58680016472 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -36,7 +36,7 @@
struct phonet_routes {
struct mutex lock;
- struct net_device *table[64];
+ struct net_device __rcu *table[64];
};
struct phonet_net {
@@ -275,7 +275,7 @@ static void phonet_route_autodel(struct net_device *dev)
bitmap_zero(deleted, 64);
mutex_lock(&pnn->routes.lock);
for (i = 0; i < 64; i++)
- if (dev == pnn->routes.table[i]) {
+ if (rcu_access_pointer(pnn->routes.table[i]) == dev) {
RCU_INIT_POINTER(pnn->routes.table[i], NULL);
set_bit(i, deleted);
}
@@ -388,7 +388,7 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
daddr = daddr >> 2;
mutex_lock(&routes->lock);
- if (dev == routes->table[daddr])
+ if (rcu_access_pointer(routes->table[daddr]) == dev)
RCU_INIT_POINTER(routes->table[daddr], NULL);
else
dev = NULL;
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index bc5514211b0c..e873d7d9f857 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -160,7 +160,8 @@ void rose_link_rx_restart(struct sk_buff *skb, struct rose_neigh *neigh, unsigne
break;
case ROSE_DIAGNOSTIC:
- printk(KERN_WARNING "ROSE: received diagnostic #%d - %02X %02X %02X\n", skb->data[3], skb->data[4], skb->data[5], skb->data[6]);
+ pr_warn("ROSE: received diagnostic #%d - %3ph\n", skb->data[3],
+ skb->data + 4);
break;
default:
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index db57458c824c..74c0fcd36838 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -37,7 +37,7 @@ void rxrpc_UDP_error_report(struct sock *sk)
_enter("%p{%d}", sk, local->debug_id);
- skb = skb_dequeue(&sk->sk_error_queue);
+ skb = sock_dequeue_err_skb(sk);
if (!skb) {
_leave("UDP socket errqueue empty");
return;
@@ -111,18 +111,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
skb_queue_tail(&trans->error_queue, skb);
rxrpc_queue_work(&trans->error_handler);
- /* reset and regenerate socket error */
- spin_lock_bh(&sk->sk_error_queue.lock);
- sk->sk_err = 0;
- skb = skb_peek(&sk->sk_error_queue);
- if (skb) {
- sk->sk_err = SKB_EXT_ERR(skb)->ee.ee_errno;
- spin_unlock_bh(&sk->sk_error_queue.lock);
- sk->sk_error_report(sk);
- } else {
- spin_unlock_bh(&sk->sk_error_queue.lock);
- }
-
_leave("");
}
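
Background for the ar-error.c hunks: the deleted "reset and regenerate socket error" block reappears, deduplicated, as sock_dequeue_err_skb() in net/core/skbuff.c, so every errqueue consumer gets the same behaviour for free. A simplified sketch of that helper (details illustrative):

struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
{
	struct sk_buff_head *q = &sk->sk_error_queue;
	struct sk_buff *skb, *skb_next;
	int err = 0;

	spin_lock_bh(&q->lock);
	skb = __skb_dequeue(q);
	if (skb && (skb_next = skb_peek(q)) != NULL)
		err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
	spin_unlock_bh(&q->lock);

	sk->sk_err = err;			/* 0 if the queue is now empty */
	if (err)
		sk->sk_error_report(sk);	/* wake the next reader */

	return skb;
}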
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 63b21e580de9..481f89f93789 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -45,7 +45,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
struct rxrpc_skb_priv *sp;
struct rxrpc_sock *rx = call->socket;
struct sock *sk;
- int skb_len, ret;
+ int ret;
_enter(",,%d,%d", force, terminal);
@@ -101,13 +101,6 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
rx->interceptor(sk, call->user_call_ID, skb);
spin_unlock_bh(&sk->sk_receive_queue.lock);
} else {
-
- /* Cache the SKB length before we tack it onto the
- * receive queue. Once it is added it no longer
- * belongs to us and may be freed by other threads of
- * control pulling packets from the queue */
- skb_len = skb->len;
-
_net("post skb %p", skb);
__skb_queue_tail(&sk->sk_receive_queue, skb);
spin_unlock_bh(&sk->sk_receive_queue.lock);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 648778aef1a2..3d43e4979f27 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -252,7 +252,8 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
p->tcfc_tm.install = jiffies;
p->tcfc_tm.lastuse = jiffies;
if (est) {
- int err = gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
+ int err = gen_new_estimator(&p->tcfc_bstats, NULL,
+ &p->tcfc_rate_est,
&p->tcfc_lock, est);
if (err) {
kfree(p);
@@ -619,10 +620,12 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (err < 0)
goto errout;
- if (gnet_stats_copy_basic(&d, &p->tcfc_bstats) < 0 ||
+ if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
&p->tcfc_rate_est) < 0 ||
- gnet_stats_copy_queue(&d, &p->tcfc_qstats) < 0)
+ gnet_stats_copy_queue(&d, NULL,
+ &p->tcfc_qstats,
+ p->tcfc_qstats.qlen) < 0)
goto errout;
if (gnet_stats_finish_copy(&d) < 0)
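
The extra NULL arguments come from this series teaching gen_new_estimator(), gnet_stats_copy_basic() and gnet_stats_copy_queue() about per-CPU counters; callers that keep a single shared counter set, like the actions here, pass NULL for the percpu pointer. Hedged usage sketch:

/* Shared (non-percpu) counters: pass NULL for cpu_bstats. */
err = gen_new_estimator(&p->tcfc_bstats, NULL, &p->tcfc_rate_est,
			&p->tcfc_lock, est);

/* A lockless qdisc would instead hand in a percpu block, e.g.
 * (illustrative) alloc_percpu(struct gnet_stats_basic_cpu).
 */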
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 0566e4606a4a..69791ca77a05 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -178,7 +178,7 @@ override:
spin_lock_bh(&police->tcf_lock);
if (est) {
- err = gen_replace_estimator(&police->tcf_bstats,
+ err = gen_replace_estimator(&police->tcf_bstats, NULL,
&police->tcf_rate_est,
&police->tcf_lock, est);
if (err)
@@ -231,7 +231,7 @@ override:
if (ret != ACT_P_CREATED)
return ret;
- police->tcfp_t_c = ktime_to_ns(ktime_get());
+ police->tcfp_t_c = ktime_get_ns();
police->tcf_index = parm->index ? parm->index :
tcf_hash_new_index(hinfo);
h = tcf_hash(police->tcf_index, POL_TAB_MASK);
@@ -279,7 +279,7 @@ static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
return police->tcfp_result;
}
- now = ktime_to_ns(ktime_get());
+ now = ktime_get_ns();
toks = min_t(s64, now - police->tcfp_t_c,
police->tcfp_burst);
if (police->peak_present) {
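
ktime_get_ns() in the act_police hunks above is a straight substitution for the two-step ktime_to_ns(ktime_get()); both read the monotonic clock and return nanoseconds, so the change is cosmetic:

s64 t0 = ktime_to_ns(ktime_get());	/* old form */
u64 t1 = ktime_get_ns();		/* new helper, same monotonic clock */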
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 4f4e08b0e2b7..aad6a679fb13 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -117,7 +117,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tca[TCA_MAX + 1];
- spinlock_t *root_lock;
struct tcmsg *t;
u32 protocol;
u32 prio;
@@ -125,7 +124,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
u32 parent;
struct net_device *dev;
struct Qdisc *q;
- struct tcf_proto **back, **chain;
+ struct tcf_proto __rcu **back;
+ struct tcf_proto __rcu **chain;
struct tcf_proto *tp;
const struct tcf_proto_ops *tp_ops;
const struct Qdisc_class_ops *cops;
@@ -197,7 +197,9 @@ replay:
goto errout;
/* Check the chain for existence of proto-tcf with this priority */
- for (back = chain; (tp = *back) != NULL; back = &tp->next) {
+ for (back = chain;
+ (tp = rtnl_dereference(*back)) != NULL;
+ back = &tp->next) {
if (tp->prio >= prio) {
if (tp->prio == prio) {
if (!nprio ||
@@ -209,8 +211,6 @@ replay:
}
}
- root_lock = qdisc_root_sleeping_lock(q);
-
if (tp == NULL) {
/* Proto-tcf does not exist, create new one */
@@ -259,7 +259,8 @@ replay:
}
tp->ops = tp_ops;
tp->protocol = protocol;
- tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back));
+ tp->prio = nprio ? :
+ TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back)));
tp->q = q;
tp->classify = tp_ops->classify;
tp->classid = parent;
@@ -280,9 +281,9 @@ replay:
if (fh == 0) {
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
- spin_lock_bh(root_lock);
- *back = tp->next;
- spin_unlock_bh(root_lock);
+ struct tcf_proto *next = rtnl_dereference(tp->next);
+
+ RCU_INIT_POINTER(*back, next);
tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
tcf_destroy(tp);
@@ -322,10 +323,8 @@ replay:
n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
if (err == 0) {
if (tp_created) {
- spin_lock_bh(root_lock);
- tp->next = *back;
- *back = tp;
- spin_unlock_bh(root_lock);
+ RCU_INIT_POINTER(tp->next, rtnl_dereference(*back));
+ rcu_assign_pointer(*back, tp);
}
tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
} else {
@@ -420,7 +419,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
int s_t;
struct net_device *dev;
struct Qdisc *q;
- struct tcf_proto *tp, **chain;
+ struct tcf_proto *tp, __rcu **chain;
struct tcmsg *tcm = nlmsg_data(cb->nlh);
unsigned long cl = 0;
const struct Qdisc_class_ops *cops;
@@ -454,7 +453,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
- for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
+ for (tp = rtnl_dereference(*chain), t = 0;
+ tp; tp = rtnl_dereference(tp->next), t++) {
if (t < s_t)
continue;
if (TC_H_MAJ(tcm->tcm_info) &&
@@ -496,7 +496,7 @@ out:
return skb->len;
}
-void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
+void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
tcf_action_destroy(&exts->actions, TCA_ACT_UNBIND);
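
The cls_api hunks set the pattern every classifier conversion below follows: chain updates happen only under RTNL (rtnl_dereference() plus rcu_assign_pointer()/RCU_INIT_POINTER() replace the qdisc root lock), the hot classify path walks the chain on the RCU-bh read side, and frees are deferred with call_rcu(). Condensed sketch of the two sides; tcf_proto_free() is a hypothetical callback standing in for the real teardown:

/* Control path, RTNL held: unlink, then free after a grace period. */
struct tcf_proto *next = rtnl_dereference(tp->next);

RCU_INIT_POINTER(*back, next);
call_rcu(&tp->rcu, tcf_proto_free);		/* hypothetical callback */

/* Data path, softirq context (RCU-bh read side): */
for (tp = rcu_dereference_bh(*chain); tp;
     tp = rcu_dereference_bh(tp->next)) {
	if (tp->protocol == protocol)
		return tp->classify(skb, tp, res);
}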
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 0ae1813e3e90..cd61280941e5 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -24,6 +24,7 @@
struct basic_head {
u32 hgenerator;
struct list_head flist;
+ struct rcu_head rcu;
};
struct basic_filter {
@@ -31,17 +32,19 @@ struct basic_filter {
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
struct tcf_result res;
+ struct tcf_proto *tp;
struct list_head link;
+ struct rcu_head rcu;
};
static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
int r;
- struct basic_head *head = tp->root;
+ struct basic_head *head = rcu_dereference_bh(tp->root);
struct basic_filter *f;
- list_for_each_entry(f, &head->flist, link) {
+ list_for_each_entry_rcu(f, &head->flist, link) {
if (!tcf_em_tree_match(skb, &f->ematches, NULL))
continue;
*res = f->res;
@@ -56,7 +59,7 @@ static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
static unsigned long basic_get(struct tcf_proto *tp, u32 handle)
{
unsigned long l = 0UL;
- struct basic_head *head = tp->root;
+ struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f;
if (head == NULL)
@@ -81,41 +84,43 @@ static int basic_init(struct tcf_proto *tp)
if (head == NULL)
return -ENOBUFS;
INIT_LIST_HEAD(&head->flist);
- tp->root = head;
+ rcu_assign_pointer(tp->root, head);
return 0;
}
-static void basic_delete_filter(struct tcf_proto *tp, struct basic_filter *f)
+static void basic_delete_filter(struct rcu_head *head)
{
- tcf_unbind_filter(tp, &f->res);
- tcf_exts_destroy(tp, &f->exts);
- tcf_em_tree_destroy(tp, &f->ematches);
+ struct basic_filter *f = container_of(head, struct basic_filter, rcu);
+
+ tcf_exts_destroy(&f->exts);
+ tcf_em_tree_destroy(&f->ematches);
kfree(f);
}
static void basic_destroy(struct tcf_proto *tp)
{
- struct basic_head *head = tp->root;
+ struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f, *n;
list_for_each_entry_safe(f, n, &head->flist, link) {
- list_del(&f->link);
- basic_delete_filter(tp, f);
+ list_del_rcu(&f->link);
+ tcf_unbind_filter(tp, &f->res);
+ call_rcu(&f->rcu, basic_delete_filter);
}
- kfree(head);
+ RCU_INIT_POINTER(tp->root, NULL);
+ kfree_rcu(head, rcu);
}
static int basic_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct basic_head *head = tp->root;
+ struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *t, *f = (struct basic_filter *) arg;
list_for_each_entry(t, &head->flist, link)
if (t == f) {
- tcf_tree_lock(tp);
- list_del(&t->link);
- tcf_tree_unlock(tp);
- basic_delete_filter(tp, t);
+ list_del_rcu(&t->link);
+ tcf_unbind_filter(tp, &t->res);
+ call_rcu(&t->rcu, basic_delete_filter);
return 0;
}
@@ -152,10 +157,11 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp,
tcf_exts_change(tp, &f->exts, &e);
tcf_em_tree_change(tp, &f->ematches, &t);
+ f->tp = tp;
return 0;
errout:
- tcf_exts_destroy(tp, &e);
+ tcf_exts_destroy(&e);
return err;
}
@@ -164,9 +170,10 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
struct nlattr **tca, unsigned long *arg, bool ovr)
{
int err;
- struct basic_head *head = tp->root;
+ struct basic_head *head = rtnl_dereference(tp->root);
struct nlattr *tb[TCA_BASIC_MAX + 1];
- struct basic_filter *f = (struct basic_filter *) *arg;
+ struct basic_filter *fold = (struct basic_filter *) *arg;
+ struct basic_filter *fnew;
if (tca[TCA_OPTIONS] == NULL)
return -EINVAL;
@@ -176,22 +183,23 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
return err;
- if (f != NULL) {
- if (handle && f->handle != handle)
+ if (fold != NULL) {
+ if (handle && fold->handle != handle)
return -EINVAL;
- return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
}
err = -ENOBUFS;
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (f == NULL)
+ fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
+ if (fnew == NULL)
goto errout;
- tcf_exts_init(&f->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE);
+ tcf_exts_init(&fnew->exts, TCA_BASIC_ACT, TCA_BASIC_POLICE);
err = -EINVAL;
- if (handle)
- f->handle = handle;
- else {
+ if (handle) {
+ fnew->handle = handle;
+ } else if (fold) {
+ fnew->handle = fold->handle;
+ } else {
unsigned int i = 0x80000000;
do {
if (++head->hgenerator == 0x7FFFFFFF)
@@ -203,29 +211,32 @@ static int basic_change(struct net *net, struct sk_buff *in_skb,
goto errout;
}
- f->handle = head->hgenerator;
+ fnew->handle = head->hgenerator;
}
- err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
+ err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr);
if (err < 0)
goto errout;
- tcf_tree_lock(tp);
- list_add(&f->link, &head->flist);
- tcf_tree_unlock(tp);
- *arg = (unsigned long) f;
+ *arg = (unsigned long)fnew;
+
+ if (fold) {
+ list_replace_rcu(&fold->link, &fnew->link);
+ tcf_unbind_filter(tp, &fold->res);
+ call_rcu(&fold->rcu, basic_delete_filter);
+ } else {
+ list_add_rcu(&fnew->link, &head->flist);
+ }
return 0;
errout:
- if (*arg == 0UL && f)
- kfree(f);
-
+ kfree(fnew);
return err;
}
static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
- struct basic_head *head = tp->root;
+ struct basic_head *head = rtnl_dereference(tp->root);
struct basic_filter *f;
list_for_each_entry(f, &head->flist, link) {
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 0e30d58149da..eed49d1d0878 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -27,6 +27,7 @@ MODULE_DESCRIPTION("TC BPF based classifier");
struct cls_bpf_head {
struct list_head plist;
u32 hgen;
+ struct rcu_head rcu;
};
struct cls_bpf_prog {
@@ -37,6 +38,8 @@ struct cls_bpf_prog {
struct list_head link;
u32 handle;
u16 bpf_len;
+ struct tcf_proto *tp;
+ struct rcu_head rcu;
};
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
@@ -49,11 +52,11 @@ static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
- struct cls_bpf_head *head = tp->root;
+ struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
struct cls_bpf_prog *prog;
int ret;
- list_for_each_entry(prog, &head->plist, link) {
+ list_for_each_entry_rcu(prog, &head->plist, link) {
int filter_res = BPF_PROG_RUN(prog->filter, skb);
if (filter_res == 0)
@@ -81,16 +84,15 @@ static int cls_bpf_init(struct tcf_proto *tp)
if (head == NULL)
return -ENOBUFS;
- INIT_LIST_HEAD(&head->plist);
- tp->root = head;
+ INIT_LIST_HEAD_RCU(&head->plist);
+ rcu_assign_pointer(tp->root, head);
return 0;
}
static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
- tcf_unbind_filter(tp, &prog->res);
- tcf_exts_destroy(tp, &prog->exts);
+ tcf_exts_destroy(&prog->exts);
bpf_prog_destroy(prog->filter);
@@ -98,18 +100,23 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
kfree(prog);
}
+static void __cls_bpf_delete_prog(struct rcu_head *rcu)
+{
+ struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
+
+ cls_bpf_delete_prog(prog->tp, prog);
+}
+
static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct cls_bpf_head *head = tp->root;
+ struct cls_bpf_head *head = rtnl_dereference(tp->root);
struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;
list_for_each_entry(prog, &head->plist, link) {
if (prog == todel) {
- tcf_tree_lock(tp);
- list_del(&prog->link);
- tcf_tree_unlock(tp);
-
- cls_bpf_delete_prog(tp, prog);
+ list_del_rcu(&prog->link);
+ tcf_unbind_filter(tp, &prog->res);
+ call_rcu(&prog->rcu, __cls_bpf_delete_prog);
return 0;
}
}
@@ -119,27 +126,29 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
static void cls_bpf_destroy(struct tcf_proto *tp)
{
- struct cls_bpf_head *head = tp->root;
+ struct cls_bpf_head *head = rtnl_dereference(tp->root);
struct cls_bpf_prog *prog, *tmp;
list_for_each_entry_safe(prog, tmp, &head->plist, link) {
- list_del(&prog->link);
- cls_bpf_delete_prog(tp, prog);
+ list_del_rcu(&prog->link);
+ tcf_unbind_filter(tp, &prog->res);
+ call_rcu(&prog->rcu, __cls_bpf_delete_prog);
}
- kfree(head);
+ RCU_INIT_POINTER(tp->root, NULL);
+ kfree_rcu(head, rcu);
}
static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
- struct cls_bpf_head *head = tp->root;
+ struct cls_bpf_head *head = rtnl_dereference(tp->root);
struct cls_bpf_prog *prog;
unsigned long ret = 0UL;
if (head == NULL)
return 0UL;
- list_for_each_entry(prog, &head->plist, link) {
+ list_for_each_entry_rcu(prog, &head->plist, link) {
if (prog->handle == handle) {
ret = (unsigned long) prog;
break;
@@ -158,10 +167,10 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
unsigned long base, struct nlattr **tb,
struct nlattr *est, bool ovr)
{
- struct sock_filter *bpf_ops, *bpf_old;
+ struct sock_filter *bpf_ops;
struct tcf_exts exts;
struct sock_fprog_kern tmp;
- struct bpf_prog *fp, *fp_old;
+ struct bpf_prog *fp;
u16 bpf_size, bpf_len;
u32 classid;
int ret;
@@ -197,30 +206,19 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
if (ret)
goto errout_free;
- tcf_tree_lock(tp);
- fp_old = prog->filter;
- bpf_old = prog->bpf_ops;
-
prog->bpf_len = bpf_len;
prog->bpf_ops = bpf_ops;
prog->filter = fp;
prog->res.classid = classid;
- tcf_tree_unlock(tp);
tcf_bind_filter(tp, &prog->res, base);
tcf_exts_change(tp, &prog->exts, &exts);
- if (fp_old)
- bpf_prog_destroy(fp_old);
- if (bpf_old)
- kfree(bpf_old);
-
return 0;
-
errout_free:
kfree(bpf_ops);
errout:
- tcf_exts_destroy(tp, &exts);
+ tcf_exts_destroy(&exts);
return ret;
}
@@ -244,9 +242,10 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
u32 handle, struct nlattr **tca,
unsigned long *arg, bool ovr)
{
- struct cls_bpf_head *head = tp->root;
- struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
+ struct cls_bpf_head *head = rtnl_dereference(tp->root);
+ struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
struct nlattr *tb[TCA_BPF_MAX + 1];
+ struct cls_bpf_prog *prog;
int ret;
if (tca[TCA_OPTIONS] == NULL)
@@ -256,18 +255,19 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
if (ret < 0)
return ret;
- if (prog != NULL) {
- if (handle && prog->handle != handle)
- return -EINVAL;
- return cls_bpf_modify_existing(net, tp, prog, base, tb,
- tca[TCA_RATE], ovr);
- }
-
prog = kzalloc(sizeof(*prog), GFP_KERNEL);
- if (prog == NULL)
+ if (!prog)
return -ENOBUFS;
tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
+
+ if (oldprog) {
+ if (handle && oldprog->handle != handle) {
+ ret = -EINVAL;
+ goto errout;
+ }
+ }
+
if (handle == 0)
prog->handle = cls_bpf_grab_new_handle(tp, head);
else
@@ -281,16 +281,18 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
if (ret < 0)
goto errout;
- tcf_tree_lock(tp);
- list_add(&prog->link, &head->plist);
- tcf_tree_unlock(tp);
+ if (oldprog) {
+ list_replace_rcu(&oldprog->link, &prog->link);
+ tcf_unbind_filter(tp, &oldprog->res);
+ call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
+ } else {
+ list_add_rcu(&prog->link, &head->plist);
+ }
*arg = (unsigned long) prog;
-
return 0;
errout:
- if (*arg == 0UL && prog)
- kfree(prog);
+ kfree(prog);
return ret;
}
@@ -339,10 +341,10 @@ nla_put_failure:
static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
- struct cls_bpf_head *head = tp->root;
+ struct cls_bpf_head *head = rtnl_dereference(tp->root);
struct cls_bpf_prog *prog;
- list_for_each_entry(prog, &head->plist, link) {
+ list_for_each_entry_rcu(prog, &head->plist, link) {
if (arg->count < arg->skip)
goto skip;
if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
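
A subtlety worth calling out in the list-based classifiers: list_replace_rcu() takes the entry being replaced first and its replacement second, and the replacement must not already be live on the list. Contract sketch, using the cls_bpf names:

/* void list_replace_rcu(struct list_head *old, struct list_head *new)
 * @old remains valid for concurrent readers until a grace period ends;
 * @new is published in @old's place with a single pointer store.
 */
list_replace_rcu(&oldprog->link, &prog->link);	/* old first, new second */
call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);	/* reclaim the old one */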
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index cacf01bd04f0..d61a801222c1 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -22,17 +22,17 @@ struct cls_cgroup_head {
u32 handle;
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
+ struct tcf_proto *tp;
+ struct rcu_head rcu;
};
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
- struct cls_cgroup_head *head = tp->root;
+ struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
u32 classid;
- rcu_read_lock();
classid = task_cls_state(current)->classid;
- rcu_read_unlock();
/*
* Due to the nature of the classifier it is required to ignore all
@@ -80,13 +80,25 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
[TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};
+static void cls_cgroup_destroy_rcu(struct rcu_head *root)
+{
+ struct cls_cgroup_head *head = container_of(root,
+ struct cls_cgroup_head,
+ rcu);
+
+ tcf_exts_destroy(&head->exts);
+ tcf_em_tree_destroy(&head->ematches);
+ kfree(head);
+}
+
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
unsigned long *arg, bool ovr)
{
struct nlattr *tb[TCA_CGROUP_MAX + 1];
- struct cls_cgroup_head *head = tp->root;
+ struct cls_cgroup_head *head = rtnl_dereference(tp->root);
+ struct cls_cgroup_head *new;
struct tcf_ematch_tree t;
struct tcf_exts e;
int err;
@@ -94,53 +106,58 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
if (!tca[TCA_OPTIONS])
return -EINVAL;
- if (head == NULL) {
- if (!handle)
- return -EINVAL;
-
- head = kzalloc(sizeof(*head), GFP_KERNEL);
- if (head == NULL)
- return -ENOBUFS;
+ if (!head && !handle)
+ return -EINVAL;
- tcf_exts_init(&head->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
- head->handle = handle;
+ if (head && handle != head->handle)
+ return -ENOENT;
- tcf_tree_lock(tp);
- tp->root = head;
- tcf_tree_unlock(tp);
- }
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOBUFS;
- if (handle != head->handle)
- return -ENOENT;
+ tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
+ if (head)
+ new->handle = head->handle;
+ else
+ new->handle = handle;
+ new->tp = tp;
err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
cgroup_policy);
if (err < 0)
- return err;
+ goto errout;
tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
if (err < 0)
- return err;
+ goto errout;
err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
- if (err < 0)
- return err;
+ if (err < 0) {
+ tcf_exts_destroy(&e);
+ goto errout;
+ }
- tcf_exts_change(tp, &head->exts, &e);
- tcf_em_tree_change(tp, &head->ematches, &t);
+ tcf_exts_change(tp, &new->exts, &e);
+ tcf_em_tree_change(tp, &new->ematches, &t);
+ rcu_assign_pointer(tp->root, new);
+ if (head)
+ call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
return 0;
+errout:
+ kfree(new);
+ return err;
}
static void cls_cgroup_destroy(struct tcf_proto *tp)
{
- struct cls_cgroup_head *head = tp->root;
+ struct cls_cgroup_head *head = rtnl_dereference(tp->root);
if (head) {
- tcf_exts_destroy(tp, &head->exts);
- tcf_em_tree_destroy(tp, &head->ematches);
- kfree(head);
+ RCU_INIT_POINTER(tp->root, NULL);
+ call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
}
}
@@ -151,7 +168,7 @@ static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
- struct cls_cgroup_head *head = tp->root;
+ struct cls_cgroup_head *head = rtnl_dereference(tp->root);
if (arg->count < arg->skip)
goto skip;
@@ -167,7 +184,7 @@ skip:
static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct cls_cgroup_head *head = tp->root;
+ struct cls_cgroup_head *head = rtnl_dereference(tp->root);
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 35be16f7c192..4ac515f2a6ce 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -34,12 +34,14 @@
struct flow_head {
struct list_head filters;
+ struct rcu_head rcu;
};
struct flow_filter {
struct list_head list;
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
+ struct tcf_proto *tp;
struct timer_list perturb_timer;
u32 perturb_period;
u32 handle;
@@ -54,6 +56,7 @@ struct flow_filter {
u32 divisor;
u32 baseclass;
u32 hashrnd;
+ struct rcu_head rcu;
};
static inline u32 addr_fold(void *addr)
@@ -276,14 +279,14 @@ static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
- struct flow_head *head = tp->root;
+ struct flow_head *head = rcu_dereference_bh(tp->root);
struct flow_filter *f;
u32 keymask;
u32 classid;
unsigned int n, key;
int r;
- list_for_each_entry(f, &head->filters, list) {
+ list_for_each_entry_rcu(f, &head->filters, list) {
u32 keys[FLOW_KEY_MAX + 1];
struct flow_keys flow_keys;
@@ -346,13 +349,23 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
[TCA_FLOW_PERTURB] = { .type = NLA_U32 },
};
+static void flow_destroy_filter(struct rcu_head *head)
+{
+ struct flow_filter *f = container_of(head, struct flow_filter, rcu);
+
+ del_timer_sync(&f->perturb_timer);
+ tcf_exts_destroy(&f->exts);
+ tcf_em_tree_destroy(&f->ematches);
+ kfree(f);
+}
+
static int flow_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
unsigned long *arg, bool ovr)
{
- struct flow_head *head = tp->root;
- struct flow_filter *f;
+ struct flow_head *head = rtnl_dereference(tp->root);
+ struct flow_filter *fold, *fnew;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_FLOW_MAX + 1];
struct tcf_exts e;
@@ -401,20 +414,42 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
goto err1;
- f = (struct flow_filter *)*arg;
- if (f != NULL) {
+ err = -ENOBUFS;
+ fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
+ if (!fnew)
+ goto err2;
+
+ fold = (struct flow_filter *)*arg;
+ if (fold) {
err = -EINVAL;
- if (f->handle != handle && handle)
+ if (fold->handle != handle && handle)
goto err2;
- mode = f->mode;
+ /* Copy fold into fnew */
+ fnew->handle = fold->handle;
+ fnew->tp = fold->tp;
+
+ fnew->nkeys = fold->nkeys;
+ fnew->keymask = fold->keymask;
+ fnew->mode = fold->mode;
+ fnew->mask = fold->mask;
+ fnew->xor = fold->xor;
+ fnew->rshift = fold->rshift;
+ fnew->addend = fold->addend;
+ fnew->divisor = fold->divisor;
+ fnew->baseclass = fold->baseclass;
+ fnew->hashrnd = fold->hashrnd;
+
+ mode = fold->mode;
if (tb[TCA_FLOW_MODE])
mode = nla_get_u32(tb[TCA_FLOW_MODE]);
if (mode != FLOW_MODE_HASH && nkeys > 1)
goto err2;
if (mode == FLOW_MODE_HASH)
- perturb_period = f->perturb_period;
+ perturb_period = fold->perturb_period;
if (tb[TCA_FLOW_PERTURB]) {
if (mode != FLOW_MODE_HASH)
goto err2;
@@ -444,83 +479,72 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
if (TC_H_MIN(baseclass) == 0)
baseclass = TC_H_MAKE(baseclass, 1);
- err = -ENOBUFS;
- f = kzalloc(sizeof(*f), GFP_KERNEL);
- if (f == NULL)
- goto err2;
-
- f->handle = handle;
- f->mask = ~0U;
- tcf_exts_init(&f->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-
- get_random_bytes(&f->hashrnd, 4);
- f->perturb_timer.function = flow_perturbation;
- f->perturb_timer.data = (unsigned long)f;
- init_timer_deferrable(&f->perturb_timer);
+ fnew->handle = handle;
+ fnew->mask = ~0U;
+ fnew->tp = tp;
+ get_random_bytes(&fnew->hashrnd, 4);
+ tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
}
- tcf_exts_change(tp, &f->exts, &e);
- tcf_em_tree_change(tp, &f->ematches, &t);
+ fnew->perturb_timer.function = flow_perturbation;
+ fnew->perturb_timer.data = (unsigned long)fnew;
+ init_timer_deferrable(&fnew->perturb_timer);
- tcf_tree_lock(tp);
+ tcf_exts_change(tp, &fnew->exts, &e);
+ tcf_em_tree_change(tp, &fnew->ematches, &t);
+
+ netif_keep_dst(qdisc_dev(tp->q));
if (tb[TCA_FLOW_KEYS]) {
- f->keymask = keymask;
- f->nkeys = nkeys;
+ fnew->keymask = keymask;
+ fnew->nkeys = nkeys;
}
- f->mode = mode;
+ fnew->mode = mode;
if (tb[TCA_FLOW_MASK])
- f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
+ fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
if (tb[TCA_FLOW_XOR])
- f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
+ fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
if (tb[TCA_FLOW_RSHIFT])
- f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
+ fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
if (tb[TCA_FLOW_ADDEND])
- f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);
+ fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);
if (tb[TCA_FLOW_DIVISOR])
- f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
+ fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
if (baseclass)
- f->baseclass = baseclass;
+ fnew->baseclass = baseclass;
- f->perturb_period = perturb_period;
- del_timer(&f->perturb_timer);
+ fnew->perturb_period = perturb_period;
if (perturb_period)
- mod_timer(&f->perturb_timer, jiffies + perturb_period);
+ mod_timer(&fnew->perturb_timer, jiffies + perturb_period);
if (*arg == 0)
- list_add_tail(&f->list, &head->filters);
+ list_add_tail_rcu(&fnew->list, &head->filters);
+ else
+ list_replace_rcu(&fold->list, &fnew->list);
- tcf_tree_unlock(tp);
+ *arg = (unsigned long)fnew;
- *arg = (unsigned long)f;
+ if (fold)
+ call_rcu(&fold->rcu, flow_destroy_filter);
return 0;
err2:
- tcf_em_tree_destroy(tp, &t);
+ tcf_em_tree_destroy(&t);
+ kfree(fnew);
err1:
- tcf_exts_destroy(tp, &e);
+ tcf_exts_destroy(&e);
return err;
}
-static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
-{
- del_timer_sync(&f->perturb_timer);
- tcf_exts_destroy(tp, &f->exts);
- tcf_em_tree_destroy(tp, &f->ematches);
- kfree(f);
-}
-
static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
struct flow_filter *f = (struct flow_filter *)arg;
- tcf_tree_lock(tp);
- list_del(&f->list);
- tcf_tree_unlock(tp);
- flow_destroy_filter(tp, f);
+ list_del_rcu(&f->list);
+ call_rcu(&f->rcu, flow_destroy_filter);
return 0;
}
@@ -532,28 +556,29 @@ static int flow_init(struct tcf_proto *tp)
if (head == NULL)
return -ENOBUFS;
INIT_LIST_HEAD(&head->filters);
- tp->root = head;
+ rcu_assign_pointer(tp->root, head);
return 0;
}
static void flow_destroy(struct tcf_proto *tp)
{
- struct flow_head *head = tp->root;
+ struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f, *next;
list_for_each_entry_safe(f, next, &head->filters, list) {
- list_del(&f->list);
- flow_destroy_filter(tp, f);
+ list_del_rcu(&f->list);
+ call_rcu(&f->rcu, flow_destroy_filter);
}
- kfree(head);
+ RCU_INIT_POINTER(tp->root, NULL);
+ kfree_rcu(head, rcu);
}
static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
- struct flow_head *head = tp->root;
+ struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f;
- list_for_each_entry(f, &head->filters, list)
+ list_for_each_entry_rcu(f, &head->filters, list)
if (f->handle == handle)
return (unsigned long)f;
return 0;
@@ -626,10 +651,10 @@ nla_put_failure:
static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
- struct flow_head *head = tp->root;
+ struct flow_head *head = rtnl_dereference(tp->root);
struct flow_filter *f;
- list_for_each_entry(f, &head->filters, list) {
+ list_for_each_entry_rcu(f, &head->filters, list) {
if (arg->count < arg->skip)
goto skip;
if (arg->fn(tp, (unsigned long)f, arg) < 0) {
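
cls_flow cannot mutate a live filter in place while BH readers may be hashing against it, hence the copy-on-update shape above: allocate fnew, carry the stable fields over from fold, apply the requested changes, publish with list_replace_rcu(), and reclaim fold after a grace period. Abridged sketch (field handling trimmed; the real hunk also moves exts/ematches and the perturb timer onto fnew):

fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
if (!fnew)
	return -ENOBUFS;
fnew->handle = fold->handle;			/* keep identity */
fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);	/* apply the change */
list_replace_rcu(&fold->list, &fnew->list);	/* atomic publish */
call_rcu(&fold->rcu, flow_destroy_filter);	/* free after readers */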
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 861b03ccfed0..dbfdfd1f1a9f 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -33,17 +33,20 @@
struct fw_head {
u32 mask;
- struct fw_filter *ht[HTSIZE];
+ struct fw_filter __rcu *ht[HTSIZE];
+ struct rcu_head rcu;
};
struct fw_filter {
- struct fw_filter *next;
+ struct fw_filter __rcu *next;
u32 id;
struct tcf_result res;
#ifdef CONFIG_NET_CLS_IND
int ifindex;
#endif /* CONFIG_NET_CLS_IND */
struct tcf_exts exts;
+ struct tcf_proto *tp;
+ struct rcu_head rcu;
};
static u32 fw_hash(u32 handle)
@@ -56,14 +59,16 @@ static u32 fw_hash(u32 handle)
static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
- struct fw_head *head = tp->root;
+ struct fw_head *head = rcu_dereference_bh(tp->root);
struct fw_filter *f;
int r;
u32 id = skb->mark;
if (head != NULL) {
id &= head->mask;
- for (f = head->ht[fw_hash(id)]; f; f = f->next) {
+
+ for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f;
+ f = rcu_dereference_bh(f->next)) {
if (f->id == id) {
*res = f->res;
#ifdef CONFIG_NET_CLS_IND
@@ -92,13 +97,14 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
{
- struct fw_head *head = tp->root;
+ struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f;
if (head == NULL)
return 0;
- for (f = head->ht[fw_hash(handle)]; f; f = f->next) {
+ f = rtnl_dereference(head->ht[fw_hash(handle)]);
+ for (; f; f = rtnl_dereference(f->next)) {
if (f->id == handle)
return (unsigned long)f;
}
@@ -114,16 +120,17 @@ static int fw_init(struct tcf_proto *tp)
return 0;
}
-static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
+static void fw_delete_filter(struct rcu_head *head)
{
- tcf_unbind_filter(tp, &f->res);
- tcf_exts_destroy(tp, &f->exts);
+ struct fw_filter *f = container_of(head, struct fw_filter, rcu);
+
+ tcf_exts_destroy(&f->exts);
kfree(f);
}
static void fw_destroy(struct tcf_proto *tp)
{
- struct fw_head *head = tp->root;
+ struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f;
int h;
@@ -131,29 +138,35 @@ static void fw_destroy(struct tcf_proto *tp)
return;
for (h = 0; h < HTSIZE; h++) {
- while ((f = head->ht[h]) != NULL) {
- head->ht[h] = f->next;
- fw_delete_filter(tp, f);
+ while ((f = rtnl_dereference(head->ht[h])) != NULL) {
+ RCU_INIT_POINTER(head->ht[h],
+ rtnl_dereference(f->next));
+ tcf_unbind_filter(tp, &f->res);
+ call_rcu(&f->rcu, fw_delete_filter);
}
}
- kfree(head);
+ RCU_INIT_POINTER(tp->root, NULL);
+ kfree_rcu(head, rcu);
}
static int fw_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct fw_head *head = tp->root;
+ struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = (struct fw_filter *)arg;
- struct fw_filter **fp;
+ struct fw_filter __rcu **fp;
+ struct fw_filter *pfp;
if (head == NULL || f == NULL)
goto out;
- for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
- if (*fp == f) {
- tcf_tree_lock(tp);
- *fp = f->next;
- tcf_tree_unlock(tp);
- fw_delete_filter(tp, f);
+ fp = &head->ht[fw_hash(f->id)];
+
+ for (pfp = rtnl_dereference(*fp); pfp;
+ fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
+ if (pfp == f) {
+ RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
+ tcf_unbind_filter(tp, &f->res);
+ call_rcu(&f->rcu, fw_delete_filter);
return 0;
}
}
@@ -171,7 +184,7 @@ static int
fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
struct nlattr **tb, struct nlattr **tca, unsigned long base, bool ovr)
{
- struct fw_head *head = tp->root;
+ struct fw_head *head = rtnl_dereference(tp->root);
struct tcf_exts e;
u32 mask;
int err;
@@ -210,7 +223,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
return 0;
errout:
- tcf_exts_destroy(tp, &e);
+ tcf_exts_destroy(&e);
return err;
}
@@ -220,7 +233,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
struct nlattr **tca,
unsigned long *arg, bool ovr)
{
- struct fw_head *head = tp->root;
+ struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = (struct fw_filter *) *arg;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_FW_MAX + 1];
@@ -233,10 +246,45 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
return err;
- if (f != NULL) {
+ if (f) {
+ struct fw_filter *pfp, *fnew;
+ struct fw_filter __rcu **fp;
+
if (f->id != handle && handle)
return -EINVAL;
- return fw_change_attrs(net, tp, f, tb, tca, base, ovr);
+
+ fnew = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
+ if (!fnew)
+ return -ENOBUFS;
+
+ fnew->id = f->id;
+ fnew->res = f->res;
+#ifdef CONFIG_NET_CLS_IND
+ fnew->ifindex = f->ifindex;
+#endif /* CONFIG_NET_CLS_IND */
+ fnew->tp = f->tp;
+
+ tcf_exts_init(&fnew->exts, TCA_FW_ACT, TCA_FW_POLICE);
+
+ err = fw_change_attrs(net, tp, fnew, tb, tca, base, ovr);
+ if (err < 0) {
+ kfree(fnew);
+ return err;
+ }
+
+ fp = &head->ht[fw_hash(fnew->id)];
+ for (pfp = rtnl_dereference(*fp); pfp;
+ fp = &pfp->next, pfp = rtnl_dereference(*fp))
+ if (pfp == f)
+ break;
+
+ RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next));
+ rcu_assign_pointer(*fp, fnew);
+ tcf_unbind_filter(tp, &f->res);
+ call_rcu(&f->rcu, fw_delete_filter);
+
+ *arg = (unsigned long)fnew;
+ return err;
}
if (!handle)
@@ -252,9 +300,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
return -ENOBUFS;
head->mask = mask;
- tcf_tree_lock(tp);
- tp->root = head;
- tcf_tree_unlock(tp);
+ rcu_assign_pointer(tp->root, head);
}
f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
@@ -263,15 +309,14 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
tcf_exts_init(&f->exts, TCA_FW_ACT, TCA_FW_POLICE);
f->id = handle;
+ f->tp = tp;
err = fw_change_attrs(net, tp, f, tb, tca, base, ovr);
if (err < 0)
goto errout;
- f->next = head->ht[fw_hash(handle)];
- tcf_tree_lock(tp);
- head->ht[fw_hash(handle)] = f;
- tcf_tree_unlock(tp);
+ RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]);
+ rcu_assign_pointer(head->ht[fw_hash(handle)], f);
*arg = (unsigned long)f;
return 0;
@@ -283,7 +328,7 @@ errout:
static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
- struct fw_head *head = tp->root;
+ struct fw_head *head = rtnl_dereference(tp->root);
int h;
if (head == NULL)
@@ -295,7 +340,8 @@ static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
for (h = 0; h < HTSIZE; h++) {
struct fw_filter *f;
- for (f = head->ht[h]; f; f = f->next) {
+ for (f = rtnl_dereference(head->ht[h]); f;
+ f = rtnl_dereference(f->next)) {
if (arg->count < arg->skip) {
arg->count++;
continue;
@@ -312,7 +358,7 @@ static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int fw_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct fw_head *head = tp->root;
+ struct fw_head *head = rtnl_dereference(tp->root);
struct fw_filter *f = (struct fw_filter *)fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
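
cls_fw, and cls_route/cls_rsvp after it, unlink entries from singly linked __rcu hash chains with the same two-pointer walk: fp holds the address of the pointer about to be rewritten, pfp the entry it currently designates. Generalized sketch (fw_filter standing in for any of the chained types):

struct fw_filter __rcu **fp = &head->ht[h];
struct fw_filter *pfp;

for (pfp = rtnl_dereference(*fp); pfp;
     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
	if (pfp == target) {
		/* Rewrite the predecessor's pointer; readers that already
		 * loaded 'target' keep a valid object until call_rcu() fires.
		 */
		RCU_INIT_POINTER(*fp, rtnl_dereference(target->next));
		call_rcu(&target->rcu, fw_delete_filter);
		break;
	}
}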
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index dd9fc2523c76..109a329b7198 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -29,25 +29,26 @@
* are mutually exclusive.
* 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
*/
-
struct route4_fastmap {
- struct route4_filter *filter;
- u32 id;
- int iif;
+ struct route4_filter *filter;
+ u32 id;
+ int iif;
};
struct route4_head {
- struct route4_fastmap fastmap[16];
- struct route4_bucket *table[256 + 1];
+ struct route4_fastmap fastmap[16];
+ struct route4_bucket __rcu *table[256 + 1];
+ struct rcu_head rcu;
};
struct route4_bucket {
/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
- struct route4_filter *ht[16 + 16 + 1];
+ struct route4_filter __rcu *ht[16 + 16 + 1];
+ struct rcu_head rcu;
};
struct route4_filter {
- struct route4_filter *next;
+ struct route4_filter __rcu *next;
u32 id;
int iif;
@@ -55,6 +56,8 @@ struct route4_filter {
struct tcf_exts exts;
u32 handle;
struct route4_bucket *bkt;
+ struct tcf_proto *tp;
+ struct rcu_head rcu;
};
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
@@ -64,14 +67,13 @@ static inline int route4_fastmap_hash(u32 id, int iif)
return id & 0xF;
}
+static DEFINE_SPINLOCK(fastmap_lock);
static void
-route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
+route4_reset_fastmap(struct route4_head *head)
{
- spinlock_t *root_lock = qdisc_root_sleeping_lock(q);
-
- spin_lock_bh(root_lock);
+ spin_lock_bh(&fastmap_lock);
memset(head->fastmap, 0, sizeof(head->fastmap));
- spin_unlock_bh(root_lock);
+ spin_unlock_bh(&fastmap_lock);
}
static void
@@ -80,9 +82,12 @@ route4_set_fastmap(struct route4_head *head, u32 id, int iif,
{
int h = route4_fastmap_hash(id, iif);
+ /* fastmap updates must look atomic to align id, iif, filter */
+ spin_lock_bh(&fastmap_lock);
head->fastmap[h].id = id;
head->fastmap[h].iif = iif;
head->fastmap[h].filter = f;
+ spin_unlock_bh(&fastmap_lock);
}
static inline int route4_hash_to(u32 id)
@@ -123,7 +128,7 @@ static inline int route4_hash_wild(void)
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
- struct route4_head *head = tp->root;
+ struct route4_head *head = rcu_dereference_bh(tp->root);
struct dst_entry *dst;
struct route4_bucket *b;
struct route4_filter *f;
@@ -141,32 +146,43 @@ static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
iif = inet_iif(skb);
h = route4_fastmap_hash(id, iif);
+
+ spin_lock(&fastmap_lock);
if (id == head->fastmap[h].id &&
iif == head->fastmap[h].iif &&
(f = head->fastmap[h].filter) != NULL) {
- if (f == ROUTE4_FAILURE)
+ if (f == ROUTE4_FAILURE) {
+ spin_unlock(&fastmap_lock);
goto failure;
+ }
*res = f->res;
+ spin_unlock(&fastmap_lock);
return 0;
}
+ spin_unlock(&fastmap_lock);
h = route4_hash_to(id);
restart:
- b = head->table[h];
+ b = rcu_dereference_bh(head->table[h]);
if (b) {
- for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
+ for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
+ f;
+ f = rcu_dereference_bh(f->next))
if (f->id == id)
ROUTE4_APPLY_RESULT();
- for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
+ for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
+ f;
+ f = rcu_dereference_bh(f->next))
if (f->iif == iif)
ROUTE4_APPLY_RESULT();
- for (f = b->ht[route4_hash_wild()]; f; f = f->next)
+ for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
+ f;
+ f = rcu_dereference_bh(f->next))
ROUTE4_APPLY_RESULT();
-
}
if (h < 256) {
h = 256;
@@ -213,7 +229,7 @@ static inline u32 from_hash(u32 id)
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
- struct route4_head *head = tp->root;
+ struct route4_head *head = rtnl_dereference(tp->root);
struct route4_bucket *b;
struct route4_filter *f;
unsigned int h1, h2;
@@ -229,9 +245,11 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
if (h2 > 32)
return 0;
- b = head->table[h1];
+ b = rtnl_dereference(head->table[h1]);
if (b) {
- for (f = b->ht[h2]; f; f = f->next)
+ for (f = rtnl_dereference(b->ht[h2]);
+ f;
+ f = rtnl_dereference(f->next))
if (f->handle == handle)
return (unsigned long)f;
}
@@ -248,16 +266,17 @@ static int route4_init(struct tcf_proto *tp)
}
static void
-route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
+route4_delete_filter(struct rcu_head *head)
{
- tcf_unbind_filter(tp, &f->res);
- tcf_exts_destroy(tp, &f->exts);
+ struct route4_filter *f = container_of(head, struct route4_filter, rcu);
+
+ tcf_exts_destroy(&f->exts);
kfree(f);
}
static void route4_destroy(struct tcf_proto *tp)
{
- struct route4_head *head = tp->root;
+ struct route4_head *head = rtnl_dereference(tp->root);
int h1, h2;
if (head == NULL)
@@ -266,28 +285,36 @@ static void route4_destroy(struct tcf_proto *tp)
for (h1 = 0; h1 <= 256; h1++) {
struct route4_bucket *b;
- b = head->table[h1];
+ b = rtnl_dereference(head->table[h1]);
if (b) {
for (h2 = 0; h2 <= 32; h2++) {
struct route4_filter *f;
- while ((f = b->ht[h2]) != NULL) {
- b->ht[h2] = f->next;
- route4_delete_filter(tp, f);
+ while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
+ struct route4_filter *next;
+
+ next = rtnl_dereference(f->next);
+ RCU_INIT_POINTER(b->ht[h2], next);
+ tcf_unbind_filter(tp, &f->res);
+ call_rcu(&f->rcu, route4_delete_filter);
}
}
- kfree(b);
+ RCU_INIT_POINTER(head->table[h1], NULL);
+ kfree_rcu(b, rcu);
}
}
- kfree(head);
+ RCU_INIT_POINTER(tp->root, NULL);
+ kfree_rcu(head, rcu);
}
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct route4_head *head = tp->root;
- struct route4_filter **fp, *f = (struct route4_filter *)arg;
- unsigned int h = 0;
+ struct route4_head *head = rtnl_dereference(tp->root);
+ struct route4_filter *f = (struct route4_filter *)arg;
+ struct route4_filter __rcu **fp;
+ struct route4_filter *nf;
struct route4_bucket *b;
+ unsigned int h = 0;
int i;
if (!head || !f)
@@ -296,27 +323,36 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
h = f->handle;
b = f->bkt;
- for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
- if (*fp == f) {
- tcf_tree_lock(tp);
- *fp = f->next;
- tcf_tree_unlock(tp);
-
- route4_reset_fastmap(tp->q, head, f->id);
- route4_delete_filter(tp, f);
-
- /* Strip tree */
-
- for (i = 0; i <= 32; i++)
- if (b->ht[i])
+ fp = &b->ht[from_hash(h >> 16)];
+ for (nf = rtnl_dereference(*fp); nf;
+ fp = &nf->next, nf = rtnl_dereference(*fp)) {
+ if (nf == f) {
+ /* unlink it */
+ RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
+
+ /* Remove any fastmap lookups that might reference the filter;
+ * we unlinked it above, so it cannot find its way back into
+ * the fastmap.
+ */
+ route4_reset_fastmap(head);
+
+ /* Delete it */
+ tcf_unbind_filter(tp, &f->res);
+ call_rcu(&f->rcu, route4_delete_filter);
+
+ /* Strip RTNL protected tree */
+ for (i = 0; i <= 32; i++) {
+ struct route4_filter *rt;
+
+ rt = rtnl_dereference(b->ht[i]);
+ if (rt)
return 0;
+ }
/* OK, session has no flows */
- tcf_tree_lock(tp);
- head->table[to_hash(h)] = NULL;
- tcf_tree_unlock(tp);
+ RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
+ kfree_rcu(b, rcu);
- kfree(b);
return 0;
}
}
@@ -380,26 +416,25 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
}
h1 = to_hash(nhandle);
- b = head->table[h1];
+ b = rtnl_dereference(head->table[h1]);
if (!b) {
err = -ENOBUFS;
b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
if (b == NULL)
goto errout;
- tcf_tree_lock(tp);
- head->table[h1] = b;
- tcf_tree_unlock(tp);
+ rcu_assign_pointer(head->table[h1], b);
} else {
unsigned int h2 = from_hash(nhandle >> 16);
err = -EEXIST;
- for (fp = b->ht[h2]; fp; fp = fp->next)
+ for (fp = rtnl_dereference(b->ht[h2]);
+ fp;
+ fp = rtnl_dereference(fp->next))
if (fp->handle == f->handle)
goto errout;
}
- tcf_tree_lock(tp);
if (tb[TCA_ROUTE4_TO])
f->id = to;
@@ -410,7 +445,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
f->handle = nhandle;
f->bkt = b;
- tcf_tree_unlock(tp);
+ f->tp = tp;
if (tb[TCA_ROUTE4_CLASSID]) {
f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
@@ -421,7 +456,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp,
return 0;
errout:
- tcf_exts_destroy(tp, &e);
+ tcf_exts_destroy(&e);
return err;
}
@@ -431,14 +466,15 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
struct nlattr **tca,
unsigned long *arg, bool ovr)
{
- struct route4_head *head = tp->root;
- struct route4_filter *f, *f1, **fp;
+ struct route4_head *head = rtnl_dereference(tp->root);
+ struct route4_filter __rcu **fp;
+ struct route4_filter *fold, *f1, *pfp, *f = NULL;
struct route4_bucket *b;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_ROUTE4_MAX + 1];
unsigned int h, th;
- u32 old_handle = 0;
int err;
+ bool new = true;
if (opt == NULL)
return handle ? -EINVAL : 0;
@@ -447,70 +483,73 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
if (err < 0)
return err;
- f = (struct route4_filter *)*arg;
- if (f) {
- if (f->handle != handle && handle)
+ fold = (struct route4_filter *)*arg;
+ if (fold && handle && fold->handle != handle)
return -EINVAL;
- if (f->bkt)
- old_handle = f->handle;
-
- err = route4_set_parms(net, tp, base, f, handle, head, tb,
- tca[TCA_RATE], 0, ovr);
- if (err < 0)
- return err;
-
- goto reinsert;
- }
-
err = -ENOBUFS;
if (head == NULL) {
head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
if (head == NULL)
goto errout;
-
- tcf_tree_lock(tp);
- tp->root = head;
- tcf_tree_unlock(tp);
+ rcu_assign_pointer(tp->root, head);
}
f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
- if (f == NULL)
+ if (!f)
goto errout;
tcf_exts_init(&f->exts, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
+ if (fold) {
+ f->id = fold->id;
+ f->iif = fold->iif;
+ f->res = fold->res;
+ f->handle = fold->handle;
+
+ f->tp = fold->tp;
+ f->bkt = fold->bkt;
+ new = false;
+ }
+
err = route4_set_parms(net, tp, base, f, handle, head, tb,
- tca[TCA_RATE], 1, ovr);
+ tca[TCA_RATE], new, ovr);
if (err < 0)
goto errout;
-reinsert:
h = from_hash(f->handle >> 16);
- for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
+ fp = &f->bkt->ht[h];
+ for (pfp = rtnl_dereference(*fp);
+ (f1 = rtnl_dereference(*fp)) != NULL;
+ fp = &f1->next)
if (f->handle < f1->handle)
break;
- f->next = f1;
- tcf_tree_lock(tp);
- *fp = f;
+ netif_keep_dst(qdisc_dev(tp->q));
+ rcu_assign_pointer(f->next, f1);
+ rcu_assign_pointer(*fp, f);
- if (old_handle && f->handle != old_handle) {
- th = to_hash(old_handle);
- h = from_hash(old_handle >> 16);
- b = head->table[th];
+ if (fold && fold->handle && f->handle != fold->handle) {
+ th = to_hash(fold->handle);
+ h = from_hash(fold->handle >> 16);
+ b = rtnl_dereference(head->table[th]);
if (b) {
- for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
- if (*fp == f) {
+ fp = &b->ht[h];
+ for (pfp = rtnl_dereference(*fp); pfp;
+ fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
+ if (pfp == f) {
*fp = f->next;
break;
}
}
}
}
- tcf_tree_unlock(tp);
- route4_reset_fastmap(tp->q, head, f->id);
+ route4_reset_fastmap(head);
*arg = (unsigned long)f;
+ if (fold) {
+ tcf_unbind_filter(tp, &fold->res);
+ call_rcu(&fold->rcu, route4_delete_filter);
+ }
return 0;
errout:
@@ -520,7 +559,7 @@ errout:
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
- struct route4_head *head = tp->root;
+ struct route4_head *head = rtnl_dereference(tp->root);
unsigned int h, h1;
if (head == NULL)
@@ -530,13 +569,15 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
return;
for (h = 0; h <= 256; h++) {
- struct route4_bucket *b = head->table[h];
+ struct route4_bucket *b = rtnl_dereference(head->table[h]);
if (b) {
for (h1 = 0; h1 <= 32; h1++) {
struct route4_filter *f;
- for (f = b->ht[h1]; f; f = f->next) {
+ for (f = rtnl_dereference(b->ht[h1]);
+ f;
+ f = rtnl_dereference(f->next)) {
if (arg->count < arg->skip) {
arg->count++;
continue;
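
With the qdisc root lock no longer covering route4's fastmap, the new file-local fastmap_lock keeps the (id, iif, filter) triple coherent; the lookup side takes it too, since a torn read could pair a cached id with the wrong filter. Reader side in brief:

struct route4_filter *f = NULL;

spin_lock(&fastmap_lock);
if (id == head->fastmap[h].id && iif == head->fastmap[h].iif)
	f = head->fastmap[h].filter;	/* may be ROUTE4_FAILURE: known miss */
spin_unlock(&fastmap_lock);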
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 1020e233a5d6..6bb55f277a5a 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -70,31 +70,34 @@ struct rsvp_head {
u32 tmap[256/32];
u32 hgenerator;
u8 tgenerator;
- struct rsvp_session *ht[256];
+ struct rsvp_session __rcu *ht[256];
+ struct rcu_head rcu;
};
struct rsvp_session {
- struct rsvp_session *next;
- __be32 dst[RSVP_DST_LEN];
- struct tc_rsvp_gpi dpi;
- u8 protocol;
- u8 tunnelid;
+ struct rsvp_session __rcu *next;
+ __be32 dst[RSVP_DST_LEN];
+ struct tc_rsvp_gpi dpi;
+ u8 protocol;
+ u8 tunnelid;
/* 16 (src,sport) hash slots, and one wildcard source slot */
- struct rsvp_filter *ht[16 + 1];
+ struct rsvp_filter __rcu *ht[16 + 1];
+ struct rcu_head rcu;
};
struct rsvp_filter {
- struct rsvp_filter *next;
- __be32 src[RSVP_DST_LEN];
- struct tc_rsvp_gpi spi;
- u8 tunnelhdr;
+ struct rsvp_filter __rcu *next;
+ __be32 src[RSVP_DST_LEN];
+ struct tc_rsvp_gpi spi;
+ u8 tunnelhdr;
- struct tcf_result res;
- struct tcf_exts exts;
+ struct tcf_result res;
+ struct tcf_exts exts;
- u32 handle;
- struct rsvp_session *sess;
+ u32 handle;
+ struct rsvp_session *sess;
+ struct rcu_head rcu;
};
static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
@@ -128,7 +131,7 @@ static inline unsigned int hash_src(__be32 *src)
static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
- struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
+ struct rsvp_head *head = rcu_dereference_bh(tp->root);
struct rsvp_session *s;
struct rsvp_filter *f;
unsigned int h1, h2;
@@ -169,7 +172,8 @@ restart:
h1 = hash_dst(dst, protocol, tunnelid);
h2 = hash_src(src);
- for (s = sht[h1]; s; s = s->next) {
+ for (s = rcu_dereference_bh(head->ht[h1]); s;
+ s = rcu_dereference_bh(s->next)) {
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN - 1] &&
protocol == s->protocol &&
!(s->dpi.mask &
@@ -181,7 +185,8 @@ restart:
#endif
tunnelid == s->tunnelid) {
- for (f = s->ht[h2]; f; f = f->next) {
+ for (f = rcu_dereference_bh(s->ht[h2]); f;
+ f = rcu_dereference_bh(f->next)) {
if (src[RSVP_DST_LEN-1] == f->src[RSVP_DST_LEN - 1] &&
!(f->spi.mask & (*(u32 *)(xprt + f->spi.offset) ^ f->spi.key))
#if RSVP_DST_LEN == 4
@@ -205,7 +210,8 @@ matched:
}
/* And wildcard bucket... */
- for (f = s->ht[16]; f; f = f->next) {
+ for (f = rcu_dereference_bh(s->ht[16]); f;
+ f = rcu_dereference_bh(f->next)) {
*res = f->res;
RSVP_APPLY_RESULT();
goto matched;
@@ -216,9 +222,36 @@ matched:
return -1;
}
+static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h)
+{
+ struct rsvp_head *head = rtnl_dereference(tp->root);
+ struct rsvp_session *s;
+ struct rsvp_filter __rcu **ins;
+ struct rsvp_filter *pins;
+ unsigned int h1 = h & 0xFF;
+ unsigned int h2 = (h >> 8) & 0xFF;
+
+ for (s = rtnl_dereference(head->ht[h1]); s;
+ s = rtnl_dereference(s->next)) {
+ for (ins = &s->ht[h2], pins = rtnl_dereference(*ins); ;
+ ins = &pins->next, pins = rtnl_dereference(*ins)) {
+ if (pins->handle == h) {
+ RCU_INIT_POINTER(n->next, pins->next);
+ rcu_assign_pointer(*ins, n);
+ return;
+ }
+ }
+ }
+
+ /* Something went wrong if we are trying to replace a non-existent
+ * node. Might as well halt instead of silently failing.
+ */
+ BUG_ON(1);
+}
+
static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
{
- struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
+ struct rsvp_head *head = rtnl_dereference(tp->root);
struct rsvp_session *s;
struct rsvp_filter *f;
unsigned int h1 = handle & 0xFF;
@@ -227,8 +260,10 @@ static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle)
if (h2 > 16)
return 0;
- for (s = sht[h1]; s; s = s->next) {
- for (f = s->ht[h2]; f; f = f->next) {
+ for (s = rtnl_dereference(head->ht[h1]); s;
+ s = rtnl_dereference(s->next)) {
+ for (f = rtnl_dereference(s->ht[h2]); f;
+ f = rtnl_dereference(f->next)) {
if (f->handle == handle)
return (unsigned long)f;
}
@@ -246,7 +281,7 @@ static int rsvp_init(struct tcf_proto *tp)
data = kzalloc(sizeof(struct rsvp_head), GFP_KERNEL);
if (data) {
- tp->root = data;
+ rcu_assign_pointer(tp->root, data);
return 0;
}
return -ENOBUFS;
@@ -256,54 +291,55 @@ static void
rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
{
tcf_unbind_filter(tp, &f->res);
- tcf_exts_destroy(tp, &f->exts);
- kfree(f);
+ tcf_exts_destroy(&f->exts);
+ kfree_rcu(f, rcu);
}
static void rsvp_destroy(struct tcf_proto *tp)
{
- struct rsvp_head *data = xchg(&tp->root, NULL);
- struct rsvp_session **sht;
+ struct rsvp_head *data = rtnl_dereference(tp->root);
int h1, h2;
if (data == NULL)
return;
- sht = data->ht;
+ RCU_INIT_POINTER(tp->root, NULL);
for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s;
- while ((s = sht[h1]) != NULL) {
- sht[h1] = s->next;
+ while ((s = rtnl_dereference(data->ht[h1])) != NULL) {
+ RCU_INIT_POINTER(data->ht[h1], s->next);
for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f;
- while ((f = s->ht[h2]) != NULL) {
- s->ht[h2] = f->next;
+ while ((f = rtnl_dereference(s->ht[h2])) != NULL) {
+ rcu_assign_pointer(s->ht[h2], f->next);
rsvp_delete_filter(tp, f);
}
}
- kfree(s);
+ kfree_rcu(s, rcu);
}
}
- kfree(data);
+ kfree_rcu(data, rcu);
}
static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct rsvp_filter **fp, *f = (struct rsvp_filter *)arg;
+ struct rsvp_head *head = rtnl_dereference(tp->root);
+ struct rsvp_filter *nfp, *f = (struct rsvp_filter *)arg;
+ struct rsvp_filter __rcu **fp;
unsigned int h = f->handle;
- struct rsvp_session **sp;
- struct rsvp_session *s = f->sess;
+ struct rsvp_session __rcu **sp;
+ struct rsvp_session *nsp, *s = f->sess;
int i;
- for (fp = &s->ht[(h >> 8) & 0xFF]; *fp; fp = &(*fp)->next) {
- if (*fp == f) {
- tcf_tree_lock(tp);
- *fp = f->next;
- tcf_tree_unlock(tp);
+ fp = &s->ht[(h >> 8) & 0xFF];
+ for (nfp = rtnl_dereference(*fp); nfp;
+ fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
+ if (nfp == f) {
+ RCU_INIT_POINTER(*fp, f->next);
rsvp_delete_filter(tp, f);
/* Strip tree */
@@ -313,14 +349,12 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
return 0;
/* OK, session has no flows */
- for (sp = &((struct rsvp_head *)tp->root)->ht[h & 0xFF];
- *sp; sp = &(*sp)->next) {
- if (*sp == s) {
- tcf_tree_lock(tp);
- *sp = s->next;
- tcf_tree_unlock(tp);
-
- kfree(s);
+ sp = &head->ht[h & 0xFF];
+ for (nsp = rtnl_dereference(*sp); nsp;
+ sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
+ if (nsp == s) {
+ RCU_INIT_POINTER(*sp, s->next);
+ kfree_rcu(s, rcu);
return 0;
}
}
@@ -333,7 +367,7 @@ static int rsvp_delete(struct tcf_proto *tp, unsigned long arg)
static unsigned int gen_handle(struct tcf_proto *tp, unsigned salt)
{
- struct rsvp_head *data = tp->root;
+ struct rsvp_head *data = rtnl_dereference(tp->root);
int i = 0xFFFF;
while (i-- > 0) {
@@ -361,7 +395,7 @@ static int tunnel_bts(struct rsvp_head *data)
static void tunnel_recycle(struct rsvp_head *data)
{
- struct rsvp_session **sht = data->ht;
+ struct rsvp_session __rcu **sht = data->ht;
u32 tmap[256/32];
int h1, h2;
@@ -369,11 +403,13 @@ static void tunnel_recycle(struct rsvp_head *data)
for (h1 = 0; h1 < 256; h1++) {
struct rsvp_session *s;
- for (s = sht[h1]; s; s = s->next) {
+ for (s = rtnl_dereference(sht[h1]); s;
+ s = rtnl_dereference(s->next)) {
for (h2 = 0; h2 <= 16; h2++) {
struct rsvp_filter *f;
- for (f = s->ht[h2]; f; f = f->next) {
+ for (f = rtnl_dereference(s->ht[h2]); f;
+ f = rtnl_dereference(f->next)) {
if (f->tunnelhdr == 0)
continue;
data->tgenerator = f->res.classid;
@@ -417,9 +453,11 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
struct nlattr **tca,
unsigned long *arg, bool ovr)
{
- struct rsvp_head *data = tp->root;
- struct rsvp_filter *f, **fp;
- struct rsvp_session *s, **sp;
+ struct rsvp_head *data = rtnl_dereference(tp->root);
+ struct rsvp_filter *f, *nfp;
+ struct rsvp_filter __rcu **fp;
+ struct rsvp_session *nsp, *s;
+ struct rsvp_session __rcu **sp;
struct tc_rsvp_pinfo *pinfo = NULL;
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_RSVP_MAX + 1];
@@ -443,15 +481,26 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
f = (struct rsvp_filter *)*arg;
if (f) {
/* Node exists: adjust only classid */
+ struct rsvp_filter *n;
if (f->handle != handle && handle)
goto errout2;
+
+ n = kmemdup(f, sizeof(*f), GFP_KERNEL);
+ if (!n) {
+ err = -ENOMEM;
+ goto errout2;
+ }
+
+ tcf_exts_init(&n->exts, TCA_RSVP_ACT, TCA_RSVP_POLICE);
+
if (tb[TCA_RSVP_CLASSID]) {
- f->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
- tcf_bind_filter(tp, &f->res, base);
+ n->res.classid = nla_get_u32(tb[TCA_RSVP_CLASSID]);
+ tcf_bind_filter(tp, &n->res, base);
}
- tcf_exts_change(tp, &f->exts, &e);
+ tcf_exts_change(tp, &n->exts, &e);
+ rsvp_replace(tp, n, handle);
return 0;
}
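
Since a filter that readers may be traversing can no longer be edited in place, the branch above duplicates it with kmemdup(), modifies the private copy, and publishes the copy through rsvp_replace(). A condensed sketch of the copy-then-publish flow, with hypothetical names and error handling trimmed:

    #include <linux/errno.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct item {
    	struct item __rcu *next;
    	u32 classid;
    	struct rcu_head rcu;
    };

    static int update_classid(struct item __rcu **slot, struct item *old,
    			  u32 classid)
    {
    	/* kmemdup() also carries ->next over, keeping the list position. */
    	struct item *n = kmemdup(old, sizeof(*old), GFP_KERNEL);

    	if (!n)
    		return -ENOMEM;
    	n->classid = classid;		/* edit the private copy */
    	rcu_assign_pointer(*slot, n);	/* publish atomically */
    	kfree_rcu(old, rcu);		/* reclaim after a grace period */
    	return 0;
    }
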
@@ -499,7 +548,9 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb,
goto errout;
}
- for (sp = &data->ht[h1]; (s = *sp) != NULL; sp = &s->next) {
+ for (sp = &data->ht[h1];
+ (s = rtnl_dereference(*sp)) != NULL;
+ sp = &s->next) {
if (dst[RSVP_DST_LEN-1] == s->dst[RSVP_DST_LEN-1] &&
pinfo && pinfo->protocol == s->protocol &&
memcmp(&pinfo->dpi, &s->dpi, sizeof(s->dpi)) == 0 &&
@@ -521,12 +572,16 @@ insert:
tcf_exts_change(tp, &f->exts, &e);
- for (fp = &s->ht[h2]; *fp; fp = &(*fp)->next)
- if (((*fp)->spi.mask & f->spi.mask) != f->spi.mask)
+ fp = &s->ht[h2];
+ for (nfp = rtnl_dereference(*fp); nfp;
+ fp = &nfp->next, nfp = rtnl_dereference(*fp)) {
+ __u32 mask = nfp->spi.mask & f->spi.mask;
+
+ if (mask != f->spi.mask)
break;
- f->next = *fp;
- wmb();
- *fp = f;
+ }
+ RCU_INIT_POINTER(f->next, nfp);
+ rcu_assign_pointer(*fp, f);
*arg = (unsigned long)f;
return 0;
@@ -546,26 +601,27 @@ insert:
s->protocol = pinfo->protocol;
s->tunnelid = pinfo->tunnelid;
}
- for (sp = &data->ht[h1]; *sp; sp = &(*sp)->next) {
- if (((*sp)->dpi.mask&s->dpi.mask) != s->dpi.mask)
+ sp = &data->ht[h1];
+ for (nsp = rtnl_dereference(*sp); nsp;
+ sp = &nsp->next, nsp = rtnl_dereference(*sp)) {
+ if ((nsp->dpi.mask & s->dpi.mask) != s->dpi.mask)
break;
}
- s->next = *sp;
- wmb();
- *sp = s;
+ RCU_INIT_POINTER(s->next, nsp);
+ rcu_assign_pointer(*sp, s);
goto insert;
errout:
kfree(f);
errout2:
- tcf_exts_destroy(tp, &e);
+ tcf_exts_destroy(&e);
return err;
}
static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
- struct rsvp_head *head = tp->root;
+ struct rsvp_head *head = rtnl_dereference(tp->root);
unsigned int h, h1;
if (arg->stop)
@@ -574,11 +630,13 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg)
for (h = 0; h < 256; h++) {
struct rsvp_session *s;
- for (s = head->ht[h]; s; s = s->next) {
+ for (s = rtnl_dereference(head->ht[h]); s;
+ s = rtnl_dereference(s->next)) {
for (h1 = 0; h1 <= 16; h1++) {
struct rsvp_filter *f;
- for (f = s->ht[h1]; f; f = f->next) {
+ for (f = rtnl_dereference(s->ht[h1]); f;
+ f = rtnl_dereference(f->next)) {
if (arg->count < arg->skip) {
arg->count++;
continue;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 3e9f76413b3b..30f10fb07f4a 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -32,19 +32,21 @@ struct tcindex_filter_result {
struct tcindex_filter {
u16 key;
struct tcindex_filter_result result;
- struct tcindex_filter *next;
+ struct tcindex_filter __rcu *next;
+ struct rcu_head rcu;
};
struct tcindex_data {
struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
- struct tcindex_filter **h; /* imperfect hash; only used if !perfect;
- NULL if unused */
+ struct tcindex_filter __rcu **h; /* imperfect hash; only used if !perfect */
+ struct tcf_proto *tp;
u16 mask; /* AND key with mask */
- int shift; /* shift ANDed key to the right */
- int hash; /* hash table size; 0 if undefined */
- int alloc_hash; /* allocated size */
- int fall_through; /* 0: only classify if explicit match */
+ u32 shift; /* shift ANDed key to the right */
+ u32 hash; /* hash table size; 0 if undefined */
+ u32 alloc_hash; /* allocated size */
+ u32 fall_through; /* 0: only classify if explicit match */
+ struct rcu_head rcu;
};
static inline int
@@ -56,13 +58,18 @@ tcindex_filter_is_set(struct tcindex_filter_result *r)
static struct tcindex_filter_result *
tcindex_lookup(struct tcindex_data *p, u16 key)
{
- struct tcindex_filter *f;
+ if (p->perfect) {
+ struct tcindex_filter_result *f = p->perfect + key;
+
+ return tcindex_filter_is_set(f) ? f : NULL;
+ } else if (p->h) {
+ struct tcindex_filter __rcu **fp;
+ struct tcindex_filter *f;
- if (p->perfect)
- return tcindex_filter_is_set(p->perfect + key) ?
- p->perfect + key : NULL;
- else if (p->h) {
- for (f = p->h[key % p->hash]; f; f = f->next)
+ fp = &p->h[key % p->hash];
+ for (f = rcu_dereference_bh_rtnl(*fp);
+ f;
+ fp = &f->next, f = rcu_dereference_bh_rtnl(*fp))
if (f->key == key)
return &f->result;
}
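
tcindex_lookup() runs both from the BH-protected classify path and from RTNL-held control paths, hence rcu_dereference_bh_rtnl(), whose lockdep check accepts either rcu_read_lock_bh() or the RTNL lock. A small sketch of a lookup shared by both contexts, with hypothetical types:

    #include <linux/rtnetlink.h>	/* rcu_dereference_bh_rtnl() */
    #include <linux/types.h>

    struct entry {
    	struct entry __rcu *next;
    	u16 key;
    };

    /* Callable from softirq classify (rcu_read_lock_bh) and RTNL paths. */
    static struct entry *lookup(struct entry __rcu **chain, u16 key)
    {
    	struct entry *e;

    	for (e = rcu_dereference_bh_rtnl(*chain); e;
    	     e = rcu_dereference_bh_rtnl(e->next))
    		if (e->key == key)
    			return e;
    	return NULL;
    }
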
@@ -74,7 +81,7 @@ tcindex_lookup(struct tcindex_data *p, u16 key)
static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
- struct tcindex_data *p = tp->root;
+ struct tcindex_data *p = rcu_dereference_bh(tp->root);
struct tcindex_filter_result *f;
int key = (skb->tc_index & p->mask) >> p->shift;
@@ -99,7 +106,7 @@ static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
{
- struct tcindex_data *p = tp->root;
+ struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r;
pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
@@ -129,49 +136,59 @@ static int tcindex_init(struct tcf_proto *tp)
p->hash = DEFAULT_HASH_SIZE;
p->fall_through = 1;
- tp->root = p;
+ rcu_assign_pointer(tp->root, p);
return 0;
}
-
static int
-__tcindex_delete(struct tcf_proto *tp, unsigned long arg, int lock)
+tcindex_delete(struct tcf_proto *tp, unsigned long arg)
{
- struct tcindex_data *p = tp->root;
+ struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
+ struct tcindex_filter __rcu **walk;
struct tcindex_filter *f = NULL;
- pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f);
+ pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p\n", tp, arg, p);
if (p->perfect) {
if (!r->res.class)
return -ENOENT;
} else {
int i;
- struct tcindex_filter **walk = NULL;
- for (i = 0; i < p->hash; i++)
- for (walk = p->h+i; *walk; walk = &(*walk)->next)
- if (&(*walk)->result == r)
+ for (i = 0; i < p->hash; i++) {
+ walk = p->h + i;
+ for (f = rtnl_dereference(*walk); f;
+ walk = &f->next, f = rtnl_dereference(*walk)) {
+ if (&f->result == r)
goto found;
+ }
+ }
return -ENOENT;
found:
- f = *walk;
- if (lock)
- tcf_tree_lock(tp);
- *walk = f->next;
- if (lock)
- tcf_tree_unlock(tp);
+ rcu_assign_pointer(*walk, rtnl_dereference(f->next));
}
tcf_unbind_filter(tp, &r->res);
- tcf_exts_destroy(tp, &r->exts);
- kfree(f);
+ tcf_exts_destroy(&r->exts);
+ if (f)
+ kfree_rcu(f, rcu);
return 0;
}
-static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
+static int tcindex_destroy_element(struct tcf_proto *tp,
+ unsigned long arg,
+ struct tcf_walker *walker)
{
- return __tcindex_delete(tp, arg, 1);
+ return tcindex_delete(tp, arg);
+}
+
+static void __tcindex_destroy(struct rcu_head *head)
+{
+ struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+
+ kfree(p->perfect);
+ kfree(p->h);
+ kfree(p);
}
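
__tcindex_destroy() is the canonical call_rcu() teardown shape: embed a struct rcu_head in the object, recover the object with container_of() in the callback, and do all freeing there, after the grace period. A generic sketch with hypothetical names:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct table {
    	void **slots;
    	struct rcu_head rcu;
    };

    static void table_free_rcu(struct rcu_head *head)
    {
    	struct table *t = container_of(head, struct table, rcu);

    	kfree(t->slots);
    	kfree(t);
    }

    /* Unlink 't' from every reader-visible pointer first, then: */
    static void table_destroy(struct table *t)
    {
    	call_rcu(&t->rcu, table_free_rcu);
    }
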
static inline int
@@ -194,6 +211,14 @@ static void tcindex_filter_result_init(struct tcindex_filter_result *r)
tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}
+static void __tcindex_partial_destroy(struct rcu_head *head)
+{
+ struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+
+ kfree(p->perfect);
+ kfree(p);
+}
+
static int
tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
u32 handle, struct tcindex_data *p,
@@ -203,7 +228,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
int err, balloc = 0;
struct tcindex_filter_result new_filter_result, *old_r = r;
struct tcindex_filter_result cr;
- struct tcindex_data cp;
+ struct tcindex_data *cp, *oldp;
struct tcindex_filter *f = NULL; /* make gcc behave */
struct tcf_exts e;
@@ -212,89 +237,130 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (err < 0)
return err;
- memcpy(&cp, p, sizeof(cp));
- tcindex_filter_result_init(&new_filter_result);
+ err = -ENOMEM;
+ /* tcindex_data attributes must look atomic to the classifier/lookup
+ * path, so allocate a new tcindex_data and RCU-assign it onto the
+ * root, keeping the perfect hash and hash pointers from the old data.
+ */
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ goto errout;
+
+ cp->mask = p->mask;
+ cp->shift = p->shift;
+ cp->hash = p->hash;
+ cp->alloc_hash = p->alloc_hash;
+ cp->fall_through = p->fall_through;
+ cp->tp = tp;
+ if (p->perfect) {
+ int i;
+
+ cp->perfect = kmemdup(p->perfect,
+ sizeof(*r) * cp->hash, GFP_KERNEL);
+ if (!cp->perfect)
+ goto errout;
+ for (i = 0; i < cp->hash; i++)
+ tcf_exts_init(&cp->perfect[i].exts,
+ TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ balloc = 1;
+ }
+ cp->h = p->h;
+
+ tcindex_filter_result_init(&new_filter_result);
tcindex_filter_result_init(&cr);
if (old_r)
cr.res = r->res;
if (tb[TCA_TCINDEX_HASH])
- cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
+ cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
if (tb[TCA_TCINDEX_MASK])
- cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
+ cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
if (tb[TCA_TCINDEX_SHIFT])
- cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
+ cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
err = -EBUSY;
+
/* Hash already allocated, make sure that we still meet the
* requirements for the allocated hash.
*/
- if (cp.perfect) {
- if (!valid_perfect_hash(&cp) ||
- cp.hash > cp.alloc_hash)
- goto errout;
- } else if (cp.h && cp.hash != cp.alloc_hash)
- goto errout;
+ if (cp->perfect) {
+ if (!valid_perfect_hash(cp) ||
+ cp->hash > cp->alloc_hash)
+ goto errout_alloc;
+ } else if (cp->h && cp->hash != cp->alloc_hash) {
+ goto errout_alloc;
+ }
err = -EINVAL;
if (tb[TCA_TCINDEX_FALL_THROUGH])
- cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
+ cp->fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
- if (!cp.hash) {
+ if (!cp->hash) {
/* Hash not specified, use perfect hash if the upper limit
* of the hashing index is below the threshold.
*/
- if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
- cp.hash = (cp.mask >> cp.shift) + 1;
+ if ((cp->mask >> cp->shift) < PERFECT_HASH_THRESHOLD)
+ cp->hash = (cp->mask >> cp->shift) + 1;
else
- cp.hash = DEFAULT_HASH_SIZE;
+ cp->hash = DEFAULT_HASH_SIZE;
}
- if (!cp.perfect && !cp.h)
- cp.alloc_hash = cp.hash;
+ if (!cp->perfect && !cp->h)
+ cp->alloc_hash = cp->hash;
/* Note: this could be as restrictive as if (handle & ~(mask >> shift))
* but then, we'd fail handles that may become valid after some future
* mask change. While this is extremely unlikely to ever matter,
* the check below is safer (and also more backwards-compatible).
*/
- if (cp.perfect || valid_perfect_hash(&cp))
- if (handle >= cp.alloc_hash)
- goto errout;
+ if (cp->perfect || valid_perfect_hash(cp))
+ if (handle >= cp->alloc_hash)
+ goto errout_alloc;
err = -ENOMEM;
- if (!cp.perfect && !cp.h) {
- if (valid_perfect_hash(&cp)) {
+ if (!cp->perfect && !cp->h) {
+ if (valid_perfect_hash(cp)) {
int i;
- cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
- if (!cp.perfect)
- goto errout;
- for (i = 0; i < cp.hash; i++)
- tcf_exts_init(&cp.perfect[i].exts, TCA_TCINDEX_ACT,
+ cp->perfect = kcalloc(cp->hash, sizeof(*r), GFP_KERNEL);
+ if (!cp->perfect)
+ goto errout_alloc;
+ for (i = 0; i < cp->hash; i++)
+ tcf_exts_init(&cp->perfect[i].exts,
+ TCA_TCINDEX_ACT,
TCA_TCINDEX_POLICE);
balloc = 1;
} else {
- cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
- if (!cp.h)
- goto errout;
+ struct tcindex_filter __rcu **hash;
+
+ hash = kcalloc(cp->hash,
+ sizeof(struct tcindex_filter *),
+ GFP_KERNEL);
+
+ if (!hash)
+ goto errout_alloc;
+
+ cp->h = hash;
balloc = 2;
}
}
- if (cp.perfect)
- r = cp.perfect + handle;
+ if (cp->perfect)
+ r = cp->perfect + handle;
else
- r = tcindex_lookup(&cp, handle) ? : &new_filter_result;
+ r = tcindex_lookup(cp, handle) ? : &new_filter_result;
if (r == &new_filter_result) {
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
goto errout_alloc;
+ f->key = handle;
+ tcindex_filter_result_init(&f->result);
+ f->next = NULL;
}
if (tb[TCA_TCINDEX_CLASSID]) {
@@ -307,34 +373,40 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
else
tcf_exts_change(tp, &cr.exts, &e);
- tcf_tree_lock(tp);
if (old_r && old_r != r)
tcindex_filter_result_init(old_r);
- memcpy(p, &cp, sizeof(cp));
+ oldp = p;
r->res = cr.res;
+ rcu_assign_pointer(tp->root, cp);
if (r == &new_filter_result) {
- struct tcindex_filter **fp;
+ struct tcindex_filter *nfp;
+ struct tcindex_filter __rcu **fp;
- f->key = handle;
- f->result = new_filter_result;
- f->next = NULL;
- for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next)
- /* nothing */;
- *fp = f;
+ tcf_exts_change(tp, &f->result.exts, &r->exts);
+
+ fp = cp->h + (handle % cp->hash);
+ for (nfp = rtnl_dereference(*fp);
+ nfp;
+ fp = &nfp->next, nfp = rtnl_dereference(*fp))
+ ; /* nothing */
+
+ rcu_assign_pointer(*fp, f);
}
- tcf_tree_unlock(tp);
+ if (oldp)
+ call_rcu(&oldp->rcu, __tcindex_partial_destroy);
return 0;
errout_alloc:
if (balloc == 1)
- kfree(cp.perfect);
+ kfree(cp->perfect);
else if (balloc == 2)
- kfree(cp.h);
+ kfree(cp->h);
errout:
- tcf_exts_destroy(tp, &e);
+ kfree(cp);
+ tcf_exts_destroy(&e);
return err;
}
@@ -345,7 +417,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
{
struct nlattr *opt = tca[TCA_OPTIONS];
struct nlattr *tb[TCA_TCINDEX_MAX + 1];
- struct tcindex_data *p = tp->root;
+ struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
int err;
@@ -364,10 +436,9 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
tca[TCA_RATE], ovr);
}
-
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
- struct tcindex_data *p = tp->root;
+ struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter *f, *next;
int i;
@@ -390,8 +461,8 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
if (!p->h)
return;
for (i = 0; i < p->hash; i++) {
- for (f = p->h[i]; f; f = next) {
- next = f->next;
+ for (f = rtnl_dereference(p->h[i]); f; f = next) {
+ next = rtnl_dereference(f->next);
if (walker->count >= walker->skip) {
if (walker->fn(tp, (unsigned long) &f->result,
walker) < 0) {
@@ -404,17 +475,9 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
}
}
-
-static int tcindex_destroy_element(struct tcf_proto *tp,
- unsigned long arg, struct tcf_walker *walker)
-{
- return __tcindex_delete(tp, arg, 0);
-}
-
-
static void tcindex_destroy(struct tcf_proto *tp)
{
- struct tcindex_data *p = tp->root;
+ struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcf_walker walker;
pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
@@ -422,17 +485,16 @@ static void tcindex_destroy(struct tcf_proto *tp)
walker.skip = 0;
walker.fn = tcindex_destroy_element;
tcindex_walk(tp, &walker);
- kfree(p->perfect);
- kfree(p->h);
- kfree(p);
- tp->root = NULL;
+
+ RCU_INIT_POINTER(tp->root, NULL);
+ call_rcu(&p->rcu, __tcindex_destroy);
}
static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
- struct tcindex_data *p = tp->root;
+ struct tcindex_data *p = rtnl_dereference(tp->root);
struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
@@ -455,15 +517,18 @@ static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
nla_nest_end(skb, nest);
} else {
if (p->perfect) {
- t->tcm_handle = r-p->perfect;
+ t->tcm_handle = r - p->perfect;
} else {
struct tcindex_filter *f;
+ struct tcindex_filter __rcu **fp;
int i;
t->tcm_handle = 0;
for (i = 0; !t->tcm_handle && i < p->hash; i++) {
- for (f = p->h[i]; !t->tcm_handle && f;
- f = f->next) {
+ fp = &p->h[i];
+ for (f = rtnl_dereference(*fp);
+ !t->tcm_handle && f;
+ fp = &f->next, f = rtnl_dereference(*fp)) {
if (&f->result == r)
t->tcm_handle = f->key;
}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 70c0be8d0121..0472909bb014 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
@@ -44,40 +45,49 @@
#include <net/pkt_cls.h>
struct tc_u_knode {
- struct tc_u_knode *next;
+ struct tc_u_knode __rcu *next;
u32 handle;
- struct tc_u_hnode *ht_up;
+ struct tc_u_hnode __rcu *ht_up;
struct tcf_exts exts;
#ifdef CONFIG_NET_CLS_IND
int ifindex;
#endif
u8 fshift;
struct tcf_result res;
- struct tc_u_hnode *ht_down;
+ struct tc_u_hnode __rcu *ht_down;
#ifdef CONFIG_CLS_U32_PERF
- struct tc_u32_pcnt *pf;
+ struct tc_u32_pcnt __percpu *pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
- struct tc_u32_mark mark;
+ u32 val;
+ u32 mask;
+ u32 __percpu *pcpu_success;
#endif
+ struct tcf_proto *tp;
+ struct rcu_head rcu;
+ /* The 'sel' field MUST be the last field in the structure to allow
+ * for the tc_u32_key array allocated at the end of the structure.
+ */
struct tc_u32_sel sel;
};
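
The __rcu annotations introduced here are checked by sparse: such a pointer may only be read through the rcu_dereference() family and written through rcu_assign_pointer()/RCU_INIT_POINTER(), so a plain assignment becomes a build-time warning under `make C=1`. A minimal illustration, with hypothetical structs:

    #include <linux/rcupdate.h>

    struct cfg {
    	int limit;
    };

    struct holder {
    	struct cfg __rcu *cfg;
    };

    static int read_limit(struct holder *h)
    {
    	struct cfg *c;
    	int limit = 0;

    	rcu_read_lock();
    	c = rcu_dereference(h->cfg);	/* plain 'c = h->cfg' warns under sparse */
    	if (c)
    		limit = c->limit;
    	rcu_read_unlock();
    	return limit;
    }
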
struct tc_u_hnode {
- struct tc_u_hnode *next;
+ struct tc_u_hnode __rcu *next;
u32 handle;
u32 prio;
struct tc_u_common *tp_c;
int refcnt;
unsigned int divisor;
- struct tc_u_knode *ht[1];
+ struct tc_u_knode __rcu *ht[1];
+ struct rcu_head rcu;
};
struct tc_u_common {
- struct tc_u_hnode *hlist;
+ struct tc_u_hnode __rcu *hlist;
struct Qdisc *q;
int refcnt;
u32 hgenerator;
+ struct rcu_head rcu;
};
static inline unsigned int u32_hash_fold(__be32 key,
@@ -96,7 +106,7 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct
unsigned int off;
} stack[TC_U32_MAXDEPTH];
- struct tc_u_hnode *ht = tp->root;
+ struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
unsigned int off = skb_network_offset(skb);
struct tc_u_knode *n;
int sdepth = 0;
@@ -108,23 +118,23 @@ static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct
int i, r;
next_ht:
- n = ht->ht[sel];
+ n = rcu_dereference_bh(ht->ht[sel]);
next_knode:
if (n) {
struct tc_u32_key *key = n->sel.keys;
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rcnt += 1;
+ __this_cpu_inc(n->pf->rcnt);
j = 0;
#endif
#ifdef CONFIG_CLS_U32_MARK
- if ((skb->mark & n->mark.mask) != n->mark.val) {
- n = n->next;
+ if ((skb->mark & n->mask) != n->val) {
+ n = rcu_dereference_bh(n->next);
goto next_knode;
} else {
- n->mark.success++;
+ __this_cpu_inc(*n->pcpu_success);
}
#endif
@@ -139,37 +149,39 @@ next_knode:
if (!data)
goto out;
if ((*data ^ key->val) & key->mask) {
- n = n->next;
+ n = rcu_dereference_bh(n->next);
goto next_knode;
}
#ifdef CONFIG_CLS_U32_PERF
- n->pf->kcnts[j] += 1;
+ __this_cpu_inc(n->pf->kcnts[j]);
j++;
#endif
}
- if (n->ht_down == NULL) {
+
+ ht = rcu_dereference_bh(n->ht_down);
+ if (!ht) {
check_terminal:
if (n->sel.flags & TC_U32_TERMINAL) {
*res = n->res;
#ifdef CONFIG_NET_CLS_IND
if (!tcf_match_indev(skb, n->ifindex)) {
- n = n->next;
+ n = rcu_dereference_bh(n->next);
goto next_knode;
}
#endif
#ifdef CONFIG_CLS_U32_PERF
- n->pf->rhit += 1;
+ __this_cpu_inc(n->pf->rhit);
#endif
r = tcf_exts_exec(skb, &n->exts, res);
if (r < 0) {
- n = n->next;
+ n = rcu_dereference_bh(n->next);
goto next_knode;
}
return r;
}
- n = n->next;
+ n = rcu_dereference_bh(n->next);
goto next_knode;
}
@@ -180,7 +192,7 @@ check_terminal:
stack[sdepth].off = off;
sdepth++;
- ht = n->ht_down;
+ ht = rcu_dereference_bh(n->ht_down);
sel = 0;
if (ht->divisor) {
__be32 *data, hdata;
@@ -222,7 +234,7 @@ check_terminal:
/* POP */
if (sdepth--) {
n = stack[sdepth].knode;
- ht = n->ht_up;
+ ht = rcu_dereference_bh(n->ht_up);
off = stack[sdepth].off;
goto check_terminal;
}
@@ -239,7 +251,9 @@ u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
struct tc_u_hnode *ht;
- for (ht = tp_c->hlist; ht; ht = ht->next)
+ for (ht = rtnl_dereference(tp_c->hlist);
+ ht;
+ ht = rtnl_dereference(ht->next))
if (ht->handle == handle)
break;
@@ -256,7 +270,9 @@ u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
if (sel > ht->divisor)
goto out;
- for (n = ht->ht[sel]; n; n = n->next)
+ for (n = rtnl_dereference(ht->ht[sel]);
+ n;
+ n = rtnl_dereference(n->next))
if (n->handle == handle)
break;
out:
@@ -270,7 +286,7 @@ static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
struct tc_u_common *tp_c = tp->data;
if (TC_U32_HTID(handle) == TC_U32_ROOT)
- ht = tp->root;
+ ht = rtnl_dereference(tp->root);
else
ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));
@@ -291,6 +307,9 @@ static u32 gen_new_htid(struct tc_u_common *tp_c)
{
int i = 0x800;
+ /* hgenerator is only used under the rtnl lock, so it is safe to
+ * increment it without read-copy-update semantics.
+ */
do {
if (++tp_c->hgenerator == 0x7FF)
tp_c->hgenerator = 1;
@@ -326,41 +345,78 @@ static int u32_init(struct tcf_proto *tp)
}
tp_c->refcnt++;
- root_ht->next = tp_c->hlist;
- tp_c->hlist = root_ht;
+ RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
+ rcu_assign_pointer(tp_c->hlist, root_ht);
root_ht->tp_c = tp_c;
- tp->root = root_ht;
+ rcu_assign_pointer(tp->root, root_ht);
tp->data = tp_c;
return 0;
}
-static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
+static int u32_destroy_key(struct tcf_proto *tp,
+ struct tc_u_knode *n,
+ bool free_pf)
{
- tcf_unbind_filter(tp, &n->res);
- tcf_exts_destroy(tp, &n->exts);
+ tcf_exts_destroy(&n->exts);
if (n->ht_down)
n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
- kfree(n->pf);
+ if (free_pf)
+ free_percpu(n->pf);
+#endif
+#ifdef CONFIG_CLS_U32_MARK
+ if (free_pf)
+ free_percpu(n->pcpu_success);
#endif
kfree(n);
return 0;
}
+/* u32_delete_key_rcu should be called when freeing a copied
+ * version of a tc_u_knode obtained from u32_init_knode(). When
+ * copies are obtained from u32_init_knode(), the statistics are
+ * shared between the old and new copies so that readers can
+ * continue to update them during the copy. To support this,
+ * the u32_delete_key_rcu variant does not free the percpu
+ * statistics.
+ */
+static void u32_delete_key_rcu(struct rcu_head *rcu)
+{
+ struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
+
+ u32_destroy_key(key->tp, key, false);
+}
+
+/* u32_delete_key_freepf_rcu is the rcu callback variant
+ * that frees the entire structure, including the percpu
+ * statistics. Only use this if the key is not a copy
+ * returned by u32_init_knode(). See u32_delete_key_rcu()
+ * for the variant that should be used with keys returned
+ * from u32_init_knode().
+ */
+static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
+{
+ struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
+
+ u32_destroy_key(key->tp, key, true);
+}
+
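
The two callbacks exist because a knode and the copy that replaces it share the same percpu counters; only whichever instance dies last may free them. The same two-destructor arrangement in miniature, with a hypothetical struct:

    #include <linux/kernel.h>
    #include <linux/percpu.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct knode {
    	u64 __percpu *hits;	/* shared with the copy during replacement */
    	struct rcu_head rcu;
    };

    static void knode_free(struct rcu_head *head)		/* copy: stats live on */
    {
    	kfree(container_of(head, struct knode, rcu));
    }

    static void knode_free_all(struct rcu_head *head)	/* last owner */
    {
    	struct knode *k = container_of(head, struct knode, rcu);

    	free_percpu(k->hits);
    	kfree(k);
    }
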
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
- struct tc_u_knode **kp;
- struct tc_u_hnode *ht = key->ht_up;
+ struct tc_u_knode __rcu **kp;
+ struct tc_u_knode *pkp;
+ struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);
if (ht) {
- for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
- if (*kp == key) {
- tcf_tree_lock(tp);
- *kp = key->next;
- tcf_tree_unlock(tp);
-
- u32_destroy_key(tp, key);
+ kp = &ht->ht[TC_U32_HASH(key->handle)];
+ for (pkp = rtnl_dereference(*kp); pkp;
+ kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
+ if (pkp == key) {
+ RCU_INIT_POINTER(*kp, key->next);
+
+ tcf_unbind_filter(tp, &key->res);
+ call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
return 0;
}
}
@@ -375,10 +431,11 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
unsigned int h;
for (h = 0; h <= ht->divisor; h++) {
- while ((n = ht->ht[h]) != NULL) {
- ht->ht[h] = n->next;
-
- u32_destroy_key(tp, n);
+ while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
+ RCU_INIT_POINTER(ht->ht[h],
+ rtnl_dereference(n->next));
+ tcf_unbind_filter(tp, &n->res);
+ call_rcu(&n->rcu, u32_delete_key_freepf_rcu);
}
}
}
@@ -386,28 +443,31 @@ static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
struct tc_u_common *tp_c = tp->data;
- struct tc_u_hnode **hn;
+ struct tc_u_hnode __rcu **hn;
+ struct tc_u_hnode *phn;
WARN_ON(ht->refcnt);
u32_clear_hnode(tp, ht);
- for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
- if (*hn == ht) {
- *hn = ht->next;
- kfree(ht);
+ hn = &tp_c->hlist;
+ for (phn = rtnl_dereference(*hn);
+ phn;
+ hn = &phn->next, phn = rtnl_dereference(*hn)) {
+ if (phn == ht) {
+ RCU_INIT_POINTER(*hn, ht->next);
+ kfree_rcu(ht, rcu);
return 0;
}
}
- WARN_ON(1);
return -ENOENT;
}
static void u32_destroy(struct tcf_proto *tp)
{
struct tc_u_common *tp_c = tp->data;
- struct tc_u_hnode *root_ht = tp->root;
+ struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
WARN_ON(root_ht == NULL);
@@ -419,17 +479,16 @@ static void u32_destroy(struct tcf_proto *tp)
tp->q->u32_node = NULL;
- for (ht = tp_c->hlist; ht; ht = ht->next) {
+ for (ht = rtnl_dereference(tp_c->hlist);
+ ht;
+ ht = rtnl_dereference(ht->next)) {
ht->refcnt--;
u32_clear_hnode(tp, ht);
}
- while ((ht = tp_c->hlist) != NULL) {
- tp_c->hlist = ht->next;
-
- WARN_ON(ht->refcnt != 0);
-
- kfree(ht);
+ while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
+ RCU_INIT_POINTER(tp_c->hlist, ht->next);
+ kfree_rcu(ht, rcu);
}
kfree(tp_c);
@@ -441,6 +500,7 @@ static void u32_destroy(struct tcf_proto *tp)
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;
+ struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);
if (ht == NULL)
return 0;
@@ -448,7 +508,7 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
if (TC_U32_KEY(ht->handle))
return u32_delete_key(tp, (struct tc_u_knode *)ht);
- if (tp->root == ht)
+ if (root_ht == ht)
return -EINVAL;
if (ht->refcnt == 1) {
@@ -471,7 +531,9 @@ static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
if (!bitmap)
return handle | 0xFFF;
- for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
+ for (n = rtnl_dereference(ht->ht[TC_U32_HASH(handle)]);
+ n;
+ n = rtnl_dereference(n->next))
set_bit(TC_U32_NODE(n->handle), bitmap);
i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
@@ -521,10 +583,8 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
ht_down->refcnt++;
}
- tcf_tree_lock(tp);
- ht_old = n->ht_down;
- n->ht_down = ht_down;
- tcf_tree_unlock(tp);
+ ht_old = rtnl_dereference(n->ht_down);
+ rcu_assign_pointer(n->ht_down, ht_down);
if (ht_old)
ht_old->refcnt--;
@@ -547,10 +607,86 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp,
return 0;
errout:
- tcf_exts_destroy(tp, &e);
+ tcf_exts_destroy(&e);
return err;
}
+static void u32_replace_knode(struct tcf_proto *tp,
+ struct tc_u_common *tp_c,
+ struct tc_u_knode *n)
+{
+ struct tc_u_knode __rcu **ins;
+ struct tc_u_knode *pins;
+ struct tc_u_hnode *ht;
+
+ if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
+ ht = rtnl_dereference(tp->root);
+ else
+ ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));
+
+ ins = &ht->ht[TC_U32_HASH(n->handle)];
+
+ /* The node must always exist for it to be replaced; if it does not,
+ * something went very wrong elsewhere.
+ */
+ for (pins = rtnl_dereference(*ins); ;
+ ins = &pins->next, pins = rtnl_dereference(*ins))
+ if (pins->handle == n->handle)
+ break;
+
+ RCU_INIT_POINTER(n->next, pins->next);
+ rcu_assign_pointer(*ins, n);
+}
+
+static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
+ struct tc_u_knode *n)
+{
+ struct tc_u_knode *new;
+ struct tc_u32_sel *s = &n->sel;
+
+ new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
+ GFP_KERNEL);
+
+ if (!new)
+ return NULL;
+
+ RCU_INIT_POINTER(new->next, n->next);
+ new->handle = n->handle;
+ RCU_INIT_POINTER(new->ht_up, n->ht_up);
+
+#ifdef CONFIG_NET_CLS_IND
+ new->ifindex = n->ifindex;
+#endif
+ new->fshift = n->fshift;
+ new->res = n->res;
+ RCU_INIT_POINTER(new->ht_down, n->ht_down);
+
+ /* bump the reference count for as long as we hold a pointer to the structure */
+ if (new->ht_down)
+ new->ht_down->refcnt++;
+
+#ifdef CONFIG_CLS_U32_PERF
+ /* Statistics may be incremented by readers during the update,
+ * so we must keep them intact. When the node is later destroyed,
+ * a special destroy call must be made so the pf memory is not freed.
+ */
+ new->pf = n->pf;
+#endif
+
+#ifdef CONFIG_CLS_U32_MARK
+ new->val = n->val;
+ new->mask = n->mask;
+ /* Similarly, the success statistics must be carried over as pointers */
+ new->pcpu_success = n->pcpu_success;
+#endif
+ new->tp = tp;
+ memcpy(&new->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
+
+ tcf_exts_init(&new->exts, TCA_U32_ACT, TCA_U32_POLICE);
+
+ return new;
+}
+
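
u32_init_knode() and u32_replace_knode() together implement copy-update-replace: the copy is fully initialised in private, rcu_assign_pointer() publishes it in one ordered store, and the displaced original goes to the stats-preserving RCU callback. The slot swap reduces to something like this, reusing the hypothetical knode and knode_free() from the earlier sketch:

    /* All under RTNL; 'old' is reachable via *slot, 'new' is private. */
    static void replace_published(struct knode __rcu **slot,
    			      struct knode *old, struct knode *new)
    {
    	rcu_assign_pointer(*slot, new);	 /* publish; write barrier implied */
    	call_rcu(&old->rcu, knode_free); /* old frees after readers drain */
    }
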
static int u32_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca,
@@ -564,6 +700,9 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
struct nlattr *tb[TCA_U32_MAX + 1];
u32 htid;
int err;
+#ifdef CONFIG_CLS_U32_PERF
+ size_t size;
+#endif
if (opt == NULL)
return handle ? -EINVAL : 0;
@@ -574,11 +713,28 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
n = (struct tc_u_knode *)*arg;
if (n) {
+ struct tc_u_knode *new;
+
if (TC_U32_KEY(n->handle) == 0)
return -EINVAL;
- return u32_set_parms(net, tp, base, n->ht_up, n, tb,
- tca[TCA_RATE], ovr);
+ new = u32_init_knode(tp, n);
+ if (!new)
+ return -ENOMEM;
+
+ err = u32_set_parms(net, tp, base,
+ rtnl_dereference(n->ht_up), new, tb,
+ tca[TCA_RATE], ovr);
+
+ if (err) {
+ u32_destroy_key(tp, new, false);
+ return err;
+ }
+
+ u32_replace_knode(tp, tp_c, new);
+ tcf_unbind_filter(tp, &n->res);
+ call_rcu(&n->rcu, u32_delete_key_rcu);
+ return 0;
}
if (tb[TCA_U32_DIVISOR]) {
@@ -601,8 +757,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
ht->divisor = divisor;
ht->handle = handle;
ht->prio = tp->prio;
- ht->next = tp_c->hlist;
- tp_c->hlist = ht;
+ RCU_INIT_POINTER(ht->next, tp_c->hlist);
+ rcu_assign_pointer(tp_c->hlist, ht);
*arg = (unsigned long)ht;
return 0;
}
@@ -610,7 +766,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (tb[TCA_U32_HASH]) {
htid = nla_get_u32(tb[TCA_U32_HASH]);
if (TC_U32_HTID(htid) == TC_U32_ROOT) {
- ht = tp->root;
+ ht = rtnl_dereference(tp->root);
htid = ht->handle;
} else {
ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
@@ -618,7 +774,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -EINVAL;
}
} else {
- ht = tp->root;
+ ht = rtnl_dereference(tp->root);
htid = ht->handle;
}
@@ -642,46 +798,62 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -ENOBUFS;
#ifdef CONFIG_CLS_U32_PERF
- n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
- if (n->pf == NULL) {
+ size = sizeof(struct tc_u32_pcnt) + s->nkeys * sizeof(u64);
+ n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt));
+ if (!n->pf) {
kfree(n);
return -ENOBUFS;
}
#endif
memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
- n->ht_up = ht;
+ RCU_INIT_POINTER(n->ht_up, ht);
n->handle = handle;
n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);
+ n->tp = tp;
#ifdef CONFIG_CLS_U32_MARK
+ n->pcpu_success = alloc_percpu(u32);
+ if (!n->pcpu_success) {
+ err = -ENOMEM;
+ goto errout;
+ }
+
if (tb[TCA_U32_MARK]) {
struct tc_u32_mark *mark;
mark = nla_data(tb[TCA_U32_MARK]);
- memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
- n->mark.success = 0;
+ n->val = mark->val;
+ n->mask = mark->mask;
}
#endif
err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE], ovr);
if (err == 0) {
- struct tc_u_knode **ins;
- for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
- if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
+ struct tc_u_knode __rcu **ins;
+ struct tc_u_knode *pins;
+
+ ins = &ht->ht[TC_U32_HASH(handle)];
+ for (pins = rtnl_dereference(*ins); pins;
+ ins = &pins->next, pins = rtnl_dereference(*ins))
+ if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
break;
- n->next = *ins;
- tcf_tree_lock(tp);
- *ins = n;
- tcf_tree_unlock(tp);
+ RCU_INIT_POINTER(n->next, pins);
+ rcu_assign_pointer(*ins, n);
*arg = (unsigned long)n;
return 0;
}
+
+#ifdef CONFIG_CLS_U32_MARK
+ free_percpu(n->pcpu_success);
+errout:
+#endif
+
#ifdef CONFIG_CLS_U32_PERF
- kfree(n->pf);
+ free_percpu(n->pf);
#endif
kfree(n);
return err;
@@ -697,7 +869,9 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
if (arg->stop)
return;
- for (ht = tp_c->hlist; ht; ht = ht->next) {
+ for (ht = rtnl_dereference(tp_c->hlist);
+ ht;
+ ht = rtnl_dereference(ht->next)) {
if (ht->prio != tp->prio)
continue;
if (arg->count >= arg->skip) {
@@ -708,7 +882,9 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
}
arg->count++;
for (h = 0; h <= ht->divisor; h++) {
- for (n = ht->ht[h]; n; n = n->next) {
+ for (n = rtnl_dereference(ht->ht[h]);
+ n;
+ n = rtnl_dereference(n->next)) {
if (arg->count < arg->skip) {
arg->count++;
continue;
@@ -727,6 +903,7 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
struct tc_u_knode *n = (struct tc_u_knode *)fh;
+ struct tc_u_hnode *ht_up, *ht_down;
struct nlattr *nest;
if (n == NULL)
@@ -745,11 +922,18 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
goto nla_put_failure;
} else {
+#ifdef CONFIG_CLS_U32_PERF
+ struct tc_u32_pcnt *gpf;
+ int cpu;
+#endif
+
if (nla_put(skb, TCA_U32_SEL,
sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
&n->sel))
goto nla_put_failure;
- if (n->ht_up) {
+
+ ht_up = rtnl_dereference(n->ht_up);
+ if (ht_up) {
u32 htid = n->handle & 0xFFFFF000;
if (nla_put_u32(skb, TCA_U32_HASH, htid))
goto nla_put_failure;
@@ -757,14 +941,28 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
if (n->res.classid &&
nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
goto nla_put_failure;
- if (n->ht_down &&
- nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
+
+ ht_down = rtnl_dereference(n->ht_down);
+ if (ht_down &&
+ nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
goto nla_put_failure;
#ifdef CONFIG_CLS_U32_MARK
- if ((n->mark.val || n->mark.mask) &&
- nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
- goto nla_put_failure;
+ if ((n->val || n->mask)) {
+ struct tc_u32_mark mark = {.val = n->val,
+ .mask = n->mask,
+ .success = 0};
+ int cpum;
+
+ for_each_possible_cpu(cpum) {
+ __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);
+
+ mark.success += cnt;
+ }
+
+ if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
+ goto nla_put_failure;
+ }
#endif
if (tcf_exts_dump(skb, &n->exts) < 0)
@@ -779,10 +977,29 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
}
#endif
#ifdef CONFIG_CLS_U32_PERF
+ gpf = kzalloc(sizeof(struct tc_u32_pcnt) +
+ n->sel.nkeys * sizeof(u64),
+ GFP_KERNEL);
+ if (!gpf)
+ goto nla_put_failure;
+
+ for_each_possible_cpu(cpu) {
+ int i;
+ struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
+
+ gpf->rcnt += pf->rcnt;
+ gpf->rhit += pf->rhit;
+ for (i = 0; i < n->sel.nkeys; i++)
+ gpf->kcnts[i] += pf->kcnts[i];
+ }
+
if (nla_put(skb, TCA_U32_PCNT,
sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
- n->pf))
+ gpf)) {
+ kfree(gpf);
goto nla_put_failure;
+ }
+ kfree(gpf);
#endif
}
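
The dump code above illustrates the cost model of the percpu conversion: the per-packet path pays a lockless __this_cpu_inc(), while the rare dump path walks every possible CPU and folds the counters. In outline, with a hypothetical counter struct:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    struct pcnt {
    	u64 rcnt;
    };

    /* Fast path (softirq, preemption already disabled): lockless increment. */
    static void account(struct pcnt __percpu *pf)
    {
    	__this_cpu_inc(pf->rcnt);
    }

    /* Slow path (netlink dump): fold the per-CPU counters. */
    static u64 total(struct pcnt __percpu *pf)
    {
    	u64 sum = 0;
    	int cpu;

    	for_each_possible_cpu(cpu)
    		sum += per_cpu_ptr(pf, cpu)->rcnt;
    	return sum;
    }
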
diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c
index 7c292d474f47..ddd883ca55b2 100644
--- a/net/sched/em_canid.c
+++ b/net/sched/em_canid.c
@@ -120,7 +120,7 @@ static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m,
return match;
}
-static int em_canid_change(struct tcf_proto *tp, void *data, int len,
+static int em_canid_change(struct net *net, void *data, int len,
struct tcf_ematch *m)
{
struct can_filter *conf = data; /* Array with rules */
@@ -183,7 +183,7 @@ static int em_canid_change(struct tcf_proto *tp, void *data, int len,
return 0;
}
-static void em_canid_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
+static void em_canid_destroy(struct tcf_ematch *m)
{
struct canid_match *cm = em_canid_priv(m);
diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c
index 527aeb7a3ff0..5b4a4efe468c 100644
--- a/net/sched/em_ipset.c
+++ b/net/sched/em_ipset.c
@@ -19,12 +19,11 @@
#include <net/ip.h>
#include <net/pkt_cls.h>
-static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len,
+static int em_ipset_change(struct net *net, void *data, int data_len,
struct tcf_ematch *em)
{
struct xt_set_info *set = data;
ip_set_id_t index;
- struct net *net = dev_net(qdisc_dev(tp->q));
if (data_len != sizeof(*set))
return -EINVAL;
@@ -42,11 +41,11 @@ static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len,
return -ENOMEM;
}
-static void em_ipset_destroy(struct tcf_proto *p, struct tcf_ematch *em)
+static void em_ipset_destroy(struct tcf_ematch *em)
{
const struct xt_set_info *set = (const void *) em->data;
if (set) {
- ip_set_nfnl_put(dev_net(qdisc_dev(p->q)), set->index);
+ ip_set_nfnl_put(em->net, set->index);
kfree((void *) em->data);
}
}
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 9b8c0b0e60d7..c8f8c399b99a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -856,7 +856,7 @@ static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
[TCA_EM_META_HDR] = { .len = sizeof(struct tcf_meta_hdr) },
};
-static int em_meta_change(struct tcf_proto *tp, void *data, int len,
+static int em_meta_change(struct net *net, void *data, int len,
struct tcf_ematch *m)
{
int err;
@@ -908,7 +908,7 @@ errout:
return err;
}
-static void em_meta_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
+static void em_meta_destroy(struct tcf_ematch *m)
{
if (m)
meta_delete((struct meta_match *) m->data);
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index a3bed07a008b..df3110d69585 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -23,7 +23,7 @@ struct nbyte_data {
char pattern[0];
};
-static int em_nbyte_change(struct tcf_proto *tp, void *data, int data_len,
+static int em_nbyte_change(struct net *net, void *data, int data_len,
struct tcf_ematch *em)
{
struct tcf_em_nbyte *nbyte = data;
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 15d353d2e4be..f03c3de16c27 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -45,7 +45,7 @@ static int em_text_match(struct sk_buff *skb, struct tcf_ematch *m,
return skb_find_text(skb, from, to, tm->config, &state) != UINT_MAX;
}
-static int em_text_change(struct tcf_proto *tp, void *data, int len,
+static int em_text_change(struct net *net, void *data, int len,
struct tcf_ematch *m)
{
struct text_match *tm;
@@ -100,7 +100,7 @@ retry:
return 0;
}
-static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
+static void em_text_destroy(struct tcf_ematch *m)
{
if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
textsearch_destroy(EM_TEXT_PRIV(m)->config);
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index f878fa16349a..6742200b1307 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -178,6 +178,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
struct tcf_ematch_hdr *em_hdr = nla_data(nla);
int data_len = nla_len(nla) - sizeof(*em_hdr);
void *data = (void *) em_hdr + sizeof(*em_hdr);
+ struct net *net = dev_net(qdisc_dev(tp->q));
if (!TCF_EM_REL_VALID(em_hdr->flags))
goto errout;
@@ -240,7 +241,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
goto errout;
if (em->ops->change) {
- err = em->ops->change(tp, data, data_len, em);
+ err = em->ops->change(net, data, data_len, em);
if (err < 0)
goto errout;
} else if (data_len > 0) {
@@ -271,6 +272,7 @@ static int tcf_em_validate(struct tcf_proto *tp,
em->matchid = em_hdr->matchid;
em->flags = em_hdr->flags;
em->datalen = data_len;
+ em->net = net;
err = 0;
errout:
@@ -378,7 +380,7 @@ errout:
return err;
errout_abort:
- tcf_em_tree_destroy(tp, tree);
+ tcf_em_tree_destroy(tree);
return err;
}
EXPORT_SYMBOL(tcf_em_tree_validate);
@@ -393,7 +395,7 @@ EXPORT_SYMBOL(tcf_em_tree_validate);
* tcf_em_tree_validate()/tcf_em_tree_change(). You must ensure that
* the ematch tree is not in use before calling this function.
*/
-void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree)
+void tcf_em_tree_destroy(struct tcf_ematch_tree *tree)
{
int i;
@@ -405,7 +407,7 @@ void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree)
if (em->ops) {
if (em->ops->destroy)
- em->ops->destroy(tp, em);
+ em->ops->destroy(em);
else if (!tcf_em_is_simple(em))
kfree((void *) em->data);
module_put(em->ops->owner);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 58bed7599db7..2cf61b3e633c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -578,31 +578,34 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
timer);
+ rcu_read_lock();
qdisc_unthrottled(wd->qdisc);
__netif_schedule(qdisc_root(wd->qdisc));
+ rcu_read_unlock();
return HRTIMER_NORESTART;
}
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
- hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
wd->timer.function = qdisc_watchdog;
wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
{
if (test_bit(__QDISC_STATE_DEACTIVATED,
&qdisc_root_sleeping(wd->qdisc)->state))
return;
- qdisc_throttled(wd->qdisc);
+ if (throttle)
+ qdisc_throttled(wd->qdisc);
hrtimer_start(&wd->timer,
ns_to_ktime(expires),
- HRTIMER_MODE_ABS);
+ HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
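
HRTIMER_MODE_ABS_PINNED keeps the watchdog on the CPU that armed it, so the expiry, and the __netif_schedule() it triggers, stays local instead of bouncing the qdisc across CPUs. Arming follows the usual absolute-time pattern; a minimal sketch:

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/types.h>

    static enum hrtimer_restart wd_expire(struct hrtimer *t)
    {
    	/* kick the queue here */
    	return HRTIMER_NORESTART;
    }

    static void wd_setup(struct hrtimer *t)
    {
    	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
    	t->function = wd_expire;
    }

    static void wd_arm(struct hrtimer *t, u64 expires_ns)
    {
    	hrtimer_start(t, ns_to_ktime(expires_ns), HRTIMER_MODE_ABS_PINNED);
    }
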
@@ -763,7 +766,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
cops->put(sch, cl);
}
sch->q.qlen -= n;
- sch->qstats.drops += drops;
+ __qdisc_qstats_drop(sch, drops);
}
}
EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
@@ -942,6 +945,17 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
sch->handle = handle;
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
+ if (qdisc_is_percpu_stats(sch)) {
+ sch->cpu_bstats =
+ alloc_percpu(struct gnet_stats_basic_cpu);
+ if (!sch->cpu_bstats)
+ goto err_out4;
+
+ sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
+ if (!sch->cpu_qstats)
+ goto err_out4;
+ }
+
if (tca[TCA_STAB]) {
stab = qdisc_get_stab(tca[TCA_STAB]);
if (IS_ERR(stab)) {
@@ -964,8 +978,11 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
else
root_lock = qdisc_lock(sch);
- err = gen_new_estimator(&sch->bstats, &sch->rate_est,
- root_lock, tca[TCA_RATE]);
+ err = gen_new_estimator(&sch->bstats,
+ sch->cpu_bstats,
+ &sch->rate_est,
+ root_lock,
+ tca[TCA_RATE]);
if (err)
goto err_out4;
}
@@ -984,6 +1001,8 @@ err_out:
return NULL;
err_out4:
+ free_percpu(sch->cpu_bstats);
+ free_percpu(sch->cpu_qstats);
/*
* Any broken qdiscs that would require an ops->reset() here?
* The qdisc was never in action so it shouldn't be necessary.
@@ -1022,9 +1041,11 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
because change can't be undone. */
if (sch->flags & TCQ_F_MQROOT)
goto out;
- gen_replace_estimator(&sch->bstats, &sch->rate_est,
- qdisc_root_sleeping_lock(sch),
- tca[TCA_RATE]);
+ gen_replace_estimator(&sch->bstats,
+ sch->cpu_bstats,
+ &sch->rate_est,
+ qdisc_root_sleeping_lock(sch),
+ tca[TCA_RATE]);
}
out:
return 0;
@@ -1299,11 +1320,14 @@ graft:
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
u32 portid, u32 seq, u16 flags, int event)
{
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
+ struct gnet_stats_queue __percpu *cpu_qstats = NULL;
struct tcmsg *tcm;
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
struct gnet_dump d;
struct qdisc_size_table *stab;
+ __u32 qlen;
cond_resched();
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
@@ -1321,7 +1345,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
goto nla_put_failure;
if (q->ops->dump && q->ops->dump(q, skb) < 0)
goto nla_put_failure;
- q->qstats.qlen = q->q.qlen;
+ qlen = q->q.qlen;
stab = rtnl_dereference(q->stab);
if (stab && qdisc_dump_stab(skb, stab) < 0)
@@ -1334,9 +1358,14 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
goto nla_put_failure;
- if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
+ if (qdisc_is_percpu_stats(q)) {
+ cpu_bstats = q->cpu_bstats;
+ cpu_qstats = q->cpu_qstats;
+ }
+
+ if (gnet_stats_copy_basic(&d, cpu_bstats, &q->bstats) < 0 ||
gnet_stats_copy_rate_est(&d, &q->bstats, &q->rate_est) < 0 ||
- gnet_stats_copy_queue(&d, &q->qstats) < 0)
+ gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
goto nla_put_failure;
if (gnet_stats_finish_copy(&d) < 0)
@@ -1781,7 +1810,7 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
__be16 protocol = skb->protocol;
int err;
- for (; tp; tp = tp->next) {
+ for (; tp; tp = rcu_dereference_bh(tp->next)) {
if (tp->protocol != protocol &&
tp->protocol != htons(ETH_P_ALL))
continue;
@@ -1833,15 +1862,15 @@ void tcf_destroy(struct tcf_proto *tp)
{
tp->ops->destroy(tp);
module_put(tp->ops->owner);
- kfree(tp);
+ kfree_rcu(tp, rcu);
}
-void tcf_destroy_chain(struct tcf_proto **fl)
+void tcf_destroy_chain(struct tcf_proto __rcu **fl)
{
struct tcf_proto *tp;
- while ((tp = *fl) != NULL) {
- *fl = tp->next;
+ while ((tp = rtnl_dereference(*fl)) != NULL) {
+ RCU_INIT_POINTER(*fl, tp->next);
tcf_destroy(tp);
}
}
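
tcf_destroy_chain() pairs with the rcu_dereference_bh() walk added to tc_classify_compat() above: the datapath traverses tp->next under rcu_read_lock_bh(), while teardown, under RTNL, pops each tcf_proto off the chain and lets kfree_rcu() reclaim it once those walkers drain. The reader side reduces to a sketch like this:

    #include <net/sch_generic.h>	/* struct tcf_proto, struct tcf_result */

    /* Datapath sketch: walk an RCU classifier chain in softirq context. */
    static int classify_walk(struct sk_buff *skb, struct tcf_proto *tp,
    			 struct tcf_result *res)
    {
    	int err;

    	for (; tp; tp = rcu_dereference_bh(tp->next)) {
    		err = tp->classify(skb, tp, res);
    		if (err >= 0)
    			return err;
    	}
    	return -1;
    }
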
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 8449b337f9e3..e3e2cc5fd068 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -41,7 +41,7 @@
struct atm_flow_data {
struct Qdisc *q; /* FIFO, TBF, etc. */
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */
void (*old_pop)(struct atm_vcc *vcc,
struct sk_buff *skb); /* chaining */
@@ -273,7 +273,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
error = -ENOBUFS;
goto err_out;
}
- flow->filter_list = NULL;
+ RCU_INIT_POINTER(flow->filter_list, NULL);
flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
if (!flow->q)
flow->q = &noop_qdisc;
@@ -311,7 +311,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
if (list_empty(&flow->list))
return -EINVAL;
- if (flow->filter_list || flow == &p->link)
+ if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
return -EBUSY;
/*
* Reference count must be 2: one for "keepalive" (set at class
@@ -345,7 +345,8 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
-static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch,
+ unsigned long cl)
{
struct atm_qdisc_data *p = qdisc_priv(sch);
struct atm_flow_data *flow = (struct atm_flow_data *)cl;
@@ -369,11 +370,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
flow = NULL;
if (TC_H_MAJ(skb->priority) != sch->handle ||
!(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) {
+ struct tcf_proto *fl;
+
list_for_each_entry(flow, &p->flows, list) {
- if (flow->filter_list) {
- result = tc_classify_compat(skb,
- flow->filter_list,
- &res);
+ fl = rcu_dereference_bh(flow->filter_list);
+ if (fl) {
+ result = tc_classify_compat(skb, fl, &res);
if (result < 0)
continue;
flow = (struct atm_flow_data *)res.class;
@@ -415,7 +417,7 @@ done:
if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
if (net_xmit_drop_count(ret)) {
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
if (flow)
flow->qstats.drops++;
}
@@ -544,7 +546,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
if (!p->link.q)
p->link.q = &noop_qdisc;
pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
- p->link.filter_list = NULL;
+ RCU_INIT_POINTER(p->link.filter_list, NULL);
p->link.vcc = NULL;
p->link.sock = NULL;
p->link.classid = sch->handle;
@@ -635,10 +637,8 @@ atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
{
struct atm_flow_data *flow = (struct atm_flow_data *)arg;
- flow->qstats.qlen = flow->q->q.qlen;
-
- if (gnet_stats_copy_basic(d, &flow->bstats) < 0 ||
- gnet_stats_copy_queue(d, &flow->qstats) < 0)
+ if (gnet_stats_copy_basic(d, NULL, &flow->bstats) < 0 ||
+ gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
return -1;
return 0;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 762a04bb8f6d..beeb75f80fdb 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -133,7 +133,7 @@ struct cbq_class {
struct gnet_stats_rate_est64 rate_est;
struct tc_cbq_xstats xstats;
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
int refcnt;
int filters;
@@ -221,6 +221,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
struct cbq_class **defmap;
struct cbq_class *cl = NULL;
u32 prio = skb->priority;
+ struct tcf_proto *fl;
struct tcf_result res;
/*
@@ -235,11 +236,12 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
int result = 0;
defmap = head->defaults;
+ fl = rcu_dereference_bh(head->filter_list);
/*
* Step 2+n. Apply classifier.
*/
- if (!head->filter_list ||
- (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
+ result = tc_classify_compat(skb, fl, &res);
+ if (!fl || result < 0)
goto fallback;
cl = (void *)res.class;
@@ -375,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
#endif
if (cl == NULL) {
if (ret & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return ret;
}
@@ -393,7 +395,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
if (net_xmit_drop_count(ret)) {
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
cbq_mark_toplevel(q, cl);
cl->qstats.drops++;
}
@@ -615,7 +617,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
time = ktime_set(0, 0);
time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
- hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
+ hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
}
qdisc_unthrottled(sch);
@@ -648,11 +650,11 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
return 0;
}
if (net_xmit_drop_count(ret))
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
return 0;
}
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
return -1;
}
#endif
@@ -993,7 +995,7 @@ cbq_dequeue(struct Qdisc *sch)
*/
if (sch->q.qlen) {
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (q->wd_expires)
qdisc_watchdog_schedule(&q->watchdog,
now + q->wd_expires);
@@ -1384,7 +1386,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
q->link.minidle = -0x7FFFFFFF;
qdisc_watchdog_init(&q->watchdog, sch);
- hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
q->delay_timer.function = cbq_undelay;
q->toplevel = TC_CBQ_MAXLEVEL;
q->now = psched_get_time();
@@ -1592,16 +1594,15 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
- cl->qstats.qlen = cl->q->q.qlen;
cl->xstats.avgidle = cl->avgidle;
cl->xstats.undertime = 0;
if (cl->undertime != PSCHED_PASTPERFECT)
cl->xstats.undertime = cl->undertime - q->now;
- if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, &cl->qstats) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1757,7 +1758,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
}
if (tca[TCA_RATE]) {
- err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ err = gen_replace_estimator(&cl->bstats, NULL,
+ &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err) {
@@ -1850,7 +1852,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
goto failure;
if (tca[TCA_RATE]) {
- err = gen_new_estimator(&cl->bstats, &cl->rate_est,
+ err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err) {
@@ -1954,7 +1956,8 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
return 0;
}
-static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
+static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
+ unsigned long arg)
{
struct cbq_sched_data *q = qdisc_priv(sch);
struct cbq_class *cl = (struct cbq_class *)arg;
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index fb666d1e4de3..c009eb9045ce 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -57,7 +57,7 @@ struct choke_sched_data {
/* Variables */
struct red_vars vars;
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
struct {
u32 prob_drop; /* Early probability drops */
u32 prob_mark; /* Early probability marks */
@@ -127,7 +127,7 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
if (idx == q->tail)
choke_zap_tail_holes(q);
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
qdisc_tree_decrease_qlen(sch, 1);
--sch->q.qlen;
@@ -203,9 +203,11 @@ static bool choke_classify(struct sk_buff *skb,
{
struct choke_sched_data *q = qdisc_priv(sch);
struct tcf_result res;
+ struct tcf_proto *fl;
int result;
- result = tc_classify(skb, q->filter_list, &res);
+ fl = rcu_dereference_bh(q->filter_list);
+ result = tc_classify(skb, fl, &res);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -259,7 +261,7 @@ static bool choke_match_random(const struct choke_sched_data *q,
return false;
oskb = choke_peek_random(q, pidx);
- if (q->filter_list)
+ if (rcu_access_pointer(q->filter_list))
return choke_get_classid(nskb) == choke_get_classid(oskb);
return choke_match_flow(oskb, nskb);
@@ -267,11 +269,11 @@ static bool choke_match_random(const struct choke_sched_data *q,
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
+ int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
struct choke_sched_data *q = qdisc_priv(sch);
const struct red_parms *p = &q->parms;
- int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- if (q->filter_list) {
+ if (rcu_access_pointer(q->filter_list)) {
/* If using external classifiers, get result and record it. */
if (!choke_classify(skb, sch, &ret))
goto other_drop; /* Packet was eaten by filter */
@@ -300,7 +302,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (q->vars.qavg > p->qth_max) {
q->vars.qcount = -1;
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (use_harddrop(q) || !use_ecn(q) ||
!INET_ECN_set_ce(skb)) {
q->stats.forced_drop++;
@@ -313,7 +315,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->vars.qcount = 0;
q->vars.qR = red_random(p);
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++;
goto congestion_drop;
@@ -330,7 +332,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->tab[q->tail] = skb;
q->tail = (q->tail + 1) & q->tab_mask;
++sch->q.qlen;
- sch->qstats.backlog += qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
}
@@ -343,7 +345,7 @@ congestion_drop:
other_drop:
if (ret & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return ret;
}
@@ -363,7 +365,7 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
q->tab[q->head] = NULL;
choke_zap_head_holes(q);
--sch->q.qlen;
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
return skb;
@@ -458,7 +460,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
ntab[tail++] = skb;
continue;
}
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
--sch->q.qlen;
qdisc_drop(skb, sch);
}
@@ -564,7 +566,8 @@ static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
return 0;
}
-static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **choke_find_tcf(struct Qdisc *sch,
+ unsigned long cl)
{
struct choke_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 2f9ab17db85a..de28f8e968e8 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -149,7 +149,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q);
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
}
qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 7bbbfe112192..338706092c27 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -35,7 +35,7 @@ struct drr_class {
struct drr_sched {
struct list_head active;
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
struct Qdisc_class_hash clhash;
};
@@ -88,7 +88,8 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl != NULL) {
if (tca[TCA_RATE]) {
- err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ err = gen_replace_estimator(&cl->bstats, NULL,
+ &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err)
@@ -116,7 +117,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->qdisc = &noop_qdisc;
if (tca[TCA_RATE]) {
- err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err) {
@@ -184,7 +185,8 @@ static void drr_put_class(struct Qdisc *sch, unsigned long arg)
drr_destroy_class(sch, cl);
}
-static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch,
+ unsigned long cl)
{
struct drr_sched *q = qdisc_priv(sch);
@@ -273,17 +275,16 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct gnet_dump *d)
{
struct drr_class *cl = (struct drr_class *)arg;
+ __u32 qlen = cl->qdisc->q.qlen;
struct tc_drr_stats xstats;
memset(&xstats, 0, sizeof(xstats));
- if (cl->qdisc->q.qlen) {
+ if (qlen)
xstats.deficit = cl->deficit;
- cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
- }
- if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -319,6 +320,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
struct tcf_result res;
+ struct tcf_proto *fl;
int result;
if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
@@ -328,7 +330,8 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
}
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- result = tc_classify(skb, q->filter_list, &res);
+ fl = rcu_dereference_bh(q->filter_list);
+ result = tc_classify(skb, fl, &res);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -356,7 +359,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl = drr_classify(skb, sch, &err);
if (cl == NULL) {
if (err & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return err;
}
@@ -365,7 +368,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
cl->qstats.drops++;
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
}
return err;
}
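The estimator and stats-dump helpers grow an argument in this series. Judging from the NULL passed at every call site and the explicit qlen handed to gnet_stats_copy_queue(), the new prototypes take an optional per-CPU statistics block plus a caller-supplied queue length; roughly (signatures inferred from the call sites, treat as a sketch):

int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			  struct gnet_stats_rate_est64 *rate_est,
			  spinlock_t *stats_lock, struct nlattr *opt);

int gnet_stats_copy_basic(struct gnet_dump *d,
			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			  struct gnet_stats_basic_packed *bstats);

int gnet_stats_copy_queue(struct gnet_dump *d,
			  struct gnet_stats_queue __percpu *cpu_qstats,
			  struct gnet_stats_queue *qstats, __u32 qlen);

Passing qlen explicitly is also why the qstats.qlen assignments (cl->qdisc->qstats.qlen = cl->qdisc->q.qlen and friends) vanish from every dump_class_stats() in this diff: the field no longer needs to be kept coherent inside the shared counters.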
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 49d6ef338b55..227114f27f94 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -37,7 +37,7 @@
struct dsmark_qdisc_data {
struct Qdisc *q;
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
u8 *mask; /* "owns" the array */
u8 *value;
u16 indices;
@@ -186,8 +186,8 @@ ignore:
}
}
-static inline struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch,
- unsigned long cl)
+static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch,
+ unsigned long cl)
{
struct dsmark_qdisc_data *p = qdisc_priv(sch);
return &p->filter_list;
@@ -229,7 +229,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
skb->tc_index = TC_H_MIN(skb->priority);
else {
struct tcf_result res;
- int result = tc_classify(skb, p->filter_list, &res);
+ struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
+ int result = tc_classify(skb, fl, &res);
pr_debug("result %d class 0x%04x\n", result, res.classid);
@@ -257,7 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
err = qdisc_enqueue(skb, p->q);
if (err != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(err))
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
return err;
}
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index e15a9eb29087..2e2398cfc694 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -42,7 +42,7 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* queue full, remove one skb to fulfill the limit */
__qdisc_queue_drop_head(sch, &sch->q);
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
qdisc_enqueue_tail(skb, sch);
return NET_XMIT_CN;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index ba32c2b005d0..cbd7e1fd23b4 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -290,7 +290,7 @@ static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
flow->head = skb->next;
skb->next = NULL;
flow->qlen--;
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
}
return skb;
@@ -371,13 +371,12 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
f->qlen++;
if (skb_is_retransmit(skb))
q->stat_tcp_retrans++;
- sch->qstats.backlog += qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_inc(sch, skb);
if (fq_flow_is_detached(f)) {
fq_flow_add_tail(&q->new_flows, f);
if (time_after(jiffies, f->age + q->flow_refill_delay))
f->credit = max_t(u32, f->credit, q->quantum);
q->inactive_flows--;
- qdisc_unthrottled(sch);
}
/* Note: this overwrites f->age */
@@ -385,7 +384,6 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (unlikely(f == &q->internal)) {
q->stat_internal_packets++;
- qdisc_unthrottled(sch);
}
sch->q.qlen++;
@@ -416,7 +414,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
struct fq_sched_data *q = qdisc_priv(sch);
- u64 now = ktime_to_ns(ktime_get());
+ u64 now = ktime_get_ns();
struct fq_flow_head *head;
struct sk_buff *skb;
struct fq_flow *f;
@@ -433,7 +431,8 @@ begin:
if (!head->first) {
if (q->time_next_delayed_flow != ~0ULL)
qdisc_watchdog_schedule_ns(&q->watchdog,
- q->time_next_delayed_flow);
+ q->time_next_delayed_flow,
+ false);
return NULL;
}
}
@@ -495,7 +494,6 @@ begin:
}
out:
qdisc_bstats_update(sch, skb);
- qdisc_unthrottled(sch);
return skb;
}
@@ -787,7 +785,7 @@ nla_put_failure:
static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct fq_sched_data *q = qdisc_priv(sch);
- u64 now = ktime_to_ns(ktime_get());
+ u64 now = ktime_get_ns();
struct tc_fq_qd_stats st = {
.gc_flows = q->stat_gc_flows,
.highprio_packets = q->stat_internal_packets,
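Two smaller cleanups show up in sch_fq. First, ktime_to_ns(ktime_get()) collapses into the ktime_get_ns() helper, which is just:

static inline u64 ktime_get_ns(void)
{
	return ktime_to_ns(ktime_get());
}

Second, qdisc_watchdog_schedule_ns() gains a bool telling it whether to mark the qdisc throttled; fq passes false (tbf, further down, passes true) and can therefore drop all of its qdisc_unthrottled() calls, which avoids an atomic bit operation per packet in its fast path.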
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 063b726bf1f8..b9ca32ebc1de 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -52,7 +52,7 @@ struct fq_codel_flow {
}; /* please try to keep this structure <= 64 bytes */
struct fq_codel_sched_data {
- struct tcf_proto *filter_list; /* optional external classifier */
+ struct tcf_proto __rcu *filter_list; /* optional external classifier */
struct fq_codel_flow *flows; /* Flows table [flows_cnt] */
u32 *backlogs; /* backlog table [flows_cnt] */
u32 flows_cnt; /* number of flows */
@@ -77,13 +77,15 @@ static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
hash = jhash_3words((__force u32)keys.dst,
(__force u32)keys.src ^ keys.ip_proto,
(__force u32)keys.ports, q->perturbation);
- return ((u64)hash * q->flows_cnt) >> 32;
+
+ return reciprocal_scale(hash, q->flows_cnt);
}
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
int *qerr)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct tcf_proto *filter;
struct tcf_result res;
int result;
@@ -92,11 +94,12 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
TC_H_MIN(skb->priority) <= q->flows_cnt)
return TC_H_MIN(skb->priority);
- if (!q->filter_list)
+ filter = rcu_dereference(q->filter_list);
+ if (!filter)
return fq_codel_hash(q, skb) + 1;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- result = tc_classify(skb, q->filter_list, &res);
+ result = tc_classify(skb, filter, &res);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -161,8 +164,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
q->backlogs[idx] -= len;
kfree_skb(skb);
sch->q.qlen--;
- sch->qstats.drops++;
- sch->qstats.backlog -= len;
+ qdisc_qstats_drop(sch);
+ qdisc_qstats_backlog_dec(sch, skb);
flow->dropped++;
return idx;
}
@@ -177,7 +180,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
idx = fq_codel_classify(skb, sch, &ret);
if (idx == 0) {
if (ret & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return ret;
}
@@ -187,7 +190,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
flow = &q->flows[idx];
flow_queue_add(flow, skb);
q->backlogs[idx] += qdisc_pkt_len(skb);
- sch->qstats.backlog += qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_inc(sch, skb);
if (list_empty(&flow->flowchain)) {
list_add_tail(&flow->flowchain, &q->new_flows);
@@ -495,7 +498,8 @@ static void fq_codel_put(struct Qdisc *q, unsigned long cl)
{
}
-static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
+ unsigned long cl)
{
struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -546,7 +550,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
qs.backlog = q->backlogs[idx];
qs.drops = flow->dropped;
}
- if (gnet_stats_copy_queue(d, &qs) < 0)
+ if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)
return -1;
if (idx < q->flows_cnt)
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
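The fq_codel hash-to-bucket mapping is unchanged in behaviour: reciprocal_scale() simply packages the multiply-and-shift that was open-coded here, mapping a 32-bit hash uniformly onto [0, ep_ro) without a division. Its body is equivalent to the removed expression:

static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
{
	/* (val / 2^32) * ep_ro, as a 64-bit multiply plus shift */
	return (u32)(((u64) val * ep_ro) >> 32);
}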
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fc04fe93c2da..38d58e6cef07 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -47,7 +47,6 @@ EXPORT_SYMBOL(default_qdisc_ops);
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
- skb_dst_force(skb);
q->gso_skb = skb;
q->qstats.requeues++;
q->q.qlen++; /* it's still part of the queue */
@@ -56,24 +55,52 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
return 0;
}
-static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
+static void try_bulk_dequeue_skb(struct Qdisc *q,
+ struct sk_buff *skb,
+ const struct netdev_queue *txq)
+{
+ int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
+
+ while (bytelimit > 0) {
+ struct sk_buff *nskb = q->dequeue(q);
+
+ if (!nskb)
+ break;
+
+ bytelimit -= nskb->len; /* covers GSO len */
+ skb->next = nskb;
+ skb = nskb;
+ }
+ skb->next = NULL;
+}
+
+/* Note that dequeue_skb can possibly return a SKB list (via skb->next).
+ * A requeued skb (via q->gso_skb) can also be a SKB list.
+ */
+static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate)
{
struct sk_buff *skb = q->gso_skb;
const struct netdev_queue *txq = q->dev_queue;
+ *validate = true;
if (unlikely(skb)) {
/* check the reason of requeuing without tx lock first */
- txq = netdev_get_tx_queue(txq->dev, skb_get_queue_mapping(skb));
+ txq = skb_get_tx_queue(txq->dev, skb);
if (!netif_xmit_frozen_or_stopped(txq)) {
q->gso_skb = NULL;
q->q.qlen--;
} else
skb = NULL;
+ /* skb in gso_skb were already validated */
+ *validate = false;
} else {
- if (!(q->flags & TCQ_F_ONETXQUEUE) || !netif_xmit_frozen_or_stopped(txq))
+ if (!(q->flags & TCQ_F_ONETXQUEUE) ||
+ !netif_xmit_frozen_or_stopped(txq)) {
skb = q->dequeue(q);
+ if (skb && qdisc_may_bulk(q))
+ try_bulk_dequeue_skb(q, skb, txq);
+ }
}
-
return skb;
}
@@ -90,7 +117,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
* detect it by checking xmit owner and drop the packet when
* deadloop is detected. Return OK to try the next skb.
*/
- kfree_skb(skb);
+ kfree_skb_list(skb);
net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
dev_queue->dev->name);
ret = qdisc_qlen(q);
@@ -107,9 +134,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
}
/*
- * Transmit one skb, and handle the return status as required. Holding the
- * __QDISC___STATE_RUNNING bit guarantees that only one CPU can execute this
- * function.
+ * Transmit possibly several skbs, and handle the return status as
+ * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
+ * only one CPU can execute this function.
*
* Returns to the caller:
* 0 - queue is empty or throttled.
@@ -117,19 +144,24 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
*/
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev, struct netdev_queue *txq,
- spinlock_t *root_lock)
+ spinlock_t *root_lock, bool validate)
{
int ret = NETDEV_TX_BUSY;
/* And release qdisc */
spin_unlock(root_lock);
- HARD_TX_LOCK(dev, txq, smp_processor_id());
- if (!netif_xmit_frozen_or_stopped(txq))
- ret = dev_hard_start_xmit(skb, dev, txq);
+ /* Note that we validate skb (GSO, checksum, ...) outside of locks */
+ if (validate)
+ skb = validate_xmit_skb_list(skb, dev);
- HARD_TX_UNLOCK(dev, txq);
+ if (skb) {
+ HARD_TX_LOCK(dev, txq, smp_processor_id());
+ if (!netif_xmit_frozen_or_stopped(txq))
+ skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+ HARD_TX_UNLOCK(dev, txq);
+ }
spin_lock(root_lock);
if (dev_xmit_complete(ret)) {
@@ -178,17 +210,18 @@ static inline int qdisc_restart(struct Qdisc *q)
struct net_device *dev;
spinlock_t *root_lock;
struct sk_buff *skb;
+ bool validate;
/* Dequeue packet */
- skb = dequeue_skb(q);
+ skb = dequeue_skb(q, &validate);
if (unlikely(!skb))
return 0;
- WARN_ON_ONCE(skb_dst_is_noref(skb));
+
root_lock = qdisc_lock(q);
dev = qdisc_dev(q);
- txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ txq = skb_get_tx_queue(dev, skb);
- return sch_direct_xmit(skb, q, dev, txq, root_lock);
+ return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
}
void __qdisc_run(struct Qdisc *q)
@@ -518,7 +551,7 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
- skb_queue_head_init(band2list(priv, prio));
+ __skb_queue_head_init(band2list(priv, prio));
/* Can by-pass the queue discipline */
qdisc->flags |= TCQ_F_CAN_BYPASS;
@@ -616,7 +649,7 @@ void qdisc_reset(struct Qdisc *qdisc)
ops->reset(qdisc);
if (qdisc->gso_skb) {
- kfree_skb(qdisc->gso_skb);
+ kfree_skb_list(qdisc->gso_skb);
qdisc->gso_skb = NULL;
qdisc->q.qlen = 0;
}
@@ -627,6 +660,9 @@ static void qdisc_rcu_free(struct rcu_head *head)
{
struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
+ if (qdisc_is_percpu_stats(qdisc))
+ free_percpu(qdisc->cpu_bstats);
+
kfree((char *) qdisc - qdisc->padded);
}
@@ -652,7 +688,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
module_put(ops->owner);
dev_put(qdisc_dev(qdisc));
- kfree_skb(qdisc->gso_skb);
+ kfree_skb_list(qdisc->gso_skb);
/*
* gen_estimator est_timer() might access qdisc->q.lock,
* wait a RCU grace period before freeing qdisc.
@@ -778,7 +814,7 @@ static void dev_deactivate_queue(struct net_device *dev,
struct Qdisc *qdisc_default = _qdisc_default;
struct Qdisc *qdisc;
- qdisc = dev_queue->qdisc;
+ qdisc = rtnl_dereference(dev_queue->qdisc);
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
@@ -871,7 +907,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
{
struct Qdisc *qdisc = _qdisc;
- dev_queue->qdisc = qdisc;
+ rcu_assign_pointer(dev_queue->qdisc, qdisc);
dev_queue->qdisc_sleeping = qdisc;
}
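The sch_generic.c hunks are the heart of the series' bulk-dequeue work. dequeue_skb() may now hand back a whole chain of packets linked through skb->next, limited by the TX queue's BQL budget (qdisc_avail_bulklimit()), and skb validation (GSO segmentation, checksum) moves out from under both the qdisc root lock and the HARD_TX lock via validate_xmit_skb_list(). A condensed sketch of the resulting transmit flow, with requeue and error handling omitted; qdisc_may_bulk() is assumed to just test for a single-TX-queue qdisc:

	bool validate;
	int ret = NETDEV_TX_BUSY;
	struct sk_buff *skb = dequeue_skb(q, &validate);	/* maybe a list */

	if (skb) {
		spin_unlock(root_lock);		/* qdisc lock dropped */
		if (validate)			/* requeues are pre-validated */
			skb = validate_xmit_skb_list(skb, dev);
		if (skb) {
			HARD_TX_LOCK(dev, txq, smp_processor_id());
			if (!netif_xmit_frozen_or_stopped(txq))
				skb = dev_hard_start_xmit(skb, dev, txq, &ret);
			HARD_TX_UNLOCK(dev, txq);
		}
		spin_lock(root_lock);
	}

Because a queued skb can now be a chain, every error path that used to kfree_skb() a possibly-requeued buffer switches to kfree_skb_list(), as seen in handle_dev_cpu_collision(), qdisc_reset() and qdisc_destroy().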
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 12cbc09157fc..a4ca4517cdc8 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -209,7 +209,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break;
case RED_PROB_MARK:
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++;
goto congestion_drop;
@@ -219,7 +219,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break;
case RED_HARD_MARK:
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
!INET_ECN_set_ce(skb)) {
q->stats.forced_drop++;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index ec8aeaac1dd7..e6c7416d0332 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -116,7 +116,7 @@ struct hfsc_class {
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est64 rate_est;
unsigned int level; /* class level in hierarchy */
- struct tcf_proto *filter_list; /* filter list */
+ struct tcf_proto __rcu *filter_list; /* filter list */
unsigned int filter_cnt; /* filter count */
struct hfsc_sched *sched; /* scheduler data */
@@ -1014,9 +1014,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cur_time = psched_get_time();
if (tca[TCA_RATE]) {
- err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
- qdisc_root_sleeping_lock(sch),
- tca[TCA_RATE]);
+ spinlock_t *lock = qdisc_root_sleeping_lock(sch);
+
+ err = gen_replace_estimator(&cl->bstats, NULL,
+ &cl->rate_est,
+ lock,
+ tca[TCA_RATE]);
if (err)
return err;
}
@@ -1063,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return -ENOBUFS;
if (tca[TCA_RATE]) {
- err = gen_new_estimator(&cl->bstats, &cl->rate_est,
+ err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err) {
@@ -1161,7 +1164,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
head = &q->root;
- tcf = q->root.filter_list;
+ tcf = rcu_dereference_bh(q->root.filter_list);
while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -1185,7 +1188,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
return cl; /* hit leaf class */
/* apply inner filter chain */
- tcf = cl->filter_list;
+ tcf = rcu_dereference_bh(cl->filter_list);
head = cl;
}
@@ -1285,7 +1288,7 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
cl->filter_cnt--;
}
-static struct tcf_proto **
+static struct tcf_proto __rcu **
hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
{
struct hfsc_sched *q = qdisc_priv(sch);
@@ -1367,16 +1370,15 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct hfsc_class *cl = (struct hfsc_class *)arg;
struct tc_hfsc_stats xstats;
- cl->qstats.qlen = cl->qdisc->q.qlen;
cl->qstats.backlog = cl->qdisc->qstats.backlog;
xstats.level = cl->level;
xstats.period = cl->cl_vtperiod;
xstats.work = cl->cl_total;
xstats.rtwork = cl->cl_cumul;
- if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, &cl->qstats) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -1588,7 +1590,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl = hfsc_classify(skb, sch, &err);
if (cl == NULL) {
if (err & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return err;
}
@@ -1597,7 +1599,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
cl->qstats.drops++;
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
}
return err;
}
@@ -1640,7 +1642,7 @@ hfsc_dequeue(struct Qdisc *sch)
*/
cl = vttree_get_minvt(&q->root, cur_time);
if (cl == NULL) {
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
hfsc_schedule_watchdog(sch);
return NULL;
}
@@ -1695,7 +1697,7 @@ hfsc_drop(struct Qdisc *sch)
list_move_tail(&cl->dlist, &q->droplist);
}
cl->qstats.drops++;
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
sch->q.qlen--;
return len;
}
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index d85b6812a7d4..15d3aabfe250 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -376,8 +376,8 @@ static unsigned int hhf_drop(struct Qdisc *sch)
struct sk_buff *skb = dequeue_head(bucket);
sch->q.qlen--;
- sch->qstats.drops++;
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_drop(sch);
+ qdisc_qstats_backlog_dec(sch, skb);
kfree_skb(skb);
}
@@ -395,7 +395,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
bucket = &q->buckets[idx];
bucket_add(bucket, skb);
- sch->qstats.backlog += qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_inc(sch, skb);
if (list_empty(&bucket->bucketchain)) {
unsigned int weight;
@@ -457,7 +457,7 @@ begin:
if (bucket->head) {
skb = dequeue_head(bucket);
sch->q.qlen--;
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
}
if (!skb) {
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 9f949abcacef..f1acb0f60dc3 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -103,7 +103,7 @@ struct htb_class {
u32 prio; /* these two are used only by leaves... */
int quantum; /* but stored for parent-to-leaf return */
- struct tcf_proto *filter_list; /* class attached filters */
+ struct tcf_proto __rcu *filter_list; /* class attached filters */
int filter_cnt;
int refcnt; /* usage count of this class */
@@ -153,7 +153,7 @@ struct htb_sched {
int rate2quantum; /* quant = rate / rate2quantum */
/* filters for qdisc itself */
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
#define HTB_WARN_TOOMANYEVENTS 0x1
unsigned int warned; /* only one warning */
@@ -223,9 +223,9 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
if (cl->level == 0)
return cl;
/* Start with inner filter chain if a non-leaf class is selected */
- tcf = cl->filter_list;
+ tcf = rcu_dereference_bh(cl->filter_list);
} else {
- tcf = q->filter_list;
+ tcf = rcu_dereference_bh(q->filter_list);
}
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -251,7 +251,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
return cl; /* we hit leaf; return it */
/* we have got inner class; apply inner filter chain */
- tcf = cl->filter_list;
+ tcf = rcu_dereference_bh(cl->filter_list);
}
/* classification failed; try to use default class */
cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
@@ -586,13 +586,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
#ifdef CONFIG_NET_CLS_ACT
} else if (!cl) {
if (ret & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return ret;
#endif
} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) {
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
cl->qstats.drops++;
}
return ret;
@@ -895,7 +895,7 @@ ok:
if (!sch->q.qlen)
goto fin;
- q->now = ktime_to_ns(ktime_get());
+ q->now = ktime_get_ns();
start_at = jiffies;
next_event = q->now + 5LLU * NSEC_PER_SEC;
@@ -925,14 +925,14 @@ ok:
goto ok;
}
}
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (likely(next_event > q->now)) {
if (!test_bit(__QDISC_STATE_DEACTIVATED,
&qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
ktime_t time = ns_to_ktime(next_event);
qdisc_throttled(q->watchdog.qdisc);
hrtimer_start(&q->watchdog.timer, time,
- HRTIMER_MODE_ABS);
+ HRTIMER_MODE_ABS_PINNED);
}
} else {
schedule_work(&q->work);
@@ -1044,7 +1044,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
qdisc_watchdog_init(&q->watchdog, sch);
INIT_WORK(&q->work, htb_work_func);
- skb_queue_head_init(&q->direct_queue);
+ __skb_queue_head_init(&q->direct_queue);
if (tb[TCA_HTB_DIRECT_QLEN])
q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
@@ -1138,15 +1138,16 @@ static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
struct htb_class *cl = (struct htb_class *)arg;
+ __u32 qlen = 0;
if (!cl->level && cl->un.leaf.q)
- cl->qstats.qlen = cl->un.leaf.q->q.qlen;
+ qlen = cl->un.leaf.q->q.qlen;
cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
- if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, &cl->qstats) < 0)
+ gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1225,7 +1226,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
parent->tokens = parent->buffer;
parent->ctokens = parent->cbuffer;
- parent->t_c = ktime_to_ns(ktime_get());
+ parent->t_c = ktime_get_ns();
parent->cmode = HTB_CAN_SEND;
}
@@ -1402,7 +1403,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
goto failure;
if (htb_rate_est || tca[TCA_RATE]) {
- err = gen_new_estimator(&cl->bstats, &cl->rate_est,
+ err = gen_new_estimator(&cl->bstats, NULL,
+ &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE] ? : &est.nla);
if (err) {
@@ -1455,7 +1457,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
- cl->t_c = ktime_to_ns(ktime_get());
+ cl->t_c = ktime_get_ns();
cl->cmode = HTB_CAN_SEND;
/* attach to the hash list and parent's family */
@@ -1464,8 +1466,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
parent->children++;
} else {
if (tca[TCA_RATE]) {
- err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
- qdisc_root_sleeping_lock(sch),
+ spinlock_t *lock = qdisc_root_sleeping_lock(sch);
+
+ err = gen_replace_estimator(&cl->bstats, NULL,
+ &cl->rate_est,
+ lock,
tca[TCA_RATE]);
if (err)
return err;
@@ -1519,11 +1524,12 @@ failure:
return err;
}
-static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
+static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch,
+ unsigned long arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
- struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
+ struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list;
return fl;
}
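Two incidental HTB details. The direct queue is initialised with __skb_queue_head_init(), the variant that skips spinlock setup; that is safe here because the queue is only ever manipulated under the qdisc root lock. The pair of helpers differ only in that one line:

static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

And the watchdog hrtimer is now started with HRTIMER_MODE_ABS_PINNED, keeping expiry on the CPU that armed the timer rather than letting the timer subsystem migrate it to another CPU.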
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 62871c14e1f9..eb5b8445fef9 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -17,7 +17,7 @@
struct ingress_qdisc_data {
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
};
/* ------------------------- Class/flow operations ------------------------- */
@@ -46,7 +46,8 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}
-static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch,
+ unsigned long cl)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
@@ -59,15 +60,16 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
struct tcf_result res;
+ struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
int result;
- result = tc_classify(skb, p->filter_list, &res);
+ result = tc_classify(skb, fl, &res);
qdisc_bstats_update(sch, skb);
switch (result) {
case TC_ACT_SHOT:
result = TC_ACT_SHOT;
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
break;
case TC_ACT_STOLEN:
case TC_ACT_QUEUED:
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index a8b2864a696b..f3cbaecd283a 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -112,7 +112,6 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
sch->q.qlen += qdisc->q.qlen;
sch->bstats.bytes += qdisc->bstats.bytes;
sch->bstats.packets += qdisc->bstats.packets;
- sch->qstats.qlen += qdisc->qstats.qlen;
sch->qstats.backlog += qdisc->qstats.backlog;
sch->qstats.drops += qdisc->qstats.drops;
sch->qstats.requeues += qdisc->qstats.requeues;
@@ -200,9 +199,8 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
- sch->qstats.qlen = sch->q.qlen;
- if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
- gnet_stats_copy_queue(d, &sch->qstats) < 0)
+ if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
+ gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
return -1;
return 0;
}
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 6749e2f540d0..3811a745452c 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -231,12 +231,11 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
memset(&sch->qstats, 0, sizeof(sch->qstats));
for (i = 0; i < dev->num_tx_queues; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
spin_lock_bh(qdisc_lock(qdisc));
sch->q.qlen += qdisc->q.qlen;
sch->bstats.bytes += qdisc->bstats.bytes;
sch->bstats.packets += qdisc->bstats.packets;
- sch->qstats.qlen += qdisc->qstats.qlen;
sch->qstats.backlog += qdisc->qstats.backlog;
sch->qstats.drops += qdisc->qstats.drops;
sch->qstats.requeues += qdisc->qstats.requeues;
@@ -327,6 +326,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
if (cl <= netdev_get_num_tc(dev)) {
int i;
+ __u32 qlen = 0;
struct Qdisc *qdisc;
struct gnet_stats_queue qstats = {0};
struct gnet_stats_basic_packed bstats = {0};
@@ -340,11 +340,13 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
spin_unlock_bh(d->lock);
for (i = tc.offset; i < tc.offset + tc.count; i++) {
- qdisc = netdev_get_tx_queue(dev, i)->qdisc;
+ struct netdev_queue *q = netdev_get_tx_queue(dev, i);
+
+ qdisc = rtnl_dereference(q->qdisc);
spin_lock_bh(qdisc_lock(qdisc));
+ qlen += qdisc->q.qlen;
bstats.bytes += qdisc->bstats.bytes;
bstats.packets += qdisc->bstats.packets;
- qstats.qlen += qdisc->qstats.qlen;
qstats.backlog += qdisc->qstats.backlog;
qstats.drops += qdisc->qstats.drops;
qstats.requeues += qdisc->qstats.requeues;
@@ -353,16 +355,16 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
}
/* Reclaim root sleeping lock before completing stats */
spin_lock_bh(d->lock);
- if (gnet_stats_copy_basic(d, &bstats) < 0 ||
- gnet_stats_copy_queue(d, &qstats) < 0)
+ if (gnet_stats_copy_basic(d, NULL, &bstats) < 0 ||
+ gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
return -1;
} else {
struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
sch = dev_queue->qdisc_sleeping;
- sch->qstats.qlen = sch->q.qlen;
- if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
- gnet_stats_copy_queue(d, &sch->qstats) < 0)
+ if (gnet_stats_copy_basic(d, NULL, &sch->bstats) < 0 ||
+ gnet_stats_copy_queue(d, NULL,
+ &sch->qstats, sch->q.qlen) < 0)
return -1;
}
return 0;
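With dev_queue->qdisc now __rcu (see the rcu_assign_pointer() in sch_generic.c above), every reader has to name its protection. mq and mqprio run their dump paths under the RTNL lock, hence rtnl_dereference(); a datapath reader would use rcu_dereference_bh() inside rcu_read_lock_bh() instead. Illustrative fragment, not from the patch:

static struct Qdisc *example_txq_qdisc(struct net_device *dev, unsigned int i)
{
	ASSERT_RTNL();	/* control path: RTNL is the writer-side lock */
	return rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
}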
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index afb050a735fa..42dd218871e0 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -31,7 +31,7 @@ struct multiq_sched_data {
u16 bands;
u16 max_bands;
u16 curband;
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
struct Qdisc **queues;
};
@@ -42,10 +42,11 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
struct multiq_sched_data *q = qdisc_priv(sch);
u32 band;
struct tcf_result res;
+ struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
int err;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- err = tc_classify(skb, q->filter_list, &res);
+ err = tc_classify(skb, fl, &res);
#ifdef CONFIG_NET_CLS_ACT
switch (err) {
case TC_ACT_STOLEN:
@@ -74,7 +75,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (qdisc == NULL) {
if (ret & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return ret;
}
@@ -86,7 +87,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS;
}
if (net_xmit_drop_count(ret))
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
return ret;
}
@@ -359,9 +360,8 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct Qdisc *cl_q;
cl_q = q->queues[cl - 1];
- cl_q->qstats.qlen = cl_q->q.qlen;
- if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
- gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
+ if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
+ gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
return -1;
return 0;
@@ -388,7 +388,8 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
}
-static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **multiq_find_tcf(struct Qdisc *sch,
+ unsigned long cl)
{
struct multiq_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 111d70fddaea..b34331967e02 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -429,12 +429,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* Drop packet? */
if (loss_event(q)) {
if (q->ecn && INET_ECN_set_ce(skb))
- sch->qstats.drops++; /* mark packet */
+ qdisc_qstats_drop(sch); /* mark packet */
else
--count;
}
if (count == 0) {
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
@@ -478,7 +478,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
return qdisc_reshape_fail(skb, sch);
- sch->qstats.backlog += qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_inc(sch, skb);
cb = netem_skb_cb(skb);
if (q->gap == 0 || /* not doing reordering */
@@ -549,15 +549,14 @@ static unsigned int netem_drop(struct Qdisc *sch)
sch->q.qlen--;
skb->next = NULL;
skb->prev = NULL;
- len = qdisc_pkt_len(skb);
- sch->qstats.backlog -= len;
+ qdisc_qstats_backlog_dec(sch, skb);
kfree_skb(skb);
}
}
if (!len && q->qdisc && q->qdisc->ops->drop)
len = q->qdisc->ops->drop(q->qdisc);
if (len)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
return len;
}
@@ -575,7 +574,7 @@ tfifo_dequeue:
skb = __skb_dequeue(&sch->q);
if (skb) {
deliver:
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb);
return skb;
@@ -610,7 +609,7 @@ deliver:
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
qdisc_tree_decrease_qlen(sch, 1);
}
}
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index fefeeb73f15f..33d7a98a7a97 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -232,7 +232,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q);
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch);
}
qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 79359b69ad8d..8e5cd34aaa74 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -24,7 +24,7 @@
struct prio_sched_data {
int bands;
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
u8 prio2band[TC_PRIO_MAX+1];
struct Qdisc *queues[TCQ_PRIO_BANDS];
};
@@ -36,11 +36,13 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
struct prio_sched_data *q = qdisc_priv(sch);
u32 band = skb->priority;
struct tcf_result res;
+ struct tcf_proto *fl;
int err;
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
if (TC_H_MAJ(skb->priority) != sch->handle) {
- err = tc_classify(skb, q->filter_list, &res);
+ fl = rcu_dereference_bh(q->filter_list);
+ err = tc_classify(skb, fl, &res);
#ifdef CONFIG_NET_CLS_ACT
switch (err) {
case TC_ACT_STOLEN:
@@ -50,7 +52,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
return NULL;
}
#endif
- if (!q->filter_list || err < 0) {
+ if (!fl || err < 0) {
if (TC_H_MAJ(band))
band = 0;
return q->queues[q->prio2band[band & TC_PRIO_MAX]];
@@ -75,7 +77,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (qdisc == NULL) {
if (ret & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return ret;
}
@@ -87,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS;
}
if (net_xmit_drop_count(ret))
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
return ret;
}
@@ -322,9 +324,8 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct Qdisc *cl_q;
cl_q = q->queues[cl - 1];
- cl_q->qstats.qlen = cl_q->q.qlen;
- if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
- gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
+ if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
+ gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
return -1;
return 0;
@@ -351,7 +352,8 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
}
-static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch,
+ unsigned long cl)
{
struct prio_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 8056fb4e618a..3ec7e88a43ca 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -181,7 +181,7 @@ struct qfq_group {
};
struct qfq_sched {
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
struct Qdisc_class_hash clhash;
u64 oldV, V; /* Precise virtual times. */
@@ -459,7 +459,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl != NULL) { /* modify existing class */
if (tca[TCA_RATE]) {
- err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ err = gen_replace_estimator(&cl->bstats, NULL,
+ &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err)
@@ -484,7 +485,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->qdisc = &noop_qdisc;
if (tca[TCA_RATE]) {
- err = gen_new_estimator(&cl->bstats, &cl->rate_est,
+ err = gen_new_estimator(&cl->bstats, NULL,
+ &cl->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
if (err)
@@ -576,7 +578,8 @@ static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
qfq_destroy_class(sch, cl);
}
-static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **qfq_tcf_chain(struct Qdisc *sch,
+ unsigned long cl)
{
struct qfq_sched *q = qdisc_priv(sch);
@@ -661,14 +664,14 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct tc_qfq_stats xstats;
memset(&xstats, 0, sizeof(xstats));
- cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
xstats.weight = cl->agg->class_weight;
xstats.lmax = cl->agg->lmax;
- if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
+ if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
- gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
+ gnet_stats_copy_queue(d, NULL,
+ &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
@@ -704,6 +707,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl;
struct tcf_result res;
+ struct tcf_proto *fl;
int result;
if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
@@ -714,7 +718,8 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
}
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- result = tc_classify(skb, q->filter_list, &res);
+ fl = rcu_dereference_bh(q->filter_list);
+ result = tc_classify(skb, fl, &res);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -1224,7 +1229,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl = qfq_classify(skb, sch, &err);
if (cl == NULL) {
if (err & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return err;
}
@@ -1244,7 +1249,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
if (net_xmit_drop_count(err)) {
cl->qstats.drops++;
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
}
return err;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 633e32defdcc..6c0534cc7758 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -74,7 +74,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break;
case RED_PROB_MARK:
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++;
goto congestion_drop;
@@ -84,7 +84,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break;
case RED_HARD_MARK:
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (red_use_harddrop(q) || !red_use_ecn(q) ||
!INET_ECN_set_ce(skb)) {
q->stats.forced_drop++;
@@ -100,7 +100,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
}
return ret;
@@ -142,7 +142,7 @@ static unsigned int red_drop(struct Qdisc *sch)
if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
q->stats.other++;
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
sch->q.qlen--;
return len;
}
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 9b0f7093d970..5819dd82630d 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -55,7 +55,7 @@ struct sfb_bins {
struct sfb_sched_data {
struct Qdisc *qdisc;
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
unsigned long rehash_interval;
unsigned long warmup_time; /* double buffering warmup time in jiffies */
u32 max;
@@ -253,13 +253,13 @@ static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
return false;
}
-static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q,
+static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
int *qerr, u32 *salt)
{
struct tcf_result res;
int result;
- result = tc_classify(skb, q->filter_list, &res);
+ result = tc_classify(skb, fl, &res);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -281,6 +281,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct sfb_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
+ struct tcf_proto *fl;
int i;
u32 p_min = ~0;
u32 minqlen = ~0;
@@ -289,7 +290,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct flow_keys keys;
if (unlikely(sch->q.qlen >= q->limit)) {
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
q->stats.queuedrop++;
goto drop;
}
@@ -306,9 +307,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
}
- if (q->filter_list) {
+ fl = rcu_dereference_bh(q->filter_list);
+ if (fl) {
/* If using external classifiers, get result and record it. */
- if (!sfb_classify(skb, q, &ret, &salt))
+ if (!sfb_classify(skb, fl, &ret, &salt))
goto other_drop;
keys.src = salt;
keys.dst = 0;
@@ -346,7 +348,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sfb_skb_cb(skb)->hashes[slot] = 0;
if (unlikely(minqlen >= q->max)) {
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
q->stats.bucketdrop++;
goto drop;
}
@@ -374,7 +376,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
}
if (sfb_rate_limit(skb, q)) {
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
q->stats.penaltydrop++;
goto drop;
}
@@ -409,7 +411,7 @@ enqueue:
increment_qlen(skb, q);
} else if (net_xmit_drop_count(ret)) {
q->stats.childdrop++;
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
}
return ret;
@@ -418,7 +420,7 @@ drop:
return NET_XMIT_CN;
other_drop:
if (ret & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return ret;
}
@@ -660,7 +662,8 @@ static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
-static struct tcf_proto **sfb_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **sfb_find_tcf(struct Qdisc *sch,
+ unsigned long cl)
{
struct sfb_sched_data *q = qdisc_priv(sch);
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 1af2f73906d0..b877140beda5 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -125,7 +125,7 @@ struct sfq_sched_data {
u8 cur_depth; /* depth of longest slot */
u8 flags;
unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
- struct tcf_proto *filter_list;
+ struct tcf_proto __rcu *filter_list;
sfq_index *ht; /* Hash table ('divisor' slots) */
struct sfq_slot *slots; /* Flows table ('maxflows' entries) */
@@ -187,6 +187,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
{
struct sfq_sched_data *q = qdisc_priv(sch);
struct tcf_result res;
+ struct tcf_proto *fl;
int result;
if (TC_H_MAJ(skb->priority) == sch->handle &&
@@ -194,13 +195,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
TC_H_MIN(skb->priority) <= q->divisor)
return TC_H_MIN(skb->priority);
- if (!q->filter_list) {
+ fl = rcu_dereference_bh(q->filter_list);
+ if (!fl) {
skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
return sfq_hash(q, skb) + 1;
}
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
- result = tc_classify(skb, q->filter_list, &res);
+ result = tc_classify(skb, fl, &res);
if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
@@ -310,11 +312,6 @@ static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
slot->skblist_prev = skb;
}
-#define slot_queue_walk(slot, skb) \
- for (skb = slot->skblist_next; \
- skb != (struct sk_buff *)slot; \
- skb = skb->next)
-
static unsigned int sfq_drop(struct Qdisc *sch)
{
struct sfq_sched_data *q = qdisc_priv(sch);
@@ -334,8 +331,8 @@ drop:
sfq_dec(q, x);
kfree_skb(skb);
sch->q.qlen--;
- sch->qstats.drops++;
- sch->qstats.backlog -= len;
+ qdisc_qstats_drop(sch);
+ qdisc_qstats_backlog_dec(sch, skb);
return len;
}
@@ -382,7 +379,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
hash = sfq_classify(skb, sch, &ret);
if (hash == 0) {
if (ret & __NET_XMIT_BYPASS)
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
kfree_skb(skb);
return ret;
}
@@ -412,7 +409,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break;
case RED_PROB_MARK:
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (sfq_prob_mark(q)) {
/* We know we have at least one packet in queue */
if (sfq_headdrop(q) &&
@@ -429,7 +426,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
goto congestion_drop;
case RED_HARD_MARK:
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
if (sfq_hard_mark(q)) {
/* We know we have at least one packet in queue */
if (sfq_headdrop(q) &&
@@ -464,7 +461,7 @@ congestion_drop:
}
enqueue:
- sch->qstats.backlog += qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_inc(sch, skb);
slot->backlog += qdisc_pkt_len(skb);
slot_queue_add(slot, skb);
sfq_inc(q, x);
@@ -523,7 +520,7 @@ next_slot:
sfq_dec(q, a);
qdisc_bstats_update(sch, skb);
sch->q.qlen--;
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
slot->backlog -= qdisc_pkt_len(skb);
/* Is the slot empty? */
if (slot->qlen == 0) {
@@ -589,7 +586,8 @@ static void sfq_rehash(struct Qdisc *sch)
if (x == SFQ_EMPTY_SLOT) {
x = q->dep[0].next; /* get a free slot */
if (x >= SFQ_MAX_FLOWS) {
-drop: sch->qstats.backlog -= qdisc_pkt_len(skb);
+drop:
+ qdisc_qstats_backlog_dec(sch, skb);
kfree_skb(skb);
dropped++;
continue;
@@ -841,7 +839,8 @@ static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}
-static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch,
+ unsigned long cl)
{
struct sfq_sched_data *q = qdisc_priv(sch);
@@ -872,7 +871,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
qs.qlen = slot->qlen;
qs.backlog = slot->backlog;
}
- if (gnet_stats_copy_queue(d, &qs) < 0)
+ if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
return -1;
return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 18ff63433709..a4afde14e865 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -175,7 +175,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(segs, q->qdisc);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
} else {
nb++;
}
@@ -201,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, q->qdisc);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
return ret;
}
@@ -216,7 +216,7 @@ static unsigned int tbf_drop(struct Qdisc *sch)
if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
sch->q.qlen--;
- sch->qstats.drops++;
+ qdisc_qstats_drop(sch);
}
return len;
}
@@ -239,7 +239,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
s64 ptoks = 0;
unsigned int len = qdisc_pkt_len(skb);
- now = ktime_to_ns(ktime_get());
+ now = ktime_get_ns();
toks = min_t(s64, now - q->t_c, q->buffer);
if (tbf_peak_present(q)) {
@@ -268,7 +268,8 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
}
qdisc_watchdog_schedule_ns(&q->watchdog,
- now + max_t(long, -toks, -ptoks));
+ now + max_t(long, -toks, -ptoks),
+ true);
/* Maybe we have a shorter packet in the queue,
which can be sent now. It sounds cool,
@@ -281,7 +282,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
(cf. CSZ, HPFQ, HFSC)
*/
- sch->qstats.overlimits++;
+ qdisc_qstats_overlimit(sch);
}
return NULL;
}
@@ -292,7 +293,7 @@ static void tbf_reset(struct Qdisc *sch)
qdisc_reset(q->qdisc);
sch->q.qlen = 0;
- q->t_c = ktime_to_ns(ktime_get());
+ q->t_c = ktime_get_ns();
q->tokens = q->buffer;
q->ptokens = q->mtu;
qdisc_watchdog_cancel(&q->watchdog);
@@ -431,7 +432,7 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
if (opt == NULL)
return -EINVAL;
- q->t_c = ktime_to_ns(ktime_get());
+ q->t_c = ktime_get_ns();
qdisc_watchdog_init(&q->watchdog, sch);
q->qdisc = &noop_qdisc;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index bd33793b527e..6ada42396a24 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -96,11 +96,14 @@ teql_dequeue(struct Qdisc *sch)
struct teql_sched_data *dat = qdisc_priv(sch);
struct netdev_queue *dat_queue;
struct sk_buff *skb;
+ struct Qdisc *q;
skb = __skb_dequeue(&dat->q);
dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
+ q = rcu_dereference_bh(dat_queue->qdisc);
+
if (skb == NULL) {
- struct net_device *m = qdisc_dev(dat_queue->qdisc);
+ struct net_device *m = qdisc_dev(q);
if (m) {
dat->m->slaves = sch;
netif_wake_queue(m);
@@ -108,7 +111,7 @@ teql_dequeue(struct Qdisc *sch)
} else {
qdisc_bstats_update(sch, skb);
}
- sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
+ sch->q.qlen = dat->q.qlen + q->q.qlen;
return skb;
}
@@ -157,9 +160,9 @@ teql_destroy(struct Qdisc *sch)
txq = netdev_get_tx_queue(master->dev, 0);
master->slaves = NULL;
- root_lock = qdisc_root_sleeping_lock(txq->qdisc);
+ root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc));
spin_lock_bh(root_lock);
- qdisc_reset(txq->qdisc);
+ qdisc_reset(rtnl_dereference(txq->qdisc));
spin_unlock_bh(root_lock);
}
}
@@ -266,7 +269,7 @@ static inline int teql_resolve(struct sk_buff *skb,
struct dst_entry *dst = skb_dst(skb);
int res;
- if (txq->qdisc == &noop_qdisc)
+ if (rcu_access_pointer(txq->qdisc) == &noop_qdisc)
return -ENODEV;
if (!dev->header_ops || !dst)
@@ -301,7 +304,6 @@ restart:
do {
struct net_device *slave = qdisc_dev(q);
struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
- const struct net_device_ops *slave_ops = slave->netdev_ops;
if (slave_txq->qdisc_sleeping != q)
continue;
@@ -317,8 +319,8 @@ restart:
unsigned int length = qdisc_pkt_len(skb);
if (!netif_xmit_frozen_or_stopped(slave_txq) &&
- slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
- txq_trans_update(slave_txq);
+ netdev_start_xmit(skb, slave, slave_txq, false) ==
+ NETDEV_TX_OK) {
__netif_tx_unlock(slave_txq);
master->slaves = NEXT_SLAVE(q);
netif_wake_queue(dev);
@@ -468,7 +470,7 @@ static __init void teql_master_setup(struct net_device *dev)
dev->tx_queue_len = 100;
dev->flags = IFF_NOARP;
dev->hard_header_len = LL_MAX_HEADER;
- dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ netif_keep_dst(dev);
}
static LIST_HEAD(master_dev_list);
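teql's direct slave_ops->ndo_start_xmit() call is replaced by netdev_start_xmit(), which folds in the txq_trans_update() the caller previously did by hand; the trailing bool is the new xmit_more hint (false here, since teql sends one skb at a time). The wrapper is presumably close to:

static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb,
					    struct net_device *dev,
					    struct netdev_queue *txq,
					    bool more)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	netdev_tx_t rc;

	rc = ops->ndo_start_xmit(skb, dev);	/* 'more' hint elided here */
	if (rc == NETDEV_TX_OK)
		txq_trans_update(txq);
	return rc;
}

The netif_keep_dst(dev) change at the bottom is likewise a thin wrapper for clearing the IFF_XMIT_DST_RELEASE flag(s) from priv_flags.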
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c1b991294516..b6493b3f11a9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -133,9 +133,13 @@ int sctp_rcv(struct sk_buff *skb)
__skb_pull(skb, skb_transport_offset(skb));
if (skb->len < sizeof(struct sctphdr))
goto discard_it;
- if (!sctp_checksum_disable && !skb_csum_unnecessary(skb) &&
- sctp_rcv_checksum(net, skb) < 0)
+
+ skb->csum_valid = 0; /* Previous value not applicable */
+ if (skb_csum_unnecessary(skb))
+ __skb_decr_checksum_unnecessary(skb);
+ else if (!sctp_checksum_disable && sctp_rcv_checksum(net, skb) < 0)
goto discard_it;
+ skb->csum_valid = 1;
skb_pull(skb, sizeof(struct sctphdr));
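The sctp_rcv() rework plugs SCTP into the stacked CHECKSUM_UNNECESSARY scheme: hardware can report that it verified several nested checksums, skb->csum_level says how many are left, and each protocol layer that consumes one decrements the count instead of blindly trusting the flag. The decrement helper this leans on looks like (a sketch, assuming the csum_level field introduced with this scheme):

static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (skb->csum_level == 0)
			skb->ip_summed = CHECKSUM_NONE;
		else
			skb->csum_level--;
	}
}

So when the NIC already covered the SCTP CRC32c, one level is consumed and the software checksum is skipped; either way csum_valid ends up set only after a successful check.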
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 6240834f4b95..9d2c6c9facb6 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -366,7 +366,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
ret != RTN_LOCAL &&
!sp->inet.freebind &&
- !sysctl_ip_nonlocal_bind)
+ !net->ipv4.sysctl_ip_nonlocal_bind)
return 0;
if (ipv6_only_sock(sctp_opt2sk(sp)))
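sysctl_ip_nonlocal_bind stops being a global and becomes per network namespace, so SCTP reads it through the socket's netns. The field presumably lands in struct netns_ipv4 elsewhere in the series; the access pattern is simply (hypothetical helper for illustration):

static bool example_nonlocal_bind_ok(struct sock *sk)
{
	return sock_net(sk)->ipv4.sysctl_ip_nonlocal_bind != 0;
}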
diff --git a/net/socket.c b/net/socket.c
index 4cdbc107606f..ffd9cb46902b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -610,7 +610,7 @@ void sock_release(struct socket *sock)
}
EXPORT_SYMBOL(sock_release);
-void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
+void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
{
u8 flags = *tx_flags;
@@ -626,12 +626,9 @@ void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)
flags |= SKBTX_ACK_TSTAMP;
- if (sock_flag(sk, SOCK_WIFI_STATUS))
- flags |= SKBTX_WIFI_STATUS;
-
*tx_flags = flags;
}
-EXPORT_SYMBOL(sock_tx_timestamp);
+EXPORT_SYMBOL(__sock_tx_timestamp);
static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size)
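sock_tx_timestamp() is demoted to __sock_tx_timestamp() and loses the SOCK_WIFI_STATUS handling; the natural reading is that a static inline wrapper in the socket headers now performs the cheap flag tests and only calls this slow path when timestamping is actually enabled. A plausible shape for that wrapper (an assumption, it is not part of this diff):

static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
{
	*tx_flags = 0;
	if (unlikely(sk->sk_tsflags))
		__sock_tx_timestamp(sk, tx_flags);
	if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS)))
		*tx_flags |= SKBTX_WIFI_STATUS;
}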
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index a080c66d819a..b8a13caad59a 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_TIPC) := tipc.o
tipc-y += addr.o bcast.o bearer.o config.o \
core.o link.o discover.o msg.o \
name_distr.o subscr.o name_table.o net.o \
- netlink.o node.o node_subscr.o port.o ref.o \
+ netlink.o node.o node_subscr.o \
socket.o log.o eth_media.o server.o
tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index dd13bfa09333..b8670bf262e2 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -37,7 +37,6 @@
#include "core.h"
#include "link.h"
-#include "port.h"
#include "socket.h"
#include "msg.h"
#include "bcast.h"
@@ -227,6 +226,17 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
}
/**
+ * tipc_bclink_wakeup_users - wake up pending users
+ *
+ * Called with no locks taken
+ */
+void tipc_bclink_wakeup_users(void)
+{
+ while (skb_queue_len(&bclink->link.waiting_sks))
+ tipc_sk_rcv(skb_dequeue(&bclink->link.waiting_sks));
+}
+
+/**
* tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
* @n_ptr: node that sent acknowledgement info
* @acked: broadcast sequence # that has been acknowledged
@@ -300,8 +310,9 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
tipc_link_push_queue(bcl);
bclink_set_last_sent();
}
- if (unlikely(released && !list_empty(&bcl->waiting_ports)))
- tipc_link_wakeup_ports(bcl, 0);
+ if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
+ n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
+
exit:
tipc_bclink_unlock();
}
@@ -840,9 +851,10 @@ int tipc_bclink_init(void)
sprintf(bcbearer->media.name, "tipc-broadcast");
spin_lock_init(&bclink->lock);
- INIT_LIST_HEAD(&bcl->waiting_ports);
+ __skb_queue_head_init(&bcl->waiting_sks);
bcl->next_out_no = 1;
spin_lock_init(&bclink->node.lock);
+ __skb_queue_head_init(&bclink->node.waiting_sks);
bcl->owner = &bclink->node;
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 4875d9536aee..e7b0f85a82bc 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -99,5 +99,5 @@ int tipc_bclink_set_queue_limits(u32 limit);
void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
uint tipc_bclink_get_mtu(void);
int tipc_bclink_xmit(struct sk_buff *buf);
-
+void tipc_bclink_wakeup_users(void);
#endif
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 2b42403ad33a..876f4c6a2631 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -35,7 +35,7 @@
*/
#include "core.h"
-#include "port.h"
+#include "socket.h"
#include "name_table.h"
#include "config.h"
#include "server.h"
@@ -266,7 +266,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
rep_tlv_buf = tipc_media_get_names();
break;
case TIPC_CMD_SHOW_PORTS:
- rep_tlv_buf = tipc_port_get_ports();
+ rep_tlv_buf = tipc_sk_socks_show();
break;
case TIPC_CMD_SHOW_STATS:
rep_tlv_buf = tipc_show_stats();
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 676d18015dd8..a5737b8407dd 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -35,11 +35,10 @@
*/
#include "core.h"
-#include "ref.h"
#include "name_table.h"
#include "subscr.h"
#include "config.h"
-#include "port.h"
+#include "socket.h"
#include <linux/module.h>
@@ -85,7 +84,7 @@ static void tipc_core_stop(void)
tipc_netlink_stop();
tipc_subscr_stop();
tipc_nametbl_stop();
- tipc_ref_table_stop();
+ tipc_sk_ref_table_stop();
tipc_socket_stop();
tipc_unregister_sysctl();
}
@@ -99,7 +98,7 @@ static int tipc_core_start(void)
get_random_bytes(&tipc_random, sizeof(tipc_random));
- err = tipc_ref_table_init(tipc_max_ports, tipc_random);
+ err = tipc_sk_ref_table_init(tipc_max_ports, tipc_random);
if (err)
goto out_reftbl;
@@ -139,7 +138,7 @@ out_socket:
out_netlink:
tipc_nametbl_stop();
out_nametbl:
- tipc_ref_table_stop();
+ tipc_sk_ref_table_stop();
out_reftbl:
return err;
}
diff --git a/net/tipc/core.h b/net/tipc/core.h
index bb26ed1ee966..f773b148722f 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -81,6 +81,7 @@ extern u32 tipc_own_addr __read_mostly;
extern int tipc_max_ports __read_mostly;
extern int tipc_net_id __read_mostly;
extern int sysctl_tipc_rmem[3] __read_mostly;
+extern int sysctl_tipc_named_timeout __read_mostly;
/*
* Other global variables
@@ -187,8 +188,11 @@ static inline void k_term_timer(struct timer_list *timer)
struct tipc_skb_cb {
void *handle;
- bool deferred;
struct sk_buff *tail;
+ bool deferred;
+ bool wakeup_pending;
+ u16 chain_sz;
+ u16 chain_imp;
};
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tipc/link.c b/net/tipc/link.c
index fb1485dc6736..65410e18b8a6 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -36,7 +36,6 @@
#include "core.h"
#include "link.h"
-#include "port.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
@@ -275,7 +274,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
link_init_max_pkt(l_ptr);
l_ptr->next_out_no = 1;
- INIT_LIST_HEAD(&l_ptr->waiting_ports);
+ __skb_queue_head_init(&l_ptr->waiting_sks);
link_reset_statistics(l_ptr);
@@ -322,66 +321,47 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
}
/**
- * link_schedule_port - schedule port for deferred sending
- * @l_ptr: pointer to link
- * @origport: reference to sending port
- * @sz: amount of data to be sent
- *
- * Schedules port for renewed sending of messages after link congestion
- * has abated.
+ * link_schedule_user - schedule a user for wakeup after congestion
+ * @link: congested link
+ * @oport: sending port
+ * @chain_sz: size of the buffer chain that could not be sent
+ * @imp: importance of the message that could not be sent
+ * Creates a pseudo message to send back to the user when congestion abates
*/
-static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
+static bool link_schedule_user(struct tipc_link *link, u32 oport,
+ uint chain_sz, uint imp)
{
- struct tipc_port *p_ptr;
- struct tipc_sock *tsk;
+ struct sk_buff *buf;
- spin_lock_bh(&tipc_port_list_lock);
- p_ptr = tipc_port_lock(origport);
- if (p_ptr) {
- if (!list_empty(&p_ptr->wait_list))
- goto exit;
- tsk = tipc_port_to_sock(p_ptr);
- tsk->link_cong = 1;
- p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
- list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
- l_ptr->stats.link_congs++;
-exit:
- tipc_port_unlock(p_ptr);
- }
- spin_unlock_bh(&tipc_port_list_lock);
- return -ELINKCONG;
+ buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr,
+ tipc_own_addr, oport, 0, 0);
+ if (!buf)
+ return false;
+ TIPC_SKB_CB(buf)->chain_sz = chain_sz;
+ TIPC_SKB_CB(buf)->chain_imp = imp;
+ __skb_queue_tail(&link->waiting_sks, buf);
+ link->stats.link_congs++;
+ return true;
}
-void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
+/**
+ * link_prepare_wakeup - prepare users for wakeup after congestion
+ * @link: congested link
+ * Moves as many waiting users as available send-queue space permits
+ * from the link wait queue to the node wait queue for wakeup
+ */
+static void link_prepare_wakeup(struct tipc_link *link)
{
- struct tipc_port *p_ptr;
- struct tipc_sock *tsk;
- struct tipc_port *temp_p_ptr;
- int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
-
- if (all)
- win = 100000;
- if (win <= 0)
- return;
- if (!spin_trylock_bh(&tipc_port_list_lock))
- return;
- if (link_congested(l_ptr))
- goto exit;
- list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
- wait_list) {
- if (win <= 0)
+ struct sk_buff_head *wq = &link->waiting_sks;
+ struct sk_buff *buf;
+ uint pend_qsz = link->out_queue_size;
+
+ for (buf = skb_peek(wq); buf; buf = skb_peek(wq)) {
+ if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(buf)->chain_imp])
break;
- tsk = tipc_port_to_sock(p_ptr);
- list_del_init(&p_ptr->wait_list);
- spin_lock_bh(p_ptr->lock);
- tsk->link_cong = 0;
- tipc_sock_wakeup(tsk);
- win -= p_ptr->waiting_pkts;
- spin_unlock_bh(p_ptr->lock);
+ pend_qsz += TIPC_SKB_CB(buf)->chain_sz;
+ __skb_queue_tail(&link->owner->waiting_sks, __skb_dequeue(wq));
}
-
-exit:
- spin_unlock_bh(&tipc_port_list_lock);
}
/**
@@ -423,6 +403,7 @@ void tipc_link_reset(struct tipc_link *l_ptr)
u32 prev_state = l_ptr->state;
u32 checkpoint = l_ptr->next_in_no;
int was_active_link = tipc_link_is_active(l_ptr);
+ struct tipc_node *owner = l_ptr->owner;
msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
@@ -450,9 +431,10 @@ void tipc_link_reset(struct tipc_link *l_ptr)
kfree_skb(l_ptr->proto_msg_queue);
l_ptr->proto_msg_queue = NULL;
kfree_skb_list(l_ptr->oldest_deferred_in);
- if (!list_empty(&l_ptr->waiting_ports))
- tipc_link_wakeup_ports(l_ptr, 1);
-
+ if (!skb_queue_empty(&l_ptr->waiting_sks)) {
+ skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks);
+ owner->action_flags |= TIPC_WAKEUP_USERS;
+ }
l_ptr->retransm_queue_head = 0;
l_ptr->retransm_queue_size = 0;
l_ptr->last_out = NULL;
@@ -688,19 +670,23 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
- uint psz = msg_size(msg);
uint imp = tipc_msg_tot_importance(msg);
u32 oport = msg_tot_origport(msg);
- if (likely(imp <= TIPC_CRITICAL_IMPORTANCE)) {
- if (!msg_errcode(msg) && !msg_reroute_cnt(msg)) {
- link_schedule_port(link, oport, psz);
- return -ELINKCONG;
- }
- } else {
+ if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
tipc_link_reset(link);
+ goto drop;
}
+ if (unlikely(msg_errcode(msg)))
+ goto drop;
+ if (unlikely(msg_reroute_cnt(msg)))
+ goto drop;
+ if (TIPC_SKB_CB(buf)->wakeup_pending)
+ return -ELINKCONG;
+ if (link_schedule_user(link, oport, TIPC_SKB_CB(buf)->chain_sz, imp))
+ return -ELINKCONG;
+drop:
kfree_skb_list(buf);
return -EHOSTUNREACH;
}
@@ -1202,8 +1188,10 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
if (unlikely(l_ptr->next_out))
tipc_link_push_queue(l_ptr);
- if (unlikely(!list_empty(&l_ptr->waiting_ports)))
- tipc_link_wakeup_ports(l_ptr, 0);
+ if (released && !skb_queue_empty(&l_ptr->waiting_sks)) {
+ link_prepare_wakeup(l_ptr);
+ l_ptr->owner->action_flags |= TIPC_WAKEUP_USERS;
+ }
/* Process the incoming packet */
if (unlikely(!link_working_working(l_ptr))) {
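Taken together, the link.c changes replace the port wait list (and tipc_port_list_lock) with per-link queues of SOCK_WAKEUP pseudo messages. A condensed sketch of the round trip, assuming the socket-side handling added elsewhere in this series:

/* Sketch only, locking elided; tipc_link_cong() is static to link.c. */
static int example_congested_send(struct tipc_link *link, struct sk_buff *buf)
{
	int rc = tipc_link_cong(link, buf);	/* parks a SOCK_WAKEUP msg */

	/* On -ELINKCONG the socket sets tsk->link_cong and sleeps.
	 * When acks free queue space, link_prepare_wakeup() moves as
	 * many pseudo messages as the per-importance limits allow onto
	 * node->waiting_sks, and tipc_node_unlock() hands each one to
	 * tipc_sk_rcv(), which clears link_cong and wakes the sender.
	 */
	return rc;
}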
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 782983ccd323..b567a3427fda 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -1,7 +1,7 @@
/*
* net/tipc/link.h: Include file for TIPC link code
*
- * Copyright (c) 1995-2006, 2013, Ericsson AB
+ * Copyright (c) 1995-2006, 2013-2014, Ericsson AB
* Copyright (c) 2004-2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -133,7 +133,7 @@ struct tipc_stats {
* @retransm_queue_size: number of messages to retransmit
* @retransm_queue_head: sequence number of first message to retransmit
* @next_out: ptr to first unsent outbound message in queue
- * @waiting_ports: linked list of ports waiting for link congestion to abate
+ * @waiting_sks: linked list of sockets waiting for link congestion to abate
* @long_msg_seq_no: next identifier to use for outbound fragmented messages
* @reasm_buf: head of partially reassembled inbound message fragments
* @stats: collects statistics regarding link activity
@@ -194,7 +194,7 @@ struct tipc_link {
u32 retransm_queue_size;
u32 retransm_queue_head;
struct sk_buff *next_out;
- struct list_head waiting_ports;
+ struct sk_buff_head waiting_sks;
/* Fragmentation/reassembly */
u32 long_msg_seq_no;
@@ -235,7 +235,6 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
void tipc_link_push_queue(struct tipc_link *l_ptr);
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
struct sk_buff *buf);
-void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all);
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
void tipc_link_retransmit(struct tipc_link *l_ptr,
struct sk_buff *start, u32 retransmits);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 9680be6d388a..74745a47d72a 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -56,8 +56,35 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
msg_set_size(m, hsize);
msg_set_prevnode(m, tipc_own_addr);
msg_set_type(m, type);
- msg_set_orignode(m, tipc_own_addr);
- msg_set_destnode(m, destnode);
+ if (hsize > SHORT_H_SIZE) {
+ msg_set_orignode(m, tipc_own_addr);
+ msg_set_destnode(m, destnode);
+ }
+}
+
+struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
+ uint data_sz, u32 dnode, u32 onode,
+ u32 dport, u32 oport, int errcode)
+{
+ struct tipc_msg *msg;
+ struct sk_buff *buf;
+
+ buf = tipc_buf_acquire(hdr_sz + data_sz);
+ if (unlikely(!buf))
+ return NULL;
+
+ msg = buf_msg(buf);
+ tipc_msg_init(msg, user, type, hdr_sz, dnode);
+ msg_set_size(msg, hdr_sz + data_sz);
+ msg_set_prevnode(msg, onode);
+ msg_set_origport(msg, oport);
+ msg_set_destport(msg, dport);
+ msg_set_errcode(msg, errcode);
+ if (hdr_sz > SHORT_H_SIZE) {
+ msg_set_orignode(msg, onode);
+ msg_set_destnode(msg, dnode);
+ }
+ return buf;
}
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
@@ -155,7 +182,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
struct sk_buff *buf, *prev;
char *pktpos;
int rc;
-
+ uint chain_sz = 0;
msg_set_size(mhdr, msz);
/* No fragmentation needed? */
@@ -166,6 +193,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
return -ENOMEM;
skb_copy_to_linear_data(buf, mhdr, mhsz);
pktpos = buf->data + mhsz;
+ TIPC_SKB_CB(buf)->chain_sz = 1;
if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz))
return dsz;
rc = -EFAULT;
@@ -182,6 +210,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
*chain = buf = tipc_buf_acquire(pktmax);
if (!buf)
return -ENOMEM;
+ chain_sz = 1;
pktpos = buf->data;
skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
pktpos += INT_H_SIZE;
@@ -215,6 +244,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
rc = -ENOMEM;
goto error;
}
+ chain_sz++;
prev->next = buf;
msg_set_type(&pkthdr, FRAGMENT);
msg_set_size(&pkthdr, pktsz);
@@ -224,7 +254,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
pktrem = pktsz - INT_H_SIZE;
} while (1);
-
+ TIPC_SKB_CB(*chain)->chain_sz = chain_sz;
msg_set_type(buf_msg(buf), LAST_FRAGMENT);
return dsz;
error:
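tipc_msg_create() gives the stack a single factory for internally generated messages, and both of its first users appear in this series. The SOCK_WAKEUP pseudo message from link_schedule_user(), for example, is built as:

/* From link_schedule_user() above: a dataless SOCK_WAKEUP message
 * addressed to the congested sender's own port (dnode == onode ==
 * tipc_own_addr, dport == oport, no payload, no error code).
 */
buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr,
		      tipc_own_addr, oport, 0, 0);

tipc_node_abort_sock_conns() uses the same helper with SHORT_H_SIZE and TIPC_ERR_NO_NODE to fabricate connection aborts when a peer node disappears.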
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 462fa194a6af..0ea7b695ac4d 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -442,6 +442,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
#define NAME_DISTRIBUTOR 11
#define MSG_FRAGMENTER 12
#define LINK_CONFIG 13
+#define SOCK_WAKEUP 14 /* pseudo user */
/*
* Connection management protocol message types
@@ -732,6 +733,10 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode);
void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
u32 destnode);
+struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
+ uint data_sz, u32 dnode, u32 onode,
+ u32 dport, u32 oport, int errcode);
+
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu);
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index dcc15bcd5692..376d2bb51d8d 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -1,7 +1,7 @@
/*
* net/tipc/name_distr.c: TIPC name distribution code
*
- * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2000-2006, 2014, Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -71,6 +71,21 @@ static struct publ_list *publ_lists[] = {
};
+int sysctl_tipc_named_timeout __read_mostly = 2000;
+
+/**
+ * tipc_dist_queue - queue holding deferred name table updates
+ */
+static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
+
+struct distr_queue_item {
+ struct distr_item i;
+ u32 dtype;
+ u32 node;
+ unsigned long expires;
+ struct list_head next;
+};
+
/**
* publ_to_item - add publication info to a publication message
*/
@@ -263,54 +278,105 @@ static void named_purge_publ(struct publication *publ)
}
/**
+ * tipc_update_nametbl - try to process a name table update and notify
+ * subscribers
+ *
+ * tipc_nametbl_lock must be held.
+ * Returns true if the update was successfully processed, otherwise false.
+ */
+static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
+{
+ struct publication *publ = NULL;
+
+ if (dtype == PUBLICATION) {
+ publ = tipc_nametbl_insert_publ(ntohl(i->type), ntohl(i->lower),
+ ntohl(i->upper),
+ TIPC_CLUSTER_SCOPE, node,
+ ntohl(i->ref), ntohl(i->key));
+ if (publ) {
+ tipc_nodesub_subscribe(&publ->subscr, node, publ,
+ (net_ev_handler)
+ named_purge_publ);
+ return true;
+ }
+ } else if (dtype == WITHDRAWAL) {
+ publ = tipc_nametbl_remove_publ(ntohl(i->type), ntohl(i->lower),
+ node, ntohl(i->ref),
+ ntohl(i->key));
+ if (publ) {
+ tipc_nodesub_unsubscribe(&publ->subscr);
+ kfree(publ);
+ return true;
+ }
+ } else {
+ pr_warn("Unrecognized name table message received\n");
+ }
+ return false;
+}
+
+/**
+ * tipc_named_add_backlog - add a failed name table update to the backlog
+ */
+static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
+{
+ struct distr_queue_item *e;
+ unsigned long now = get_jiffies_64();
+
+ e = kzalloc(sizeof(*e), GFP_ATOMIC);
+ if (!e)
+ return;
+ e->dtype = type;
+ e->node = node;
+ e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
+ memcpy(&e->i, i, sizeof(*i));
+ list_add_tail(&e->next, &tipc_dist_queue);
+}
+
+/**
+ * tipc_named_process_backlog - try to process any pending name table updates
+ * from the network.
+ */
+void tipc_named_process_backlog(void)
+{
+ struct distr_queue_item *e, *tmp;
+ char addr[16];
+ unsigned long now = get_jiffies_64();
+
+ list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
+ if (time_after(e->expires, now)) {
+ if (!tipc_update_nametbl(&e->i, e->node, e->dtype))
+ continue;
+ } else {
+ tipc_addr_string_fill(addr, e->node);
+ pr_warn_ratelimited("Dropping name table update (%d) of {%u, %u, %u} from %s key=%u\n",
+ e->dtype, ntohl(e->i.type),
+ ntohl(e->i.lower),
+ ntohl(e->i.upper),
+ addr, ntohl(e->i.key));
+ }
+ list_del(&e->next);
+ kfree(e);
+ }
+}
+
+/**
* tipc_named_rcv - process name table update message sent by another node
*/
void tipc_named_rcv(struct sk_buff *buf)
{
- struct publication *publ;
struct tipc_msg *msg = buf_msg(buf);
struct distr_item *item = (struct distr_item *)msg_data(msg);
u32 count = msg_data_sz(msg) / ITEM_SIZE;
+ u32 node = msg_orignode(msg);
write_lock_bh(&tipc_nametbl_lock);
while (count--) {
- if (msg_type(msg) == PUBLICATION) {
- publ = tipc_nametbl_insert_publ(ntohl(item->type),
- ntohl(item->lower),
- ntohl(item->upper),
- TIPC_CLUSTER_SCOPE,
- msg_orignode(msg),
- ntohl(item->ref),
- ntohl(item->key));
- if (publ) {
- tipc_nodesub_subscribe(&publ->subscr,
- msg_orignode(msg),
- publ,
- (net_ev_handler)
- named_purge_publ);
- }
- } else if (msg_type(msg) == WITHDRAWAL) {
- publ = tipc_nametbl_remove_publ(ntohl(item->type),
- ntohl(item->lower),
- msg_orignode(msg),
- ntohl(item->ref),
- ntohl(item->key));
-
- if (publ) {
- tipc_nodesub_unsubscribe(&publ->subscr);
- kfree(publ);
- } else {
- pr_err("Unable to remove publication by node 0x%x\n"
- " (type=%u, lower=%u, ref=%u, key=%u)\n",
- msg_orignode(msg), ntohl(item->type),
- ntohl(item->lower), ntohl(item->ref),
- ntohl(item->key));
- }
- } else {
- pr_warn("Unrecognized name table message received\n");
- }
+ if (!tipc_update_nametbl(item, node, msg_type(msg)))
+ tipc_named_add_backlog(item, msg_type(msg), node);
item++;
}
+ tipc_named_process_backlog();
write_unlock_bh(&tipc_nametbl_lock);
kfree_skb(buf);
}
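The backlog turns name table updates from fail-and-log into fail-and-retry: an update that cannot be applied yet (e.g. a WITHDRAWAL arriving before the matching PUBLICATION has been processed) is parked for up to sysctl_tipc_named_timeout milliseconds and retried on every later distributor message, local publish, or withdraw. A sketch of the ctl_table entry that would expose the knob, assuming the companion net/tipc/sysctl.c change (not visible in this section) follows the existing tipc_rmem pattern:

/* Assumed net/tipc/sysctl.c addition exposing net.tipc.named_timeout */
{
	.procname	= "named_timeout",
	.data		= &sysctl_tipc_named_timeout,
	.maxlen		= sizeof(sysctl_tipc_named_timeout),
	.mode		= 0644,
	.proc_handler	= proc_dointvec,
},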
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index 8afe32b7fc9a..b9e75feb3434 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -73,5 +73,6 @@ void named_cluster_distribute(struct sk_buff *buf);
void tipc_named_node_up(u32 dnode);
void tipc_named_rcv(struct sk_buff *buf);
void tipc_named_reinit(void);
+void tipc_named_process_backlog(void);
#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 9d7d37d95187..3a6a0a7c0759 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -39,7 +39,6 @@
#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
-#include "port.h"
#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
@@ -262,8 +261,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
/* Lower end overlaps existing entry => need an exact match */
if ((sseq->lower != lower) || (sseq->upper != upper)) {
- pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
- type, lower, upper);
return NULL;
}
@@ -285,8 +282,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
/* Fail if upper end overlaps into an existing entry */
if ((inspos < nseq->first_free) &&
(upper >= nseq->sseqs[inspos].lower)) {
- pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
- type, lower, upper);
return NULL;
}
@@ -678,6 +673,8 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
if (likely(publ)) {
table.local_publ_count++;
buf = tipc_named_publish(publ);
+ /* Any pending external events? */
+ tipc_named_process_backlog();
}
write_unlock_bh(&tipc_nametbl_lock);
@@ -699,6 +696,8 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
if (likely(publ)) {
table.local_publ_count--;
buf = tipc_named_withdraw(publ);
+ /* Any pending external events? */
+ tipc_named_process_backlog();
write_unlock_bh(&tipc_nametbl_lock);
list_del_init(&publ->pport_list);
kfree(publ);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7fcc94998fea..93b9944a6a8b 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -38,7 +38,6 @@
#include "net.h"
#include "name_distr.h"
#include "subscr.h"
-#include "port.h"
#include "socket.h"
#include "node.h"
#include "config.h"
@@ -111,7 +110,7 @@ int tipc_net_start(u32 addr)
tipc_own_addr = addr;
tipc_named_reinit();
- tipc_port_reinit();
+ tipc_sk_reinit();
res = tipc_bclink_init();
if (res)
return res;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index f7069299943f..90cee4a6fce4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -38,6 +38,7 @@
#include "config.h"
#include "node.h"
#include "name_distr.h"
+#include "socket.h"
#define NODE_HTABLE_SIZE 512
@@ -50,6 +51,13 @@ static u32 tipc_num_nodes;
static u32 tipc_num_links;
static DEFINE_SPINLOCK(node_list_lock);
+struct tipc_sock_conn {
+ u32 port;
+ u32 peer_port;
+ u32 peer_node;
+ struct list_head list;
+};
+
/*
* A trivial power-of-two bitmask technique is used for speed, since this
* operation is done for every incoming TIPC packet. The number of hash table
@@ -100,6 +108,8 @@ struct tipc_node *tipc_node_create(u32 addr)
INIT_HLIST_NODE(&n_ptr->hash);
INIT_LIST_HEAD(&n_ptr->list);
INIT_LIST_HEAD(&n_ptr->nsub);
+ INIT_LIST_HEAD(&n_ptr->conn_sks);
+ __skb_queue_head_init(&n_ptr->waiting_sks);
hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
@@ -136,6 +146,71 @@ void tipc_node_stop(void)
spin_unlock_bh(&node_list_lock);
}
+int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
+{
+ struct tipc_node *node;
+ struct tipc_sock_conn *conn;
+
+ if (in_own_node(dnode))
+ return 0;
+
+ node = tipc_node_find(dnode);
+ if (!node) {
+ pr_warn("Connecting sock to node 0x%x failed\n", dnode);
+ return -EHOSTUNREACH;
+ }
+ conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
+ if (!conn)
+ return -EHOSTUNREACH;
+ conn->peer_node = dnode;
+ conn->port = port;
+ conn->peer_port = peer_port;
+
+ tipc_node_lock(node);
+ list_add_tail(&conn->list, &node->conn_sks);
+ tipc_node_unlock(node);
+ return 0;
+}
+
+void tipc_node_remove_conn(u32 dnode, u32 port)
+{
+ struct tipc_node *node;
+ struct tipc_sock_conn *conn, *safe;
+
+ if (in_own_node(dnode))
+ return;
+
+ node = tipc_node_find(dnode);
+ if (!node)
+ return;
+
+ tipc_node_lock(node);
+ list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
+ if (port != conn->port)
+ continue;
+ list_del(&conn->list);
+ kfree(conn);
+ }
+ tipc_node_unlock(node);
+}
+
+void tipc_node_abort_sock_conns(struct list_head *conns)
+{
+ struct tipc_sock_conn *conn, *safe;
+ struct sk_buff *buf;
+
+ list_for_each_entry_safe(conn, safe, conns, list) {
+ buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+ SHORT_H_SIZE, 0, tipc_own_addr,
+ conn->peer_node, conn->port,
+ conn->peer_port, TIPC_ERR_NO_NODE);
+ if (likely(buf))
+ tipc_sk_rcv(buf);
+ list_del(&conn->list);
+ kfree(conn);
+ }
+}
+
/**
* tipc_node_link_up - handle addition of link
*
@@ -474,25 +549,45 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
void tipc_node_unlock(struct tipc_node *node)
{
LIST_HEAD(nsub_list);
+ LIST_HEAD(conn_sks);
+ struct sk_buff_head waiting_sks;
u32 addr = 0;
+ unsigned int flags = node->action_flags;
if (likely(!node->action_flags)) {
spin_unlock_bh(&node->lock);
return;
}
+ __skb_queue_head_init(&waiting_sks);
+ if (node->action_flags & TIPC_WAKEUP_USERS) {
+ skb_queue_splice_init(&node->waiting_sks, &waiting_sks);
+ node->action_flags &= ~TIPC_WAKEUP_USERS;
+ }
if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
list_replace_init(&node->nsub, &nsub_list);
+ list_replace_init(&node->conn_sks, &conn_sks);
node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
}
if (node->action_flags & TIPC_NOTIFY_NODE_UP) {
node->action_flags &= ~TIPC_NOTIFY_NODE_UP;
addr = node->addr;
}
+ node->action_flags &= ~TIPC_WAKEUP_BCAST_USERS;
spin_unlock_bh(&node->lock);
+ while (!skb_queue_empty(&waiting_sks))
+ tipc_sk_rcv(__skb_dequeue(&waiting_sks));
+
+ if (!list_empty(&conn_sks))
+ tipc_node_abort_sock_conns(&conn_sks);
+
if (!list_empty(&nsub_list))
tipc_nodesub_notify(&nsub_list);
+
+ if (flags & TIPC_WAKEUP_BCAST_USERS)
+ tipc_bclink_wakeup_users();
+
if (addr)
tipc_named_node_up(addr);
}
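With conn_sks in place, every off-node connection is registered with the peer's node structure, so a failing node can fabricate one local abort per connection instead of relying on per-port "node down" subscriptions. A sketch of the expected socket-side pairing (the actual calls live in the socket.c portion of this series):

/* Sketch only: tsk_peer_node()/tsk_peer_port() are the static helpers
 * defined in the socket.c hunk further down.
 */
static void example_conn_lifetime(struct tipc_sock *tsk)
{
	u32 dnode = tsk_peer_node(tsk);

	/* at connection establishment */
	if (tipc_node_add_conn(dnode, tsk->ref, tsk_peer_port(tsk)))
		return;		/* peer node unknown: -EHOSTUNREACH */

	/* ... connection lifetime ... */

	/* at shutdown/release, so no stale abort is generated later */
	tipc_node_remove_conn(dnode, tsk->ref);
}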
diff --git a/net/tipc/node.h b/net/tipc/node.h
index b61716a8218e..67513c3c852c 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -58,7 +58,9 @@ enum {
TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2),
TIPC_NOTIFY_NODE_DOWN = (1 << 3),
- TIPC_NOTIFY_NODE_UP = (1 << 4)
+ TIPC_NOTIFY_NODE_UP = (1 << 4),
+ TIPC_WAKEUP_USERS = (1 << 5),
+ TIPC_WAKEUP_BCAST_USERS = (1 << 6)
};
/**
@@ -115,6 +117,8 @@ struct tipc_node {
int working_links;
u32 signature;
struct list_head nsub;
+ struct sk_buff_head waiting_sks;
+ struct list_head conn_sks;
struct rcu_head rcu;
};
@@ -133,6 +137,8 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
void tipc_node_unlock(struct tipc_node *node);
+int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port);
+void tipc_node_remove_conn(u32 dnode, u32 port);
static inline void tipc_node_lock(struct tipc_node *node)
{
diff --git a/net/tipc/port.c b/net/tipc/port.c
deleted file mode 100644
index 7e096a5e7701..000000000000
--- a/net/tipc/port.c
+++ /dev/null
@@ -1,514 +0,0 @@
-/*
- * net/tipc/port.c: TIPC port code
- *
- * Copyright (c) 1992-2007, 2014, Ericsson AB
- * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "config.h"
-#include "port.h"
-#include "name_table.h"
-#include "socket.h"
-
-/* Connection management: */
-#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */
-
-#define MAX_REJECT_SIZE 1024
-
-DEFINE_SPINLOCK(tipc_port_list_lock);
-
-static LIST_HEAD(ports);
-static void port_handle_node_down(unsigned long ref);
-static struct sk_buff *port_build_self_abort_msg(struct tipc_port *, u32 err);
-static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err);
-static void port_timeout(unsigned long ref);
-
-/**
- * tipc_port_peer_msg - verify message was sent by connected port's peer
- *
- * Handles cases where the node's network address has changed from
- * the default of <0.0.0> to its configured setting.
- */
-int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
-{
- u32 peernode;
- u32 orignode;
-
- if (msg_origport(msg) != tipc_port_peerport(p_ptr))
- return 0;
-
- orignode = msg_orignode(msg);
- peernode = tipc_port_peernode(p_ptr);
- return (orignode == peernode) ||
- (!orignode && (peernode == tipc_own_addr)) ||
- (!peernode && (orignode == tipc_own_addr));
-}
-
-/* tipc_port_init - intiate TIPC port and lock it
- *
- * Returns obtained reference if initialization is successful, zero otherwise
- */
-u32 tipc_port_init(struct tipc_port *p_ptr,
- const unsigned int importance)
-{
- struct tipc_msg *msg;
- u32 ref;
-
- ref = tipc_ref_acquire(p_ptr, &p_ptr->lock);
- if (!ref) {
- pr_warn("Port registration failed, ref. table exhausted\n");
- return 0;
- }
-
- p_ptr->max_pkt = MAX_PKT_DEFAULT;
- p_ptr->ref = ref;
- INIT_LIST_HEAD(&p_ptr->wait_list);
- INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
- k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
- INIT_LIST_HEAD(&p_ptr->publications);
- INIT_LIST_HEAD(&p_ptr->port_list);
-
- /*
- * Must hold port list lock while initializing message header template
- * to ensure a change to node's own network address doesn't result
- * in template containing out-dated network address information
- */
- spin_lock_bh(&tipc_port_list_lock);
- msg = &p_ptr->phdr;
- tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
- msg_set_origport(msg, ref);
- list_add_tail(&p_ptr->port_list, &ports);
- spin_unlock_bh(&tipc_port_list_lock);
- return ref;
-}
-
-void tipc_port_destroy(struct tipc_port *p_ptr)
-{
- struct sk_buff *buf = NULL;
- struct tipc_msg *msg = NULL;
- u32 peer;
-
- tipc_withdraw(p_ptr, 0, NULL);
-
- spin_lock_bh(p_ptr->lock);
- tipc_ref_discard(p_ptr->ref);
- spin_unlock_bh(p_ptr->lock);
-
- k_cancel_timer(&p_ptr->timer);
- if (p_ptr->connected) {
- buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
- tipc_nodesub_unsubscribe(&p_ptr->subscription);
- msg = buf_msg(buf);
- peer = msg_destnode(msg);
- tipc_link_xmit(buf, peer, msg_link_selector(msg));
- }
- spin_lock_bh(&tipc_port_list_lock);
- list_del(&p_ptr->port_list);
- list_del(&p_ptr->wait_list);
- spin_unlock_bh(&tipc_port_list_lock);
- k_term_timer(&p_ptr->timer);
-}
-
-/*
- * port_build_proto_msg(): create connection protocol message for port
- *
- * On entry the port must be locked and connected.
- */
-static struct sk_buff *port_build_proto_msg(struct tipc_port *p_ptr,
- u32 type, u32 ack)
-{
- struct sk_buff *buf;
- struct tipc_msg *msg;
-
- buf = tipc_buf_acquire(INT_H_SIZE);
- if (buf) {
- msg = buf_msg(buf);
- tipc_msg_init(msg, CONN_MANAGER, type, INT_H_SIZE,
- tipc_port_peernode(p_ptr));
- msg_set_destport(msg, tipc_port_peerport(p_ptr));
- msg_set_origport(msg, p_ptr->ref);
- msg_set_msgcnt(msg, ack);
- buf->next = NULL;
- }
- return buf;
-}
-
-static void port_timeout(unsigned long ref)
-{
- struct tipc_port *p_ptr = tipc_port_lock(ref);
- struct sk_buff *buf = NULL;
- struct tipc_msg *msg = NULL;
-
- if (!p_ptr)
- return;
-
- if (!p_ptr->connected) {
- tipc_port_unlock(p_ptr);
- return;
- }
-
- /* Last probe answered ? */
- if (p_ptr->probing_state == TIPC_CONN_PROBING) {
- buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
- } else {
- buf = port_build_proto_msg(p_ptr, CONN_PROBE, 0);
- p_ptr->probing_state = TIPC_CONN_PROBING;
- k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
- }
- tipc_port_unlock(p_ptr);
- msg = buf_msg(buf);
- tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
-}
-
-
-static void port_handle_node_down(unsigned long ref)
-{
- struct tipc_port *p_ptr = tipc_port_lock(ref);
- struct sk_buff *buf = NULL;
- struct tipc_msg *msg = NULL;
-
- if (!p_ptr)
- return;
- buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
- tipc_port_unlock(p_ptr);
- msg = buf_msg(buf);
- tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
-}
-
-
-static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 err)
-{
- struct sk_buff *buf = port_build_peer_abort_msg(p_ptr, err);
-
- if (buf) {
- struct tipc_msg *msg = buf_msg(buf);
- msg_swap_words(msg, 4, 5);
- msg_swap_words(msg, 6, 7);
- buf->next = NULL;
- }
- return buf;
-}
-
-
-static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 err)
-{
- struct sk_buff *buf;
- struct tipc_msg *msg;
- u32 imp;
-
- if (!p_ptr->connected)
- return NULL;
-
- buf = tipc_buf_acquire(BASIC_H_SIZE);
- if (buf) {
- msg = buf_msg(buf);
- memcpy(msg, &p_ptr->phdr, BASIC_H_SIZE);
- msg_set_hdr_sz(msg, BASIC_H_SIZE);
- msg_set_size(msg, BASIC_H_SIZE);
- imp = msg_importance(msg);
- if (imp < TIPC_CRITICAL_IMPORTANCE)
- msg_set_importance(msg, ++imp);
- msg_set_errcode(msg, err);
- buf->next = NULL;
- }
- return buf;
-}
-
-static int port_print(struct tipc_port *p_ptr, char *buf, int len, int full_id)
-{
- struct publication *publ;
- int ret;
-
- if (full_id)
- ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
- tipc_zone(tipc_own_addr),
- tipc_cluster(tipc_own_addr),
- tipc_node(tipc_own_addr), p_ptr->ref);
- else
- ret = tipc_snprintf(buf, len, "%-10u:", p_ptr->ref);
-
- if (p_ptr->connected) {
- u32 dport = tipc_port_peerport(p_ptr);
- u32 destnode = tipc_port_peernode(p_ptr);
-
- ret += tipc_snprintf(buf + ret, len - ret,
- " connected to <%u.%u.%u:%u>",
- tipc_zone(destnode),
- tipc_cluster(destnode),
- tipc_node(destnode), dport);
- if (p_ptr->conn_type != 0)
- ret += tipc_snprintf(buf + ret, len - ret,
- " via {%u,%u}", p_ptr->conn_type,
- p_ptr->conn_instance);
- } else if (p_ptr->published) {
- ret += tipc_snprintf(buf + ret, len - ret, " bound to");
- list_for_each_entry(publ, &p_ptr->publications, pport_list) {
- if (publ->lower == publ->upper)
- ret += tipc_snprintf(buf + ret, len - ret,
- " {%u,%u}", publ->type,
- publ->lower);
- else
- ret += tipc_snprintf(buf + ret, len - ret,
- " {%u,%u,%u}", publ->type,
- publ->lower, publ->upper);
- }
- }
- ret += tipc_snprintf(buf + ret, len - ret, "\n");
- return ret;
-}
-
-struct sk_buff *tipc_port_get_ports(void)
-{
- struct sk_buff *buf;
- struct tlv_desc *rep_tlv;
- char *pb;
- int pb_len;
- struct tipc_port *p_ptr;
- int str_len = 0;
-
- buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
- if (!buf)
- return NULL;
- rep_tlv = (struct tlv_desc *)buf->data;
- pb = TLV_DATA(rep_tlv);
- pb_len = ULTRA_STRING_MAX_LEN;
-
- spin_lock_bh(&tipc_port_list_lock);
- list_for_each_entry(p_ptr, &ports, port_list) {
- spin_lock_bh(p_ptr->lock);
- str_len += port_print(p_ptr, pb, pb_len, 0);
- spin_unlock_bh(p_ptr->lock);
- }
- spin_unlock_bh(&tipc_port_list_lock);
- str_len += 1; /* for "\0" */
- skb_put(buf, TLV_SPACE(str_len));
- TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
-
- return buf;
-}
-
-void tipc_port_reinit(void)
-{
- struct tipc_port *p_ptr;
- struct tipc_msg *msg;
-
- spin_lock_bh(&tipc_port_list_lock);
- list_for_each_entry(p_ptr, &ports, port_list) {
- msg = &p_ptr->phdr;
- msg_set_prevnode(msg, tipc_own_addr);
- msg_set_orignode(msg, tipc_own_addr);
- }
- spin_unlock_bh(&tipc_port_list_lock);
-}
-
-void tipc_acknowledge(u32 ref, u32 ack)
-{
- struct tipc_port *p_ptr;
- struct sk_buff *buf = NULL;
- struct tipc_msg *msg;
-
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return;
- if (p_ptr->connected)
- buf = port_build_proto_msg(p_ptr, CONN_ACK, ack);
-
- tipc_port_unlock(p_ptr);
- if (!buf)
- return;
- msg = buf_msg(buf);
- tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
-}
-
-int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
- struct tipc_name_seq const *seq)
-{
- struct publication *publ;
- u32 key;
-
- if (p_ptr->connected)
- return -EINVAL;
- key = p_ptr->ref + p_ptr->pub_count + 1;
- if (key == p_ptr->ref)
- return -EADDRINUSE;
-
- publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
- scope, p_ptr->ref, key);
- if (publ) {
- list_add(&publ->pport_list, &p_ptr->publications);
- p_ptr->pub_count++;
- p_ptr->published = 1;
- return 0;
- }
- return -EINVAL;
-}
-
-int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
- struct tipc_name_seq const *seq)
-{
- struct publication *publ;
- struct publication *tpubl;
- int res = -EINVAL;
-
- if (!seq) {
- list_for_each_entry_safe(publ, tpubl,
- &p_ptr->publications, pport_list) {
- tipc_nametbl_withdraw(publ->type, publ->lower,
- publ->ref, publ->key);
- }
- res = 0;
- } else {
- list_for_each_entry_safe(publ, tpubl,
- &p_ptr->publications, pport_list) {
- if (publ->scope != scope)
- continue;
- if (publ->type != seq->type)
- continue;
- if (publ->lower != seq->lower)
- continue;
- if (publ->upper != seq->upper)
- break;
- tipc_nametbl_withdraw(publ->type, publ->lower,
- publ->ref, publ->key);
- res = 0;
- break;
- }
- }
- if (list_empty(&p_ptr->publications))
- p_ptr->published = 0;
- return res;
-}
-
-int tipc_port_connect(u32 ref, struct tipc_portid const *peer)
-{
- struct tipc_port *p_ptr;
- int res;
-
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
- res = __tipc_port_connect(ref, p_ptr, peer);
- tipc_port_unlock(p_ptr);
- return res;
-}
-
-/*
- * __tipc_port_connect - connect to a remote peer
- *
- * Port must be locked.
- */
-int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
- struct tipc_portid const *peer)
-{
- struct tipc_msg *msg;
- int res = -EINVAL;
-
- if (p_ptr->published || p_ptr->connected)
- goto exit;
- if (!peer->ref)
- goto exit;
-
- msg = &p_ptr->phdr;
- msg_set_destnode(msg, peer->node);
- msg_set_destport(msg, peer->ref);
- msg_set_type(msg, TIPC_CONN_MSG);
- msg_set_lookup_scope(msg, 0);
- msg_set_hdr_sz(msg, SHORT_H_SIZE);
-
- p_ptr->probing_interval = PROBING_INTERVAL;
- p_ptr->probing_state = TIPC_CONN_OK;
- p_ptr->connected = 1;
- k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
-
- tipc_nodesub_subscribe(&p_ptr->subscription, peer->node,
- (void *)(unsigned long)ref,
- (net_ev_handler)port_handle_node_down);
- res = 0;
-exit:
- p_ptr->max_pkt = tipc_node_get_mtu(peer->node, ref);
- return res;
-}
-
-/*
- * __tipc_disconnect - disconnect port from peer
- *
- * Port must be locked.
- */
-int __tipc_port_disconnect(struct tipc_port *tp_ptr)
-{
- if (tp_ptr->connected) {
- tp_ptr->connected = 0;
- /* let timer expire on it's own to avoid deadlock! */
- tipc_nodesub_unsubscribe(&tp_ptr->subscription);
- return 0;
- }
-
- return -ENOTCONN;
-}
-
-/*
- * tipc_port_disconnect(): Disconnect port form peer.
- * This is a node local operation.
- */
-int tipc_port_disconnect(u32 ref)
-{
- struct tipc_port *p_ptr;
- int res;
-
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
- res = __tipc_port_disconnect(p_ptr);
- tipc_port_unlock(p_ptr);
- return res;
-}
-
-/*
- * tipc_port_shutdown(): Send a SHUTDOWN msg to peer and disconnect
- */
-int tipc_port_shutdown(u32 ref)
-{
- struct tipc_msg *msg;
- struct tipc_port *p_ptr;
- struct sk_buff *buf = NULL;
-
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
-
- buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN);
- tipc_port_unlock(p_ptr);
- msg = buf_msg(buf);
- tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
- return tipc_port_disconnect(ref);
-}
diff --git a/net/tipc/port.h b/net/tipc/port.h
deleted file mode 100644
index 3087da39ee47..000000000000
--- a/net/tipc/port.h
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * net/tipc/port.h: Include file for TIPC port code
- *
- * Copyright (c) 1994-2007, 2014, Ericsson AB
- * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TIPC_PORT_H
-#define _TIPC_PORT_H
-
-#include "ref.h"
-#include "net.h"
-#include "msg.h"
-#include "node_subscr.h"
-
-#define TIPC_CONNACK_INTV 256
-#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2)
-#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
- SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
-
-/**
- * struct tipc_port - TIPC port structure
- * @lock: pointer to spinlock for controlling access to port
- * @connected: non-zero if port is currently connected to a peer port
- * @conn_type: TIPC type used when connection was established
- * @conn_instance: TIPC instance used when connection was established
- * @published: non-zero if port has one or more associated names
- * @max_pkt: maximum packet size "hint" used when building messages sent by port
- * @ref: unique reference to port in TIPC object registry
- * @phdr: preformatted message header used when sending messages
- * @port_list: adjacent ports in TIPC's global list of ports
- * @wait_list: adjacent ports in list of ports waiting on link congestion
- * @waiting_pkts:
- * @publications: list of publications for port
- * @pub_count: total # of publications port has made during its lifetime
- * @probing_state:
- * @probing_interval:
- * @timer_ref:
- * @subscription: "node down" subscription used to terminate failed connections
- */
-struct tipc_port {
- spinlock_t *lock;
- int connected;
- u32 conn_type;
- u32 conn_instance;
- int published;
- u32 max_pkt;
- u32 ref;
- struct tipc_msg phdr;
- struct list_head port_list;
- struct list_head wait_list;
- u32 waiting_pkts;
- struct list_head publications;
- u32 pub_count;
- u32 probing_state;
- u32 probing_interval;
- struct timer_list timer;
- struct tipc_node_subscr subscription;
-};
-
-extern spinlock_t tipc_port_list_lock;
-struct tipc_port_list;
-
-/*
- * TIPC port manipulation routines
- */
-u32 tipc_port_init(struct tipc_port *p_ptr,
- const unsigned int importance);
-
-void tipc_acknowledge(u32 port_ref, u32 ack);
-
-void tipc_port_destroy(struct tipc_port *p_ptr);
-
-int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
- struct tipc_name_seq const *name_seq);
-
-int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
- struct tipc_name_seq const *name_seq);
-
-int tipc_port_connect(u32 portref, struct tipc_portid const *port);
-
-int tipc_port_disconnect(u32 portref);
-
-int tipc_port_shutdown(u32 ref);
-
-/*
- * The following routines require that the port be locked on entry
- */
-int __tipc_port_disconnect(struct tipc_port *tp_ptr);
-int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
- struct tipc_portid const *peer);
-int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
-
-struct sk_buff *tipc_port_get_ports(void);
-void tipc_port_reinit(void);
-
-/**
- * tipc_port_lock - lock port instance referred to and return its pointer
- */
-static inline struct tipc_port *tipc_port_lock(u32 ref)
-{
- return (struct tipc_port *)tipc_ref_lock(ref);
-}
-
-/**
- * tipc_port_unlock - unlock a port instance
- *
- * Can use pointer instead of tipc_ref_unlock() since port is already locked.
- */
-static inline void tipc_port_unlock(struct tipc_port *p_ptr)
-{
- spin_unlock_bh(p_ptr->lock);
-}
-
-static inline u32 tipc_port_peernode(struct tipc_port *p_ptr)
-{
- return msg_destnode(&p_ptr->phdr);
-}
-
-static inline u32 tipc_port_peerport(struct tipc_port *p_ptr)
-{
- return msg_destport(&p_ptr->phdr);
-}
-
-static inline bool tipc_port_unreliable(struct tipc_port *port)
-{
- return msg_src_droppable(&port->phdr) != 0;
-}
-
-static inline void tipc_port_set_unreliable(struct tipc_port *port,
- bool unreliable)
-{
- msg_set_src_droppable(&port->phdr, unreliable ? 1 : 0);
-}
-
-static inline bool tipc_port_unreturnable(struct tipc_port *port)
-{
- return msg_dest_droppable(&port->phdr) != 0;
-}
-
-static inline void tipc_port_set_unreturnable(struct tipc_port *port,
- bool unreturnable)
-{
- msg_set_dest_droppable(&port->phdr, unreturnable ? 1 : 0);
-}
-
-
-static inline int tipc_port_importance(struct tipc_port *port)
-{
- return msg_importance(&port->phdr);
-}
-
-static inline int tipc_port_set_importance(struct tipc_port *port, int imp)
-{
- if (imp > TIPC_CRITICAL_IMPORTANCE)
- return -EINVAL;
- msg_set_importance(&port->phdr, (u32)imp);
- return 0;
-}
-
-#endif
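The port API does not disappear so much as collapse into the socket layer: the former struct tipc_port fields now live directly in struct tipc_sock, and the accessors survive as static tsk_*() helpers in socket.c. The correspondence, for reference:

/* Old (net/tipc/port.h)                New (net/tipc/socket.c, static)
 * tipc_port_peernode(p)          ->    tsk_peer_node(tsk)
 * tipc_port_peerport(p)          ->    tsk_peer_port(tsk)
 * tipc_port_unreliable(p)        ->    tsk_unreliable(tsk)
 * tipc_port_set_unreliable(p, b) ->    tsk_set_unreliable(tsk, b)
 * tipc_port_unreturnable(p)      ->    tsk_unreturnable(tsk)
 * tipc_port_importance(p)        ->    tsk_importance(tsk)
 * tipc_port_get_ports()          ->    tipc_sk_socks_show()
 * tipc_port_reinit()             ->    tipc_sk_reinit()
 */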
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
deleted file mode 100644
index 3d4ecd754eee..000000000000
--- a/net/tipc/ref.c
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
- * net/tipc/ref.c: TIPC object registry code
- *
- * Copyright (c) 1991-2006, Ericsson AB
- * Copyright (c) 2004-2007, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "core.h"
-#include "ref.h"
-
-/**
- * struct reference - TIPC object reference entry
- * @object: pointer to object associated with reference entry
- * @lock: spinlock controlling access to object
- * @ref: reference value for object (combines instance & array index info)
- */
-struct reference {
- void *object;
- spinlock_t lock;
- u32 ref;
-};
-
-/**
- * struct tipc_ref_table - table of TIPC object reference entries
- * @entries: pointer to array of reference entries
- * @capacity: array index of first unusable entry
- * @init_point: array index of first uninitialized entry
- * @first_free: array index of first unused object reference entry
- * @last_free: array index of last unused object reference entry
- * @index_mask: bitmask for array index portion of reference values
- * @start_mask: initial value for instance value portion of reference values
- */
-struct ref_table {
- struct reference *entries;
- u32 capacity;
- u32 init_point;
- u32 first_free;
- u32 last_free;
- u32 index_mask;
- u32 start_mask;
-};
-
-/*
- * Object reference table consists of 2**N entries.
- *
- * State Object ptr Reference
- * ----- ---------- ---------
- * In use non-NULL XXXX|own index
- * (XXXX changes each time entry is acquired)
- * Free NULL YYYY|next free index
- * (YYYY is one more than last used XXXX)
- * Uninitialized NULL 0
- *
- * Entry 0 is not used; this allows index 0 to denote the end of the free list.
- *
- * Note that a reference value of 0 does not necessarily indicate that an
- * entry is uninitialized, since the last entry in the free list could also
- * have a reference value of 0 (although this is unlikely).
- */
-
-static struct ref_table tipc_ref_table;
-
-static DEFINE_SPINLOCK(ref_table_lock);
-
-/**
- * tipc_ref_table_init - create reference table for objects
- */
-int tipc_ref_table_init(u32 requested_size, u32 start)
-{
- struct reference *table;
- u32 actual_size;
-
- /* account for unused entry, then round up size to a power of 2 */
-
- requested_size++;
- for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
- /* do nothing */ ;
-
- /* allocate table & mark all entries as uninitialized */
- table = vzalloc(actual_size * sizeof(struct reference));
- if (table == NULL)
- return -ENOMEM;
-
- tipc_ref_table.entries = table;
- tipc_ref_table.capacity = requested_size;
- tipc_ref_table.init_point = 1;
- tipc_ref_table.first_free = 0;
- tipc_ref_table.last_free = 0;
- tipc_ref_table.index_mask = actual_size - 1;
- tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
-
- return 0;
-}
-
-/**
- * tipc_ref_table_stop - destroy reference table for objects
- */
-void tipc_ref_table_stop(void)
-{
- vfree(tipc_ref_table.entries);
- tipc_ref_table.entries = NULL;
-}
-
-/**
- * tipc_ref_acquire - create reference to an object
- *
- * Register an object pointer in reference table and lock the object.
- * Returns a unique reference value that is used from then on to retrieve the
- * object pointer, or to determine that the object has been deregistered.
- *
- * Note: The object is returned in the locked state so that the caller can
- * register a partially initialized object, without running the risk that
- * the object will be accessed before initialization is complete.
- */
-u32 tipc_ref_acquire(void *object, spinlock_t **lock)
-{
- u32 index;
- u32 index_mask;
- u32 next_plus_upper;
- u32 ref;
- struct reference *entry = NULL;
-
- if (!object) {
- pr_err("Attempt to acquire ref. to non-existent obj\n");
- return 0;
- }
- if (!tipc_ref_table.entries) {
- pr_err("Ref. table not found in acquisition attempt\n");
- return 0;
- }
-
- /* take a free entry, if available; otherwise initialize a new entry */
- spin_lock_bh(&ref_table_lock);
- if (tipc_ref_table.first_free) {
- index = tipc_ref_table.first_free;
- entry = &(tipc_ref_table.entries[index]);
- index_mask = tipc_ref_table.index_mask;
- next_plus_upper = entry->ref;
- tipc_ref_table.first_free = next_plus_upper & index_mask;
- ref = (next_plus_upper & ~index_mask) + index;
- } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
- index = tipc_ref_table.init_point++;
- entry = &(tipc_ref_table.entries[index]);
- spin_lock_init(&entry->lock);
- ref = tipc_ref_table.start_mask + index;
- } else {
- ref = 0;
- }
- spin_unlock_bh(&ref_table_lock);
-
- /*
- * Grab the lock so no one else can modify this entry
- * While we assign its ref value & object pointer
- */
- if (entry) {
- spin_lock_bh(&entry->lock);
- entry->ref = ref;
- entry->object = object;
- *lock = &entry->lock;
- /*
- * keep it locked, the caller is responsible
- * for unlocking this when they're done with it
- */
- }
-
- return ref;
-}
-
-/**
- * tipc_ref_discard - invalidate references to an object
- *
- * Disallow future references to an object and free up the entry for re-use.
- * Note: The entry's spin_lock may still be busy after discard
- */
-void tipc_ref_discard(u32 ref)
-{
- struct reference *entry;
- u32 index;
- u32 index_mask;
-
- if (!tipc_ref_table.entries) {
- pr_err("Ref. table not found during discard attempt\n");
- return;
- }
-
- index_mask = tipc_ref_table.index_mask;
- index = ref & index_mask;
- entry = &(tipc_ref_table.entries[index]);
-
- spin_lock_bh(&ref_table_lock);
-
- if (!entry->object) {
- pr_err("Attempt to discard ref. to non-existent obj\n");
- goto exit;
- }
- if (entry->ref != ref) {
- pr_err("Attempt to discard non-existent reference\n");
- goto exit;
- }
-
- /*
- * mark entry as unused; increment instance part of entry's reference
- * to invalidate any subsequent references
- */
- entry->object = NULL;
- entry->ref = (ref & ~index_mask) + (index_mask + 1);
-
- /* append entry to free entry list */
- if (tipc_ref_table.first_free == 0)
- tipc_ref_table.first_free = index;
- else
- tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
- tipc_ref_table.last_free = index;
-
-exit:
- spin_unlock_bh(&ref_table_lock);
-}
-
-/**
- * tipc_ref_lock - lock referenced object and return pointer to it
- */
-void *tipc_ref_lock(u32 ref)
-{
- if (likely(tipc_ref_table.entries)) {
- struct reference *entry;
-
- entry = &tipc_ref_table.entries[ref &
- tipc_ref_table.index_mask];
- if (likely(entry->ref != 0)) {
- spin_lock_bh(&entry->lock);
- if (likely((entry->ref == ref) && (entry->object)))
- return entry->object;
- spin_unlock_bh(&entry->lock);
- }
- }
- return NULL;
-}
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
deleted file mode 100644
index d01aa1df63b8..000000000000
--- a/net/tipc/ref.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * net/tipc/ref.h: Include file for TIPC object registry code
- *
- * Copyright (c) 1991-2006, Ericsson AB
- * Copyright (c) 2005-2006, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TIPC_REF_H
-#define _TIPC_REF_H
-
-int tipc_ref_table_init(u32 requested_size, u32 start);
-void tipc_ref_table_stop(void);
-
-u32 tipc_ref_acquire(void *object, spinlock_t **lock);
-void tipc_ref_discard(u32 ref);
-
-void *tipc_ref_lock(u32 ref);
-
-#endif
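The generic object registry moves the same way: socket.c now owns a socket-specific reference table, with the replacement entry points forward-declared in the socket.c hunk below and wired up in core.c above.

/* Old (net/tipc/ref.h)          New (net/tipc/socket.c)
 * tipc_ref_table_init()   ->    tipc_sk_ref_table_init()
 * tipc_ref_table_stop()   ->    tipc_sk_ref_table_stop()
 * tipc_ref_acquire()      ->    tipc_sk_ref_acquire()
 * tipc_ref_discard()      ->    tipc_sk_ref_discard()
 * tipc_ref_lock()         ->    tipc_sk_get() (paired with tipc_sk_put())
 */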
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ff8c8118d56e..75275c5cf929 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -35,17 +35,67 @@
*/
#include "core.h"
-#include "port.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include <linux/export.h>
+#include "config.h"
+#include "socket.h"
#define SS_LISTENING -1 /* socket is listening */
#define SS_READY -2 /* socket is connectionless */
-#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
-#define TIPC_FWD_MSG 1
+#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
+#define CONN_PROBING_INTERVAL 3600000 /* [ms] => 1 h */
+#define TIPC_FWD_MSG 1
+#define TIPC_CONN_OK 0
+#define TIPC_CONN_PROBING 1
+
+/**
+ * struct tipc_sock - TIPC socket structure
+ * @sk: socket - interacts with 'port' and with user via the socket API
+ * @connected: non-zero if port is currently connected to a peer port
+ * @conn_type: TIPC type used when connection was established
+ * @conn_instance: TIPC instance used when connection was established
+ * @published: non-zero if port has one or more associated names
+ * @max_pkt: maximum packet size "hint" used when building messages sent by port
+ * @ref: unique reference to port in TIPC object registry
+ * @phdr: preformatted message header used when sending messages
+ * @sock_list: adjacent sockets in TIPC's global list of sockets
+ * @publications: list of publications for socket
+ * @pub_count: total # of publications socket has made during its lifetime
+ * @probing_state: state of connection probing (TIPC_CONN_OK or TIPC_CONN_PROBING)
+ * @probing_interval: interval between connection probes [ms]
+ * @timer: connection probing timer
+ * @conn_timeout: the time we can wait for an unanswered setup request
+ * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
+ * @link_cong: non-zero if owner must sleep because of link congestion
+ * @sent_unacked: # messages sent by socket, and not yet acked by peer
+ * @rcv_unacked: # messages read by user, but not yet acked back to peer
+ */
+struct tipc_sock {
+ struct sock sk;
+ int connected;
+ u32 conn_type;
+ u32 conn_instance;
+ int published;
+ u32 max_pkt;
+ u32 ref;
+ struct tipc_msg phdr;
+ struct list_head sock_list;
+ struct list_head publications;
+ u32 pub_count;
+ u32 probing_state;
+ u32 probing_interval;
+ struct timer_list timer;
+ uint conn_timeout;
+ atomic_t dupl_rcvcnt;
+ bool link_cong;
+ uint sent_unacked;
+ uint rcv_unacked;
+};
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
@@ -53,6 +103,16 @@ static void tipc_write_space(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
+static void tipc_sk_timeout(unsigned long ref);
+static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
+ struct tipc_name_seq const *seq);
+static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
+ struct tipc_name_seq const *seq);
+static u32 tipc_sk_ref_acquire(struct tipc_sock *tsk);
+static void tipc_sk_ref_discard(u32 ref);
+static struct tipc_sock *tipc_sk_get(u32 ref);
+static struct tipc_sock *tipc_sk_get_next(u32 *ref);
+static void tipc_sk_put(struct tipc_sock *tsk);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
@@ -106,24 +166,75 @@ static struct proto tipc_proto_kern;
* - port reference
*/
-#include "socket.h"
+static u32 tsk_peer_node(struct tipc_sock *tsk)
+{
+ return msg_destnode(&tsk->phdr);
+}
+
+static u32 tsk_peer_port(struct tipc_sock *tsk)
+{
+ return msg_destport(&tsk->phdr);
+}
+
+static bool tsk_unreliable(struct tipc_sock *tsk)
+{
+ return msg_src_droppable(&tsk->phdr) != 0;
+}
+
+static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
+{
+ msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
+}
+
+static bool tsk_unreturnable(struct tipc_sock *tsk)
+{
+ return msg_dest_droppable(&tsk->phdr) != 0;
+}
+
+static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
+{
+ msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
+}
+
+static int tsk_importance(struct tipc_sock *tsk)
+{
+ return msg_importance(&tsk->phdr);
+}
+
+static int tsk_set_importance(struct tipc_sock *tsk, int imp)
+{
+ if (imp > TIPC_CRITICAL_IMPORTANCE)
+ return -EINVAL;
+ msg_set_importance(&tsk->phdr, (u32)imp);
+ return 0;
+}
+
+static struct tipc_sock *tipc_sk(const struct sock *sk)
+{
+ return container_of(sk, struct tipc_sock, sk);
+}
+
+static int tsk_conn_cong(struct tipc_sock *tsk)
+{
+ return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
+}
/**
- * advance_rx_queue - discard first buffer in socket receive queue
+ * tsk_advance_rx_queue - discard first buffer in socket receive queue
*
* Caller must hold socket lock
*/
-static void advance_rx_queue(struct sock *sk)
+static void tsk_advance_rx_queue(struct sock *sk)
{
kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}
/**
- * reject_rx_queue - reject all buffers in socket receive queue
+ * tsk_rej_rx_queue - reject all buffers in socket receive queue
*
* Caller must hold socket lock
*/
-static void reject_rx_queue(struct sock *sk)
+static void tsk_rej_rx_queue(struct sock *sk)
{
struct sk_buff *buf;
u32 dnode;
@@ -134,6 +245,38 @@ static void reject_rx_queue(struct sock *sk)
}
}
+/* tsk_peer_msg - verify if message was sent by connected port's peer
+ *
+ * Handles cases where the node's network address has changed from
+ * the default of <0.0.0> to its configured setting.
+ */
+static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
+{
+ u32 peer_port = tsk_peer_port(tsk);
+ u32 orig_node;
+ u32 peer_node;
+
+ if (unlikely(!tsk->connected))
+ return false;
+
+ if (unlikely(msg_origport(msg) != peer_port))
+ return false;
+
+ orig_node = msg_orignode(msg);
+ peer_node = tsk_peer_node(tsk);
+
+ if (likely(orig_node == peer_node))
+ return true;
+
+ if (!orig_node && (peer_node == tipc_own_addr))
+ return true;
+
+ if (!peer_node && (orig_node == tipc_own_addr))
+ return true;
+
+ return false;
+}
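
A minimal user-space sketch of the three origins the check above accepts; the
node address value and the mock helper are made up purely for illustration:

	#include <stdio.h>
	#include <stdbool.h>

	/* user-space mock of the accept logic in tsk_peer_msg() */
	static bool peer_match(unsigned int own, unsigned int peer_node,
			       unsigned int orig_node)
	{
		if (orig_node == peer_node)
			return true;	/* normal case: addresses agree */
		if (!orig_node && peer_node == own)
			return true;	/* peer sent before node addr was set */
		if (!peer_node && orig_node == own)
			return true;	/* conn set up before node addr was set */
		return false;
	}

	int main(void)
	{
		unsigned int own = 0x01001001;	/* assumed <1.1.1> node address */

		printf("%d %d %d\n",
		       peer_match(own, own, 0),		/* 1 */
		       peer_match(own, 0, own),		/* 1 */
		       peer_match(own, 42, 43));	/* 0 */
		return 0;
	}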
+
/**
* tipc_sk_create - create a TIPC socket
* @net: network namespace (must be default network)
@@ -153,7 +296,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
socket_state state;
struct sock *sk;
struct tipc_sock *tsk;
- struct tipc_port *port;
+ struct tipc_msg *msg;
u32 ref;
/* Validate arguments */
@@ -188,20 +331,24 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
return -ENOMEM;
tsk = tipc_sk(sk);
- port = &tsk->port;
-
- ref = tipc_port_init(port, TIPC_LOW_IMPORTANCE);
+ ref = tipc_sk_ref_acquire(tsk);
if (!ref) {
- pr_warn("Socket registration failed, ref. table exhausted\n");
- sk_free(sk);
+ pr_warn("Socket create failed; reference table exhausted\n");
return -ENOMEM;
}
+ tsk->max_pkt = MAX_PKT_DEFAULT;
+ tsk->ref = ref;
+ INIT_LIST_HEAD(&tsk->publications);
+ msg = &tsk->phdr;
+ tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+ NAMED_H_SIZE, 0);
+ msg_set_origport(msg, ref);
/* Finish initializing socket data structures */
sock->ops = ops;
sock->state = state;
-
sock_init_data(sock, sk);
+ k_init_timer(&tsk->timer, (Handler)tipc_sk_timeout, ref);
sk->sk_backlog_rcv = tipc_backlog_rcv;
sk->sk_rcvbuf = sysctl_tipc_rmem[1];
sk->sk_data_ready = tipc_data_ready;
@@ -209,12 +356,11 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
tsk->sent_unacked = 0;
atomic_set(&tsk->dupl_rcvcnt, 0);
- tipc_port_unlock(port);
if (sock->state == SS_READY) {
- tipc_port_set_unreturnable(port, true);
+ tsk_set_unreturnable(tsk, true);
if (sock->type == SOCK_DGRAM)
- tipc_port_set_unreliable(port, true);
+ tsk_set_unreliable(tsk, true);
}
return 0;
}
@@ -308,7 +454,6 @@ static int tipc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk;
- struct tipc_port *port;
struct sk_buff *buf;
u32 dnode;
@@ -320,13 +465,13 @@ static int tipc_release(struct socket *sock)
return 0;
tsk = tipc_sk(sk);
- port = &tsk->port;
lock_sock(sk);
/*
* Reject all unreceived messages, except on an active connection
* (which disconnects locally & sends a 'FIN+' to peer)
*/
+ dnode = tsk_peer_node(tsk);
while (sock->state != SS_DISCONNECTING) {
buf = __skb_dequeue(&sk->sk_receive_queue);
if (buf == NULL)
@@ -337,17 +482,27 @@ static int tipc_release(struct socket *sock)
if ((sock->state == SS_CONNECTING) ||
(sock->state == SS_CONNECTED)) {
sock->state = SS_DISCONNECTING;
- tipc_port_disconnect(port->ref);
+ tsk->connected = 0;
+ tipc_node_remove_conn(dnode, tsk->ref);
}
if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
tipc_link_xmit(buf, dnode, 0);
}
}
- /* Destroy TIPC port; also disconnects an active connection and
- * sends a 'FIN-' to peer.
- */
- tipc_port_destroy(port);
+ tipc_sk_withdraw(tsk, 0, NULL);
+ tipc_sk_ref_discard(tsk->ref);
+ k_cancel_timer(&tsk->timer);
+ if (tsk->connected) {
+ buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+ SHORT_H_SIZE, 0, dnode, tipc_own_addr,
+ tsk_peer_port(tsk),
+ tsk->ref, TIPC_ERR_NO_PORT);
+ if (buf)
+ tipc_link_xmit(buf, dnode, tsk->ref);
+ tipc_node_remove_conn(dnode, tsk->ref);
+ }
+ k_term_timer(&tsk->timer);
/* Discard any remaining (connection-based) messages in receive queue */
__skb_queue_purge(&sk->sk_receive_queue);
@@ -355,7 +510,6 @@ static int tipc_release(struct socket *sock)
/* Reject any messages that accumulated in backlog queue */
sock->state = SS_DISCONNECTING;
release_sock(sk);
-
sock_put(sk);
sock->sk = NULL;
@@ -387,7 +541,7 @@ static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
lock_sock(sk);
if (unlikely(!uaddr_len)) {
- res = tipc_withdraw(&tsk->port, 0, NULL);
+ res = tipc_sk_withdraw(tsk, 0, NULL);
goto exit;
}
@@ -415,8 +569,8 @@ static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
}
res = (addr->scope > 0) ?
- tipc_publish(&tsk->port, addr->scope, &addr->addr.nameseq) :
- tipc_withdraw(&tsk->port, -addr->scope, &addr->addr.nameseq);
+ tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
+ tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
exit:
release_sock(sk);
return res;
@@ -446,10 +600,10 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
if ((sock->state != SS_CONNECTED) &&
((peer != 2) || (sock->state != SS_DISCONNECTING)))
return -ENOTCONN;
- addr->addr.id.ref = tipc_port_peerport(&tsk->port);
- addr->addr.id.node = tipc_port_peernode(&tsk->port);
+ addr->addr.id.ref = tsk_peer_port(tsk);
+ addr->addr.id.node = tsk_peer_node(tsk);
} else {
- addr->addr.id.ref = tsk->port.ref;
+ addr->addr.id.ref = tsk->ref;
addr->addr.id.node = tipc_own_addr;
}
@@ -518,7 +672,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
break;
case SS_READY:
case SS_CONNECTED:
- if (!tsk->link_cong && !tipc_sk_conn_cong(tsk))
+ if (!tsk->link_cong && !tsk_conn_cong(tsk))
mask |= POLLOUT;
/* fall thru' */
case SS_CONNECTING:
@@ -549,7 +703,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
struct iovec *iov, size_t dsz, long timeo)
{
struct sock *sk = sock->sk;
- struct tipc_msg *mhdr = &tipc_sk(sk)->port.phdr;
+ struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
struct sk_buff *buf;
uint mtu;
int rc;
@@ -579,6 +733,7 @@ new_mtu:
goto new_mtu;
if (rc != -ELINKCONG)
break;
+ tipc_sk(sk)->link_cong = 1;
rc = tipc_wait_for_sndmsg(sock, &timeo);
if (rc)
kfree_skb_list(buf);
@@ -638,20 +793,19 @@ static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
- struct tipc_port *port = &tsk->port;
int conn_cong;
/* Ignore if connection cannot be validated: */
- if (!port->connected || !tipc_port_peer_msg(port, msg))
+ if (!tsk_peer_msg(tsk, msg))
goto exit;
- port->probing_state = TIPC_CONN_OK;
+ tsk->probing_state = TIPC_CONN_OK;
if (msg_type(msg) == CONN_ACK) {
- conn_cong = tipc_sk_conn_cong(tsk);
+ conn_cong = tsk_conn_cong(tsk);
tsk->sent_unacked -= msg_msgcnt(msg);
if (conn_cong)
- tipc_sock_wakeup(tsk);
+ tsk->sk.sk_write_space(&tsk->sk);
} else if (msg_type(msg) == CONN_PROBE) {
if (!tipc_msg_reverse(buf, dnode, TIPC_OK))
return TIPC_OK;
@@ -742,8 +896,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_port *port = &tsk->port;
- struct tipc_msg *mhdr = &port->phdr;
+ struct tipc_msg *mhdr = &tsk->phdr;
struct iovec *iov = m->msg_iov;
u32 dnode, dport;
struct sk_buff *buf;
@@ -774,13 +927,13 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
rc = -EISCONN;
goto exit;
}
- if (tsk->port.published) {
+ if (tsk->published) {
rc = -EOPNOTSUPP;
goto exit;
}
if (dest->addrtype == TIPC_ADDR_NAME) {
- tsk->port.conn_type = dest->addr.name.name.type;
- tsk->port.conn_instance = dest->addr.name.name.instance;
+ tsk->conn_type = dest->addr.name.name.type;
+ tsk->conn_instance = dest->addr.name.name.instance;
}
}
rc = dest_name_check(dest, m);
@@ -820,13 +973,14 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
}
new_mtu:
- mtu = tipc_node_get_mtu(dnode, tsk->port.ref);
+ mtu = tipc_node_get_mtu(dnode, tsk->ref);
rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
if (rc < 0)
goto exit;
do {
- rc = tipc_link_xmit(buf, dnode, tsk->port.ref);
+ TIPC_SKB_CB(buf)->wakeup_pending = tsk->link_cong;
+ rc = tipc_link_xmit(buf, dnode, tsk->ref);
if (likely(rc >= 0)) {
if (sock->state != SS_READY)
sock->state = SS_CONNECTING;
@@ -835,10 +989,9 @@ new_mtu:
}
if (rc == -EMSGSIZE)
goto new_mtu;
-
if (rc != -ELINKCONG)
break;
-
+ tsk->link_cong = 1;
rc = tipc_wait_for_sndmsg(sock, &timeo);
if (rc)
kfree_skb_list(buf);
@@ -873,8 +1026,8 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
done = sk_wait_event(sk, timeo_p,
(!tsk->link_cong &&
- !tipc_sk_conn_cong(tsk)) ||
- !tsk->port.connected);
+ !tsk_conn_cong(tsk)) ||
+ !tsk->connected);
finish_wait(sk_sleep(sk), &wait);
} while (!done);
return 0;
@@ -897,11 +1050,10 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_port *port = &tsk->port;
- struct tipc_msg *mhdr = &port->phdr;
+ struct tipc_msg *mhdr = &tsk->phdr;
struct sk_buff *buf;
DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
- u32 ref = port->ref;
+ u32 ref = tsk->ref;
int rc = -EINVAL;
long timeo;
u32 dnode;
@@ -929,16 +1081,16 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
}
timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
- dnode = tipc_port_peernode(port);
+ dnode = tsk_peer_node(tsk);
next:
- mtu = port->max_pkt;
+ mtu = tsk->max_pkt;
send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf);
if (unlikely(rc < 0))
goto exit;
do {
- if (likely(!tipc_sk_conn_cong(tsk))) {
+ if (likely(!tsk_conn_cong(tsk))) {
rc = tipc_link_xmit(buf, dnode, ref);
if (likely(!rc)) {
tsk->sent_unacked++;
@@ -948,11 +1100,12 @@ next:
goto next;
}
if (rc == -EMSGSIZE) {
- port->max_pkt = tipc_node_get_mtu(dnode, ref);
+ tsk->max_pkt = tipc_node_get_mtu(dnode, ref);
goto next;
}
if (rc != -ELINKCONG)
break;
+ tsk->link_cong = 1;
}
rc = tipc_wait_for_sndpkt(sock, &timeo);
if (rc)
@@ -984,29 +1137,25 @@ static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
return tipc_send_stream(iocb, sock, m, dsz);
}
-/**
- * auto_connect - complete connection setup to a remote port
- * @tsk: tipc socket structure
- * @msg: peer's response message
- *
- * Returns 0 on success, errno otherwise
+/* tipc_sk_finish_conn - complete the setup of a connection
*/
-static int auto_connect(struct tipc_sock *tsk, struct tipc_msg *msg)
+static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
+ u32 peer_node)
{
- struct tipc_port *port = &tsk->port;
- struct socket *sock = tsk->sk.sk_socket;
- struct tipc_portid peer;
-
- peer.ref = msg_origport(msg);
- peer.node = msg_orignode(msg);
-
- __tipc_port_connect(port->ref, port, &peer);
-
- if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
- return -EINVAL;
- msg_set_importance(&port->phdr, (u32)msg_importance(msg));
- sock->state = SS_CONNECTED;
- return 0;
+ struct tipc_msg *msg = &tsk->phdr;
+
+ msg_set_destnode(msg, peer_node);
+ msg_set_destport(msg, peer_port);
+ msg_set_type(msg, TIPC_CONN_MSG);
+ msg_set_lookup_scope(msg, 0);
+ msg_set_hdr_sz(msg, SHORT_H_SIZE);
+
+ tsk->probing_interval = CONN_PROBING_INTERVAL;
+ tsk->probing_state = TIPC_CONN_OK;
+ tsk->connected = 1;
+ k_start_timer(&tsk->timer, tsk->probing_interval);
+ tipc_node_add_conn(peer_node, tsk->ref, peer_port);
+ tsk->max_pkt = tipc_node_get_mtu(peer_node, tsk->ref);
}
/**
@@ -1033,17 +1182,17 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
}
/**
- * anc_data_recv - optionally capture ancillary data for received message
+ * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
* @m: descriptor for message info
* @msg: received message header
- * @tport: TIPC port associated with message
+ * @tsk: TIPC socket associated with message
*
* Note: Ancillary data is not captured if not requested by receiver.
*
* Returns 0 if successful, otherwise errno
*/
-static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
- struct tipc_port *tport)
+static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
+ struct tipc_sock *tsk)
{
u32 anc_data[3];
u32 err;
@@ -1086,10 +1235,10 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
anc_data[2] = msg_nameupper(msg);
break;
case TIPC_CONN_MSG:
- has_name = (tport->conn_type != 0);
- anc_data[0] = tport->conn_type;
- anc_data[1] = tport->conn_instance;
- anc_data[2] = tport->conn_instance;
+ has_name = (tsk->conn_type != 0);
+ anc_data[0] = tsk->conn_type;
+ anc_data[1] = tsk->conn_instance;
+ anc_data[2] = tsk->conn_instance;
break;
default:
has_name = 0;
@@ -1103,6 +1252,24 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
return 0;
}
+static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
+{
+ struct sk_buff *buf = NULL;
+ struct tipc_msg *msg;
+ u32 peer_port = tsk_peer_port(tsk);
+ u32 dnode = tsk_peer_node(tsk);
+
+ if (!tsk->connected)
+ return;
+ buf = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
+ tipc_own_addr, peer_port, tsk->ref, TIPC_OK);
+ if (!buf)
+ return;
+ msg = buf_msg(buf);
+ msg_set_msgcnt(msg, ack);
+ tipc_link_xmit(buf, dnode, msg_link_selector(msg));
+}
+
static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
{
struct sock *sk = sock->sk;
@@ -1153,7 +1320,6 @@ static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_port *port = &tsk->port;
struct sk_buff *buf;
struct tipc_msg *msg;
long timeo;
@@ -1188,7 +1354,7 @@ restart:
/* Discard an empty non-errored message & try again */
if ((!sz) && (!err)) {
- advance_rx_queue(sk);
+ tsk_advance_rx_queue(sk);
goto restart;
}
@@ -1196,7 +1362,7 @@ restart:
set_orig_addr(m, msg);
/* Capture ancillary data (optional) */
- res = anc_data_recv(m, msg, port);
+ res = tipc_sk_anc_data_recv(m, msg, tsk);
if (res)
goto exit;
@@ -1223,10 +1389,10 @@ restart:
if (likely(!(flags & MSG_PEEK))) {
if ((sock->state != SS_READY) &&
(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
- tipc_acknowledge(port->ref, tsk->rcv_unacked);
+ tipc_sk_send_ack(tsk, tsk->rcv_unacked);
tsk->rcv_unacked = 0;
}
- advance_rx_queue(sk);
+ tsk_advance_rx_queue(sk);
}
exit:
release_sock(sk);
@@ -1250,7 +1416,6 @@ static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_port *port = &tsk->port;
struct sk_buff *buf;
struct tipc_msg *msg;
long timeo;
@@ -1288,14 +1453,14 @@ restart:
/* Discard an empty non-errored message & try again */
if ((!sz) && (!err)) {
- advance_rx_queue(sk);
+ tsk_advance_rx_queue(sk);
goto restart;
}
/* Optionally capture sender's address & ancillary data of first msg */
if (sz_copied == 0) {
set_orig_addr(m, msg);
- res = anc_data_recv(m, msg, port);
+ res = tipc_sk_anc_data_recv(m, msg, tsk);
if (res)
goto exit;
}
@@ -1334,10 +1499,10 @@ restart:
/* Consume received message (optional) */
if (likely(!(flags & MSG_PEEK))) {
if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
- tipc_acknowledge(port->ref, tsk->rcv_unacked);
+ tipc_sk_send_ack(tsk, tsk->rcv_unacked);
tsk->rcv_unacked = 0;
}
- advance_rx_queue(sk);
+ tsk_advance_rx_queue(sk);
}
/* Loop around if more data is required */
@@ -1396,12 +1561,9 @@ static void tipc_data_ready(struct sock *sk)
static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
{
struct sock *sk = &tsk->sk;
- struct tipc_port *port = &tsk->port;
struct socket *sock = sk->sk_socket;
struct tipc_msg *msg = buf_msg(*buf);
-
int retval = -TIPC_ERR_NO_PORT;
- int res;
if (msg_mcast(msg))
return retval;
@@ -1409,16 +1571,23 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
switch ((int)sock->state) {
case SS_CONNECTED:
/* Accept only connection-based messages sent by peer */
- if (msg_connected(msg) && tipc_port_peer_msg(port, msg)) {
+ if (tsk_peer_msg(tsk, msg)) {
if (unlikely(msg_errcode(msg))) {
sock->state = SS_DISCONNECTING;
- __tipc_port_disconnect(port);
+ tsk->connected = 0;
+			/* let timer expire on its own */
+ tipc_node_remove_conn(tsk_peer_node(tsk),
+ tsk->ref);
}
retval = TIPC_OK;
}
break;
case SS_CONNECTING:
/* Accept only ACK or NACK message */
+
+ if (unlikely(!msg_connected(msg)))
+ break;
+
if (unlikely(msg_errcode(msg))) {
sock->state = SS_DISCONNECTING;
sk->sk_err = ECONNREFUSED;
@@ -1426,17 +1595,17 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
break;
}
- if (unlikely(!msg_connected(msg)))
- break;
-
- res = auto_connect(tsk, msg);
- if (res) {
+ if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
sock->state = SS_DISCONNECTING;
- sk->sk_err = -res;
+ sk->sk_err = EINVAL;
retval = TIPC_OK;
break;
}
+ tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
+ msg_set_importance(&tsk->phdr, msg_importance(msg));
+ sock->state = SS_CONNECTED;
+
/* If an incoming message is an 'ACK-', it should be
* discarded here because it doesn't contain useful
* data. In addition, we should try to wake up
@@ -1518,6 +1687,13 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
if (unlikely(msg_user(msg) == CONN_MANAGER))
return tipc_sk_proto_rcv(tsk, &onode, buf);
+ if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
+ kfree_skb(buf);
+ tsk->link_cong = 0;
+ sk->sk_write_space(sk);
+ return TIPC_OK;
+ }
+
/* Reject message if it is wrong sort of message for socket */
if (msg_type(msg) > TIPC_DIRECT_MSG)
return -TIPC_ERR_NO_PORT;
@@ -1585,7 +1761,6 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
int tipc_sk_rcv(struct sk_buff *buf)
{
struct tipc_sock *tsk;
- struct tipc_port *port;
struct sock *sk;
u32 dport = msg_destport(buf_msg(buf));
int rc = TIPC_OK;
@@ -1593,13 +1768,11 @@ int tipc_sk_rcv(struct sk_buff *buf)
u32 dnode;
/* Validate destination and message */
- port = tipc_port_lock(dport);
- if (unlikely(!port)) {
+ tsk = tipc_sk_get(dport);
+ if (unlikely(!tsk)) {
rc = tipc_msg_eval(buf, &dnode);
goto exit;
}
-
- tsk = tipc_port_to_sock(port);
sk = &tsk->sk;
/* Queue message */
@@ -1615,8 +1788,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
rc = -TIPC_ERR_OVERLOAD;
}
bh_unlock_sock(sk);
- tipc_port_unlock(port);
-
+ tipc_sk_put(tsk);
if (likely(!rc))
return 0;
exit:
@@ -1803,10 +1975,8 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
{
struct sock *new_sk, *sk = sock->sk;
struct sk_buff *buf;
- struct tipc_port *new_port;
+ struct tipc_sock *new_tsock;
struct tipc_msg *msg;
- struct tipc_portid peer;
- u32 new_ref;
long timeo;
int res;
@@ -1828,8 +1998,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
goto exit;
new_sk = new_sock->sk;
- new_port = &tipc_sk(new_sk)->port;
- new_ref = new_port->ref;
+ new_tsock = tipc_sk(new_sk);
msg = buf_msg(buf);
/* we lock on new_sk; but lockdep sees the lock on sk */
@@ -1839,18 +2008,16 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
* Reject any stray messages received by new socket
* before the socket lock was taken (very, very unlikely)
*/
- reject_rx_queue(new_sk);
+ tsk_rej_rx_queue(new_sk);
/* Connect new socket to it's peer */
- peer.ref = msg_origport(msg);
- peer.node = msg_orignode(msg);
- tipc_port_connect(new_ref, &peer);
+ tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
new_sock->state = SS_CONNECTED;
- tipc_port_set_importance(new_port, msg_importance(msg));
+ tsk_set_importance(new_tsock, msg_importance(msg));
if (msg_named(msg)) {
- new_port->conn_type = msg_nametype(msg);
- new_port->conn_instance = msg_nameinst(msg);
+ new_tsock->conn_type = msg_nametype(msg);
+ new_tsock->conn_instance = msg_nameinst(msg);
}
/*
@@ -1860,7 +2027,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
if (!msg_data_sz(msg)) {
struct msghdr m = {NULL,};
- advance_rx_queue(sk);
+ tsk_advance_rx_queue(sk);
tipc_send_packet(NULL, new_sock, &m, 0);
} else {
__skb_dequeue(&sk->sk_receive_queue);
@@ -1886,9 +2053,8 @@ static int tipc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_port *port = &tsk->port;
struct sk_buff *buf;
- u32 peer;
+ u32 dnode;
int res;
if (how != SHUT_RDWR)
@@ -1908,15 +2074,21 @@ restart:
kfree_skb(buf);
goto restart;
}
- tipc_port_disconnect(port->ref);
- if (tipc_msg_reverse(buf, &peer, TIPC_CONN_SHUTDOWN))
- tipc_link_xmit(buf, peer, 0);
+ if (tipc_msg_reverse(buf, &dnode, TIPC_CONN_SHUTDOWN))
+ tipc_link_xmit(buf, dnode, tsk->ref);
+ tipc_node_remove_conn(dnode, tsk->ref);
} else {
- tipc_port_shutdown(port->ref);
+ dnode = tsk_peer_node(tsk);
+ buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
+ TIPC_CONN_MSG, SHORT_H_SIZE,
+ 0, dnode, tipc_own_addr,
+ tsk_peer_port(tsk),
+ tsk->ref, TIPC_CONN_SHUTDOWN);
+ tipc_link_xmit(buf, dnode, tsk->ref);
}
-
+ tsk->connected = 0;
sock->state = SS_DISCONNECTING;
-
+ tipc_node_remove_conn(dnode, tsk->ref);
/* fall through */
case SS_DISCONNECTING:
@@ -1937,6 +2109,432 @@ restart:
return res;
}
+static void tipc_sk_timeout(unsigned long ref)
+{
+ struct tipc_sock *tsk;
+ struct sock *sk;
+ struct sk_buff *buf = NULL;
+ u32 peer_port, peer_node;
+
+ tsk = tipc_sk_get(ref);
+ if (!tsk)
+ return;
+
+ sk = &tsk->sk;
+ bh_lock_sock(sk);
+ if (!tsk->connected) {
+ bh_unlock_sock(sk);
+ goto exit;
+ }
+ peer_port = tsk_peer_port(tsk);
+ peer_node = tsk_peer_node(tsk);
+
+ if (tsk->probing_state == TIPC_CONN_PROBING) {
+ /* Previous probe not answered -> self abort */
+ buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+ SHORT_H_SIZE, 0, tipc_own_addr,
+ peer_node, ref, peer_port,
+ TIPC_ERR_NO_PORT);
+ } else {
+ buf = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
+ 0, peer_node, tipc_own_addr,
+ peer_port, ref, TIPC_OK);
+ tsk->probing_state = TIPC_CONN_PROBING;
+ k_start_timer(&tsk->timer, tsk->probing_interval);
+ }
+ bh_unlock_sock(sk);
+ if (buf)
+ tipc_link_xmit(buf, peer_node, ref);
+exit:
+ tipc_sk_put(tsk);
+}
+
+static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
+ struct tipc_name_seq const *seq)
+{
+ struct publication *publ;
+ u32 key;
+
+ if (tsk->connected)
+ return -EINVAL;
+ key = tsk->ref + tsk->pub_count + 1;
+ if (key == tsk->ref)
+ return -EADDRINUSE;
+
+ publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
+ scope, tsk->ref, key);
+ if (unlikely(!publ))
+ return -EINVAL;
+
+ list_add(&publ->pport_list, &tsk->publications);
+ tsk->pub_count++;
+ tsk->published = 1;
+ return 0;
+}
+
+static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
+ struct tipc_name_seq const *seq)
+{
+ struct publication *publ;
+ struct publication *safe;
+ int rc = -EINVAL;
+
+ list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
+ if (seq) {
+ if (publ->scope != scope)
+ continue;
+ if (publ->type != seq->type)
+ continue;
+ if (publ->lower != seq->lower)
+ continue;
+ if (publ->upper != seq->upper)
+ break;
+ tipc_nametbl_withdraw(publ->type, publ->lower,
+ publ->ref, publ->key);
+ rc = 0;
+ break;
+ }
+ tipc_nametbl_withdraw(publ->type, publ->lower,
+ publ->ref, publ->key);
+ rc = 0;
+ }
+ if (list_empty(&tsk->publications))
+ tsk->published = 0;
+ return rc;
+}
+
+static int tipc_sk_show(struct tipc_sock *tsk, char *buf,
+ int len, int full_id)
+{
+ struct publication *publ;
+ int ret;
+
+ if (full_id)
+ ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
+ tipc_zone(tipc_own_addr),
+ tipc_cluster(tipc_own_addr),
+ tipc_node(tipc_own_addr), tsk->ref);
+ else
+ ret = tipc_snprintf(buf, len, "%-10u:", tsk->ref);
+
+ if (tsk->connected) {
+ u32 dport = tsk_peer_port(tsk);
+ u32 destnode = tsk_peer_node(tsk);
+
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " connected to <%u.%u.%u:%u>",
+ tipc_zone(destnode),
+ tipc_cluster(destnode),
+ tipc_node(destnode), dport);
+ if (tsk->conn_type != 0)
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " via {%u,%u}", tsk->conn_type,
+ tsk->conn_instance);
+ } else if (tsk->published) {
+ ret += tipc_snprintf(buf + ret, len - ret, " bound to");
+ list_for_each_entry(publ, &tsk->publications, pport_list) {
+ if (publ->lower == publ->upper)
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " {%u,%u}", publ->type,
+ publ->lower);
+ else
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " {%u,%u,%u}", publ->type,
+ publ->lower, publ->upper);
+ }
+ }
+ ret += tipc_snprintf(buf + ret, len - ret, "\n");
+ return ret;
+}
+
+struct sk_buff *tipc_sk_socks_show(void)
+{
+ struct sk_buff *buf;
+ struct tlv_desc *rep_tlv;
+ char *pb;
+ int pb_len;
+ struct tipc_sock *tsk;
+ int str_len = 0;
+ u32 ref = 0;
+
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
+ if (!buf)
+ return NULL;
+ rep_tlv = (struct tlv_desc *)buf->data;
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
+
+ tsk = tipc_sk_get_next(&ref);
+ for (; tsk; tsk = tipc_sk_get_next(&ref)) {
+ lock_sock(&tsk->sk);
+ str_len += tipc_sk_show(tsk, pb + str_len,
+ pb_len - str_len, 0);
+ release_sock(&tsk->sk);
+ tipc_sk_put(tsk);
+ }
+ str_len += 1; /* for "\0" */
+ skb_put(buf, TLV_SPACE(str_len));
+ TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+ return buf;
+}
+
+/* tipc_sk_reinit: set non-zero address in all existing sockets
+ * when we go from standalone to network mode.
+ */
+void tipc_sk_reinit(void)
+{
+ struct tipc_msg *msg;
+ u32 ref = 0;
+ struct tipc_sock *tsk = tipc_sk_get_next(&ref);
+
+ for (; tsk; tsk = tipc_sk_get_next(&ref)) {
+ lock_sock(&tsk->sk);
+ msg = &tsk->phdr;
+ msg_set_prevnode(msg, tipc_own_addr);
+ msg_set_orignode(msg, tipc_own_addr);
+ release_sock(&tsk->sk);
+ tipc_sk_put(tsk);
+ }
+}
+
+/**
+ * struct reference - TIPC socket reference entry
+ * @tsk: pointer to socket associated with reference entry
+ * @ref: reference value for socket (combines instance & array index info)
+ */
+struct reference {
+ struct tipc_sock *tsk;
+ u32 ref;
+};
+
+/**
+ * struct ref_table - table of TIPC socket reference entries
+ * @entries: pointer to array of reference entries
+ * @capacity: array index of first unusable entry
+ * @init_point: array index of first uninitialized entry
+ * @first_free: array index of first unused socket reference entry
+ * @last_free: array index of last unused socket reference entry
+ * @index_mask: bitmask for array index portion of reference values
+ * @start_mask: initial value for instance value portion of reference values
+ */
+struct ref_table {
+ struct reference *entries;
+ u32 capacity;
+ u32 init_point;
+ u32 first_free;
+ u32 last_free;
+ u32 index_mask;
+ u32 start_mask;
+};
+
+/* Socket reference table consists of 2**N entries.
+ *
+ * State Socket ptr Reference
+ * ----- ---------- ---------
+ * In use non-NULL XXXX|own index
+ * (XXXX changes each time entry is acquired)
+ * Free NULL YYYY|next free index
+ * (YYYY is one more than last used XXXX)
+ * Uninitialized NULL 0
+ *
+ * Entry 0 is not used; this allows index 0 to denote the end of the free list.
+ *
+ * Note that a reference value of 0 does not necessarily indicate that an
+ * entry is uninitialized, since the last entry in the free list could also
+ * have a reference value of 0 (although this is unlikely).
+ */
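
A minimal user-space sketch of how a reference value splits into the index and
instance parts described above, and how discarding bumps the instance so that
stale references stop matching (the 16-entry table size is assumed purely for
illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned int index_mask = 16 - 1;	/* table of 2**4 entries */
		unsigned int ref = 0x30 | 5;		/* instance 0x30, index 5 */

		printf("index    = %u\n", ref & index_mask);	/* 5 */
		printf("instance = 0x%x\n", ref & ~index_mask);	/* 0x30 */

		/* on discard, the instance part grows by (index_mask + 1),
		 * so a stale ref carrying the old instance no longer matches */
		printf("next ref base = 0x%x\n",
		       (ref & ~index_mask) + (index_mask + 1));	/* 0x40 */
		return 0;
	}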
+
+static struct ref_table tipc_ref_table;
+
+static DEFINE_RWLOCK(ref_table_lock);
+
+/**
+ * tipc_sk_ref_table_init - create reference table for sockets
+ */
+int tipc_sk_ref_table_init(u32 req_sz, u32 start)
+{
+ struct reference *table;
+ u32 actual_sz;
+
+ /* account for unused entry, then round up size to a power of 2 */
+
+ req_sz++;
+	for (actual_sz = 16; actual_sz < req_sz; actual_sz <<= 1) {
+		/* do nothing */
+	}
+
+ /* allocate table & mark all entries as uninitialized */
+ table = vzalloc(actual_sz * sizeof(struct reference));
+ if (table == NULL)
+ return -ENOMEM;
+
+ tipc_ref_table.entries = table;
+ tipc_ref_table.capacity = req_sz;
+ tipc_ref_table.init_point = 1;
+ tipc_ref_table.first_free = 0;
+ tipc_ref_table.last_free = 0;
+ tipc_ref_table.index_mask = actual_sz - 1;
+ tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
+
+ return 0;
+}
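
A quick user-space check of the size rounding above, assuming a caller requests
room for 100 sockets:

	#include <stdio.h>

	int main(void)
	{
		unsigned int req_sz = 100 + 1;	/* +1 for the unused entry 0 */
		unsigned int actual_sz;

		for (actual_sz = 16; actual_sz < req_sz; actual_sz <<= 1)
			;	/* round up to a power of 2 */

		printf("actual_sz  = %u\n", actual_sz);		/* 128 */
		printf("index_mask = 0x%x\n", actual_sz - 1);	/* 0x7f */
		return 0;
	}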
+
+/**
+ * tipc_sk_ref_table_stop - destroy reference table for sockets
+ */
+void tipc_sk_ref_table_stop(void)
+{
+ if (!tipc_ref_table.entries)
+ return;
+ vfree(tipc_ref_table.entries);
+ tipc_ref_table.entries = NULL;
+}
+
+/* tipc_sk_ref_acquire - create reference to a socket
+ *
+ * Register a socket pointer in the reference table.
+ * Returns a unique reference value that is used from then on to retrieve the
+ * socket pointer, or to determine if the socket has been deregistered.
+ */
+u32 tipc_sk_ref_acquire(struct tipc_sock *tsk)
+{
+ u32 index;
+ u32 index_mask;
+ u32 next_plus_upper;
+ u32 ref = 0;
+ struct reference *entry;
+
+ if (unlikely(!tsk)) {
+ pr_err("Attempt to acquire ref. to non-existent obj\n");
+ return 0;
+ }
+ if (unlikely(!tipc_ref_table.entries)) {
+ pr_err("Ref. table not found in acquisition attempt\n");
+ return 0;
+ }
+
+ /* Take a free entry, if available; otherwise initialize a new one */
+ write_lock_bh(&ref_table_lock);
+ index = tipc_ref_table.first_free;
+ entry = &tipc_ref_table.entries[index];
+
+	if (likely(index)) {
+ index_mask = tipc_ref_table.index_mask;
+ next_plus_upper = entry->ref;
+ tipc_ref_table.first_free = next_plus_upper & index_mask;
+ ref = (next_plus_upper & ~index_mask) + index;
+ entry->tsk = tsk;
+ } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
+ index = tipc_ref_table.init_point++;
+ entry = &tipc_ref_table.entries[index];
+ ref = tipc_ref_table.start_mask + index;
+ }
+
+ if (ref) {
+ entry->ref = ref;
+ entry->tsk = tsk;
+ }
+ write_unlock_bh(&ref_table_lock);
+ return ref;
+}
+
+/* tipc_sk_ref_discard - invalidate reference to a socket
+ *
+ * Disallow future references to a socket and free up the entry for re-use.
+ */
+void tipc_sk_ref_discard(u32 ref)
+{
+ struct reference *entry;
+ u32 index;
+ u32 index_mask;
+
+ if (unlikely(!tipc_ref_table.entries)) {
+ pr_err("Ref. table not found during discard attempt\n");
+ return;
+ }
+
+ index_mask = tipc_ref_table.index_mask;
+ index = ref & index_mask;
+ entry = &tipc_ref_table.entries[index];
+
+ write_lock_bh(&ref_table_lock);
+
+ if (unlikely(!entry->tsk)) {
+ pr_err("Attempt to discard ref. to non-existent socket\n");
+ goto exit;
+ }
+ if (unlikely(entry->ref != ref)) {
+ pr_err("Attempt to discard non-existent reference\n");
+ goto exit;
+ }
+
+ /* Mark entry as unused; increment instance part of entry's
+ * reference to invalidate any subsequent references
+ */
+
+ entry->tsk = NULL;
+ entry->ref = (ref & ~index_mask) + (index_mask + 1);
+
+ /* Append entry to free entry list */
+ if (unlikely(tipc_ref_table.first_free == 0))
+ tipc_ref_table.first_free = index;
+ else
+ tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
+ tipc_ref_table.last_free = index;
+exit:
+ write_unlock_bh(&ref_table_lock);
+}
+
+/* tipc_sk_get - find referenced socket and return pointer to it
+ */
+struct tipc_sock *tipc_sk_get(u32 ref)
+{
+ struct reference *entry;
+ struct tipc_sock *tsk;
+
+ if (unlikely(!tipc_ref_table.entries))
+ return NULL;
+ read_lock_bh(&ref_table_lock);
+ entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
+ tsk = entry->tsk;
+ if (likely(tsk && (entry->ref == ref)))
+ sock_hold(&tsk->sk);
+ else
+ tsk = NULL;
+ read_unlock_bh(&ref_table_lock);
+ return tsk;
+}
+
+/* tipc_sk_get_next - hold & return next socket after the referenced one
+ */
+struct tipc_sock *tipc_sk_get_next(u32 *ref)
+{
+ struct reference *entry;
+ struct tipc_sock *tsk = NULL;
+ uint index = *ref & tipc_ref_table.index_mask;
+
+ read_lock_bh(&ref_table_lock);
+ while (++index < tipc_ref_table.capacity) {
+ entry = &tipc_ref_table.entries[index];
+ if (!entry->tsk)
+ continue;
+ tsk = entry->tsk;
+ sock_hold(&tsk->sk);
+ *ref = entry->ref;
+ break;
+ }
+ read_unlock_bh(&ref_table_lock);
+ return tsk;
+}
+
+static void tipc_sk_put(struct tipc_sock *tsk)
+{
+ sock_put(&tsk->sk);
+}
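
A hypothetical in-kernel caller, sketching the intended lifecycle of the
reference primitives above; tipc_sk_create() and tipc_release() follow the
same acquire/lookup/discard pattern:

	/* hypothetical caller, for illustration only */
	static int example_ref_lifecycle(struct tipc_sock *tsk)
	{
		struct tipc_sock *found;
		u32 ref = tipc_sk_ref_acquire(tsk);	/* register, get unique ref */

		if (!ref)
			return -ENOMEM;			/* reference table exhausted */

		found = tipc_sk_get(ref);		/* lookup takes a sock_hold() */
		if (found)
			tipc_sk_put(found);		/* drop the hold when done */

		tipc_sk_ref_discard(ref);		/* ref is stale from here on */
		return 0;
	}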
+
/**
* tipc_setsockopt - set socket option
* @sock: socket structure
@@ -1955,7 +2553,6 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_port *port = &tsk->port;
u32 value;
int res;
@@ -1973,16 +2570,16 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
switch (opt) {
case TIPC_IMPORTANCE:
- res = tipc_port_set_importance(port, value);
+ res = tsk_set_importance(tsk, value);
break;
case TIPC_SRC_DROPPABLE:
if (sock->type != SOCK_STREAM)
- tipc_port_set_unreliable(port, value);
+ tsk_set_unreliable(tsk, value);
else
res = -ENOPROTOOPT;
break;
case TIPC_DEST_DROPPABLE:
- tipc_port_set_unreturnable(port, value);
+ tsk_set_unreturnable(tsk, value);
break;
case TIPC_CONN_TIMEOUT:
tipc_sk(sk)->conn_timeout = value;
@@ -2015,7 +2612,6 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_port *port = &tsk->port;
int len;
u32 value;
int res;
@@ -2032,16 +2628,16 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
switch (opt) {
case TIPC_IMPORTANCE:
- value = tipc_port_importance(port);
+ value = tsk_importance(tsk);
break;
case TIPC_SRC_DROPPABLE:
- value = tipc_port_unreliable(port);
+ value = tsk_unreliable(tsk);
break;
case TIPC_DEST_DROPPABLE:
- value = tipc_port_unreturnable(port);
+ value = tsk_unreturnable(tsk);
break;
case TIPC_CONN_TIMEOUT:
- value = tipc_sk(sk)->conn_timeout;
+ value = tsk->conn_timeout;
/* no need to set "res", since already 0 at this point */
break;
case TIPC_NODE_RECVQ_DEPTH:
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index 43b75b3ceced..baa43d03901e 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -35,56 +35,17 @@
#ifndef _TIPC_SOCK_H
#define _TIPC_SOCK_H
-#include "port.h"
#include <net/sock.h>
-#define TIPC_CONN_OK 0
-#define TIPC_CONN_PROBING 1
-
-/**
- * struct tipc_sock - TIPC socket structure
- * @sk: socket - interacts with 'port' and with user via the socket API
- * @port: port - interacts with 'sk' and with the rest of the TIPC stack
- * @peer_name: the peer of the connection, if any
- * @conn_timeout: the time we can wait for an unresponded setup request
- * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
- * @link_cong: non-zero if owner must sleep because of link congestion
- * @sent_unacked: # messages sent by socket, and not yet acked by peer
- * @rcv_unacked: # messages read by user, but not yet acked back to peer
- */
-
-struct tipc_sock {
- struct sock sk;
- struct tipc_port port;
- unsigned int conn_timeout;
- atomic_t dupl_rcvcnt;
- int link_cong;
- uint sent_unacked;
- uint rcv_unacked;
-};
-
-static inline struct tipc_sock *tipc_sk(const struct sock *sk)
-{
- return container_of(sk, struct tipc_sock, sk);
-}
-
-static inline struct tipc_sock *tipc_port_to_sock(const struct tipc_port *port)
-{
- return container_of(port, struct tipc_sock, port);
-}
-
-static inline void tipc_sock_wakeup(struct tipc_sock *tsk)
-{
- tsk->sk.sk_write_space(&tsk->sk);
-}
-
-static inline int tipc_sk_conn_cong(struct tipc_sock *tsk)
-{
- return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
-}
-
+#define TIPC_CONNACK_INTV 256
+#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2)
+#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
+ SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
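
With the defaults above, the window arithmetic works out as follows; a minimal
user-space sketch of the congestion test applied by tsk_conn_cong() in
socket.c:

	#include <stdio.h>

	#define TIPC_CONNACK_INTV	256
	#define TIPC_FLOWCTRL_WIN	(TIPC_CONNACK_INTV * 2)

	int main(void)
	{
		/* receiver acks every 256 messages read;
		 * sender blocks at 512 unacked messages */
		unsigned int sent_unacked = 512;

		printf("congested: %d\n",
		       sent_unacked >= TIPC_FLOWCTRL_WIN);	/* 1 */
		return 0;
	}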
int tipc_sk_rcv(struct sk_buff *buf);
-
+struct sk_buff *tipc_sk_socks_show(void);
void tipc_sk_mcast_rcv(struct sk_buff *buf);
+void tipc_sk_reinit(void);
+int tipc_sk_ref_table_init(u32 requested_size, u32 start);
+void tipc_sk_ref_table_stop(void);
#endif
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 642437231ad5..31b5cb232a43 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -36,7 +36,6 @@
#include "core.h"
#include "name_table.h"
-#include "port.h"
#include "subscr.h"
/**
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index f3fef93325a8..1a779b1e8510 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -47,6 +47,13 @@ static struct ctl_table tipc_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "named_timeout",
+ .data = &sysctl_tipc_named_timeout,
+ .maxlen = sizeof(sysctl_tipc_named_timeout),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
{}
};
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 9bc73f87f64a..99f7012b23b9 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -258,7 +258,7 @@ static void inc_inflight_move_tail(struct unix_sock *u)
list_move_tail(&u->link, &gc_candidates);
}
-static bool gc_in_progress = false;
+static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000
void wait_for_unix_gc(void)
diff --git a/net/wimax/id-table.c b/net/wimax/id-table.c
index 72273abfcb16..a21508d11036 100644
--- a/net/wimax/id-table.c
+++ b/net/wimax/id-table.c
@@ -137,7 +137,7 @@ void wimax_id_table_release(void)
#endif
spin_lock(&wimax_id_table_lock);
list_for_each_entry(wimax_dev, &wimax_id_table, id_table_node) {
- printk(KERN_ERR "BUG: %s wimax_dev %p ifindex %d not cleared\n",
+ pr_err("BUG: %s wimax_dev %p ifindex %d not cleared\n",
__func__, wimax_dev, wimax_dev->net_dev->ifindex);
WARN_ON(1);
}
diff --git a/net/wimax/op-msg.c b/net/wimax/op-msg.c
index c278b3356f75..54aa146930bd 100644
--- a/net/wimax/op-msg.c
+++ b/net/wimax/op-msg.c
@@ -189,7 +189,7 @@ const void *wimax_msg_data_len(struct sk_buff *msg, size_t *size)
nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
WIMAX_GNL_MSG_DATA);
if (nla == NULL) {
- printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+ pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
return NULL;
}
*size = nla_len(nla);
@@ -211,7 +211,7 @@ const void *wimax_msg_data(struct sk_buff *msg)
nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
WIMAX_GNL_MSG_DATA);
if (nla == NULL) {
- printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+ pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
return NULL;
}
return nla_data(nla);
@@ -232,7 +232,7 @@ ssize_t wimax_msg_len(struct sk_buff *msg)
nla = nlmsg_find_attr(nlh, sizeof(struct genlmsghdr),
WIMAX_GNL_MSG_DATA);
if (nla == NULL) {
- printk(KERN_ERR "Cannot find attribute WIMAX_GNL_MSG_DATA\n");
+ pr_err("Cannot find attribute WIMAX_GNL_MSG_DATA\n");
return -EINVAL;
}
return nla_len(nla);
@@ -343,8 +343,7 @@ int wimax_gnl_doit_msg_from_user(struct sk_buff *skb, struct genl_info *info)
d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
result = -ENODEV;
if (info->attrs[WIMAX_GNL_MSG_IFIDX] == NULL) {
- printk(KERN_ERR "WIMAX_GNL_MSG_FROM_USER: can't find IFIDX "
- "attribute\n");
+ pr_err("WIMAX_GNL_MSG_FROM_USER: can't find IFIDX attribute\n");
goto error_no_wimax_dev;
}
ifindex = nla_get_u32(info->attrs[WIMAX_GNL_MSG_IFIDX]);
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index eb4580784d9d..a42079165e1f 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -107,8 +107,7 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
result = -ENODEV;
if (info->attrs[WIMAX_GNL_RESET_IFIDX] == NULL) {
- printk(KERN_ERR "WIMAX_GNL_OP_RFKILL: can't find IFIDX "
- "attribute\n");
+ pr_err("WIMAX_GNL_OP_RFKILL: can't find IFIDX attribute\n");
goto error_no_wimax_dev;
}
ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RESET_IFIDX]);
diff --git a/net/wimax/op-rfkill.c b/net/wimax/op-rfkill.c
index 403078d670a9..7d730543f243 100644
--- a/net/wimax/op-rfkill.c
+++ b/net/wimax/op-rfkill.c
@@ -421,8 +421,7 @@ int wimax_gnl_doit_rfkill(struct sk_buff *skb, struct genl_info *info)
d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
result = -ENODEV;
if (info->attrs[WIMAX_GNL_RFKILL_IFIDX] == NULL) {
- printk(KERN_ERR "WIMAX_GNL_OP_RFKILL: can't find IFIDX "
- "attribute\n");
+ pr_err("WIMAX_GNL_OP_RFKILL: can't find IFIDX attribute\n");
goto error_no_wimax_dev;
}
ifindex = nla_get_u32(info->attrs[WIMAX_GNL_RFKILL_IFIDX]);
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index 995c08c827b5..e6788d281d0e 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -49,8 +49,7 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
result = -ENODEV;
if (info->attrs[WIMAX_GNL_STGET_IFIDX] == NULL) {
- printk(KERN_ERR "WIMAX_GNL_OP_STATE_GET: can't find IFIDX "
- "attribute\n");
+ pr_err("WIMAX_GNL_OP_STATE_GET: can't find IFIDX attribute\n");
goto error_no_wimax_dev;
}
ifindex = nla_get_u32(info->attrs[WIMAX_GNL_STGET_IFIDX]);
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index ec8b577db135..3f816e2971ee 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -191,8 +191,8 @@ void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
unsigned int allowed_states_bm)
{
if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
- printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n",
- old_state, new_state);
+ pr_err("SW BUG! Forbidden state change %u -> %u\n",
+ old_state, new_state);
}
}
@@ -602,8 +602,7 @@ int __init wimax_subsys_init(void)
wimax_gnl_ops,
wimax_gnl_mcgrps);
if (unlikely(result < 0)) {
- printk(KERN_ERR "cannot register generic netlink family: %d\n",
- result);
+ pr_err("cannot register generic netlink family: %d\n", result);
goto error_register_family;
}
diff --git a/net/wimax/wimax-internal.h b/net/wimax/wimax-internal.h
index b445b82020a8..733c4bf8d4b3 100644
--- a/net/wimax/wimax-internal.h
+++ b/net/wimax/wimax-internal.h
@@ -30,6 +30,12 @@
#define __WIMAX_INTERNAL_H__
#ifdef __KERNEL__
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <net/wimax.h>
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 992b34070bcb..72d81e2154d5 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -4,6 +4,7 @@
* any point in time.
*
* Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*/
#include <linux/export.h>
diff --git a/net/wireless/core.c b/net/wireless/core.c
index afee5e0455ea..f52a4cd7017c 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -2,6 +2,7 @@
* This is the linux wireless configuration interface.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -492,12 +493,6 @@ int wiphy_register(struct wiphy *wiphy)
int i;
u16 ifmodes = wiphy->interface_modes;
- /*
- * There are major locking problems in nl80211/mac80211 for CSA,
- * disable for all drivers until this has been reworked.
- */
- wiphy->flags &= ~WIPHY_FLAG_HAS_CHANNEL_SWITCH;
-
#ifdef CONFIG_PM
if (WARN_ON(wiphy->wowlan &&
(wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
@@ -635,6 +630,9 @@ int wiphy_register(struct wiphy *wiphy)
if (IS_ERR(rdev->wiphy.debugfsdir))
rdev->wiphy.debugfsdir = NULL;
+ cfg80211_debugfs_rdev_add(rdev);
+ nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
+
if (wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
struct regulatory_request request;
@@ -646,8 +644,6 @@ int wiphy_register(struct wiphy *wiphy)
nl80211_send_reg_change_event(&request);
}
- cfg80211_debugfs_rdev_add(rdev);
-
rdev->wiphy.registered = true;
rtnl_unlock();
@@ -659,8 +655,6 @@ int wiphy_register(struct wiphy *wiphy)
return res;
}
- nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
-
return 0;
}
EXPORT_SYMBOL(wiphy_register);
@@ -1012,7 +1006,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
rdev->devlist_generation++;
cfg80211_mlme_purge_registrations(wdev);
#ifdef CONFIG_CFG80211_WEXT
- kfree(wdev->wext.keys);
+ kzfree(wdev->wext.keys);
#endif
}
/*
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 8f345da3ea5f..e24fc585c883 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -115,7 +115,7 @@ static int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
}
if (WARN_ON(wdev->connect_keys))
- kfree(wdev->connect_keys);
+ kzfree(wdev->connect_keys);
wdev->connect_keys = connkeys;
wdev->ibss_fixed = params->channel_fixed;
@@ -161,7 +161,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
ASSERT_WDEV_LOCK(wdev);
- kfree(wdev->connect_keys);
+ kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
rdev_set_qos_map(rdev, dev, NULL);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 266766b8d80b..2c52b59e43f3 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -19,7 +19,7 @@
void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
- const u8 *buf, size_t len)
+ const u8 *buf, size_t len, int uapsd_queues)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
@@ -43,7 +43,7 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
return;
}
- nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL);
+ nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL, uapsd_queues);
/* update current_bss etc., consumes the bss reference */
__cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
status_code,
@@ -605,7 +605,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
}
bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
- const u8 *buf, size_t len, u32 flags, gfp_t gfp)
+ const u8 *buf, size_t len, u32 flags)
{
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
@@ -648,7 +648,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
/* Indicate the received Action frame to user space */
if (nl80211_send_mgmt(rdev, wdev, reg->nlportid,
freq, sig_mbm,
- buf, len, flags, gfp))
+ buf, len, flags, GFP_ATOMIC))
continue;
result = true;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7257164af91b..cb9f5a44ffad 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2,6 +2,7 @@
* This is the new netlink-based wireless configuration interface.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*/
#include <linux/if.h>
@@ -225,6 +226,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 },
+ [NL80211_ATTR_WIPHY_DYN_ACK] = { .type = NLA_FLAG },
[NL80211_ATTR_IFTYPE] = { .type = NLA_U32 },
[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
@@ -388,6 +390,11 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
[NL80211_ATTR_IFACE_SOCKET_OWNER] = { .type = NLA_FLAG },
[NL80211_ATTR_CSA_C_OFFSETS_TX] = { .type = NLA_BINARY },
+ [NL80211_ATTR_USE_RRM] = { .type = NLA_FLAG },
+ [NL80211_ATTR_TSID] = { .type = NLA_U8 },
+ [NL80211_ATTR_USER_PRIO] = { .type = NLA_U8 },
+ [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
+ [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
};
/* policy for the key attributes */
@@ -1507,6 +1514,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
CMD(channel_switch, CHANNEL_SWITCH);
CMD(set_qos_map, SET_QOS_MAP);
+ if (rdev->wiphy.flags &
+ WIPHY_FLAG_SUPPORTS_WMM_ADMISSION)
+ CMD(add_tx_ts, ADD_TX_TS);
}
/* add into the if now */
#undef CMD
@@ -2237,11 +2247,21 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) {
+ if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK])
+ return -EINVAL;
+
coverage_class = nla_get_u8(
info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]);
changed |= WIPHY_PARAM_COVERAGE_CLASS;
}
+ if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) {
+ if (!(rdev->wiphy.features & NL80211_FEATURE_ACKTO_ESTIMATION))
+ return -EOPNOTSUPP;
+
+ changed |= WIPHY_PARAM_DYN_ACK;
+ }
+
if (changed) {
u8 old_retry_short, old_retry_long;
u32 old_frag_threshold, old_rts_threshold;
@@ -3326,6 +3346,29 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(params.acl);
}
+ if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
+ params.smps_mode =
+ nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]);
+ switch (params.smps_mode) {
+ case NL80211_SMPS_OFF:
+ break;
+ case NL80211_SMPS_STATIC:
+ if (!(rdev->wiphy.features &
+ NL80211_FEATURE_STATIC_SMPS))
+ return -EINVAL;
+ break;
+ case NL80211_SMPS_DYNAMIC:
+ if (!(rdev->wiphy.features &
+ NL80211_FEATURE_DYNAMIC_SMPS))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ params.smps_mode = NL80211_SMPS_OFF;
+ }
+
wdev_lock(wdev);
err = rdev_start_ap(rdev, dev, &params);
if (!err) {
@@ -6033,7 +6076,6 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
const struct cfg80211_bss_ies *ies;
void *hdr;
struct nlattr *bss;
- bool tsf = false;
ASSERT_WDEV_LOCK(wdev);
@@ -6060,18 +6102,27 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
goto nla_put_failure;
rcu_read_lock();
+ /* indicate whether we have probe response data or not */
+ if (rcu_access_pointer(res->proberesp_ies) &&
+ nla_put_flag(msg, NL80211_BSS_PRESP_DATA))
+ goto fail_unlock_rcu;
+
+	/* this pointer preferentially points at probe response data,
+	 * but is always valid
+	 */
ies = rcu_dereference(res->ies);
if (ies) {
if (nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf))
goto fail_unlock_rcu;
- tsf = true;
if (ies->len && nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
ies->len, ies->data))
goto fail_unlock_rcu;
}
+
+	/* and this pointer is always beacon data (unless the driver lacked it) */
ies = rcu_dereference(res->beacon_ies);
- if (ies) {
- if (!tsf && nla_put_u64(msg, NL80211_BSS_TSF, ies->tsf))
+ if (ies && ies->from_beacon) {
+ if (nla_put_u64(msg, NL80211_BSS_BEACON_TSF, ies->tsf))
goto fail_unlock_rcu;
if (ies->len && nla_put(msg, NL80211_BSS_BEACON_IES,
ies->len, ies->data))
@@ -6575,6 +6626,14 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
sizeof(req.vht_capa));
}
+ if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) {
+ if (!(rdev->wiphy.features &
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) ||
+ !(rdev->wiphy.features & NL80211_FEATURE_QUIET))
+ return -EINVAL;
+ req.flags |= ASSOC_REQ_USE_RRM;
+ }
+
err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
if (!err) {
wdev_lock(dev->ieee80211_ptr);
@@ -6837,7 +6896,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
if (err)
- kfree(connkeys);
+ kzfree(connkeys);
return err;
}
@@ -7209,7 +7268,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) {
- kfree(connkeys);
+ kzfree(connkeys);
return -EINVAL;
}
memcpy(&connect.ht_capa,
@@ -7227,7 +7286,7 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) {
if (!info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) {
- kfree(connkeys);
+ kzfree(connkeys);
return -EINVAL;
}
memcpy(&connect.vht_capa,
@@ -7235,11 +7294,19 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
sizeof(connect.vht_capa));
}
+ if (nla_get_flag(info->attrs[NL80211_ATTR_USE_RRM])) {
+ if (!(rdev->wiphy.features &
+ NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES) ||
+ !(rdev->wiphy.features & NL80211_FEATURE_QUIET))
+ return -EINVAL;
+ connect.flags |= ASSOC_REQ_USE_RRM;
+ }
+
wdev_lock(dev->ieee80211_ptr);
err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
wdev_unlock(dev->ieee80211_ptr);
if (err)
- kfree(connkeys);
+ kzfree(connkeys);
return err;
}
@@ -8925,13 +8992,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN)
return -ERANGE;
- memcpy(rekey_data.kek, nla_data(tb[NL80211_REKEY_DATA_KEK]),
- NL80211_KEK_LEN);
- memcpy(rekey_data.kck, nla_data(tb[NL80211_REKEY_DATA_KCK]),
- NL80211_KCK_LEN);
- memcpy(rekey_data.replay_ctr,
- nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]),
- NL80211_REPLAY_CTR_LEN);
+ rekey_data.kek = nla_data(tb[NL80211_REKEY_DATA_KEK]);
+ rekey_data.kck = nla_data(tb[NL80211_REKEY_DATA_KCK]);
+ rekey_data.replay_ctr = nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]);
wdev_lock(wdev);
if (!wdev->current_bss) {
@@ -9363,6 +9426,93 @@ static int nl80211_set_qos_map(struct sk_buff *skb,
return ret;
}
+static int nl80211_add_tx_ts(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ const u8 *peer;
+ u8 tsid, up;
+ u16 admitted_time = 0;
+ int err;
+
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_WMM_ADMISSION))
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_TSID] || !info->attrs[NL80211_ATTR_MAC] ||
+ !info->attrs[NL80211_ATTR_USER_PRIO])
+ return -EINVAL;
+
+ tsid = nla_get_u8(info->attrs[NL80211_ATTR_TSID]);
+ if (tsid >= IEEE80211_NUM_TIDS)
+ return -EINVAL;
+
+ up = nla_get_u8(info->attrs[NL80211_ATTR_USER_PRIO]);
+ if (up >= IEEE80211_NUM_UPS)
+ return -EINVAL;
+
+ /* WMM uses TIDs 0-7 even for TSPEC */
+ if (tsid < IEEE80211_FIRST_TSPEC_TSID) {
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_WMM_ADMISSION))
+ return -EINVAL;
+ } else {
+ /* TODO: handle 802.11 TSPEC/admission control
+ * need more attributes for that (e.g. BA session requirement)
+ */
+ return -EINVAL;
+ }
+
+ peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ if (info->attrs[NL80211_ATTR_ADMITTED_TIME]) {
+ admitted_time =
+ nla_get_u16(info->attrs[NL80211_ATTR_ADMITTED_TIME]);
+ if (!admitted_time)
+ return -EINVAL;
+ }
+
+ wdev_lock(wdev);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (wdev->current_bss)
+ break;
+ err = -ENOTCONN;
+ goto out;
+ default:
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = rdev_add_tx_ts(rdev, dev, tsid, peer, up, admitted_time);
+
+ out:
+ wdev_unlock(wdev);
+ return err;
+}
+
+static int nl80211_del_tx_ts(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ const u8 *peer;
+ u8 tsid;
+ int err;
+
+ if (!info->attrs[NL80211_ATTR_TSID] || !info->attrs[NL80211_ATTR_MAC])
+ return -EINVAL;
+
+ tsid = nla_get_u8(info->attrs[NL80211_ATTR_TSID]);
+ peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+ wdev_lock(wdev);
+ err = rdev_del_tx_ts(rdev, dev, tsid, peer);
+ wdev_unlock(wdev);
+
+ return err;
+}
+
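On the driver side, the new WMM admission API is just two cfg80211_ops hooks plus WIPHY_FLAG_SUPPORTS_WMM_ADMISSION. A skeletal (hypothetical) driver wiring that accepts everything:

#include <net/cfg80211.h>

/* hypothetical driver hooks; a real driver would program the TSPEC
 * for this TSID into its firmware here. admitted_time == 0 means the
 * attribute was absent (see the nl80211 handler above).
 */
static int drv_add_tx_ts(struct wiphy *wiphy, struct net_device *dev,
			 u8 tsid, const u8 *peer, u8 user_prio,
			 u16 admitted_time)
{
	return 0;
}

static int drv_del_tx_ts(struct wiphy *wiphy, struct net_device *dev,
			 u8 tsid, const u8 *peer)
{
	return 0;
}

static const struct cfg80211_ops drv_cfg_ops = {
	.add_tx_ts = drv_add_tx_ts,
	.del_tx_ts = drv_del_tx_ts,
	/* plus wiphy->flags |= WIPHY_FLAG_SUPPORTS_WMM_ADMISSION at init */
};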
#define NL80211_FLAG_NEED_WIPHY 0x01
#define NL80211_FLAG_NEED_NETDEV 0x02
#define NL80211_FLAG_NEED_RTNL 0x04
@@ -9373,6 +9523,7 @@ static int nl80211_set_qos_map(struct sk_buff *skb,
/* If a netdev is associated, it must be UP, P2P must be started */
#define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\
NL80211_FLAG_CHECK_NETDEV_UP)
+#define NL80211_FLAG_CLEAR_SKB 0x20
static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info)
@@ -9456,8 +9607,20 @@ static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
dev_put(info->user_ptr[1]);
}
}
+
if (ops->internal_flags & NL80211_FLAG_NEED_RTNL)
rtnl_unlock();
+
+ /* If needed, clear the netlink message payload from the SKB
+ * as it might contain key data that shouldn't stick around on
+ * the heap after the SKB is freed. The netlink message header
+ * is still needed for further processing, so leave it intact.
+ */
+ if (ops->internal_flags & NL80211_FLAG_CLEAR_SKB) {
+ struct nlmsghdr *nlh = nlmsg_hdr(skb);
+
+ memset(nlmsg_data(nlh), 0, nlmsg_len(nlh));
+ }
}
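The cleared region is exactly the netlink payload: nlmsg_data() points just past the fixed struct nlmsghdr and nlmsg_len() excludes it, so the header survives for the rest of the genetlink path. A tiny userspace illustration of the same arithmetic using the uapi macros:

#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		unsigned char payload[32];	/* stand-in for key attrs */
	} msg;

	memset(&msg, 0xaa, sizeof(msg));
	msg.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(msg.payload));

	/* as in the NL80211_FLAG_CLEAR_SKB path: wipe the payload only */
	memset(NLMSG_DATA(&msg.nlh), 0, msg.nlh.nlmsg_len - NLMSG_HDRLEN);

	printf("len=%u payload[0]=%u\n", msg.nlh.nlmsg_len, msg.payload[0]);
	return 0;
}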
static const struct genl_ops nl80211_ops[] = {
@@ -9525,7 +9688,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_NEW_KEY,
@@ -9533,7 +9697,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_DEL_KEY,
@@ -9711,7 +9876,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_ASSOCIATE,
@@ -9945,7 +10111,8 @@ static const struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
- NL80211_FLAG_NEED_RTNL,
+ NL80211_FLAG_NEED_RTNL |
+ NL80211_FLAG_CLEAR_SKB,
},
{
.cmd = NL80211_CMD_TDLS_MGMT,
@@ -10103,6 +10270,22 @@ static const struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+ {
+ .cmd = NL80211_CMD_ADD_TX_TS,
+ .doit = nl80211_add_tx_ts,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_DEL_TX_TS,
+ .doit = nl80211_del_tx_ts,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
/* notification functions */
@@ -10371,7 +10554,8 @@ nla_put_failure:
static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
const u8 *buf, size_t len,
- enum nl80211_commands cmd, gfp_t gfp)
+ enum nl80211_commands cmd, gfp_t gfp,
+ int uapsd_queues)
{
struct sk_buff *msg;
void *hdr;
@@ -10391,6 +10575,19 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
nla_put(msg, NL80211_ATTR_FRAME, len, buf))
goto nla_put_failure;
+ if (uapsd_queues >= 0) {
+ struct nlattr *nla_wmm =
+ nla_nest_start(msg, NL80211_ATTR_STA_WME);
+ if (!nla_wmm)
+ goto nla_put_failure;
+
+ if (nla_put_u8(msg, NL80211_STA_WME_UAPSD_QUEUES,
+ uapsd_queues))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, nla_wmm);
+ }
+
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
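Receivers see the queue bitmap as a nested NL80211_ATTR_STA_WME attribute on the ASSOCIATE event (absent when uapsd_queues < 0). A libnl sketch of pulling it back out on the userspace side (hypothetical helper):

#include <netlink/attr.h>
#include <linux/nl80211.h>

/* returns the U-APSD queue bitmap, or -1 if the driver didn't report one */
static int parse_assoc_uapsd(struct nlattr **attrs)
{
	struct nlattr *wme[NL80211_STA_WME_MAX + 1];

	if (!attrs[NL80211_ATTR_STA_WME])
		return -1;
	if (nla_parse_nested(wme, NL80211_STA_WME_MAX,
			     attrs[NL80211_ATTR_STA_WME], NULL))
		return -1;
	if (!wme[NL80211_STA_WME_UAPSD_QUEUES])
		return -1;
	return nla_get_u8(wme[NL80211_STA_WME_UAPSD_QUEUES]);
}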
@@ -10407,15 +10604,15 @@ void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
size_t len, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_AUTHENTICATE, gfp);
+ NL80211_CMD_AUTHENTICATE, gfp, -1);
}
void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *buf,
- size_t len, gfp_t gfp)
+ size_t len, gfp_t gfp, int uapsd_queues)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_ASSOCIATE, gfp);
+ NL80211_CMD_ASSOCIATE, gfp, uapsd_queues);
}
void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
@@ -10423,7 +10620,7 @@ void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
size_t len, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_DEAUTHENTICATE, gfp);
+ NL80211_CMD_DEAUTHENTICATE, gfp, -1);
}
void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
@@ -10431,7 +10628,7 @@ void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
size_t len, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_DISASSOCIATE, gfp);
+ NL80211_CMD_DISASSOCIATE, gfp, -1);
}
void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
@@ -10452,7 +10649,7 @@ void cfg80211_rx_unprot_mlme_mgmt(struct net_device *dev, const u8 *buf,
cmd = NL80211_CMD_UNPROT_DISASSOCIATE;
trace_cfg80211_rx_unprot_mlme_mgmt(dev, buf, len);
- nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC);
+ nl80211_send_mlme_event(rdev, dev, buf, len, cmd, GFP_ATOMIC, -1);
}
EXPORT_SYMBOL(cfg80211_rx_unprot_mlme_mgmt);
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 49c9a482dd12..7ad70d6f0cc6 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -23,7 +23,8 @@ void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
const u8 *buf, size_t len, gfp_t gfp);
void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
- const u8 *buf, size_t len, gfp_t gfp);
+ const u8 *buf, size_t len, gfp_t gfp,
+ int uapsd_queues);
void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
const u8 *buf, size_t len, gfp_t gfp);
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 56c2240c30ce..f6d457d6a558 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -915,4 +915,35 @@ rdev_set_ap_chanwidth(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int
+rdev_add_tx_ts(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, u8 tsid, const u8 *peer,
+ u8 user_prio, u16 admitted_time)
+{
+ int ret = -EOPNOTSUPP;
+
+ trace_rdev_add_tx_ts(&rdev->wiphy, dev, tsid, peer,
+ user_prio, admitted_time);
+ if (rdev->ops->add_tx_ts)
+ ret = rdev->ops->add_tx_ts(&rdev->wiphy, dev, tsid, peer,
+ user_prio, admitted_time);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+
+ return ret;
+}
+
+static inline int
+rdev_del_tx_ts(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, u8 tsid, const u8 *peer)
+{
+ int ret = -EOPNOTSUPP;
+
+ trace_rdev_del_tx_ts(&rdev->wiphy, dev, tsid, peer);
+ if (rdev->ops->del_tx_ts)
+ ret = rdev->ops->del_tx_ts(&rdev->wiphy, dev, tsid, peer);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+
+ return ret;
+}
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 1afdf45db38f..b725a31a4751 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -3,6 +3,7 @@
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -798,6 +799,57 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
return 0;
}
+/* check whether rule r1 fully contains rule r2 */
+static bool rule_contains(struct ieee80211_reg_rule *r1,
+ struct ieee80211_reg_rule *r2)
+{
+ /* for simplicity, currently consider only same flags */
+ if (r1->flags != r2->flags)
+ return false;
+
+ /* verify r1 is more restrictive */
+ if ((r1->power_rule.max_antenna_gain >
+ r2->power_rule.max_antenna_gain) ||
+ r1->power_rule.max_eirp > r2->power_rule.max_eirp)
+ return false;
+
+ /* make sure r2's range is contained within r1 */
+ if (r1->freq_range.start_freq_khz > r2->freq_range.start_freq_khz ||
+ r1->freq_range.end_freq_khz < r2->freq_range.end_freq_khz)
+ return false;
+
+ /* and finally verify that r1.max_bw >= r2.max_bw */
+ if (r1->freq_range.max_bandwidth_khz <
+ r2->freq_range.max_bandwidth_khz)
+ return false;
+
+ return true;
+}
+
+/* add or extend current rules. do nothing if rule is already contained */
+static void add_rule(struct ieee80211_reg_rule *rule,
+ struct ieee80211_reg_rule *reg_rules, u32 *n_rules)
+{
+ struct ieee80211_reg_rule *tmp_rule;
+ int i;
+
+ for (i = 0; i < *n_rules; i++) {
+ tmp_rule = &reg_rules[i];
+ /* rule is already contained - do nothing */
+ if (rule_contains(tmp_rule, rule))
+ return;
+
+ /* extend rule if possible */
+ if (rule_contains(rule, tmp_rule)) {
+ memcpy(tmp_rule, rule, sizeof(*rule));
+ return;
+ }
+ }
+
+ memcpy(&reg_rules[*n_rules], rule, sizeof(*rule));
+ (*n_rules)++;
+}
+
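A worked example of the containment test, as a standalone mirror of the logic above (the structs are simplified copies of include/net/regulatory.h, just enough to run):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct freq_range { uint32_t start_khz, end_khz, max_bw_khz; };
struct power_rule { uint32_t max_antenna_gain, max_eirp; };
struct reg_rule {
	struct freq_range freq;
	struct power_rule power;
	uint32_t flags;
};

static bool rule_contains(const struct reg_rule *r1,
			  const struct reg_rule *r2)
{
	if (r1->flags != r2->flags)
		return false;
	if (r1->power.max_antenna_gain > r2->power.max_antenna_gain ||
	    r1->power.max_eirp > r2->power.max_eirp)
		return false;
	if (r1->freq.start_khz > r2->freq.start_khz ||
	    r1->freq.end_khz < r2->freq.end_khz)
		return false;
	return r1->freq.max_bw_khz >= r2->freq.max_bw_khz;
}

int main(void)
{
	/* 2400-2483.5 MHz @ 40 MHz vs 2402-2472 MHz @ 20 MHz, same power */
	struct reg_rule wide   = { { 2400000, 2483500, 40000 }, { 0, 2000 }, 0 };
	struct reg_rule narrow = { { 2402000, 2472000, 20000 }, { 0, 2000 }, 0 };

	/* wide contains narrow, so add_rule() would drop narrow: prints 1 0 */
	printf("%d %d\n", rule_contains(&wide, &narrow),
	       rule_contains(&narrow, &wide));
	return 0;
}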
/**
* regdom_intersect - do the intersection between two regulatory domains
* @rd1: first regulatory domain
@@ -817,12 +869,10 @@ regdom_intersect(const struct ieee80211_regdomain *rd1,
{
int r, size_of_regd;
unsigned int x, y;
- unsigned int num_rules = 0, rule_idx = 0;
+ unsigned int num_rules = 0;
const struct ieee80211_reg_rule *rule1, *rule2;
- struct ieee80211_reg_rule *intersected_rule;
+ struct ieee80211_reg_rule intersected_rule;
struct ieee80211_regdomain *rd;
- /* This is just a dummy holder to help us count */
- struct ieee80211_reg_rule dummy_rule;
if (!rd1 || !rd2)
return NULL;
@@ -840,7 +890,7 @@ regdom_intersect(const struct ieee80211_regdomain *rd1,
for (y = 0; y < rd2->n_reg_rules; y++) {
rule2 = &rd2->reg_rules[y];
if (!reg_rules_intersect(rd1, rd2, rule1, rule2,
- &dummy_rule))
+ &intersected_rule))
num_rules++;
}
}
@@ -855,34 +905,24 @@ regdom_intersect(const struct ieee80211_regdomain *rd1,
if (!rd)
return NULL;
- for (x = 0; x < rd1->n_reg_rules && rule_idx < num_rules; x++) {
+ for (x = 0; x < rd1->n_reg_rules; x++) {
rule1 = &rd1->reg_rules[x];
- for (y = 0; y < rd2->n_reg_rules && rule_idx < num_rules; y++) {
+ for (y = 0; y < rd2->n_reg_rules; y++) {
rule2 = &rd2->reg_rules[y];
- /*
- * This time around instead of using the stack lets
- * write to the target rule directly saving ourselves
- * a memcpy()
- */
- intersected_rule = &rd->reg_rules[rule_idx];
r = reg_rules_intersect(rd1, rd2, rule1, rule2,
- intersected_rule);
+ &intersected_rule);
/*
* No need to memset the intersected rule here, as
* reg_rules_intersect() fills it in completely on success
*/
if (r)
continue;
- rule_idx++;
- }
- }
- if (rule_idx != num_rules) {
- kfree(rd);
- return NULL;
+ add_rule(&intersected_rule, rd->reg_rules,
+ &rd->n_reg_rules);
+ }
}
- rd->n_reg_rules = num_rules;
rd->alpha2[0] = '9';
rd->alpha2[1] = '8';
rd->dfs_region = reg_intersect_dfs_region(rd1->dfs_region,
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0798c62e6085..bda39f149810 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -2,6 +2,7 @@
* cfg80211 scan result handling
*
* Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*/
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -884,6 +885,7 @@ struct cfg80211_bss*
cfg80211_inform_bss_width(struct wiphy *wiphy,
struct ieee80211_channel *rx_channel,
enum nl80211_bss_scan_width scan_width,
+ enum cfg80211_bss_frame_type ftype,
const u8 *bssid, u64 tsf, u16 capability,
u16 beacon_interval, const u8 *ie, size_t ielen,
s32 signal, gfp_t gfp)
@@ -911,21 +913,32 @@ cfg80211_inform_bss_width(struct wiphy *wiphy,
tmp.pub.beacon_interval = beacon_interval;
tmp.pub.capability = capability;
/*
- * Since we do not know here whether the IEs are from a Beacon or Probe
+ * If we do not know here whether the IEs are from a Beacon or Probe
* Response frame, we need to pick one of the options and only use it
* with the driver that does not provide the full Beacon/Probe Response
* frame. Use Beacon frame pointer to avoid indicating that this should
* override the IEs pointer should we have received an earlier
* indication of Probe Response data.
*/
- ies = kmalloc(sizeof(*ies) + ielen, gfp);
+ ies = kzalloc(sizeof(*ies) + ielen, gfp);
if (!ies)
return NULL;
ies->len = ielen;
ies->tsf = tsf;
+ ies->from_beacon = false;
memcpy(ies->data, ie, ielen);
- rcu_assign_pointer(tmp.pub.beacon_ies, ies);
+ switch (ftype) {
+ case CFG80211_BSS_FTYPE_BEACON:
+ ies->from_beacon = true;
+ /* fall through to assign */
+ case CFG80211_BSS_FTYPE_UNKNOWN:
+ rcu_assign_pointer(tmp.pub.beacon_ies, ies);
+ break;
+ case CFG80211_BSS_FTYPE_PRESP:
+ rcu_assign_pointer(tmp.pub.proberesp_ies, ies);
+ break;
+ }
rcu_assign_pointer(tmp.pub.ies, ies);
signal_valid = abs(rx_channel->center_freq - channel->center_freq) <=
@@ -982,11 +995,12 @@ cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
if (!channel)
return NULL;
- ies = kmalloc(sizeof(*ies) + ielen, gfp);
+ ies = kzalloc(sizeof(*ies) + ielen, gfp);
if (!ies)
return NULL;
ies->len = ielen;
ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
+ ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control);
memcpy(ies->data, mgmt->u.probe_resp.variable, ielen);
if (ieee80211_is_probe_resp(mgmt->frame_control))
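The _frame variant above derives from_beacon straight from the frame control field; callers of the raw-IE cfg80211_inform_bss_width() now have to say what they received via the new ftype argument. A driver that knows the source frame could map it like this (hypothetical helper, not part of the patch):

#include <linux/ieee80211.h>
#include <net/cfg80211.h>

static enum cfg80211_bss_frame_type
bss_ftype_from_fc(__le16 frame_control)
{
	if (ieee80211_is_beacon(frame_control))
		return CFG80211_BSS_FTYPE_BEACON;
	if (ieee80211_is_probe_resp(frame_control))
		return CFG80211_BSS_FTYPE_PRESP;
	return CFG80211_BSS_FTYPE_UNKNOWN;	/* e.g. scan offload results */
}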
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 8bbeeb302216..dc1668ff543b 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -641,7 +641,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
}
if (status != WLAN_STATUS_SUCCESS) {
- kfree(wdev->connect_keys);
+ kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
wdev->ssid_len = 0;
if (bss) {
@@ -918,7 +918,7 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
if (WARN_ON(wdev->connect_keys)) {
- kfree(wdev->connect_keys);
+ kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
}
@@ -978,7 +978,7 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
- kfree(wdev->connect_keys);
+ kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
if (wdev->conn)
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 0c524cd76c83..625a6e6d1168 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1896,6 +1896,51 @@ TRACE_EVENT(rdev_set_ap_chanwidth,
WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG)
);
+TRACE_EVENT(rdev_add_tx_ts,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ u8 tsid, const u8 *peer, u8 user_prio, u16 admitted_time),
+ TP_ARGS(wiphy, netdev, tsid, peer, user_prio, admitted_time),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(peer)
+ __field(u8, tsid)
+ __field(u8, user_prio)
+ __field(u16, admitted_time)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(peer, peer);
+ __entry->tsid = tsid;
+ __entry->user_prio = user_prio;
+ __entry->admitted_time = admitted_time;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", TSID %d, UP %d, time %d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
+ __entry->tsid, __entry->user_prio, __entry->admitted_time)
+);
+
+TRACE_EVENT(rdev_del_tx_ts,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ u8 tsid, const u8 *peer),
+ TP_ARGS(wiphy, netdev, tsid, peer),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ MAC_ENTRY(peer)
+ __field(u8, tsid)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(peer, peer);
+ __entry->tsid = tsid;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " MAC_PR_FMT ", TSID %d",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tsid)
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 728f1c0dc70d..5e233a577d0f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -2,6 +2,7 @@
* Wireless utility functions
*
* Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright 2013-2014 Intel Mobile Communications GmbH
*/
#include <linux/export.h>
#include <linux/bitops.h>
@@ -796,7 +797,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
netdev_err(dev, "failed to set mgtdef %d\n", i);
}
- kfree(wdev->connect_keys);
+ kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 11120bb14162..0f47948c572f 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -496,6 +496,8 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
err = 0;
if (!err) {
if (!addr) {
+ memset(wdev->wext.keys->data[idx], 0,
+ sizeof(wdev->wext.keys->data[idx]));
wdev->wext.keys->params[idx].key_len = 0;
wdev->wext.keys->params[idx].cipher = 0;
}
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index c7e5c8eb4f24..368611c05739 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -57,7 +57,7 @@ int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
err = cfg80211_connect(rdev, wdev->netdev,
&wdev->wext.connect, ck, prev_bssid);
if (err)
- kfree(ck);
+ kzfree(ck);
return err;
}
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 0622d319e1f2..666c5ffe929d 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -3,6 +3,7 @@
#include <linux/xfrm.h>
#include <linux/socket.h>
+#include <linux/jhash.h>
static inline unsigned int __xfrm4_addr_hash(const xfrm_address_t *addr)
{
@@ -28,6 +29,58 @@ static inline unsigned int __xfrm6_daddr_saddr_hash(const xfrm_address_t *daddr,
saddr->a6[2] ^ saddr->a6[3]);
}
+static inline u32 __bits2mask32(__u8 bits)
+{
+ u32 mask32 = 0xffffffff;
+
+ if (bits == 0)
+ mask32 = 0;
+ else if (bits < 32)
+ mask32 <<= (32 - bits);
+
+ return mask32;
+}
+
+static inline unsigned int __xfrm4_dpref_spref_hash(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ __u8 dbits,
+ __u8 sbits)
+{
+ return jhash_2words(ntohl(daddr->a4) & __bits2mask32(dbits),
+ ntohl(saddr->a4) & __bits2mask32(sbits),
+ 0);
+}
+
+static inline unsigned int __xfrm6_pref_hash(const xfrm_address_t *addr,
+ __u8 prefixlen)
+{
+ int pdw;
+ int pbi;
+ u32 initval = 0;
+
+ pdw = prefixlen >> 5; /* num of whole u32 in prefix */
+ pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
+
+ if (pbi) {
+ __be32 mask;
+
+ mask = htonl((0xffffffff) << (32 - pbi));
+
+ initval = (__force u32)(addr->a6[pdw] & mask);
+ }
+
+ return jhash2((__force u32 *)addr->a6, pdw, initval);
+}
+
+static inline unsigned int __xfrm6_dpref_spref_hash(const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr,
+ __u8 dbits,
+ __u8 sbits)
+{
+ return __xfrm6_pref_hash(daddr, dbits) ^
+ __xfrm6_pref_hash(saddr, sbits);
+}
+
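Only the top dbits/sbits of each address feed jhash, so every selector sharing a prefix of at least that length hashes to the same chain. A quick standalone check of the mask helper's values (userspace mirror, for illustration):

#include <assert.h>
#include <stdint.h>

static uint32_t bits2mask32(uint8_t bits)
{
	uint32_t mask32 = 0xffffffff;

	if (bits == 0)
		mask32 = 0;
	else if (bits < 32)
		mask32 <<= (32 - bits);

	return mask32;
}

int main(void)
{
	assert(bits2mask32(0)  == 0x00000000);	/* hash everything together */
	assert(bits2mask32(24) == 0xffffff00);	/* /24 prefix */
	assert(bits2mask32(32) == 0xffffffff);	/* exact-address hashing */
	return 0;
}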
static inline unsigned int __xfrm_dst_hash(const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
u32 reqid, unsigned short family,
@@ -84,7 +137,8 @@ static inline unsigned int __idx_hash(u32 index, unsigned int hmask)
}
static inline unsigned int __sel_hash(const struct xfrm_selector *sel,
- unsigned short family, unsigned int hmask)
+ unsigned short family, unsigned int hmask,
+ u8 dbits, u8 sbits)
{
const xfrm_address_t *daddr = &sel->daddr;
const xfrm_address_t *saddr = &sel->saddr;
@@ -92,19 +146,19 @@ static inline unsigned int __sel_hash(const struct xfrm_selector *sel,
switch (family) {
case AF_INET:
- if (sel->prefixlen_d != 32 ||
- sel->prefixlen_s != 32)
+ if (sel->prefixlen_d < dbits ||
+ sel->prefixlen_s < sbits)
return hmask + 1;
- h = __xfrm4_daddr_saddr_hash(daddr, saddr);
+ h = __xfrm4_dpref_spref_hash(daddr, saddr, dbits, sbits);
break;
case AF_INET6:
- if (sel->prefixlen_d != 128 ||
- sel->prefixlen_s != 128)
+ if (sel->prefixlen_d < dbits ||
+ sel->prefixlen_s < sbits)
return hmask + 1;
- h = __xfrm6_daddr_saddr_hash(daddr, saddr);
+ h = __xfrm6_dpref_spref_hash(daddr, saddr, dbits, sbits);
break;
}
h ^= (h >> 16);
@@ -113,17 +167,19 @@ static inline unsigned int __sel_hash(const struct xfrm_selector *sel,
static inline unsigned int __addr_hash(const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
- unsigned short family, unsigned int hmask)
+ unsigned short family,
+ unsigned int hmask,
+ u8 dbits, u8 sbits)
{
unsigned int h = 0;
switch (family) {
case AF_INET:
- h = __xfrm4_daddr_saddr_hash(daddr, saddr);
+ h = __xfrm4_dpref_spref_hash(daddr, saddr, dbits, sbits);
break;
case AF_INET6:
- h = __xfrm6_daddr_saddr_hash(daddr, saddr);
+ h = __xfrm6_dpref_spref_hash(daddr, saddr, dbits, sbits);
break;
}
h ^= (h >> 16);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index c51e8f7b8653..499d6c18a8ce 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -166,11 +166,7 @@ static int xfrm_output_gso(struct sk_buff *skb)
err = xfrm_output2(segs);
if (unlikely(err)) {
- while ((segs = nskb)) {
- nskb = segs->next;
- segs->next = NULL;
- kfree_skb(segs);
- }
+ kfree_skb_list(nskb);
return err;
}
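kfree_skb_list() folds exactly the loop being removed; its implementation at the time was:

/* net/core/skbuff.c */
void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}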
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index fdde51f4271a..4c4e457e7888 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -349,12 +349,39 @@ static inline unsigned int idx_hash(struct net *net, u32 index)
return __idx_hash(index, net->xfrm.policy_idx_hmask);
}
+/* calculate policy hash thresholds */
+static void __get_hash_thresh(struct net *net,
+ unsigned short family, int dir,
+ u8 *dbits, u8 *sbits)
+{
+ switch (family) {
+ case AF_INET:
+ *dbits = net->xfrm.policy_bydst[dir].dbits4;
+ *sbits = net->xfrm.policy_bydst[dir].sbits4;
+ break;
+
+ case AF_INET6:
+ *dbits = net->xfrm.policy_bydst[dir].dbits6;
+ *sbits = net->xfrm.policy_bydst[dir].sbits6;
+ break;
+
+ default:
+ *dbits = 0;
+ *sbits = 0;
+ }
+}
+
static struct hlist_head *policy_hash_bysel(struct net *net,
const struct xfrm_selector *sel,
unsigned short family, int dir)
{
unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
- unsigned int hash = __sel_hash(sel, family, hmask);
+ unsigned int hash;
+ u8 dbits;
+ u8 sbits;
+
+ __get_hash_thresh(net, family, dir, &dbits, &sbits);
+ hash = __sel_hash(sel, family, hmask, dbits, sbits);
return (hash == hmask + 1 ?
&net->xfrm.policy_inexact[dir] :
@@ -367,25 +394,35 @@ static struct hlist_head *policy_hash_direct(struct net *net,
unsigned short family, int dir)
{
unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
- unsigned int hash = __addr_hash(daddr, saddr, family, hmask);
+ unsigned int hash;
+ u8 dbits;
+ u8 sbits;
+
+ __get_hash_thresh(net, family, dir, &dbits, &sbits);
+ hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
return net->xfrm.policy_bydst[dir].table + hash;
}
-static void xfrm_dst_hash_transfer(struct hlist_head *list,
+static void xfrm_dst_hash_transfer(struct net *net,
+ struct hlist_head *list,
struct hlist_head *ndsttable,
- unsigned int nhashmask)
+ unsigned int nhashmask,
+ int dir)
{
struct hlist_node *tmp, *entry0 = NULL;
struct xfrm_policy *pol;
unsigned int h0 = 0;
+ u8 dbits;
+ u8 sbits;
redo:
hlist_for_each_entry_safe(pol, tmp, list, bydst) {
unsigned int h;
+ __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
- pol->family, nhashmask);
+ pol->family, nhashmask, dbits, sbits);
if (!entry0) {
hlist_del(&pol->bydst);
hlist_add_head(&pol->bydst, ndsttable+h);
@@ -439,7 +476,7 @@ static void xfrm_bydst_resize(struct net *net, int dir)
write_lock_bh(&net->xfrm.xfrm_policy_lock);
for (i = hmask; i >= 0; i--)
- xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);
+ xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
net->xfrm.policy_bydst[dir].table = ndst;
net->xfrm.policy_bydst[dir].hmask = nhashmask;
@@ -534,6 +571,86 @@ static void xfrm_hash_resize(struct work_struct *work)
mutex_unlock(&hash_resize_mutex);
}
+static void xfrm_hash_rebuild(struct work_struct *work)
+{
+ struct net *net = container_of(work, struct net,
+ xfrm.policy_hthresh.work);
+ unsigned int hmask;
+ struct xfrm_policy *pol;
+ struct xfrm_policy *policy;
+ struct hlist_head *chain;
+ struct hlist_head *odst;
+ struct hlist_node *newpos;
+ int i;
+ int dir;
+ unsigned int seq;
+ u8 lbits4, rbits4, lbits6, rbits6;
+
+ mutex_lock(&hash_resize_mutex);
+
+ /* read selector prefixlen thresholds */
+ do {
+ seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
+
+ lbits4 = net->xfrm.policy_hthresh.lbits4;
+ rbits4 = net->xfrm.policy_hthresh.rbits4;
+ lbits6 = net->xfrm.policy_hthresh.lbits6;
+ rbits6 = net->xfrm.policy_hthresh.rbits6;
+ } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
+
+ write_lock_bh(&net->xfrm.xfrm_policy_lock);
+
+ /* reset the bydst and inexact table in all directions */
+ for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
+ INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
+ hmask = net->xfrm.policy_bydst[dir].hmask;
+ odst = net->xfrm.policy_bydst[dir].table;
+ for (i = hmask; i >= 0; i--)
+ INIT_HLIST_HEAD(odst + i);
+ if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
+ /* dir out => dst = remote, src = local */
+ net->xfrm.policy_bydst[dir].dbits4 = rbits4;
+ net->xfrm.policy_bydst[dir].sbits4 = lbits4;
+ net->xfrm.policy_bydst[dir].dbits6 = rbits6;
+ net->xfrm.policy_bydst[dir].sbits6 = lbits6;
+ } else {
+ /* dir in/fwd => dst = local, src = remote */
+ net->xfrm.policy_bydst[dir].dbits4 = lbits4;
+ net->xfrm.policy_bydst[dir].sbits4 = rbits4;
+ net->xfrm.policy_bydst[dir].dbits6 = lbits6;
+ net->xfrm.policy_bydst[dir].sbits6 = rbits6;
+ }
+ }
+
+ /* re-insert all policies by order of creation */
+ list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+ newpos = NULL;
+ chain = policy_hash_bysel(net, &policy->selector,
+ policy->family,
+ xfrm_policy_id2dir(policy->index));
+ hlist_for_each_entry(pol, chain, bydst) {
+ if (policy->priority >= pol->priority)
+ newpos = &pol->bydst;
+ else
+ break;
+ }
+ if (newpos)
+ hlist_add_behind(&policy->bydst, newpos);
+ else
+ hlist_add_head(&policy->bydst, chain);
+ }
+
+ write_unlock_bh(&net->xfrm.xfrm_policy_lock);
+
+ mutex_unlock(&hash_resize_mutex);
+}
+
+void xfrm_policy_hash_rebuild(struct net *net)
+{
+ schedule_work(&net->xfrm.policy_hthresh.work);
+}
+EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
+
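The rebuild snapshots the four threshold values under the seqlock so it can never observe a torn lbits/rbits pair while xfrm_set_spdinfo() updates them. The pairing is the standard seqlock idiom, roughly:

#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_SEQLOCK(demo_lock);
static u8 demo_lbits, demo_rbits;

/* reader: retry until a stable snapshot is seen */
static void demo_read(u8 *l, u8 *r)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&demo_lock);
		*l = demo_lbits;
		*r = demo_rbits;
	} while (read_seqretry(&demo_lock, seq));
}

/* writer: publish both values atomically w.r.t. readers */
static void demo_write(u8 l, u8 r)
{
	write_seqlock(&demo_lock);
	demo_lbits = l;
	demo_rbits = r;
	write_sequnlock(&demo_lock);
}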
/* Generate new index... KAME seems to generate them ordered by cost
* of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
@@ -1844,10 +1961,8 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
struct xfrm_policy *pol = xdst->pols[0];
struct xfrm_policy_queue *pq = &pol->polq;
- const struct sk_buff *fclone = skb + 1;
- if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
- fclone->fclone == SKB_FCLONE_CLONE)) {
+ if (unlikely(skb_fclone_busy(skb))) {
kfree_skb(skb);
return 0;
}
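skb_fclone_busy() centralizes the test the removed lines open-coded: a fast-clone original whose companion clone is still in flight. A sketch reconstructed from the removed lines (the real helper in include/linux/skbuff.h may differ in detail):

#include <linux/skbuff.h>

static inline bool xdst_fclone_busy(const struct sk_buff *skb)
{
	const struct sk_buff *fclone = skb + 1;	/* companion fast clone */

	return skb->fclone == SKB_FCLONE_ORIG &&
	       fclone->fclone == SKB_FCLONE_CLONE;
}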
@@ -2862,10 +2977,21 @@ static int __net_init xfrm_policy_init(struct net *net)
if (!htab->table)
goto out_bydst;
htab->hmask = hmask;
+ htab->dbits4 = 32;
+ htab->sbits4 = 32;
+ htab->dbits6 = 128;
+ htab->sbits6 = 128;
}
+ net->xfrm.policy_hthresh.lbits4 = 32;
+ net->xfrm.policy_hthresh.rbits4 = 32;
+ net->xfrm.policy_hthresh.lbits6 = 128;
+ net->xfrm.policy_hthresh.rbits6 = 128;
+
+ seqlock_init(&net->xfrm.policy_hthresh.lock);
INIT_LIST_HEAD(&net->xfrm.policy_all);
INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
+ INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
if (net_eq(net, &init_net))
register_netdevice_notifier(&xfrm_dev_notifier);
return 0;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 0ab54134bb40..de971b6d38c5 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -97,8 +97,6 @@ static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}
-static DEFINE_MUTEX(hash_resize_mutex);
-
static void xfrm_hash_resize(struct work_struct *work)
{
struct net *net = container_of(work, struct net, xfrm.state_hash_work);
@@ -107,22 +105,20 @@ static void xfrm_hash_resize(struct work_struct *work)
unsigned int nhashmask, ohashmask;
int i;
- mutex_lock(&hash_resize_mutex);
-
nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
ndst = xfrm_hash_alloc(nsize);
if (!ndst)
- goto out_unlock;
+ return;
nsrc = xfrm_hash_alloc(nsize);
if (!nsrc) {
xfrm_hash_free(ndst, nsize);
- goto out_unlock;
+ return;
}
nspi = xfrm_hash_alloc(nsize);
if (!nspi) {
xfrm_hash_free(ndst, nsize);
xfrm_hash_free(nsrc, nsize);
- goto out_unlock;
+ return;
}
spin_lock_bh(&net->xfrm.xfrm_state_lock);
@@ -148,9 +144,6 @@ static void xfrm_hash_resize(struct work_struct *work)
xfrm_hash_free(odst, osize);
xfrm_hash_free(osrc, osize);
xfrm_hash_free(ospi, osize);
-
-out_unlock:
- mutex_unlock(&hash_resize_mutex);
}
static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d4db6ebb089d..e812e988c111 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -333,8 +333,7 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
if (!algo)
return -ENOSYS;
- if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
- ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+ if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
return -EINVAL;
*props = algo->desc.sadb_alg_id;
@@ -964,7 +963,9 @@ static inline size_t xfrm_spdinfo_msgsize(void)
{
return NLMSG_ALIGN(4)
+ nla_total_size(sizeof(struct xfrmu_spdinfo))
- + nla_total_size(sizeof(struct xfrmu_spdhinfo));
+ + nla_total_size(sizeof(struct xfrmu_spdhinfo))
+ + nla_total_size(sizeof(struct xfrmu_spdhthresh))
+ + nla_total_size(sizeof(struct xfrmu_spdhthresh));
}
static int build_spdinfo(struct sk_buff *skb, struct net *net,
@@ -973,9 +974,11 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
struct xfrmk_spdinfo si;
struct xfrmu_spdinfo spc;
struct xfrmu_spdhinfo sph;
+ struct xfrmu_spdhthresh spt4, spt6;
struct nlmsghdr *nlh;
int err;
u32 *f;
+ unsigned int lseq;
nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
if (nlh == NULL) /* shouldn't really happen ... */
@@ -993,9 +996,22 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
sph.spdhcnt = si.spdhcnt;
sph.spdhmcnt = si.spdhmcnt;
+ do {
+ lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
+
+ spt4.lbits = net->xfrm.policy_hthresh.lbits4;
+ spt4.rbits = net->xfrm.policy_hthresh.rbits4;
+ spt6.lbits = net->xfrm.policy_hthresh.lbits6;
+ spt6.rbits = net->xfrm.policy_hthresh.rbits6;
+ } while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));
+
err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
if (!err)
err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
+ if (!err)
+ err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
+ if (!err)
+ err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
if (err) {
nlmsg_cancel(skb, nlh);
return err;
@@ -1004,6 +1020,51 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
return nlmsg_end(skb, nlh);
}
+static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct nlattr **attrs)
+{
+ struct net *net = sock_net(skb->sk);
+ struct xfrmu_spdhthresh *thresh4 = NULL;
+ struct xfrmu_spdhthresh *thresh6 = NULL;
+
+ /* selector prefixlen thresholds to hash policies */
+ if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
+ struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];
+
+ if (nla_len(rta) < sizeof(*thresh4))
+ return -EINVAL;
+ thresh4 = nla_data(rta);
+ if (thresh4->lbits > 32 || thresh4->rbits > 32)
+ return -EINVAL;
+ }
+ if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
+ struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];
+
+ if (nla_len(rta) < sizeof(*thresh6))
+ return -EINVAL;
+ thresh6 = nla_data(rta);
+ if (thresh6->lbits > 128 || thresh6->rbits > 128)
+ return -EINVAL;
+ }
+
+ if (thresh4 || thresh6) {
+ write_seqlock(&net->xfrm.policy_hthresh.lock);
+ if (thresh4) {
+ net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
+ net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
+ }
+ if (thresh6) {
+ net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
+ net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
+ }
+ write_sequnlock(&net->xfrm.policy_hthresh.lock);
+
+ xfrm_policy_hash_rebuild(net);
+ }
+
+ return 0;
+}
+
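Setting the thresholds from userspace is a plain XFRM_MSG_NEWSPDINFO carrying the new attributes. A minimal raw-netlink sketch (assumes uapi headers with the XFRMA_SPD_* additions; fd is a NETLINK_XFRM socket; error handling elided):

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/xfrm.h>

static int set_hthresh4(int fd, uint8_t lbits, uint8_t rbits)
{
	struct {
		struct nlmsghdr nlh;
		uint32_t flags;			/* the u32 message body */
		struct nlattr nla;
		struct xfrmu_spdhthresh thresh;
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_type = XFRM_MSG_NEWSPDINFO;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.nla.nla_type = XFRMA_SPD_IPV4_HTHRESH;
	req.nla.nla_len = NLA_HDRLEN + sizeof(req.thresh);
	req.thresh.lbits = lbits;		/* local prefix threshold */
	req.thresh.rbits = rbits;		/* remote prefix threshold */
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(uint32_t)) +
			    NLA_ALIGN(req.nla.nla_len);

	return send(fd, &req, req.nlh.nlmsg_len, 0) < 0 ? -1 : 0;
}

iproute2 grew a matching front end around the same time, along the lines of `ip xfrm policy set hthresh4 LBITS RBITS`, for setups that prefer not to speak raw netlink.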
static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
@@ -2274,6 +2335,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
+ [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
};
@@ -2308,10 +2370,17 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
};
+static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
+ [XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
+ [XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
+};
+
static const struct xfrm_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
+ const struct nla_policy *nla_pol;
+ int nla_max;
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
[XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
@@ -2335,6 +2404,9 @@ static const struct xfrm_link {
[XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
+ [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo,
+ .nla_pol = xfrma_spd_policy,
+ .nla_max = XFRMA_SPD_MAX },
[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
};
@@ -2371,8 +2443,9 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
}
}
- err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
- xfrma_policy);
+ err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs,
+ link->nla_max ? : XFRMA_MAX,
+ link->nla_pol ? : xfrma_policy);
if (err < 0)
return err;
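The `?:` in the parse call is the GNU "elvis" operator: fall back to the global policy table unless the dispatch entry supplies its own, so only XFRM_MSG_NEWSPDINFO pays for a private policy. A quick demo of the operator itself:

#include <stdio.h>

int main(void)
{
	int per_msg_max = 0;	/* 0 here means "no per-message override" */
	int global_max = 28;	/* stand-in for XFRMA_MAX */

	/* GNU extension: a ?: b  ==  a ? a : b, with a evaluated once */
	printf("%d\n", per_msg_max ?: global_max);	/* prints 28 */
	return 0;
}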