Diffstat (limited to 'net')
-rw-r--r--  net/802/hippi.c | 2
-rw-r--r--  net/8021q/vlan.c | 4
-rw-r--r--  net/8021q/vlan_core.c | 7
-rw-r--r--  net/8021q/vlan_dev.c | 8
-rw-r--r--  net/8021q/vlanproc.c | 2
-rw-r--r--  net/Kconfig | 5
-rw-r--r--  net/Kconfig.debug | 19
-rw-r--r--  net/ax25/af_ax25.c | 10
-rw-r--r--  net/ax25/ax25_dev.c | 8
-rw-r--r--  net/batman-adv/main.h | 2
-rw-r--r--  net/batman-adv/multicast.c | 15
-rw-r--r--  net/batman-adv/multicast.h | 10
-rw-r--r--  net/batman-adv/netlink.c | 30
-rw-r--r--  net/batman-adv/network-coding.c | 8
-rw-r--r--  net/batman-adv/soft-interface.c | 7
-rw-r--r--  net/bluetooth/Makefile | 2
-rw-r--r--  net/bluetooth/aosp.c | 168
-rw-r--r--  net/bluetooth/aosp.h | 13
-rw-r--r--  net/bluetooth/bnep/sock.c | 1
-rw-r--r--  net/bluetooth/cmtp/core.c | 4
-rw-r--r--  net/bluetooth/eir.h | 2
-rw-r--r--  net/bluetooth/hci_codec.c | 18
-rw-r--r--  net/bluetooth/hci_conn.c | 325
-rw-r--r--  net/bluetooth/hci_core.c | 1356
-rw-r--r--  net/bluetooth/hci_event.c | 3295
-rw-r--r--  net/bluetooth/hci_request.c | 567
-rw-r--r--  net/bluetooth/hci_request.h | 18
-rw-r--r--  net/bluetooth/hci_sock.c | 16
-rw-r--r--  net/bluetooth/hci_sync.c | 5281
-rw-r--r--  net/bluetooth/hci_sysfs.c | 2
-rw-r--r--  net/bluetooth/hidp/sock.c | 1
-rw-r--r--  net/bluetooth/l2cap_core.c | 2
-rw-r--r--  net/bluetooth/l2cap_sock.c | 46
-rw-r--r--  net/bluetooth/mgmt.c | 2400
-rw-r--r--  net/bluetooth/mgmt_util.c | 81
-rw-r--r--  net/bluetooth/mgmt_util.h | 8
-rw-r--r--  net/bluetooth/msft.c | 513
-rw-r--r--  net/bluetooth/msft.h | 20
-rw-r--r--  net/bridge/br_if.c | 18
-rw-r--r--  net/bridge/br_ioctl.c | 76
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 7
-rw-r--r--  net/bridge/br_private.h | 1
-rw-r--r--  net/bridge/br_sysfs_br.c | 7
-rw-r--r--  net/bridge/br_vlan.c | 4
-rw-r--r--  net/bridge/netfilter/nft_meta_bridge.c | 20
-rw-r--r--  net/caif/caif_socket.c | 1
-rw-r--r--  net/caif/cfserl.c | 1
-rw-r--r--  net/can/isotp.c | 4
-rw-r--r--  net/core/Makefile | 4
-rw-r--r--  net/core/bpf_sk_storage.c | 10
-rw-r--r--  net/core/dev.c | 735
-rw-r--r--  net/core/dev_addr_lists.c | 93
-rw-r--r--  net/core/dev_addr_lists_test.c | 236
-rw-r--r--  net/core/dev_ioctl.c | 7
-rw-r--r--  net/core/devlink.c | 81
-rw-r--r--  net/core/drop_monitor.c | 16
-rw-r--r--  net/core/dst.c | 8
-rw-r--r--  net/core/failover.c | 4
-rw-r--r--  net/core/fib_rules.c | 25
-rw-r--r--  net/core/filter.c | 187
-rw-r--r--  net/core/flow_dissector.c | 3
-rw-r--r--  net/core/flow_offload.c | 46
-rw-r--r--  net/core/gro.c | 770
-rw-r--r--  net/core/link_watch.c | 17
-rw-r--r--  net/core/lwt_bpf.c | 1
-rw-r--r--  net/core/lwtunnel.c | 4
-rw-r--r--  net/core/neighbour.c | 22
-rw-r--r--  net/core/net-sysfs.c | 34
-rw-r--r--  net/core/net_namespace.c | 3
-rw-r--r--  net/core/netpoll.c | 4
-rw-r--r--  net/core/of_net.c | 33
-rw-r--r--  net/core/page_pool.c | 10
-rw-r--r--  net/core/pktgen.c | 8
-rw-r--r--  net/core/rtnetlink.c | 37
-rw-r--r--  net/core/secure_seq.c | 4
-rw-r--r--  net/core/skbuff.c | 198
-rw-r--r--  net/core/sock.c | 71
-rw-r--r--  net/core/sock_diag.c | 1
-rw-r--r--  net/core/sock_map.c | 23
-rw-r--r--  net/core/sysctl_net_core.c | 1
-rw-r--r--  net/core/xdp.c | 104
-rw-r--r--  net/dccp/proto.c | 27
-rw-r--r--  net/dccp/trace.h | 4
-rw-r--r--  net/decnet/dn_nsp_in.c | 1
-rw-r--r--  net/decnet/dn_rules.c | 5
-rw-r--r--  net/dsa/dsa.c | 2
-rw-r--r--  net/dsa/dsa2.c | 201
-rw-r--r--  net/dsa/dsa_priv.h | 91
-rw-r--r--  net/dsa/master.c | 29
-rw-r--r--  net/dsa/port.c | 252
-rw-r--r--  net/dsa/slave.c | 64
-rw-r--r--  net/dsa/switch.c | 132
-rw-r--r--  net/dsa/tag_8021q.c | 20
-rw-r--r--  net/dsa/tag_dsa.c | 5
-rw-r--r--  net/dsa/tag_ocelot.c | 2
-rw-r--r--  net/dsa/tag_ocelot_8021q.c | 52
-rw-r--r--  net/dsa/tag_sja1105.c | 214
-rw-r--r--  net/ethernet/eth.c | 7
-rw-r--r--  net/ethtool/cabletest.c | 4
-rw-r--r--  net/ethtool/channels.c | 2
-rw-r--r--  net/ethtool/coalesce.c | 2
-rw-r--r--  net/ethtool/common.c | 1
-rw-r--r--  net/ethtool/debug.c | 2
-rw-r--r--  net/ethtool/eee.c | 2
-rw-r--r--  net/ethtool/features.c | 3
-rw-r--r--  net/ethtool/fec.c | 2
-rw-r--r--  net/ethtool/ioctl.c | 28
-rw-r--r--  net/ethtool/linkinfo.c | 2
-rw-r--r--  net/ethtool/linkmodes.c | 2
-rw-r--r--  net/ethtool/module.c | 2
-rw-r--r--  net/ethtool/netlink.c | 9
-rw-r--r--  net/ethtool/netlink.h | 9
-rw-r--r--  net/ethtool/pause.c | 2
-rw-r--r--  net/ethtool/privflags.c | 2
-rw-r--r--  net/ethtool/rings.c | 34
-rw-r--r--  net/ethtool/stats.c | 15
-rw-r--r--  net/ethtool/tunnels.c | 6
-rw-r--r--  net/ethtool/wol.c | 2
-rw-r--r--  net/hsr/hsr_device.c | 6
-rw-r--r--  net/ieee802154/socket.c | 4
-rw-r--r--  net/ipv4/af_inet.c | 31
-rw-r--r--  net/ipv4/arp.c | 33
-rw-r--r--  net/ipv4/bpf_tcp_ca.c | 6
-rw-r--r--  net/ipv4/devinet.c | 4
-rw-r--r--  net/ipv4/esp4_offload.c | 1
-rw-r--r--  net/ipv4/fib_rules.c | 6
-rw-r--r--  net/ipv4/fib_semantics.c | 61
-rw-r--r--  net/ipv4/fou.c | 26
-rw-r--r--  net/ipv4/gre_offload.c | 13
-rw-r--r--  net/ipv4/igmp.c | 1
-rw-r--r--  net/ipv4/inet_connection_sock.c | 2
-rw-r--r--  net/ipv4/inet_hashtables.c | 8
-rw-r--r--  net/ipv4/ip_output.c | 1
-rw-r--r--  net/ipv4/ip_sockglue.c | 2
-rw-r--r--  net/ipv4/ipmr.c | 8
-rw-r--r--  net/ipv4/netfilter/Kconfig | 8
-rw-r--r--  net/ipv4/netfilter/Makefile | 3
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 5
-rw-r--r--  net/ipv4/netfilter/nf_flow_table_ipv4.c | 37
-rw-r--r--  net/ipv4/nexthop.c | 9
-rw-r--r--  net/ipv4/ping.c | 15
-rw-r--r--  net/ipv4/raw.c | 15
-rw-r--r--  net/ipv4/route.c | 83
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv4/tcp.c | 95
-rw-r--r--  net/ipv4/tcp_bpf.c | 27
-rw-r--r--  net/ipv4/tcp_input.c | 18
-rw-r--r--  net/ipv4/tcp_ipv4.c | 25
-rw-r--r--  net/ipv4/tcp_offload.c | 1
-rw-r--r--  net/ipv4/tcp_output.c | 2
-rw-r--r--  net/ipv4/udp.c | 22
-rw-r--r--  net/ipv4/udp_offload.c | 32
-rw-r--r--  net/ipv4/xfrm4_policy.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/addrconf_core.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 9
-rw-r--r--  net/ipv6/ah6.c | 5
-rw-r--r--  net/ipv6/esp6.c | 3
-rw-r--r--  net/ipv6/esp6_offload.c | 1
-rw-r--r--  net/ipv6/exthdrs.c | 1
-rw-r--r--  net/ipv6/fib6_rules.c | 5
-rw-r--r--  net/ipv6/icmp.c | 6
-rw-r--r--  net/ipv6/inet6_hashtables.c | 8
-rw-r--r--  net/ipv6/ioam6.c | 16
-rw-r--r--  net/ipv6/ip6_fib.c | 1
-rw-r--r--  net/ipv6/ip6_gre.c | 13
-rw-r--r--  net/ipv6/ip6_offload.c | 14
-rw-r--r--  net/ipv6/ip6_output.c | 2
-rw-r--r--  net/ipv6/ip6_tunnel.c | 4
-rw-r--r--  net/ipv6/ip6_vti.c | 4
-rw-r--r--  net/ipv6/ip6mr.c | 8
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 17
-rw-r--r--  net/ipv6/netfilter/Kconfig | 8
-rw-r--r--  net/ipv6/netfilter/nf_flow_table_ipv6.c | 38
-rw-r--r--  net/ipv6/ping.c | 1
-rw-r--r--  net/ipv6/route.c | 70
-rw-r--r--  net/ipv6/seg6.c | 59
-rw-r--r--  net/ipv6/seg6_local.c | 34
-rw-r--r--  net/ipv6/sit.c | 4
-rw-r--r--  net/ipv6/syncookies.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 11
-rw-r--r--  net/ipv6/tcpv6_offload.c | 1
-rw-r--r--  net/ipv6/udp.c | 13
-rw-r--r--  net/ipv6/udp_offload.c | 3
-rw-r--r--  net/ipv6/xfrm6_policy.c | 4
-rw-r--r--  net/iucv/af_iucv.c | 41
-rw-r--r--  net/iucv/iucv.c | 124
-rw-r--r--  net/kcm/kcmsock.c | 1
-rw-r--r--  net/l2tp/l2tp_core.c | 52
-rw-r--r--  net/l2tp/l2tp_core.h | 2
-rw-r--r--  net/l2tp/l2tp_debugfs.c | 22
-rw-r--r--  net/llc/af_llc.c | 5
-rw-r--r--  net/llc/llc_proc.c | 2
-rw-r--r--  net/mac80211/cfg.c | 45
-rw-r--r--  net/mac80211/debugfs_sta.c | 9
-rw-r--r--  net/mac80211/driver-ops.h | 22
-rw-r--r--  net/mac80211/ethtool.c | 8
-rw-r--r--  net/mac80211/ieee80211_i.h | 26
-rw-r--r--  net/mac80211/iface.c | 59
-rw-r--r--  net/mac80211/main.c | 13
-rw-r--r--  net/mac80211/mesh.h | 22
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 89
-rw-r--r--  net/mac80211/mlme.c | 69
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 2
-rw-r--r--  net/mac80211/rx.c | 9
-rw-r--r--  net/mac80211/trace.h | 7
-rw-r--r--  net/mac80211/tx.c | 10
-rw-r--r--  net/mac80211/util.c | 13
-rw-r--r--  net/mac80211/wpa.c | 4
-rw-r--r--  net/mctp/af_mctp.c | 3
-rw-r--r--  net/mctp/device.c | 53
-rw-r--r--  net/mctp/neigh.c | 9
-rw-r--r--  net/mctp/route.c | 7
-rw-r--r--  net/mctp/test/route-test.c | 5
-rw-r--r--  net/mpls/af_mpls.c | 8
-rw-r--r--  net/mpls/internal.h | 13
-rw-r--r--  net/mptcp/options.c | 119
-rw-r--r--  net/mptcp/pm.c | 34
-rw-r--r--  net/mptcp/pm_netlink.c | 215
-rw-r--r--  net/mptcp/protocol.c | 476
-rw-r--r--  net/mptcp/protocol.h | 69
-rw-r--r--  net/mptcp/sockopt.c | 262
-rw-r--r--  net/mptcp/subflow.c | 34
-rw-r--r--  net/mptcp/token.c | 1
-rw-r--r--  net/netfilter/Kconfig | 6
-rw-r--r--  net/netfilter/Makefile | 3
-rw-r--r--  net/netfilter/core.c | 29
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 7
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 68
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 14
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 4
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 2
-rw-r--r--  net/netfilter/nf_flow_table_inet.c | 26
-rw-r--r--  net/netfilter/nf_nat_core.c | 47
-rw-r--r--  net/netfilter/nf_nat_masquerade.c | 4
-rw-r--r--  net/netfilter/nf_synproxy_core.c | 1
-rw-r--r--  net/netfilter/nf_tables_api.c | 161
-rw-r--r--  net/netfilter/nf_tables_core.c | 87
-rw-r--r--  net/netfilter/nf_tables_trace.c | 2
-rw-r--r--  net/netfilter/nfnetlink_hook.c | 1
-rw-r--r--  net/netfilter/nfnetlink_log.c | 5
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 14
-rw-r--r--  net/netfilter/nft_bitwise.c | 95
-rw-r--r--  net/netfilter/nft_connlimit.c | 26
-rw-r--r--  net/netfilter/nft_counter.c | 58
-rw-r--r--  net/netfilter/nft_ct.c | 4
-rw-r--r--  net/netfilter/nft_fwd_netdev.c | 7
-rw-r--r--  net/netfilter/nft_last.c | 69
-rw-r--r--  net/netfilter/nft_limit.c | 172
-rw-r--r--  net/netfilter/nft_meta.c | 48
-rw-r--r--  net/netfilter/nft_numgen.c | 34
-rw-r--r--  net/netfilter/nft_payload.c | 60
-rw-r--r--  net/netfilter/nft_quota.c | 52
-rw-r--r--  net/netfilter/nft_reject_netdev.c | 1
-rw-r--r--  net/netfilter/nft_set_pipapo.c | 8
-rw-r--r--  net/netfilter/nft_set_pipapo_avx2.c | 4
-rw-r--r--  net/netfilter/xt_CT.c | 3
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 2
-rw-r--r--  net/netlabel/netlabel_user.h | 2
-rw-r--r--  net/netlink/af_netlink.c | 6
-rw-r--r--  net/netrom/af_netrom.c | 12
-rw-r--r--  net/nfc/nci/uart.c | 5
-rw-r--r--  net/openvswitch/conntrack.c | 21
-rw-r--r--  net/openvswitch/flow.c | 16
-rw-r--r--  net/openvswitch/vport-netdev.c | 9
-rw-r--r--  net/openvswitch/vport.h | 2
-rw-r--r--  net/packet/af_packet.c | 32
-rw-r--r--  net/rds/send.c | 2
-rw-r--r--  net/rfkill/core.c | 12
-rw-r--r--  net/rose/rose_in.c | 1
-rw-r--r--  net/sched/act_api.c | 459
-rw-r--r--  net/sched/act_bpf.c | 2
-rw-r--r--  net/sched/act_connmark.c | 2
-rw-r--r--  net/sched/act_csum.c | 19
-rw-r--r--  net/sched/act_ct.c | 64
-rw-r--r--  net/sched/act_ctinfo.c | 2
-rw-r--r--  net/sched/act_gact.c | 38
-rw-r--r--  net/sched/act_gate.c | 51
-rw-r--r--  net/sched/act_ife.c | 2
-rw-r--r--  net/sched/act_ipt.c | 2
-rw-r--r--  net/sched/act_mirred.c | 68
-rw-r--r--  net/sched/act_mpls.c | 54
-rw-r--r--  net/sched/act_nat.c | 2
-rw-r--r--  net/sched/act_pedit.c | 36
-rw-r--r--  net/sched/act_police.c | 27
-rw-r--r--  net/sched/act_sample.c | 32
-rw-r--r--  net/sched/act_simple.c | 2
-rw-r--r--  net/sched/act_skbedit.c | 38
-rw-r--r--  net/sched/act_skbmod.c | 2
-rw-r--r--  net/sched/act_tunnel_key.c | 54
-rw-r--r--  net/sched/act_vlan.c | 48
-rw-r--r--  net/sched/cls_api.c | 280
-rw-r--r--  net/sched/cls_flower.c | 29
-rw-r--r--  net/sched/cls_matchall.c | 27
-rw-r--r--  net/sched/cls_u32.c | 12
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sched/sch_cake.c | 40
-rw-r--r--  net/sched/sch_frag.c | 1
-rw-r--r--  net/sched/sch_generic.c | 83
-rw-r--r--  net/sched/sch_netem.c | 18
-rw-r--r--  net/sched/sch_qfq.c | 6
-rw-r--r--  net/sctp/diag.c | 46
-rw-r--r--  net/sctp/input.c | 27
-rw-r--r--  net/sctp/output.c | 2
-rw-r--r--  net/sctp/outqueue.c | 3
-rw-r--r--  net/sctp/proc.c | 10
-rw-r--r--  net/sctp/sm_statefuns.c | 11
-rw-r--r--  net/sctp/socket.c | 33
-rw-r--r--  net/sctp/transport.c | 26
-rw-r--r--  net/smc/af_smc.c | 103
-rw-r--r--  net/smc/smc_clc.c | 1
-rw-r--r--  net/smc/smc_core.c | 56
-rw-r--r--  net/smc/smc_core.h | 21
-rw-r--r--  net/smc/smc_diag.c | 16
-rw-r--r--  net/smc/smc_ib.c | 2
-rw-r--r--  net/smc/smc_ib.h | 7
-rw-r--r--  net/smc/smc_ism.c | 1
-rw-r--r--  net/smc/smc_llc.c | 19
-rw-r--r--  net/smc/smc_pnet.c | 30
-rw-r--r--  net/smc/smc_tracepoint.h | 23
-rw-r--r--  net/smc/smc_wr.c | 15
-rw-r--r--  net/socket.c | 32
-rw-r--r--  net/switchdev/switchdev.c | 5
-rw-r--r--  net/tipc/bearer.c | 4
-rw-r--r--  net/tipc/crypto.c | 19
-rw-r--r--  net/tipc/link.c | 3
-rw-r--r--  net/tipc/socket.c | 2
-rw-r--r--  net/tls/tls_sw.c | 37
-rw-r--r--  net/unix/af_unix.c | 572
-rw-r--r--  net/unix/diag.c | 23
-rw-r--r--  net/unix/sysctl_net_unix.c | 4
-rw-r--r--  net/vmw_vsock/af_vsock.c | 1
-rw-r--r--  net/vmw_vsock/hyperv_transport.c | 18
-rw-r--r--  net/wireless/chan.c | 78
-rw-r--r--  net/wireless/core.c | 9
-rw-r--r--  net/wireless/core.h | 16
-rw-r--r--  net/wireless/mlme.c | 153
-rw-r--r--  net/wireless/nl80211.c | 123
-rw-r--r--  net/wireless/rdev-ops.h | 17
-rw-r--r--  net/wireless/reg.c | 2
-rw-r--r--  net/wireless/scan.c | 121
-rw-r--r--  net/wireless/sme.c | 22
-rw-r--r--  net/wireless/trace.h | 47
-rw-r--r--  net/wireless/wext-sme.c | 12
-rw-r--r--  net/x25/x25_in.c | 2
-rw-r--r--  net/xdp/xsk.c | 8
-rw-r--r--  net/xdp/xskmap.c | 1
-rw-r--r--  net/xfrm/xfrm_algo.c | 41
-rw-r--r--  net/xfrm/xfrm_compat.c | 6
-rw-r--r--  net/xfrm/xfrm_device.c | 3
-rw-r--r--  net/xfrm/xfrm_input.c | 1
-rw-r--r--  net/xfrm/xfrm_interface.c | 14
-rw-r--r--  net/xfrm/xfrm_output.c | 31
-rw-r--r--  net/xfrm/xfrm_policy.c | 24
-rw-r--r--  net/xfrm/xfrm_state.c | 24
-rw-r--r--  net/xfrm/xfrm_user.c | 42
357 files changed, 16777 insertions(+), 9347 deletions(-)
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 887e73d520e4..1997b7dd265e 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -65,7 +65,7 @@ static int hippi_header(struct sk_buff *skb, struct net_device *dev,
hip->le.src_addr_type = 2; /* 12 bit SC address */
memcpy(hip->le.src_switch_addr, dev->dev_addr + 3, 3);
- memset(&hip->le.reserved, 0, 16);
+ memset_startat(&hip->le, 0, reserved);
hip->snap.dsap = HIPPI_EXTENDED_SAP;
hip->snap.ssap = HIPPI_EXTENDED_SAP;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index abaa5d96ded2..788076b002b3 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -319,8 +319,8 @@ static void vlan_transfer_features(struct net_device *dev,
{
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
- vlandev->gso_max_size = dev->gso_max_size;
- vlandev->gso_max_segs = dev->gso_max_segs;
+ netif_set_gso_max_size(vlandev, dev->gso_max_size);
+ netif_set_gso_max_segs(vlandev, dev->gso_max_segs);
if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
vlandev->hard_header_len = dev->hard_header_len;
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 59bc13b5f14f..acf8c791f320 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -476,10 +476,9 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
type = vhdr->h_vlan_encapsulated_proto;
- rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (!ptype)
- goto out_unlock;
+ goto out;
flush = 0;
@@ -501,8 +500,6 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
ipv6_gro_receive, inet_gro_receive,
head, skb);
-out_unlock:
- rcu_read_unlock();
out:
skb_gro_flush_final(skb, pp, flush);
@@ -516,14 +513,12 @@ static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
struct packet_offload *ptype;
int err = -ENOENT;
- rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype)
err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
ipv6_gro_complete, inet_gro_complete,
skb, nhoff + sizeof(*vhdr));
- rcu_read_unlock();
return err;
}
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index a54535cbcf4c..26d031a43cc1 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -573,8 +573,8 @@ static int vlan_dev_init(struct net_device *dev)
NETIF_F_ALL_FCOE;
dev->features |= dev->hw_features | NETIF_F_LLTX;
- dev->gso_max_size = real_dev->gso_max_size;
- dev->gso_max_segs = real_dev->gso_max_segs;
+ netif_set_gso_max_size(dev, real_dev->gso_max_size);
+ netif_set_gso_max_segs(dev, real_dev->gso_max_segs);
if (dev->features & NETIF_F_VLAN_FEATURES)
netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
@@ -616,7 +616,7 @@ static int vlan_dev_init(struct net_device *dev)
return -ENOMEM;
/* Get vlan's reference to real_dev */
- dev_hold(real_dev);
+ dev_hold_track(real_dev, &vlan->dev_tracker, GFP_KERNEL);
return 0;
}
@@ -848,7 +848,7 @@ static void vlan_dev_free(struct net_device *dev)
vlan->vlan_pcpu_stats = NULL;
/* Get rid of the vlan's reference to real_dev */
- dev_put(vlan->real_dev);
+ dev_put_track(vlan->real_dev, &vlan->dev_tracker);
}
void vlan_setup(struct net_device *dev)
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index ec87dea23719..08bf6c839e25 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -252,7 +252,7 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
stats = dev_get_stats(vlandev, &temp);
seq_printf(seq,
- "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n",
+ "%s VID: %d REORDER_HDR: %i dev->priv_flags: %llx\n",
vlandev->name, vlan->vlan_id,
(int)(vlan->flags & 1), vlandev->priv_flags);
diff --git a/net/Kconfig b/net/Kconfig
index 074472dfa94a..8a1f9d0287de 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -455,4 +455,9 @@ config ETHTOOL_NETLINK
netlink. It provides better extensibility and some new features,
e.g. notification messages.
+config NETDEV_ADDR_LIST_TEST
+ tristate "Unit tests for device address list"
+ default KUNIT_ALL_TESTS
+ depends on KUNIT
+
endif # if NET
diff --git a/net/Kconfig.debug b/net/Kconfig.debug
new file mode 100644
index 000000000000..2f50611df858
--- /dev/null
+++ b/net/Kconfig.debug
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config NET_DEV_REFCNT_TRACKER
+ bool "Enable net device refcount tracking"
+ depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
+ select REF_TRACKER
+ default n
+ help
+ Enable debugging feature to track device references.
+ This adds memory and cpu costs.
+
+config NET_NS_REFCNT_TRACKER
+ bool "Enable networking namespace refcount tracking"
+ depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
+ select REF_TRACKER
+ default n
+ help
+ Enable debugging feature to track netns references.
+ This adds memory and cpu costs.
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index cfca99e295b8..02f43f3e2c56 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -536,7 +536,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
ax25_cb *ax25;
struct net_device *dev;
char devname[IFNAMSIZ];
- unsigned long opt;
+ unsigned int opt;
int res = 0;
if (level != SOL_AX25)
@@ -568,7 +568,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
case AX25_T1:
- if (opt < 1 || opt > ULONG_MAX / HZ) {
+ if (opt < 1 || opt > UINT_MAX / HZ) {
res = -EINVAL;
break;
}
@@ -577,7 +577,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
case AX25_T2:
- if (opt < 1 || opt > ULONG_MAX / HZ) {
+ if (opt < 1 || opt > UINT_MAX / HZ) {
res = -EINVAL;
break;
}
@@ -593,7 +593,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
case AX25_T3:
- if (opt < 1 || opt > ULONG_MAX / HZ) {
+ if (opt < 1 || opt > UINT_MAX / HZ) {
res = -EINVAL;
break;
}
@@ -601,7 +601,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
case AX25_IDLE:
- if (opt > ULONG_MAX / (60 * HZ)) {
+ if (opt > UINT_MAX / (60 * HZ)) {
res = -EINVAL;
break;
}
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index d0a043a51848..256fadb94df3 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -58,7 +58,7 @@ void ax25_dev_device_up(struct net_device *dev)
dev->ax25_ptr = ax25_dev;
ax25_dev->dev = dev;
- dev_hold(dev);
+ dev_hold_track(dev, &ax25_dev->dev_tracker, GFP_ATOMIC);
ax25_dev->forward = NULL;
ax25_dev->values[AX25_VALUES_IPDEFMODE] = AX25_DEF_IPDEFMODE;
@@ -114,7 +114,7 @@ void ax25_dev_device_down(struct net_device *dev)
ax25_dev_list = s->next;
spin_unlock_bh(&ax25_dev_lock);
dev->ax25_ptr = NULL;
- dev_put(dev);
+ dev_put_track(dev, &ax25_dev->dev_tracker);
kfree(ax25_dev);
return;
}
@@ -124,7 +124,7 @@ void ax25_dev_device_down(struct net_device *dev)
s->next = ax25_dev->next;
spin_unlock_bh(&ax25_dev_lock);
dev->ax25_ptr = NULL;
- dev_put(dev);
+ dev_put_track(dev, &ax25_dev->dev_tracker);
kfree(ax25_dev);
return;
}
@@ -188,7 +188,7 @@ void __exit ax25_dev_free(void)
ax25_dev = ax25_dev_list;
while (ax25_dev != NULL) {
s = ax25_dev;
- dev_put(ax25_dev->dev);
+ dev_put_track(ax25_dev->dev, &ax25_dev->dev_tracker);
ax25_dev = ax25_dev->next;
kfree(s);
}
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 058b8f2eef65..494d1ebecac2 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2021.3"
+#define BATADV_SOURCE_VERSION "2022.0"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 433901dcf0c3..f4004cf0ff6f 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1339,6 +1339,7 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
* @bat_priv: the bat priv with all the soft interface information
* @skb: The multicast packet to check
* @orig: an originator to be set to forward the skb to
+ * @is_routable: stores whether the destination is routable
*
* Return: the forwarding mode as enum batadv_forw_mode and in case of
* BATADV_FORW_SINGLE set the orig to the single originator the skb
@@ -1346,17 +1347,16 @@ batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
*/
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
- struct batadv_orig_node **orig)
+ struct batadv_orig_node **orig, int *is_routable)
{
int ret, tt_count, ip_count, unsnoop_count, total_count;
bool is_unsnoopable = false;
unsigned int mcast_fanout;
struct ethhdr *ethhdr;
- int is_routable = 0;
int rtr_count = 0;
ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
- &is_routable);
+ is_routable);
if (ret == -ENOMEM)
return BATADV_FORW_NONE;
else if (ret < 0)
@@ -1369,7 +1369,7 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
unsnoop_count = !is_unsnoopable ? 0 :
atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
- rtr_count = batadv_mcast_forw_rtr_count(bat_priv, is_routable);
+ rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
total_count = tt_count + ip_count + unsnoop_count + rtr_count;
@@ -1689,6 +1689,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
* @bat_priv: the bat priv with all the soft interface information
* @skb: the multicast packet to transmit
* @vid: the vlan identifier
+ * @is_routable: stores whether the destination is routable
*
* Sends copies of a frame with multicast destination to any node that signaled
* interest in it, that is either via the translation table or the according
@@ -1701,7 +1702,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
* is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
*/
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
- unsigned short vid)
+ unsigned short vid, int is_routable)
{
int ret;
@@ -1717,12 +1718,16 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
return ret;
}
+ if (!is_routable)
+ goto skip_mc_router;
+
ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
if (ret != NET_XMIT_SUCCESS) {
kfree_skb(skb);
return ret;
}
+skip_mc_router:
consume_skb(skb);
return ret;
}
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 9fee5da08311..8aec818d0bf6 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -43,7 +43,8 @@ enum batadv_forw_mode {
enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
- struct batadv_orig_node **mcast_single_orig);
+ struct batadv_orig_node **mcast_single_orig,
+ int *is_routable);
int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
struct sk_buff *skb,
@@ -51,7 +52,7 @@ int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node);
int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
- unsigned short vid);
+ unsigned short vid, int is_routable);
void batadv_mcast_init(struct batadv_priv *bat_priv);
@@ -68,7 +69,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig_node);
static inline enum batadv_forw_mode
batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
- struct batadv_orig_node **mcast_single_orig)
+ struct batadv_orig_node **mcast_single_orig,
+ int *is_routable)
{
return BATADV_FORW_ALL;
}
@@ -85,7 +87,7 @@ batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
static inline int
batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
- unsigned short vid)
+ unsigned short vid, int is_routable)
{
kfree_skb(skb);
return NET_XMIT_DROP;
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 29276284d281..00875e1d8c44 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -1368,21 +1368,21 @@ static const struct genl_small_ops batadv_netlink_ops[] = {
{
.cmd = BATADV_CMD_TP_METER,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.doit = batadv_netlink_tp_meter_start,
.internal_flags = BATADV_FLAG_NEED_MESH,
},
{
.cmd = BATADV_CMD_TP_METER_CANCEL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.doit = batadv_netlink_tp_meter_cancel,
.internal_flags = BATADV_FLAG_NEED_MESH,
},
{
.cmd = BATADV_CMD_GET_ROUTING_ALGOS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_algo_dump,
},
{
@@ -1397,68 +1397,68 @@ static const struct genl_small_ops batadv_netlink_ops[] = {
{
.cmd = BATADV_CMD_GET_TRANSTABLE_LOCAL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_tt_local_dump,
},
{
.cmd = BATADV_CMD_GET_TRANSTABLE_GLOBAL,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_tt_global_dump,
},
{
.cmd = BATADV_CMD_GET_ORIGINATORS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_orig_dump,
},
{
.cmd = BATADV_CMD_GET_NEIGHBORS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_hardif_neigh_dump,
},
{
.cmd = BATADV_CMD_GET_GATEWAYS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_gw_dump,
},
{
.cmd = BATADV_CMD_GET_BLA_CLAIM,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_bla_claim_dump,
},
{
.cmd = BATADV_CMD_GET_BLA_BACKBONE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_bla_backbone_dump,
},
{
.cmd = BATADV_CMD_GET_DAT_CACHE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_dat_cache_dump,
},
{
.cmd = BATADV_CMD_GET_MCAST_FLAGS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.dumpit = batadv_mcast_flags_dump,
},
{
.cmd = BATADV_CMD_SET_MESH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.doit = batadv_netlink_set_mesh,
.internal_flags = BATADV_FLAG_NEED_MESH,
},
{
.cmd = BATADV_CMD_SET_HARDIF,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.doit = batadv_netlink_set_hardif,
.internal_flags = BATADV_FLAG_NEED_MESH |
BATADV_FLAG_NEED_HARDIF,
@@ -1474,7 +1474,7 @@ static const struct genl_small_ops batadv_netlink_ops[] = {
{
.cmd = BATADV_CMD_SET_VLAN,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .flags = GENL_ADMIN_PERM,
+ .flags = GENL_UNS_ADMIN_PERM,
.doit = batadv_netlink_set_vlan,
.internal_flags = BATADV_FLAG_NEED_MESH |
BATADV_FLAG_NEED_VLAN,
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 0a7f1d36a6a8..974d726fabb9 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -58,13 +58,9 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
*/
int __init batadv_nc_init(void)
{
- int ret;
-
/* Register our packet type */
- ret = batadv_recv_handler_register(BATADV_CODED,
- batadv_nc_recv_coded_packet);
-
- return ret;
+ return batadv_recv_handler_register(BATADV_CODED,
+ batadv_nc_recv_coded_packet);
}
/**
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 7ee09337fc40..2dbbe6c19609 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -198,6 +198,7 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
int gw_mode;
enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
struct batadv_orig_node *mcast_single_orig = NULL;
+ int mcast_is_routable = 0;
int network_offset = ETH_HLEN;
__be16 proto;
@@ -300,7 +301,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
send:
if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
- &mcast_single_orig);
+ &mcast_single_orig,
+ &mcast_is_routable);
if (forw_mode == BATADV_FORW_NONE)
goto dropped;
@@ -359,7 +361,8 @@ send:
ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
mcast_single_orig);
} else if (forw_mode == BATADV_FORW_SOME) {
- ret = batadv_mcast_forw_send(bat_priv, skb, vid);
+ ret = batadv_mcast_forw_send(bat_priv, skb, vid,
+ mcast_is_routable);
} else {
if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
skb))
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 291770fc9551..a52bba8500e1 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -15,7 +15,7 @@ bluetooth_6lowpan-y := 6lowpan.o
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \
ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \
- eir.o
+ eir.o hci_sync.o
bluetooth-$(CONFIG_BT_BREDR) += sco.o
bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
diff --git a/net/bluetooth/aosp.c b/net/bluetooth/aosp.c
index a1b7762335a5..432ae3aac9e3 100644
--- a/net/bluetooth/aosp.c
+++ b/net/bluetooth/aosp.c
@@ -8,9 +8,43 @@
#include "aosp.h"
+/* Command complete parameters of LE_Get_Vendor_Capabilities_Command
+ * The parameters grow over time. The base version that declares the
+ * version_supported field is v0.95. Refer to
+ * https://cs.android.com/android/platform/superproject/+/master:system/
+ * bt/gd/hci/controller.cc;l=452?q=le_get_vendor_capabilities_handler
+ */
+struct aosp_rp_le_get_vendor_capa {
+ /* v0.95: 15 octets */
+ __u8 status;
+ __u8 max_advt_instances;
+ __u8 offloaded_resolution_of_private_address;
+ __le16 total_scan_results_storage;
+ __u8 max_irk_list_sz;
+ __u8 filtering_support;
+ __u8 max_filter;
+ __u8 activity_energy_info_support;
+ __le16 version_supported;
+ __le16 total_num_of_advt_tracked;
+ __u8 extended_scan_support;
+ __u8 debug_logging_supported;
+ /* v0.96: 16 octets */
+ __u8 le_address_generation_offloading_support;
+ /* v0.98: 21 octets */
+ __le32 a2dp_source_offload_capability_mask;
+ __u8 bluetooth_quality_report_support;
+ /* v1.00: 25 octets */
+ __le32 dynamic_audio_buffer_support;
+} __packed;
+
+#define VENDOR_CAPA_BASE_SIZE 15
+#define VENDOR_CAPA_0_98_SIZE 21
+
void aosp_do_open(struct hci_dev *hdev)
{
struct sk_buff *skb;
+ struct aosp_rp_le_get_vendor_capa *rp;
+ u16 version_supported;
if (!hdev->aosp_capable)
return;
@@ -20,9 +54,54 @@ void aosp_do_open(struct hci_dev *hdev)
/* LE Get Vendor Capabilities Command */
skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x153), 0, NULL,
HCI_CMD_TIMEOUT);
- if (IS_ERR(skb))
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "AOSP get vendor capabilities (%ld)",
+ PTR_ERR(skb));
return;
+ }
+
+ /* A basic length check */
+ if (skb->len < VENDOR_CAPA_BASE_SIZE)
+ goto length_error;
+
+ rp = (struct aosp_rp_le_get_vendor_capa *)skb->data;
+
+ version_supported = le16_to_cpu(rp->version_supported);
+ /* AOSP displays the version number like v0.98, v1.00, etc. */
+ bt_dev_info(hdev, "AOSP extensions version v%u.%02u",
+ version_supported >> 8, version_supported & 0xff);
+
+ /* Do not support very old versions. */
+ if (version_supported < 95) {
+ bt_dev_warn(hdev, "AOSP capabilities version %u too old",
+ version_supported);
+ goto done;
+ }
+
+ if (version_supported < 98) {
+ bt_dev_warn(hdev, "AOSP quality report is not supported");
+ goto done;
+ }
+
+ if (skb->len < VENDOR_CAPA_0_98_SIZE)
+ goto length_error;
+
+ /* The bluetooth_quality_report_support is defined at version
+ * v0.98. Refer to
+ * https://cs.android.com/android/platform/superproject/+/
+ * master:system/bt/gd/hci/controller.cc;l=477
+ */
+ if (rp->bluetooth_quality_report_support) {
+ hdev->aosp_quality_report = true;
+ bt_dev_info(hdev, "AOSP quality report is supported");
+ }
+
+ goto done;
+
+length_error:
+ bt_dev_err(hdev, "AOSP capabilities length %d too short", skb->len);
+done:
kfree_skb(skb);
}
@@ -33,3 +112,90 @@ void aosp_do_close(struct hci_dev *hdev)
bt_dev_dbg(hdev, "Cleanup of AOSP extension");
}
+
+/* BQR command */
+#define BQR_OPCODE hci_opcode_pack(0x3f, 0x015e)
+
+/* BQR report action */
+#define REPORT_ACTION_ADD 0x00
+#define REPORT_ACTION_DELETE 0x01
+#define REPORT_ACTION_CLEAR 0x02
+
+/* BQR event masks */
+#define QUALITY_MONITORING BIT(0)
+#define APPRAOCHING_LSTO BIT(1)
+#define A2DP_AUDIO_CHOPPY BIT(2)
+#define SCO_VOICE_CHOPPY BIT(3)
+
+#define DEFAULT_BQR_EVENT_MASK (QUALITY_MONITORING | APPRAOCHING_LSTO | \
+ A2DP_AUDIO_CHOPPY | SCO_VOICE_CHOPPY)
+
+/* Reporting at milliseconds so as not to stress the controller too much.
+ * Range: 0 ~ 65535 ms
+ */
+#define DEFALUT_REPORT_INTERVAL_MS 5000
+
+struct aosp_bqr_cp {
+ __u8 report_action;
+ __u32 event_mask;
+ __u16 min_report_interval;
+} __packed;
+
+static int enable_quality_report(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ struct aosp_bqr_cp cp;
+
+ cp.report_action = REPORT_ACTION_ADD;
+ cp.event_mask = DEFAULT_BQR_EVENT_MASK;
+ cp.min_report_interval = DEFALUT_REPORT_INTERVAL_MS;
+
+ skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Enabling Android BQR failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static int disable_quality_report(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ struct aosp_bqr_cp cp = { 0 };
+
+ cp.report_action = REPORT_ACTION_CLEAR;
+
+ skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Disabling Android BQR failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+
+bool aosp_has_quality_report(struct hci_dev *hdev)
+{
+ return hdev->aosp_quality_report;
+}
+
+int aosp_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ if (!aosp_has_quality_report(hdev))
+ return -EOPNOTSUPP;
+
+ bt_dev_dbg(hdev, "quality report enable %d", enable);
+
+ /* Enable or disable the quality report feature. */
+ if (enable)
+ return enable_quality_report(hdev);
+ else
+ return disable_quality_report(hdev);
+}
diff --git a/net/bluetooth/aosp.h b/net/bluetooth/aosp.h
index 328fc6d39f70..2fd8886d51b2 100644
--- a/net/bluetooth/aosp.h
+++ b/net/bluetooth/aosp.h
@@ -8,9 +8,22 @@
void aosp_do_open(struct hci_dev *hdev);
void aosp_do_close(struct hci_dev *hdev);
+bool aosp_has_quality_report(struct hci_dev *hdev);
+int aosp_set_quality_report(struct hci_dev *hdev, bool enable);
+
#else
static inline void aosp_do_open(struct hci_dev *hdev) {}
static inline void aosp_do_close(struct hci_dev *hdev) {}
+static inline bool aosp_has_quality_report(struct hci_dev *hdev)
+{
+ return false;
+}
+
+static inline int aosp_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ return -EOPNOTSUPP;
+}
+
#endif
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index d515571b2afb..57d509d77cb4 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,6 +24,7 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <linux/compat.h>
#include <linux/export.h>
#include <linux/file.h>
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 0a2d78e811cf..83eb84e8e688 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -501,9 +501,7 @@ static int __init cmtp_init(void)
{
BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION);
- cmtp_init_sockets();
-
- return 0;
+ return cmtp_init_sockets();
}
static void __exit cmtp_exit(void)
diff --git a/net/bluetooth/eir.h b/net/bluetooth/eir.h
index 724662f8f8b1..05e2e917fc25 100644
--- a/net/bluetooth/eir.h
+++ b/net/bluetooth/eir.h
@@ -5,6 +5,8 @@
* Copyright (C) 2021 Intel Corporation
*/
+#include <asm/unaligned.h>
+
void eir_create(struct hci_dev *hdev, u8 *data);
u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr);
diff --git a/net/bluetooth/hci_codec.c b/net/bluetooth/hci_codec.c
index f0421d0edaa3..38201532f58e 100644
--- a/net/bluetooth/hci_codec.c
+++ b/net/bluetooth/hci_codec.c
@@ -25,9 +25,11 @@ static int hci_codec_list_add(struct list_head *list,
}
entry->transport = sent->transport;
entry->len = len;
- entry->num_caps = rp->num_caps;
- if (rp->num_caps)
+ entry->num_caps = 0;
+ if (rp) {
+ entry->num_caps = rp->num_caps;
memcpy(entry->caps, caps, len);
+ }
list_add(&entry->list, list);
return 0;
@@ -58,6 +60,18 @@ static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport,
__u32 len;
cmd->transport = i;
+
+ /* If Read_Codec_Capabilities command is not supported
+ * then just add codec to the list without caps
+ */
+ if (!(hdev->commands[45] & 0x08)) {
+ hci_dev_lock(hdev);
+ hci_codec_list_add(&hdev->local_codecs, cmd,
+ NULL, NULL, 0);
+ hci_dev_unlock(hdev);
+ continue;
+ }
+
skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS,
sizeof(*cmd), cmd,
HCI_CMD_TIMEOUT);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index bd669c95b9a7..04ebe901e86f 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -108,7 +108,7 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
break;
}
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
}
static void hci_conn_cleanup(struct hci_conn *conn)
@@ -900,288 +900,56 @@ void hci_le_conn_failed(struct hci_conn *conn, u8 status)
hci_conn_del(conn);
- /* The suspend notifier is waiting for all devices to disconnect and an
- * LE connect cancel will result in an hci_le_conn_failed. Once the last
- * connection is deleted, we should also wake the suspend queue to
- * complete suspend operations.
- */
- if (list_empty(&hdev->conn_hash.list) &&
- test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
- wake_up(&hdev->suspend_wait_q);
- }
-
/* Since we may have temporarily stopped the background scanning in
* favor of connection establishment, we should restart it.
*/
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
- /* Re-enable advertising in case this was a failed connection
+ /* Enable advertising in case this was a failed connection
* attempt as a peripheral.
*/
- hci_req_reenable_advertising(hdev);
+ hci_enable_advertising(hdev);
}
-static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
{
- struct hci_conn *conn;
+ struct hci_conn *conn = data;
hci_dev_lock(hdev);
- conn = hci_lookup_le_connect(hdev);
-
- if (hdev->adv_instance_cnt)
- hci_req_resume_adv_instances(hdev);
-
- if (!status) {
+ if (!err) {
hci_connect_le_scan_cleanup(conn);
goto done;
}
- bt_dev_err(hdev, "request failed to create LE connection: "
- "status 0x%2.2x", status);
+ bt_dev_err(hdev, "request failed to create LE connection: err %d", err);
if (!conn)
goto done;
- hci_le_conn_failed(conn, status);
+ hci_le_conn_failed(conn, err);
done:
hci_dev_unlock(hdev);
}
-static bool conn_use_rpa(struct hci_conn *conn)
+static int hci_connect_le_sync(struct hci_dev *hdev, void *data)
{
- struct hci_dev *hdev = conn->hdev;
-
- return hci_dev_test_flag(hdev, HCI_PRIVACY);
-}
-
-static void set_ext_conn_params(struct hci_conn *conn,
- struct hci_cp_le_ext_conn_param *p)
-{
- struct hci_dev *hdev = conn->hdev;
+ struct hci_conn *conn = data;
- memset(p, 0, sizeof(*p));
+ bt_dev_dbg(hdev, "conn %p", conn);
- p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
- p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
- p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
- p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
- p->conn_latency = cpu_to_le16(conn->le_conn_latency);
- p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
- p->min_ce_len = cpu_to_le16(0x0000);
- p->max_ce_len = cpu_to_le16(0x0000);
-}
-
-static void hci_req_add_le_create_conn(struct hci_request *req,
- struct hci_conn *conn,
- bdaddr_t *direct_rpa)
-{
- struct hci_dev *hdev = conn->hdev;
- u8 own_addr_type;
-
- /* If direct address was provided we use it instead of current
- * address.
- */
- if (direct_rpa) {
- if (bacmp(&req->hdev->random_addr, direct_rpa))
- hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
- direct_rpa);
-
- /* direct address is always RPA */
- own_addr_type = ADDR_LE_DEV_RANDOM;
- } else {
- /* Update random address, but set require_privacy to false so
- * that we never connect with an non-resolvable address.
- */
- if (hci_update_random_address(req, false, conn_use_rpa(conn),
- &own_addr_type))
- return;
- }
-
- if (use_ext_conn(hdev)) {
- struct hci_cp_le_ext_create_conn *cp;
- struct hci_cp_le_ext_conn_param *p;
- u8 data[sizeof(*cp) + sizeof(*p) * 3];
- u32 plen;
-
- cp = (void *) data;
- p = (void *) cp->data;
-
- memset(cp, 0, sizeof(*cp));
-
- bacpy(&cp->peer_addr, &conn->dst);
- cp->peer_addr_type = conn->dst_type;
- cp->own_addr_type = own_addr_type;
-
- plen = sizeof(*cp);
-
- if (scan_1m(hdev)) {
- cp->phys |= LE_SCAN_PHY_1M;
- set_ext_conn_params(conn, p);
-
- p++;
- plen += sizeof(*p);
- }
-
- if (scan_2m(hdev)) {
- cp->phys |= LE_SCAN_PHY_2M;
- set_ext_conn_params(conn, p);
-
- p++;
- plen += sizeof(*p);
- }
-
- if (scan_coded(hdev)) {
- cp->phys |= LE_SCAN_PHY_CODED;
- set_ext_conn_params(conn, p);
-
- plen += sizeof(*p);
- }
-
- hci_req_add(req, HCI_OP_LE_EXT_CREATE_CONN, plen, data);
-
- } else {
- struct hci_cp_le_create_conn cp;
-
- memset(&cp, 0, sizeof(cp));
-
- cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
- cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
-
- bacpy(&cp.peer_addr, &conn->dst);
- cp.peer_addr_type = conn->dst_type;
- cp.own_address_type = own_addr_type;
- cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
- cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
- cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
- cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
- cp.min_ce_len = cpu_to_le16(0x0000);
- cp.max_ce_len = cpu_to_le16(0x0000);
-
- hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
- }
-
- conn->state = BT_CONNECT;
- clear_bit(HCI_CONN_SCANNING, &conn->flags);
-}
-
-static void hci_req_directed_advertising(struct hci_request *req,
- struct hci_conn *conn)
-{
- struct hci_dev *hdev = req->hdev;
- u8 own_addr_type;
- u8 enable;
-
- if (ext_adv_capable(hdev)) {
- struct hci_cp_le_set_ext_adv_params cp;
- bdaddr_t random_addr;
-
- /* Set require_privacy to false so that the remote device has a
- * chance of identifying us.
- */
- if (hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
- &own_addr_type, &random_addr) < 0)
- return;
-
- memset(&cp, 0, sizeof(cp));
-
- cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
- cp.own_addr_type = own_addr_type;
- cp.channel_map = hdev->le_adv_channel_map;
- cp.tx_power = HCI_TX_POWER_INVALID;
- cp.primary_phy = HCI_ADV_PHY_1M;
- cp.secondary_phy = HCI_ADV_PHY_1M;
- cp.handle = 0; /* Use instance 0 for directed adv */
- cp.own_addr_type = own_addr_type;
- cp.peer_addr_type = conn->dst_type;
- bacpy(&cp.peer_addr, &conn->dst);
-
- /* As per Core Spec 5.2 Vol 2, PART E, Sec 7.8.53, for
- * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
- * does not supports advertising data when the advertising set already
- * contains some, the controller shall return erroc code 'Invalid
- * HCI Command Parameters(0x12).
- * So it is required to remove adv set for handle 0x00. since we use
- * instance 0 for directed adv.
- */
- __hci_req_remove_ext_adv_instance(req, cp.handle);
-
- hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
-
- if (own_addr_type == ADDR_LE_DEV_RANDOM &&
- bacmp(&random_addr, BDADDR_ANY) &&
- bacmp(&random_addr, &hdev->random_addr)) {
- struct hci_cp_le_set_adv_set_rand_addr cp;
-
- memset(&cp, 0, sizeof(cp));
-
- cp.handle = 0;
- bacpy(&cp.bdaddr, &random_addr);
-
- hci_req_add(req,
- HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
- sizeof(cp), &cp);
- }
-
- __hci_req_enable_ext_advertising(req, 0x00);
- } else {
- struct hci_cp_le_set_adv_param cp;
-
- /* Clear the HCI_LE_ADV bit temporarily so that the
- * hci_update_random_address knows that it's safe to go ahead
- * and write a new random address. The flag will be set back on
- * as soon as the SET_ADV_ENABLE HCI command completes.
- */
- hci_dev_clear_flag(hdev, HCI_LE_ADV);
-
- /* Set require_privacy to false so that the remote device has a
- * chance of identifying us.
- */
- if (hci_update_random_address(req, false, conn_use_rpa(conn),
- &own_addr_type) < 0)
- return;
-
- memset(&cp, 0, sizeof(cp));
-
- /* Some controllers might reject command if intervals are not
- * within range for undirected advertising.
- * BCM20702A0 is known to be affected by this.
- */
- cp.min_interval = cpu_to_le16(0x0020);
- cp.max_interval = cpu_to_le16(0x0020);
-
- cp.type = LE_ADV_DIRECT_IND;
- cp.own_address_type = own_addr_type;
- cp.direct_addr_type = conn->dst_type;
- bacpy(&cp.direct_addr, &conn->dst);
- cp.channel_map = hdev->le_adv_channel_map;
-
- hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
-
- enable = 0x01;
- hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
- &enable);
- }
-
- conn->state = BT_CONNECT;
+ return hci_le_create_conn_sync(hdev, conn);
}
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, bool dst_resolved, u8 sec_level,
- u16 conn_timeout, u8 role, bdaddr_t *direct_rpa)
+ u16 conn_timeout, u8 role)
{
- struct hci_conn_params *params;
struct hci_conn *conn;
struct smp_irk *irk;
- struct hci_request req;
int err;
- /* This ensures that during disable le_scan address resolution
- * will not be disabled if it is followed by le_create_conn
- */
- bool rpa_le_conn = true;
-
/* Let's make sure that le is enabled.*/
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
if (lmp_le_capable(hdev))
@@ -1240,68 +1008,13 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
conn->sec_level = BT_SECURITY_LOW;
conn->conn_timeout = conn_timeout;
- hci_req_init(&req, hdev);
-
- /* Disable advertising if we're active. For central role
- * connections most controllers will refuse to connect if
- * advertising is enabled, and for peripheral role connections we
- * anyway have to disable it in order to start directed
- * advertising. Any registered advertisements will be
- * re-enabled after the connection attempt is finished.
- */
- if (hci_dev_test_flag(hdev, HCI_LE_ADV))
- __hci_req_pause_adv_instances(&req);
-
- /* If requested to connect as peripheral use directed advertising */
- if (conn->role == HCI_ROLE_SLAVE) {
- /* If we're active scanning most controllers are unable
- * to initiate advertising. Simply reject the attempt.
- */
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
- hdev->le_scan_type == LE_SCAN_ACTIVE) {
- hci_req_purge(&req);
- hci_conn_del(conn);
- return ERR_PTR(-EBUSY);
- }
-
- hci_req_directed_advertising(&req, conn);
- goto create_conn;
- }
-
- params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
- if (params) {
- conn->le_conn_min_interval = params->conn_min_interval;
- conn->le_conn_max_interval = params->conn_max_interval;
- conn->le_conn_latency = params->conn_latency;
- conn->le_supv_timeout = params->supervision_timeout;
- } else {
- conn->le_conn_min_interval = hdev->le_conn_min_interval;
- conn->le_conn_max_interval = hdev->le_conn_max_interval;
- conn->le_conn_latency = hdev->le_conn_latency;
- conn->le_supv_timeout = hdev->le_supv_timeout;
- }
-
- /* If controller is scanning, we stop it since some controllers are
- * not able to scan and connect at the same time. Also set the
- * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
- * handler for scan disabling knows to set the correct discovery
- * state.
- */
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
- hci_req_add_le_scan_disable(&req, rpa_le_conn);
- hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
- }
-
- hci_req_add_le_create_conn(&req, conn, direct_rpa);
+ conn->state = BT_CONNECT;
+ clear_bit(HCI_CONN_SCANNING, &conn->flags);
-create_conn:
- err = hci_req_run(&req, create_le_conn_complete);
+ err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn,
+ create_le_conn_complete);
if (err) {
hci_conn_del(conn);
-
- if (hdev->adv_instance_cnt)
- hci_req_resume_adv_instances(hdev);
-
return ERR_PTR(err);
}
@@ -1411,7 +1124,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
conn->conn_timeout = conn_timeout;
conn->conn_reason = conn_reason;
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
done:
hci_conn_hold(conn);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 8d33aa64846b..2b7bd3655b07 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -62,824 +62,6 @@ DEFINE_MUTEX(hci_cb_list_lock);
/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
-static int hci_reset_req(struct hci_request *req, unsigned long opt)
-{
- BT_DBG("%s %ld", req->hdev->name, opt);
-
- /* Reset device */
- set_bit(HCI_RESET, &req->hdev->flags);
- hci_req_add(req, HCI_OP_RESET, 0, NULL);
- return 0;
-}
-
-static void bredr_init(struct hci_request *req)
-{
- req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
-
- /* Read Local Supported Features */
- hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
-
- /* Read Local Version */
- hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
- /* Read BD Address */
- hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
-}
-
-static void amp_init1(struct hci_request *req)
-{
- req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
-
- /* Read Local Version */
- hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
- /* Read Local Supported Commands */
- hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
-
- /* Read Local AMP Info */
- hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
-
- /* Read Data Blk size */
- hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
-
- /* Read Flow Control Mode */
- hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
-
- /* Read Location Data */
- hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
-}
-
-static int amp_init2(struct hci_request *req)
-{
- /* Read Local Supported Features. Not all AMP controllers
- * support this so it's placed conditionally in the second
- * stage init.
- */
- if (req->hdev->commands[14] & 0x20)
- hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
-
- return 0;
-}
-
-static int hci_init1_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
-
- BT_DBG("%s %ld", hdev->name, opt);
-
- /* Reset */
- if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
- hci_reset_req(req, 0);
-
- switch (hdev->dev_type) {
- case HCI_PRIMARY:
- bredr_init(req);
- break;
- case HCI_AMP:
- amp_init1(req);
- break;
- default:
- bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
- break;
- }
-
- return 0;
-}
-
-static void bredr_setup(struct hci_request *req)
-{
- __le16 param;
- __u8 flt_type;
-
- /* Read Buffer Size (ACL mtu, max pkt, etc.) */
- hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
-
- /* Read Class of Device */
- hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
-
- /* Read Local Name */
- hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
-
- /* Read Voice Setting */
- hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
-
- /* Read Number of Supported IAC */
- hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
-
- /* Read Current IAC LAP */
- hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
-
- /* Clear Event Filters */
- flt_type = HCI_FLT_CLEAR_ALL;
- hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
-
- /* Connection accept timeout ~20 secs */
- param = cpu_to_le16(0x7d00);
- hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
-}
-
-static void le_setup(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
-
- /* Read LE Buffer Size */
- hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
-
- /* Read LE Local Supported Features */
- hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
-
- /* Read LE Supported States */
- hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
-
- /* LE-only controllers have LE implicitly enabled */
- if (!lmp_bredr_capable(hdev))
- hci_dev_set_flag(hdev, HCI_LE_ENABLED);
-}
-
-static void hci_setup_event_mask(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
-
- /* The second byte is 0xff instead of 0x9f (two reserved bits
- * disabled) since a Broadcom 1.2 dongle doesn't respond to the
- * command otherwise.
- */
- u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
-
- /* CSR 1.1 dongles does not accept any bitfield so don't try to set
- * any event mask for pre 1.2 devices.
- */
- if (hdev->hci_ver < BLUETOOTH_VER_1_2)
- return;
-
- if (lmp_bredr_capable(hdev)) {
- events[4] |= 0x01; /* Flow Specification Complete */
- } else {
- /* Use a different default for LE-only devices */
- memset(events, 0, sizeof(events));
- events[1] |= 0x20; /* Command Complete */
- events[1] |= 0x40; /* Command Status */
- events[1] |= 0x80; /* Hardware Error */
-
- /* If the controller supports the Disconnect command, enable
- * the corresponding event. In addition enable packet flow
- * control related events.
- */
- if (hdev->commands[0] & 0x20) {
- events[0] |= 0x10; /* Disconnection Complete */
- events[2] |= 0x04; /* Number of Completed Packets */
- events[3] |= 0x02; /* Data Buffer Overflow */
- }
-
- /* If the controller supports the Read Remote Version
- * Information command, enable the corresponding event.
- */
- if (hdev->commands[2] & 0x80)
- events[1] |= 0x08; /* Read Remote Version Information
- * Complete
- */
-
- if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
- events[0] |= 0x80; /* Encryption Change */
- events[5] |= 0x80; /* Encryption Key Refresh Complete */
- }
- }
-
- if (lmp_inq_rssi_capable(hdev) ||
- test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
- events[4] |= 0x02; /* Inquiry Result with RSSI */
-
- if (lmp_ext_feat_capable(hdev))
- events[4] |= 0x04; /* Read Remote Extended Features Complete */
-
- if (lmp_esco_capable(hdev)) {
- events[5] |= 0x08; /* Synchronous Connection Complete */
- events[5] |= 0x10; /* Synchronous Connection Changed */
- }
-
- if (lmp_sniffsubr_capable(hdev))
- events[5] |= 0x20; /* Sniff Subrating */
-
- if (lmp_pause_enc_capable(hdev))
- events[5] |= 0x80; /* Encryption Key Refresh Complete */
-
- if (lmp_ext_inq_capable(hdev))
- events[5] |= 0x40; /* Extended Inquiry Result */
-
- if (lmp_no_flush_capable(hdev))
- events[7] |= 0x01; /* Enhanced Flush Complete */
-
- if (lmp_lsto_capable(hdev))
- events[6] |= 0x80; /* Link Supervision Timeout Changed */
-
- if (lmp_ssp_capable(hdev)) {
- events[6] |= 0x01; /* IO Capability Request */
- events[6] |= 0x02; /* IO Capability Response */
- events[6] |= 0x04; /* User Confirmation Request */
- events[6] |= 0x08; /* User Passkey Request */
- events[6] |= 0x10; /* Remote OOB Data Request */
- events[6] |= 0x20; /* Simple Pairing Complete */
- events[7] |= 0x04; /* User Passkey Notification */
- events[7] |= 0x08; /* Keypress Notification */
- events[7] |= 0x10; /* Remote Host Supported
- * Features Notification
- */
- }
-
- if (lmp_le_capable(hdev))
- events[7] |= 0x20; /* LE Meta-Event */
-
- hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
-}
-
-static int hci_init2_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
-
- if (hdev->dev_type == HCI_AMP)
- return amp_init2(req);
-
- if (lmp_bredr_capable(hdev))
- bredr_setup(req);
- else
- hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
-
- if (lmp_le_capable(hdev))
- le_setup(req);
-
- /* All Bluetooth 1.2 and later controllers should support the
- * HCI command for reading the local supported commands.
- *
- * Unfortunately some controllers indicate Bluetooth 1.2 support,
- * but do not have support for this command. If that is the case,
- * the driver can quirk the behavior and skip reading the local
- * supported commands.
- */
- if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
- !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
- hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
-
- if (lmp_ssp_capable(hdev)) {
- /* When SSP is available, the host features page should be
- * available as well. However, some controllers report max_page
- * as 0 until SSP has been enabled. To get proper debugging
- * output, force max_page to a minimum of 1.
- */
- hdev->max_page = 0x01;
-
- if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
- u8 mode = 0x01;
-
- hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
- sizeof(mode), &mode);
- } else {
- struct hci_cp_write_eir cp;
-
- memset(hdev->eir, 0, sizeof(hdev->eir));
- memset(&cp, 0, sizeof(cp));
-
- hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
- }
- }
-
- if (lmp_inq_rssi_capable(hdev) ||
- test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
- u8 mode;
-
- /* If Extended Inquiry Result events are supported, then
- * they are clearly preferred over Inquiry Result with RSSI
- * events.
- */
- mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
-
- hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
- }
-
- if (lmp_inq_tx_pwr_capable(hdev))
- hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
-
- if (lmp_ext_feat_capable(hdev)) {
- struct hci_cp_read_local_ext_features cp;
-
- cp.page = 0x01;
- hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
- sizeof(cp), &cp);
- }
-
- if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
- u8 enable = 1;
- hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
- &enable);
- }
-
- return 0;
-}
-
-static void hci_setup_link_policy(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_write_def_link_policy cp;
- u16 link_policy = 0;
-
- if (lmp_rswitch_capable(hdev))
- link_policy |= HCI_LP_RSWITCH;
- if (lmp_hold_capable(hdev))
- link_policy |= HCI_LP_HOLD;
- if (lmp_sniff_capable(hdev))
- link_policy |= HCI_LP_SNIFF;
- if (lmp_park_capable(hdev))
- link_policy |= HCI_LP_PARK;
-
- cp.policy = cpu_to_le16(link_policy);
- hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
-}
-
-static void hci_set_le_support(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_write_le_host_supported cp;
-
- /* LE-only devices do not support explicit enablement */
- if (!lmp_bredr_capable(hdev))
- return;
-
- memset(&cp, 0, sizeof(cp));
-
- if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
- cp.le = 0x01;
- cp.simul = 0x00;
- }
-
- if (cp.le != lmp_host_le_capable(hdev))
- hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
- &cp);
-}
-
-static void hci_set_event_mask_page_2(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
- bool changed = false;
-
- /* If Connectionless Peripheral Broadcast central role is supported,
- * enable all necessary events for it.
- */
- if (lmp_cpb_central_capable(hdev)) {
- events[1] |= 0x40; /* Triggered Clock Capture */
- events[1] |= 0x80; /* Synchronization Train Complete */
- events[2] |= 0x10; /* Peripheral Page Response Timeout */
- events[2] |= 0x20; /* CPB Channel Map Change */
- changed = true;
- }
-
- /* If Connectionless Peripheral Broadcast peripheral role is supported,
- * enable all necessary events for it.
- */
- if (lmp_cpb_peripheral_capable(hdev)) {
- events[2] |= 0x01; /* Synchronization Train Received */
- events[2] |= 0x02; /* CPB Receive */
- events[2] |= 0x04; /* CPB Timeout */
- events[2] |= 0x08; /* Truncated Page Complete */
- changed = true;
- }
-
- /* Enable Authenticated Payload Timeout Expired event if supported */
- if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
- events[2] |= 0x80;
- changed = true;
- }
-
- /* Some Broadcom based controllers indicate support for the Set
- * Event Mask Page 2 command, but then actually do not support it.
- * Since the default value is all bits set to zero, the command is
- * only required if the event mask has to be changed. In case no
- * change to the event mask is needed, skip this command.
- */
- if (changed)
- hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
- sizeof(events), events);
-}
-
-static int hci_init3_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
- u8 p;
-
- hci_setup_event_mask(req);
-
- if (hdev->commands[6] & 0x20 &&
- !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
- struct hci_cp_read_stored_link_key cp;
-
- bacpy(&cp.bdaddr, BDADDR_ANY);
- cp.read_all = 0x01;
- hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
- }
-
- if (hdev->commands[5] & 0x10)
- hci_setup_link_policy(req);
-
- if (hdev->commands[8] & 0x01)
- hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
-
- if (hdev->commands[18] & 0x04 &&
- !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
- hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
-
- /* Some older Broadcom based Bluetooth 1.2 controllers do not
- * support the Read Page Scan Type command. Check support for
- * this command in the bit mask of supported commands.
- */
- if (hdev->commands[13] & 0x01)
- hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
-
- if (lmp_le_capable(hdev)) {
- u8 events[8];
-
- memset(events, 0, sizeof(events));
-
- if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
- events[0] |= 0x10; /* LE Long Term Key Request */
-
- /* If controller supports the Connection Parameters Request
- * Link Layer Procedure, enable the corresponding event.
- */
- if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
- events[0] |= 0x20; /* LE Remote Connection
- * Parameter Request
- */
-
- /* If the controller supports the Data Length Extension
- * feature, enable the corresponding event.
- */
- if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
- events[0] |= 0x40; /* LE Data Length Change */
-
- /* If the controller supports LL Privacy feature, enable
- * the corresponding event.
- */
- if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
- events[1] |= 0x02; /* LE Enhanced Connection
- * Complete
- */
-
- /* If the controller supports Extended Scanner Filter
- * Policies, enable the corresponding event.
- */
- if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
- events[1] |= 0x04; /* LE Direct Advertising
- * Report
- */
-
- /* If the controller supports Channel Selection Algorithm #2
- * feature, enable the corresponding event.
- */
- if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
- events[2] |= 0x08; /* LE Channel Selection
- * Algorithm
- */
-
- /* If the controller supports the LE Set Scan Enable command,
- * enable the corresponding advertising report event.
- */
- if (hdev->commands[26] & 0x08)
- events[0] |= 0x02; /* LE Advertising Report */
-
- /* If the controller supports the LE Create Connection
- * command, enable the corresponding event.
- */
- if (hdev->commands[26] & 0x10)
- events[0] |= 0x01; /* LE Connection Complete */
-
- /* If the controller supports the LE Connection Update
- * command, enable the corresponding event.
- */
- if (hdev->commands[27] & 0x04)
- events[0] |= 0x04; /* LE Connection Update
- * Complete
- */
-
- /* If the controller supports the LE Read Remote Used Features
- * command, enable the corresponding event.
- */
- if (hdev->commands[27] & 0x20)
- events[0] |= 0x08; /* LE Read Remote Used
- * Features Complete
- */
-
- /* If the controller supports the LE Read Local P-256
- * Public Key command, enable the corresponding event.
- */
- if (hdev->commands[34] & 0x02)
- events[0] |= 0x80; /* LE Read Local P-256
- * Public Key Complete
- */
-
- /* If the controller supports the LE Generate DHKey
- * command, enable the corresponding event.
- */
- if (hdev->commands[34] & 0x04)
- events[1] |= 0x01; /* LE Generate DHKey Complete */
-
- /* If the controller supports the LE Set Default PHY or
- * LE Set PHY commands, enable the corresponding event.
- */
- if (hdev->commands[35] & (0x20 | 0x40))
- events[1] |= 0x08; /* LE PHY Update Complete */
-
- /* If the controller supports LE Set Extended Scan Parameters
- * and LE Set Extended Scan Enable commands, enable the
- * corresponding event.
- */
- if (use_ext_scan(hdev))
- events[1] |= 0x10; /* LE Extended Advertising
- * Report
- */
-
- /* If the controller supports the LE Extended Advertising
- * command, enable the corresponding event.
- */
- if (ext_adv_capable(hdev))
- events[2] |= 0x02; /* LE Advertising Set
- * Terminated
- */
-
- hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
- events);
-
- /* Read LE Advertising Channel TX Power */
- if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
- /* The HCI spec forbids mixing legacy and extended advertising
- * commands, and READ_ADV_TX_POWER is a legacy command. Do not
- * issue it when extended advertising is supported, otherwise
- * the controller will return COMMAND_DISALLOWED for the
- * extended commands.
- */
- hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
- }
-
- if (hdev->commands[38] & 0x80) {
- /* Read LE Min/Max Tx Power */
- hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
- 0, NULL);
- }
-
- if (hdev->commands[26] & 0x40) {
- /* Read LE Accept List Size */
- hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
- 0, NULL);
- }
-
- if (hdev->commands[26] & 0x80) {
- /* Clear LE Accept List */
- hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
- }
-
- if (hdev->commands[34] & 0x40) {
- /* Read LE Resolving List Size */
- hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
- 0, NULL);
- }
-
- if (hdev->commands[34] & 0x20) {
- /* Clear LE Resolving List */
- hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
- }
-
- if (hdev->commands[35] & 0x04) {
- __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
-
- /* Set RPA timeout */
- hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
- &rpa_timeout);
- }
-
- if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
- /* Read LE Maximum Data Length */
- hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
-
- /* Read LE Suggested Default Data Length */
- hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
- }
-
- if (ext_adv_capable(hdev)) {
- /* Read LE Number of Supported Advertising Sets */
- hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
- 0, NULL);
- }
-
- hci_set_le_support(req);
- }
-
- /* Read features beyond page 1 if available */
- for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
- struct hci_cp_read_local_ext_features cp;
-
- cp.page = p;
- hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
- sizeof(cp), &cp);
- }
-
- return 0;
-}
-
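A minimal sketch (assumption, not in this patch): the hdev->commands[] checks above index the Read Local Supported Commands bitmap by octet and bit, so a test such as hdev->commands[26] & 0x08 reads as octet 26, bit 3 (LE Set Scan Enable). A hypothetical helper for the same test:

	static inline bool hci_cmd_supported(struct hci_dev *hdev,
					     unsigned int octet, unsigned int bit)
	{
		/* e.g. octet 26, bit 3 corresponds to commands[26] & 0x08 */
		return hdev->commands[octet] & BIT(bit);
	}
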
-static int hci_init4_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
-
- /* Some Broadcom based Bluetooth controllers do not support the
- * Delete Stored Link Key command. They are clearly indicating its
- * absence in the bit mask of supported commands.
- *
- * Check the supported commands and send the command only if it
- * is marked as supported. If it is not supported, assume that the
- * controller does not have actual support for stored link keys,
- * which makes this command redundant anyway.
- *
- * Some controllers indicate that they support deleting stored
- * link keys, but they don't. The quirk lets a driver simply
- * disable this command.
- */
- if (hdev->commands[6] & 0x80 &&
- !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
- struct hci_cp_delete_stored_link_key cp;
-
- bacpy(&cp.bdaddr, BDADDR_ANY);
- cp.delete_all = 0x01;
- hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
- sizeof(cp), &cp);
- }
-
- /* Set event mask page 2 if the HCI command for it is supported */
- if (hdev->commands[22] & 0x04)
- hci_set_event_mask_page_2(req);
-
- /* Read local pairing options if the HCI command is supported */
- if (hdev->commands[41] & 0x08)
- hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
-
- /* Get MWS transport configuration if the HCI command is supported */
- if (hdev->commands[30] & 0x08)
- hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
-
- /* Check for Synchronization Train support */
- if (lmp_sync_train_capable(hdev))
- hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
-
- /* Enable Secure Connections if supported and configured */
- if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
- bredr_sc_enabled(hdev)) {
- u8 support = 0x01;
-
- hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
- sizeof(support), &support);
- }
-
- /* If supported, set erroneous data reporting to match the
- * wideband speech setting value.
- */
- if (hdev->commands[18] & 0x08 &&
- !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
- bool enabled = hci_dev_test_flag(hdev,
- HCI_WIDEBAND_SPEECH_ENABLED);
-
- if (enabled !=
- (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
- struct hci_cp_write_def_err_data_reporting cp;
-
- cp.err_data_reporting = enabled ?
- ERR_DATA_REPORTING_ENABLED :
- ERR_DATA_REPORTING_DISABLED;
-
- hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
- sizeof(cp), &cp);
- }
- }
-
- /* Set Suggested Default Data Length to maximum if supported */
- if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
- struct hci_cp_le_write_def_data_len cp;
-
- cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
- cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
- hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
- }
-
- /* Set Default PHY parameters if command is supported */
- if (hdev->commands[35] & 0x20) {
- struct hci_cp_le_set_default_phy cp;
-
- cp.all_phys = 0x00;
- cp.tx_phys = hdev->le_tx_def_phys;
- cp.rx_phys = hdev->le_rx_def_phys;
-
- hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
- }
-
- return 0;
-}
-
-static int __hci_init(struct hci_dev *hdev)
-{
- int err;
-
- err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
- if (err < 0)
- return err;
-
- if (hci_dev_test_flag(hdev, HCI_SETUP))
- hci_debugfs_create_basic(hdev);
-
- err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
- if (err < 0)
- return err;
-
- /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
- * dual-mode BR/EDR/LE controllers. AMP controllers only need
- * the first two stages of init.
- */
- if (hdev->dev_type != HCI_PRIMARY)
- return 0;
-
- err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
- if (err < 0)
- return err;
-
- err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
- if (err < 0)
- return err;
-
- /* Read local codec list if the HCI command is supported */
- if (hdev->commands[45] & 0x04)
- hci_read_supported_codecs_v2(hdev);
- else if (hdev->commands[29] & 0x20)
- hci_read_supported_codecs(hdev);
-
- /* This function is only called when the controller is actually in
- * configured state. When the controller is marked as unconfigured,
- * this initialization procedure is not run.
- *
- * It means that it is possible that a controller runs through its
- * setup phase and then discovers missing settings. If that is the
- * case, then this function will not be called. It then will only
- * be called during the config phase.
- *
- * So only when in setup phase or config phase, create the debugfs
- * entries and register the SMP channels.
- */
- if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
- !hci_dev_test_flag(hdev, HCI_CONFIG))
- return 0;
-
- hci_debugfs_create_common(hdev);
-
- if (lmp_bredr_capable(hdev))
- hci_debugfs_create_bredr(hdev);
-
- if (lmp_le_capable(hdev))
- hci_debugfs_create_le(hdev);
-
- return 0;
-}
-
-static int hci_init0_req(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
-
- BT_DBG("%s %ld", hdev->name, opt);
-
- /* Reset */
- if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
- hci_reset_req(req, 0);
-
- /* Read Local Version */
- hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
-
- /* Read BD Address */
- if (hdev->set_bdaddr)
- hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
-
- return 0;
-}
-
-static int __hci_unconf_init(struct hci_dev *hdev)
-{
- int err;
-
- if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
- return 0;
-
- err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
- if (err < 0)
- return err;
-
- if (hci_dev_test_flag(hdev, HCI_SETUP))
- hci_debugfs_create_basic(hdev);
-
- return 0;
-}
-
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
__u8 scan = opt;
@@ -975,7 +157,7 @@ void hci_discovery_set_state(struct hci_dev *hdev, int state)
switch (state) {
case DISCOVERY_STOPPED:
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
if (old_state != DISCOVERY_STARTING)
mgmt_discovering(hdev, 0);
@@ -1289,32 +471,6 @@ done:
return err;
}
-/**
- * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
- * (BD_ADDR) for an HCI device from
- * a firmware node property.
- * @hdev: The HCI device
- *
- * Search the firmware node for 'local-bd-address'.
- *
- * All-zero BD addresses are rejected because those could be placeholder
- * properties that exist in the firmware tables but were never updated by
- * the firmware. For example, the DTS could define 'local-bd-address' with
- * an all-zero value.
- */
-static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
-{
- struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
- bdaddr_t ba;
- int ret;
-
- ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
- (u8 *)&ba, sizeof(ba));
- if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
- return;
-
- bacpy(&hdev->public_addr, &ba);
-}
-
static int hci_dev_do_open(struct hci_dev *hdev)
{
int ret = 0;
@@ -1323,205 +479,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
hci_req_sync_lock(hdev);
- if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
- ret = -ENODEV;
- goto done;
- }
-
- if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
- !hci_dev_test_flag(hdev, HCI_CONFIG)) {
- /* Check for rfkill but allow the HCI setup stage to
- * proceed (which in itself doesn't cause any RF activity).
- */
- if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
- ret = -ERFKILL;
- goto done;
- }
-
- /* Check for valid public address or a configured static
- * random address, but let the HCI setup proceed to
- * be able to determine if there is a public address
- * or not.
- *
- * In case of user channel usage, it is not important
- * if a public address or static random address is
- * available.
- *
- * This check is only valid for BR/EDR controllers
- * since AMP controllers do not have an address.
- */
- if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hdev->dev_type == HCI_PRIMARY &&
- !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
- !bacmp(&hdev->static_addr, BDADDR_ANY)) {
- ret = -EADDRNOTAVAIL;
- goto done;
- }
- }
-
- if (test_bit(HCI_UP, &hdev->flags)) {
- ret = -EALREADY;
- goto done;
- }
-
- if (hdev->open(hdev)) {
- ret = -EIO;
- goto done;
- }
-
- set_bit(HCI_RUNNING, &hdev->flags);
- hci_sock_dev_event(hdev, HCI_DEV_OPEN);
-
- atomic_set(&hdev->cmd_cnt, 1);
- set_bit(HCI_INIT, &hdev->flags);
-
- if (hci_dev_test_flag(hdev, HCI_SETUP) ||
- test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
- bool invalid_bdaddr;
-
- hci_sock_dev_event(hdev, HCI_DEV_SETUP);
-
- if (hdev->setup)
- ret = hdev->setup(hdev);
-
- /* The transport driver can set the quirk to mark the
- * BD_ADDR invalid before creating the HCI device or in
- * its setup callback.
- */
- invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
- &hdev->quirks);
-
- if (ret)
- goto setup_failed;
-
- if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
- if (!bacmp(&hdev->public_addr, BDADDR_ANY))
- hci_dev_get_bd_addr_from_property(hdev);
-
- if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
- hdev->set_bdaddr) {
- ret = hdev->set_bdaddr(hdev,
- &hdev->public_addr);
-
- /* If setting of the BD_ADDR from the device
- * property succeeds, then treat the address
- * as valid even if the invalid BD_ADDR
- * quirk indicates otherwise.
- */
- if (!ret)
- invalid_bdaddr = false;
- }
- }
-
-setup_failed:
- /* The transport driver can set these quirks before
- * creating the HCI device or in its setup callback.
- *
- * For the invalid BD_ADDR quirk it is possible that
- * it becomes a valid address if the bootloader does
- * provide it (see above).
- *
- * In case any of them is set, the controller has to
- * start up as unconfigured.
- */
- if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
- invalid_bdaddr)
- hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
-
- /* For an unconfigured controller it is required to
- * read at least the version information provided by
- * the Read Local Version Information command.
- *
- * If the set_bdaddr driver callback is provided, then
- * also the original Bluetooth public device address
- * will be read using the Read BD Address command.
- */
- if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
- ret = __hci_unconf_init(hdev);
- }
-
- if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
- /* If public address change is configured, ensure that
- * the address gets programmed. If the driver does not
- * support changing the public address, fail the power
- * on procedure.
- */
- if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
- hdev->set_bdaddr)
- ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
- else
- ret = -EADDRNOTAVAIL;
- }
-
- if (!ret) {
- if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
- ret = __hci_init(hdev);
- if (!ret && hdev->post_init)
- ret = hdev->post_init(hdev);
- }
- }
-
- /* If the HCI Reset command clears all diagnostic settings, they
- * need to be reprogrammed after the init procedure has completed.
- */
- if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
- ret = hdev->set_diag(hdev, true);
-
- msft_do_open(hdev);
- aosp_do_open(hdev);
-
- clear_bit(HCI_INIT, &hdev->flags);
-
- if (!ret) {
- hci_dev_hold(hdev);
- hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
- hci_adv_instances_set_rpa_expired(hdev, true);
- set_bit(HCI_UP, &hdev->flags);
- hci_sock_dev_event(hdev, HCI_DEV_UP);
- hci_leds_update_powered(hdev, true);
- if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
- !hci_dev_test_flag(hdev, HCI_CONFIG) &&
- !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hci_dev_test_flag(hdev, HCI_MGMT) &&
- hdev->dev_type == HCI_PRIMARY) {
- ret = __hci_req_hci_power_on(hdev);
- mgmt_power_on(hdev, ret);
- }
- } else {
- /* Init failed, cleanup */
- flush_work(&hdev->tx_work);
-
- /* Since hci_rx_work() is possible to awake new cmd_work
- * it should be flushed first to avoid unexpected call of
- * hci_cmd_work()
- */
- flush_work(&hdev->rx_work);
- flush_work(&hdev->cmd_work);
-
- skb_queue_purge(&hdev->cmd_q);
- skb_queue_purge(&hdev->rx_q);
-
- if (hdev->flush)
- hdev->flush(hdev);
-
- if (hdev->sent_cmd) {
- kfree_skb(hdev->sent_cmd);
- hdev->sent_cmd = NULL;
- }
-
- clear_bit(HCI_RUNNING, &hdev->flags);
- hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
-
- hdev->close(hdev);
- hdev->flags &= BIT(HCI_RAW);
- }
+ ret = hci_dev_open_sync(hdev);
-done:
hci_req_sync_unlock(hdev);
return ret;
}
@@ -1583,155 +542,18 @@ done:
return err;
}
-/* This function requires the caller holds hdev->lock */
-static void hci_pend_le_actions_clear(struct hci_dev *hdev)
-{
- struct hci_conn_params *p;
-
- list_for_each_entry(p, &hdev->le_conn_params, list) {
- if (p->conn) {
- hci_conn_drop(p->conn);
- hci_conn_put(p->conn);
- p->conn = NULL;
- }
- list_del_init(&p->action);
- }
-
- BT_DBG("All LE pending actions cleared");
-}
-
int hci_dev_do_close(struct hci_dev *hdev)
{
- bool auto_off;
- int err = 0;
+ int err;
BT_DBG("%s %p", hdev->name, hdev);
- cancel_delayed_work(&hdev->power_off);
- cancel_delayed_work(&hdev->ncmd_timer);
-
- hci_request_cancel_all(hdev);
hci_req_sync_lock(hdev);
- if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- test_bit(HCI_UP, &hdev->flags)) {
- /* Execute vendor specific shutdown routine */
- if (hdev->shutdown)
- err = hdev->shutdown(hdev);
- }
-
- if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
- cancel_delayed_work_sync(&hdev->cmd_timer);
- hci_req_sync_unlock(hdev);
- return err;
- }
-
- hci_leds_update_powered(hdev, false);
-
- /* Flush RX and TX works */
- flush_work(&hdev->tx_work);
- flush_work(&hdev->rx_work);
-
- if (hdev->discov_timeout > 0) {
- hdev->discov_timeout = 0;
- hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
- hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
- }
-
- if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
- cancel_delayed_work(&hdev->service_cache);
-
- if (hci_dev_test_flag(hdev, HCI_MGMT)) {
- struct adv_info *adv_instance;
-
- cancel_delayed_work_sync(&hdev->rpa_expired);
-
- list_for_each_entry(adv_instance, &hdev->adv_instances, list)
- cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
- }
-
- /* Avoid potential lockdep warnings from the *_flush() calls by
- * ensuring the workqueue is empty up front.
- */
- drain_workqueue(hdev->workqueue);
-
- hci_dev_lock(hdev);
-
- hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-
- auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
-
- if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
- hci_dev_test_flag(hdev, HCI_MGMT))
- __mgmt_power_off(hdev);
-
- hci_inquiry_cache_flush(hdev);
- hci_pend_le_actions_clear(hdev);
- hci_conn_hash_flush(hdev);
- hci_dev_unlock(hdev);
-
- smp_unregister(hdev);
-
- hci_sock_dev_event(hdev, HCI_DEV_DOWN);
-
- aosp_do_close(hdev);
- msft_do_close(hdev);
-
- if (hdev->flush)
- hdev->flush(hdev);
-
- /* Reset device */
- skb_queue_purge(&hdev->cmd_q);
- atomic_set(&hdev->cmd_cnt, 1);
- if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
- !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
- set_bit(HCI_INIT, &hdev->flags);
- __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
- clear_bit(HCI_INIT, &hdev->flags);
- }
-
- /* flush cmd work */
- flush_work(&hdev->cmd_work);
-
- /* Drop queues */
- skb_queue_purge(&hdev->rx_q);
- skb_queue_purge(&hdev->cmd_q);
- skb_queue_purge(&hdev->raw_q);
-
- /* Drop last sent command */
- if (hdev->sent_cmd) {
- cancel_delayed_work_sync(&hdev->cmd_timer);
- kfree_skb(hdev->sent_cmd);
- hdev->sent_cmd = NULL;
- }
-
- clear_bit(HCI_RUNNING, &hdev->flags);
- hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
-
- if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
- wake_up(&hdev->suspend_wait_q);
-
- /* After this point our queues are empty
- * and no tasks are scheduled. */
- hdev->close(hdev);
-
- /* Clear flags */
- hdev->flags &= BIT(HCI_RAW);
- hci_dev_clear_volatile_flags(hdev);
-
- /* Controller radio is available but is currently powered down */
- hdev->amp_status = AMP_STATUS_POWERED_DOWN;
-
- memset(hdev->eir, 0, sizeof(hdev->eir));
- memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
- bacpy(&hdev->random_addr, BDADDR_ANY);
- hci_codec_list_clear(&hdev->local_codecs);
+ err = hci_dev_close_sync(hdev);
hci_req_sync_unlock(hdev);
- hci_dev_put(hdev);
return err;
}
@@ -1787,7 +609,7 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
atomic_set(&hdev->cmd_cnt, 1);
hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
- ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
+ ret = hci_reset_sync(hdev);
hci_req_sync_unlock(hdev);
return ret;
@@ -1850,7 +672,7 @@ done:
return ret;
}
-static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
+static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
bool conn_changed, discov_changed;
@@ -1951,7 +773,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
* get correctly modified as this was a non-mgmt change.
*/
if (!err)
- hci_update_scan_state(hdev, dr.dev_opt);
+ hci_update_passive_scan_state(hdev, dr.dev_opt);
break;
case HCISETLINKPOL:
@@ -2133,9 +955,7 @@ static void hci_power_on(struct work_struct *work)
hci_dev_test_flag(hdev, HCI_MGMT) &&
hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
cancel_delayed_work(&hdev->power_off);
- hci_req_sync_lock(hdev);
- err = __hci_req_hci_power_on(hdev);
- hci_req_sync_unlock(hdev);
+ err = hci_powered_update_sync(hdev);
mgmt_power_on(hdev, err);
return;
}
@@ -3096,7 +1916,7 @@ bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
switch (hci_get_adv_monitor_offload_ext(hdev)) {
case HCI_ADV_MONITOR_EXT_NONE:
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
/* Message was not forwarded to controller - not an error */
return false;
@@ -3160,7 +1980,7 @@ bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
if (!*err && !pending)
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
hdev->name, handle, *err, pending ? "" : "not ");
@@ -3192,7 +2012,7 @@ bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
}
if (update)
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
hdev->name, *err, pending ? "" : "not ");
@@ -3333,7 +2153,7 @@ int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
bacpy(&entry->bdaddr, bdaddr);
entry->bdaddr_type = type;
- entry->current_flags = flags;
+ bitmap_from_u64(entry->flags, flags);
list_add(&entry->list, list);
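A minimal sketch (assumption, not from this patch): with the entry flags now stored via bitmap_from_u64() instead of the old u32 current_flags field, an individual connection flag is read back with test_bit() rather than a bit mask, for example:

	static inline bool hci_bdaddr_flag_set(unsigned long *flags, unsigned int flag)
	{
		/* e.g. test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, flags) */
		return test_bit(flag, flags);
	}
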
@@ -3486,7 +2306,7 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
hci_conn_params_free(params);
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
@@ -3554,61 +2374,6 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
}
-static void hci_suspend_clear_tasks(struct hci_dev *hdev)
-{
- int i;
-
- for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
- clear_bit(i, hdev->suspend_tasks);
-
- wake_up(&hdev->suspend_wait_q);
-}
-
-static int hci_suspend_wait_event(struct hci_dev *hdev)
-{
-#define WAKE_COND \
- (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
- __SUSPEND_NUM_TASKS)
-
- int i;
- int ret = wait_event_timeout(hdev->suspend_wait_q,
- WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
-
- if (ret == 0) {
- bt_dev_err(hdev, "Timed out waiting for suspend events");
- for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
- if (test_bit(i, hdev->suspend_tasks))
- bt_dev_err(hdev, "Suspend timeout bit: %d", i);
- clear_bit(i, hdev->suspend_tasks);
- }
-
- ret = -ETIMEDOUT;
- } else {
- ret = 0;
- }
-
- return ret;
-}
-
-static void hci_prepare_suspend(struct work_struct *work)
-{
- struct hci_dev *hdev =
- container_of(work, struct hci_dev, suspend_prepare);
-
- hci_dev_lock(hdev);
- hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
- hci_dev_unlock(hdev);
-}
-
-static int hci_change_suspend_state(struct hci_dev *hdev,
- enum suspended_state next)
-{
- hdev->suspend_state_next = next;
- set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
- queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
- return hci_suspend_wait_event(hdev);
-}
-
static void hci_clear_wake_reason(struct hci_dev *hdev)
{
hci_dev_lock(hdev);
@@ -3745,7 +2510,8 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
INIT_WORK(&hdev->tx_work, hci_tx_work);
INIT_WORK(&hdev->power_on, hci_power_on);
INIT_WORK(&hdev->error_reset, hci_error_reset);
- INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
+
+ hci_cmd_sync_init(hdev);
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
@@ -3754,7 +2520,6 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
skb_queue_head_init(&hdev->raw_q);
init_waitqueue_head(&hdev->req_wait_q);
- init_waitqueue_head(&hdev->suspend_wait_q);
INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
@@ -3864,6 +2629,12 @@ int hci_register_dev(struct hci_dev *hdev)
if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
+ /* Mark Remote Wakeup connection flag as supported if driver has wakeup
+ * callback.
+ */
+ if (hdev->wakeup)
+ set_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, hdev->conn_flags);
+
hci_sock_dev_event(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
@@ -3882,6 +2653,7 @@ int hci_register_dev(struct hci_dev *hdev)
return id;
err_wqueue:
+ debugfs_remove_recursive(hdev->debugfs);
destroy_workqueue(hdev->workqueue);
destroy_workqueue(hdev->req_workqueue);
err:
@@ -3904,11 +2676,10 @@ void hci_unregister_dev(struct hci_dev *hdev)
cancel_work_sync(&hdev->power_on);
- if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
- hci_suspend_clear_tasks(hdev);
+ hci_cmd_sync_clear(hdev);
+
+ if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks))
unregister_pm_notifier(&hdev->suspend_notifier);
- cancel_work_sync(&hdev->suspend_prepare);
- }
msft_unregister(hdev);
@@ -3975,7 +2746,6 @@ EXPORT_SYMBOL(hci_release_dev);
int hci_suspend_dev(struct hci_dev *hdev)
{
int ret;
- u8 state = BT_RUNNING;
bt_dev_dbg(hdev, "");
@@ -3984,40 +2754,17 @@ int hci_suspend_dev(struct hci_dev *hdev)
hci_dev_test_flag(hdev, HCI_UNREGISTER))
return 0;
- /* If powering down, wait for completion. */
- if (mgmt_powering_down(hdev)) {
- set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
- ret = hci_suspend_wait_event(hdev);
- if (ret)
- goto done;
- }
-
- /* Suspend consists of two actions:
- * - First, disconnect everything and make the controller not
- * connectable (disabling scanning)
- * - Second, program event filter/accept list and enable scan
- */
- ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
- if (ret)
- goto clear;
-
- state = BT_SUSPEND_DISCONNECT;
+ /* If powering down don't attempt to suspend */
+ if (mgmt_powering_down(hdev))
+ return 0;
- /* Only configure accept list if device may wakeup. */
- if (hdev->wakeup && hdev->wakeup(hdev)) {
- ret = hci_change_suspend_state(hdev, BT_SUSPEND_CONFIGURE_WAKE);
- if (!ret)
- state = BT_SUSPEND_CONFIGURE_WAKE;
- }
+ hci_req_sync_lock(hdev);
+ ret = hci_suspend_sync(hdev);
+ hci_req_sync_unlock(hdev);
-clear:
hci_clear_wake_reason(hdev);
- mgmt_suspending(hdev, state);
+ mgmt_suspending(hdev, hdev->suspend_state);
-done:
- /* We always allow suspend even if suspend preparation failed and
- * attempt to recover in resume.
- */
hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
return ret;
}
@@ -4039,10 +2786,12 @@ int hci_resume_dev(struct hci_dev *hdev)
if (mgmt_powering_down(hdev))
return 0;
- ret = hci_change_suspend_state(hdev, BT_RUNNING);
+ hci_req_sync_lock(hdev);
+ ret = hci_resume_sync(hdev);
+ hci_req_sync_unlock(hdev);
mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
- hdev->wake_addr_type);
+ hdev->wake_addr_type);
hci_sock_dev_event(hdev, HCI_DEV_RESUME);
return ret;
@@ -4163,7 +2912,7 @@ int hci_unregister_cb(struct hci_cb *cb)
}
EXPORT_SYMBOL(hci_unregister_cb);
-static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
int err;
@@ -4186,14 +2935,17 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
if (!test_bit(HCI_RUNNING, &hdev->flags)) {
kfree_skb(skb);
- return;
+ return -EINVAL;
}
err = hdev->send(hdev, skb);
if (err < 0) {
bt_dev_err(hdev, "sending frame failed (%d)", err);
kfree_skb(skb);
+ return err;
}
+
+ return 0;
}
/* Send HCI command */
@@ -4270,25 +3022,6 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
-/* Send HCI command and wait for command complete event */
-struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u32 timeout)
-{
- struct sk_buff *skb;
-
- if (!test_bit(HCI_UP, &hdev->flags))
- return ERR_PTR(-ENETDOWN);
-
- bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
-
- hci_req_sync_lock(hdev);
- skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
- hci_req_sync_unlock(hdev);
-
- return skb;
-}
-EXPORT_SYMBOL(hci_cmd_sync);
-
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
@@ -5119,10 +3852,15 @@ static void hci_cmd_work(struct work_struct *work)
hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
if (hdev->sent_cmd) {
+ int res;
if (hci_req_status_pend(hdev))
hci_dev_set_flag(hdev, HCI_CMD_PENDING);
atomic_dec(&hdev->cmd_cnt);
- hci_send_frame(hdev, skb);
+
+ res = hci_send_frame(hdev, skb);
+ if (res < 0)
+ __hci_cmd_sync_cancel(hdev, -res);
+
if (test_bit(HCI_RESET, &hdev->flags))
cancel_delayed_work(&hdev->cmd_timer);
else
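A minimal sketch (hypothetical caller, not part of this patch): with hci_send_frame() now returning an errno instead of void, failures can be propagated rather than silently dropped; the hunk above flips the negative errno to a positive value before handing it to __hci_cmd_sync_cancel(). A caller outside that path could simply log and return it:

	static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
	{
		int err = hci_send_frame(hdev, skb);	/* frees skb on failure */

		if (err)
			bt_dev_err(hdev, "frame not sent (%d)", err);

		return err;
	}
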
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 7d0db1ca1248..fc30f4c03d29 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -45,12 +45,48 @@
/* Handle HCI Event packets */
-static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
- u8 *new_status)
+static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
+ u8 ev, size_t len)
{
- __u8 status = *((__u8 *) skb->data);
+ void *data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ data = skb_pull_data(skb, len);
+ if (!data)
+ bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
+
+ return data;
+}
+
+static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
+ u16 op, size_t len)
+{
+ void *data;
+
+ data = skb_pull_data(skb, len);
+ if (!data)
+ bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
+
+ return data;
+}
+
+static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
+ u8 ev, size_t len)
+{
+ void *data;
+
+ data = skb_pull_data(skb, len);
+ if (!data)
+ bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
+
+ return data;
+}
+
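A minimal sketch (assumption, not from this patch): the pull helpers above guard against truncated packets by validating the fixed-size header before it is dereferenced, logging a "Malformed" error and returning NULL on short data. A hypothetical caller for a status-only Command Complete payload could look like:

	static void *example_pull_status(struct hci_dev *hdev, struct sk_buff *skb,
					 u16 opcode)
	{
		/* NULL (and a log line) if skb is shorter than the expected reply */
		return hci_cc_skb_pull(hdev, skb, opcode, sizeof(struct hci_ev_status));
	}
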
+static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+{
+ struct hci_ev_status *rp = data;
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* It is possible that we receive Inquiry Complete event right
* before we receive Inquiry Cancel Command Complete event, in
@@ -59,15 +95,13 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
* we actually achieve what Inquiry Cancel wants to achieve,
* which is to end the last Inquiry session.
*/
- if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
+ if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
- status = 0x00;
+ rp->status = 0x00;
}
- *new_status = status;
-
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
clear_bit(HCI_INQUIRY, &hdev->flags);
smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
@@ -83,49 +117,62 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
hci_dev_unlock(hdev);
hci_conn_check_pending(hdev);
+
+ return rp->status;
}
-static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
+
+ return rp->status;
}
-static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
hci_conn_check_pending(hdev);
+
+ return rp->status;
}
-static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- BT_DBG("%s", hdev->name);
+ struct hci_ev_status *rp = data;
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+ return rp->status;
}
-static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_role_discovery *rp = (void *) skb->data;
+ struct hci_rp_role_discovery *rp = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -134,17 +181,20 @@ static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
conn->role = rp->role;
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_link_policy *rp = (void *) skb->data;
+ struct hci_rp_read_link_policy *rp = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -153,22 +203,25 @@ static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
conn->link_policy = __le16_to_cpu(rp->policy);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_write_link_policy *rp = (void *) skb->data;
+ struct hci_rp_write_link_policy *rp = data;
struct hci_conn *conn;
void *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -177,49 +230,55 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
conn->link_policy = get_unaligned_le16(sent + 2);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
+ struct hci_rp_read_def_link_policy *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->link_policy = __le16_to_cpu(rp->policy);
+
+ return rp->status;
}
-static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
void *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
if (!sent)
- return;
+ return rp->status;
hdev->link_policy = get_unaligned_le16(sent);
+
+ return rp->status;
}
-static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
clear_bit(HCI_RESET, &hdev->flags);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
/* Reset all non-persistent flags */
hci_dev_clear_volatile_flags(hdev);
@@ -241,91 +300,104 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
hci_bdaddr_list_clear(&hdev->le_accept_list);
hci_bdaddr_list_clear(&hdev->le_resolv_list);
+
+ return rp->status;
}
-static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
+ struct hci_rp_read_stored_link_key *rp = data;
struct hci_cp_read_stored_link_key *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
if (!sent)
- return;
+ return rp->status;
if (!rp->status && sent->read_all == 0x01) {
- hdev->stored_max_keys = rp->max_keys;
- hdev->stored_num_keys = rp->num_keys;
+ hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
+ hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
}
+
+ return rp->status;
}
-static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
+ struct hci_rp_delete_stored_link_key *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
if (rp->num_keys <= hdev->stored_num_keys)
- hdev->stored_num_keys -= rp->num_keys;
+ hdev->stored_num_keys -= le16_to_cpu(rp->num_keys);
else
hdev->stored_num_keys = 0;
+
+ return rp->status;
}
-static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
void *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
if (hci_dev_test_flag(hdev, HCI_MGMT))
- mgmt_set_local_name_complete(hdev, sent, status);
- else if (!status)
+ mgmt_set_local_name_complete(hdev, sent, rp->status);
+ else if (!rp->status)
memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_local_name *rp = (void *) skb->data;
+ struct hci_rp_read_local_name *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
if (hci_dev_test_flag(hdev, HCI_SETUP) ||
hci_dev_test_flag(hdev, HCI_CONFIG))
memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
+
+ return rp->status;
}
-static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
void *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
- if (!status) {
+ if (!rp->status) {
__u8 param = *((__u8 *) sent);
if (param == AUTH_ENABLED)
@@ -335,25 +407,28 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
}
if (hci_dev_test_flag(hdev, HCI_MGMT))
- mgmt_auth_enable_complete(hdev, status);
+ mgmt_auth_enable_complete(hdev, rp->status);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
__u8 param;
void *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
if (!sent)
- return;
+ return rp->status;
param = *((__u8 *) sent);
@@ -361,25 +436,28 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
set_bit(HCI_ENCRYPT, &hdev->flags);
else
clear_bit(HCI_ENCRYPT, &hdev->flags);
+
+ return rp->status;
}
-static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
__u8 param;
void *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
if (!sent)
- return;
+ return rp->status;
param = *((__u8 *) sent);
hci_dev_lock(hdev);
- if (status) {
+ if (rp->status) {
hdev->discov_timeout = 0;
goto done;
}
@@ -396,22 +474,25 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
done:
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *)skb->data);
+ struct hci_ev_status *rp = data;
struct hci_cp_set_event_filter *cp;
void *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
if (!sent)
- return;
+ return rp->status;
cp = (struct hci_cp_set_event_filter *)sent;
@@ -419,135 +500,149 @@ static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
else
hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
+
+ return rp->status;
}
-static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
+ struct hci_rp_read_class_of_dev *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
memcpy(hdev->dev_class, rp->dev_class, 3);
- BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
- hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
+ bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
+ hdev->dev_class[1], hdev->dev_class[0]);
+
+ return rp->status;
}
-static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
void *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
- if (status == 0)
+ if (!rp->status)
memcpy(hdev->dev_class, sent, 3);
if (hci_dev_test_flag(hdev, HCI_MGMT))
- mgmt_set_class_of_dev_complete(hdev, sent, status);
+ mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_voice_setting *rp = (void *) skb->data;
+ struct hci_rp_read_voice_setting *rp = data;
__u16 setting;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
setting = __le16_to_cpu(rp->voice_setting);
if (hdev->voice_setting == setting)
- return;
+ return rp->status;
hdev->voice_setting = setting;
- BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
+ bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
+
+ return rp->status;
}
-static void hci_cc_write_voice_setting(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
__u16 setting;
void *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
if (!sent)
- return;
+ return rp->status;
setting = get_unaligned_le16(sent);
if (hdev->voice_setting == setting)
- return;
+ return rp->status;
hdev->voice_setting = setting;
- BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
+ bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
+
+ return rp->status;
}
-static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
+ struct hci_rp_read_num_supported_iac *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->num_iac = rp->num_iac;
- BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
+ bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
+
+ return rp->status;
}
-static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
struct hci_cp_write_ssp_mode *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
- if (!status) {
+ if (!rp->status) {
if (sent->mode)
hdev->features[1][0] |= LMP_HOST_SSP;
else
hdev->features[1][0] &= ~LMP_HOST_SSP;
}
- if (hci_dev_test_flag(hdev, HCI_MGMT))
- mgmt_ssp_enable_complete(hdev, sent->mode, status);
- else if (!status) {
+ if (!rp->status) {
if (sent->mode)
hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
else
@@ -555,29 +650,32 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
}
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- u8 status = *((u8 *) skb->data);
+ struct hci_ev_status *rp = data;
struct hci_cp_write_sc_support *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
- if (!status) {
+ if (!rp->status) {
if (sent->support)
hdev->features[1][0] |= LMP_HOST_SC;
else
hdev->features[1][0] &= ~LMP_HOST_SC;
}
- if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
+ if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
if (sent->support)
hci_dev_set_flag(hdev, HCI_SC_ENABLED);
else
@@ -585,16 +683,19 @@ static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
}
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_local_version *rp = (void *) skb->data;
+ struct hci_rp_read_local_version *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
if (hci_dev_test_flag(hdev, HCI_SETUP) ||
hci_dev_test_flag(hdev, HCI_CONFIG)) {
@@ -604,33 +705,37 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
}
+
+ return rp->status;
}
-static void hci_cc_read_local_commands(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_local_commands *rp = (void *) skb->data;
+ struct hci_rp_read_local_commands *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
if (hci_dev_test_flag(hdev, HCI_SETUP) ||
hci_dev_test_flag(hdev, HCI_CONFIG))
memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
+
+ return rp->status;
}
-static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
+ struct hci_rp_read_auth_payload_to *rp = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -639,23 +744,25 @@ static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
+ struct hci_rp_write_auth_payload_to *rp = data;
struct hci_conn *conn;
void *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -664,17 +771,19 @@ static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_read_local_features(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_local_features *rp = (void *) skb->data;
+ struct hci_rp_read_local_features *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
memcpy(hdev->features, rp->features, 8);
@@ -714,46 +823,53 @@ static void hci_cc_read_local_features(struct hci_dev *hdev,
if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
+
+ return rp->status;
}
-static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
+ struct hci_rp_read_local_ext_features *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
if (hdev->max_page < rp->max_page)
hdev->max_page = rp->max_page;
if (rp->page < HCI_MAX_PAGES)
memcpy(hdev->features[rp->page], rp->features, 8);
+
+ return rp->status;
}
-static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
+ struct hci_rp_read_flow_control_mode *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->flow_ctl_mode = rp->mode;
+
+ return rp->status;
}
-static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_buffer_size *rp = (void *) skb->data;
+ struct hci_rp_read_buffer_size *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
hdev->sco_mtu = rp->sco_mtu;
@@ -770,115 +886,130 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
+
+ return rp->status;
}
-static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_bd_addr *rp = (void *) skb->data;
+ struct hci_rp_read_bd_addr *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
if (test_bit(HCI_INIT, &hdev->flags))
bacpy(&hdev->bdaddr, &rp->bdaddr);
if (hci_dev_test_flag(hdev, HCI_SETUP))
bacpy(&hdev->setup_addr, &rp->bdaddr);
+
+ return rp->status;
}
-static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
+ struct hci_rp_read_local_pairing_opts *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
if (hci_dev_test_flag(hdev, HCI_SETUP) ||
hci_dev_test_flag(hdev, HCI_CONFIG)) {
hdev->pairing_opts = rp->pairing_opts;
hdev->max_enc_key_size = rp->max_key_size;
}
+
+ return rp->status;
}
-static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
+ struct hci_rp_read_page_scan_activity *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
if (test_bit(HCI_INIT, &hdev->flags)) {
hdev->page_scan_interval = __le16_to_cpu(rp->interval);
hdev->page_scan_window = __le16_to_cpu(rp->window);
}
+
+ return rp->status;
}
-static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- u8 status = *((u8 *) skb->data);
+ struct hci_ev_status *rp = data;
struct hci_cp_write_page_scan_activity *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
if (!sent)
- return;
+ return rp->status;
hdev->page_scan_interval = __le16_to_cpu(sent->interval);
hdev->page_scan_window = __le16_to_cpu(sent->window);
+
+ return rp->status;
}
-static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
+ struct hci_rp_read_page_scan_type *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
if (test_bit(HCI_INIT, &hdev->flags))
hdev->page_scan_type = rp->type;
+
+ return rp->status;
}
-static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- u8 status = *((u8 *) skb->data);
+ struct hci_ev_status *rp = data;
u8 *type;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
if (type)
hdev->page_scan_type = *type;
+
+ return rp->status;
}
-static void hci_cc_read_data_block_size(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_data_block_size *rp = (void *) skb->data;
+ struct hci_rp_read_data_block_size *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
hdev->block_len = __le16_to_cpu(rp->block_len);
@@ -888,21 +1019,21 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
hdev->block_cnt, hdev->block_len);
+
+ return rp->status;
}
-static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_clock *rp = (void *) skb->data;
+ struct hci_rp_read_clock *rp = data;
struct hci_cp_read_clock *cp;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
-
- if (skb->len < sizeof(*rp))
- return;
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -923,17 +1054,18 @@ static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
unlock:
hci_dev_unlock(hdev);
+ return rp->status;
}
-static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
+ struct hci_rp_read_local_amp_info *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->amp_status = rp->amp_status;
hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
@@ -945,59 +1077,68 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
+
+ return rp->status;
}
-static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
+ struct hci_rp_read_inq_rsp_tx_power *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->inq_tx_power = rp->tx_power;
+
+ return rp->status;
}
-static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
+ struct hci_rp_read_def_err_data_reporting *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->err_data_reporting = rp->err_data_reporting;
+
+ return rp->status;
}
-static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *)skb->data);
+ struct hci_ev_status *rp = data;
struct hci_cp_write_def_err_data_reporting *cp;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
if (!cp)
- return;
+ return rp->status;
hdev->err_data_reporting = cp->err_data_reporting;
+
+ return rp->status;
}
-static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_pin_code_reply *rp = (void *) skb->data;
+ struct hci_rp_pin_code_reply *rp = data;
struct hci_cp_pin_code_reply *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
hci_dev_lock(hdev);
@@ -1017,13 +1158,15 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
unlock:
hci_dev_unlock(hdev);
+ return rp->status;
}
-static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
+ struct hci_rp_pin_code_neg_reply *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
hci_dev_lock(hdev);
@@ -1032,17 +1175,19 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
rp->status);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
+ struct hci_rp_le_read_buffer_size *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
hdev->le_pkts = rp->le_max_pkt;
@@ -1050,39 +1195,46 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
hdev->le_cnt = hdev->le_pkts;
BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
+
+ return rp->status;
}
-static void hci_cc_le_read_local_features(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_le_read_local_features *rp = (void *) skb->data;
+ struct hci_rp_le_read_local_features *rp = data;
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
- return;
+ return rp->status;
memcpy(hdev->le_features, rp->features, 8);
+
+ return rp->status;
}
-static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
+ struct hci_rp_le_read_adv_tx_power *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->adv_tx_power = rp->tx_power;
+
+ return rp->status;
}
-static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+ struct hci_rp_user_confirm_reply *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
hci_dev_lock(hdev);
@@ -1091,14 +1243,16 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
rp->status);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+ struct hci_rp_user_confirm_reply *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
hci_dev_lock(hdev);
@@ -1107,13 +1261,16 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
ACL_LINK, 0, rp->status);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+ struct hci_rp_user_confirm_reply *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
hci_dev_lock(hdev);
@@ -1122,14 +1279,16 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
0, rp->status);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
+ struct hci_rp_user_confirm_reply *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
hci_dev_lock(hdev);
@@ -1138,37 +1297,44 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
ACL_LINK, 0, rp->status);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
+ struct hci_rp_read_local_oob_data *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+ return rp->status;
}
-static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
+ struct hci_rp_read_local_oob_ext_data *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+ return rp->status;
}
-static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
bdaddr_t *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -1181,21 +1347,24 @@ static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
}
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
struct hci_cp_le_set_default_phy *cp;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
if (!cp)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -1203,17 +1372,21 @@ static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
hdev->le_rx_def_phys = cp->rx_phys;
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
struct hci_cp_le_set_adv_set_rand_addr *cp;
struct adv_info *adv;
- if (status)
- return;
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+ if (rp->status)
+ return rp->status;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
/* Update only in case the adv instance since handle 0x00 shall be using
@@ -1221,7 +1394,7 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
 * non-extended advertising.
*/
if (!cp || !cp->handle)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -1237,34 +1410,126 @@ static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
}
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
- struct sk_buff *skb)
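+/* The controller confirmed removal of one advertising set: drop the matching
+ * adv_info instance locally and emit the mgmt "advertising removed" event.
+ */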
+static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
+ struct hci_ev_status *rp = data;
+ u8 *instance;
+ int err;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
+
+ instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
+ if (!instance)
+ return rp->status;
+
+ hci_dev_lock(hdev);
+
+ err = hci_remove_adv_instance(hdev, *instance);
+ if (!err)
+ mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
+ *instance);
+
+ hci_dev_unlock(hdev);
+
+ return rp->status;
+}
+
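+/* All advertising sets were cleared on the controller: remove every local
+ * adv_info instance and notify the management interface for each of them.
+ */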
+static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+{
+ struct hci_ev_status *rp = data;
+ struct adv_info *adv, *n;
+ int err;
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+ if (rp->status)
+ return rp->status;
+
+ if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
+ return rp->status;
+
+ hci_dev_lock(hdev);
+
+ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
+ u8 instance = adv->instance;
+
+ err = hci_remove_adv_instance(hdev, instance);
+ if (!err)
+ mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
+ hdev, instance);
+ }
+
+ hci_dev_unlock(hdev);
+
+ return rp->status;
+}
+
+static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+{
+ struct hci_rp_le_read_transmit_power *rp = data;
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+ if (rp->status)
+ return rp->status;
hdev->min_le_tx_power = rp->min_le_tx_power;
hdev->max_le_tx_power = rp->max_le_tx_power;
+
+ return rp->status;
}
-static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
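+/* The requested LE privacy mode was accepted: mirror it into the matching
+ * hci_conn_params entry so host and controller state stay in sync.
+ */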
+static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 *sent, status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
+ struct hci_cp_le_set_privacy_mode *cp;
+ struct hci_conn_params *params;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
+
+ cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
+ if (!cp)
+ return rp->status;
+
+ hci_dev_lock(hdev);
+
+ params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
+ if (params)
+ params->privacy_mode = cp->mode;
+
+ hci_dev_unlock(hdev);
+
+ return rp->status;
+}
+
+static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
+{
+ struct hci_ev_status *rp = data;
+ __u8 *sent;
+
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
+
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -1286,24 +1551,26 @@ static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
}
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_le_set_ext_adv_enable *cp;
struct hci_cp_ext_adv_set *set;
- __u8 status = *((__u8 *) skb->data);
struct adv_info *adv = NULL, *n;
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
if (!cp)
- return;
+ return rp->status;
set = (void *)cp->data;
@@ -1326,8 +1593,10 @@ static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
&conn->le_conn_timeout,
conn->conn_timeout);
} else {
- if (adv) {
- adv->enabled = false;
+ if (cp->num_of_sets) {
+ if (adv)
+ adv->enabled = false;
+
/* If just one instance was disabled check if there are
* any other instance enabled before clearing HCI_LE_ADV
*/
@@ -1348,44 +1617,48 @@ static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
unlock:
hci_dev_unlock(hdev);
+ return rp->status;
}
-static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_le_set_scan_param *cp;
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
if (!cp)
- return;
+ return rp->status;
hci_dev_lock(hdev);
hdev->le_scan_type = cp->type;
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_le_set_ext_scan_params *cp;
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
struct hci_cp_le_scan_phy_params *phy_param;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
if (!cp)
- return;
+ return rp->status;
phy_param = (void *)cp->data;
@@ -1394,6 +1667,8 @@ static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
hdev->le_scan_type = phy_param->type;
hci_dev_unlock(hdev);
+
+ return rp->status;
}
static bool has_pending_adv_report(struct hci_dev *hdev)
@@ -1463,16 +1738,10 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
* interrupted scanning due to a connect request. Mark
- * therefore discovery as stopped. If this was not
- * because of a connect request advertising might have
- * been disabled because of active scanning, so
- * re-enable it again if necessary.
+ * therefore discovery as stopped.
*/
if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
- else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
- hdev->discovery.state == DISCOVERY_FINDING)
- hci_req_reenable_advertising(hdev);
break;
@@ -1485,244 +1754,273 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
hci_dev_unlock(hdev);
}
-static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_le_set_scan_enable *cp;
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
if (!cp)
- return;
+ return rp->status;
le_set_scan_enable_complete(hdev, cp->enable);
+
+ return rp->status;
}
-static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_le_set_ext_scan_enable *cp;
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
if (!cp)
- return;
+ return rp->status;
le_set_scan_enable_complete(hdev, cp->enable);
+
+ return rp->status;
}
-static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
+static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
+ struct hci_rp_le_read_num_supported_adv_sets *rp = data;
- BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
- rp->num_of_sets);
+ bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
+ rp->num_of_sets);
if (rp->status)
- return;
+ return rp->status;
hdev->le_num_of_adv_sets = rp->num_of_sets;
+
+ return rp->status;
}
-static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
+ struct hci_rp_le_read_accept_list_size *rp = data;
- BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
+ bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
if (rp->status)
- return;
+ return rp->status;
hdev->le_accept_list_size = rp->size;
+
+ return rp->status;
}
-static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
hci_bdaddr_list_clear(&hdev->le_accept_list);
+
+ return rp->status;
}
-static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_le_add_to_accept_list *sent;
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
if (!sent)
- return;
+ return rp->status;
hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
sent->bdaddr_type);
+
+ return rp->status;
}
-static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_le_del_from_accept_list *sent;
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
if (!sent)
- return;
+ return rp->status;
hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
sent->bdaddr_type);
+
+ return rp->status;
}
-static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
+ struct hci_rp_le_read_supported_states *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
memcpy(hdev->le_states, rp->le_states, 8);
+
+ return rp->status;
}
-static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
+ struct hci_rp_le_read_def_data_len *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
+
+ return rp->status;
}
-static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_le_write_def_data_len *sent;
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
if (!sent)
- return;
+ return rp->status;
hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
+
+ return rp->status;
}
-static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_le_add_to_resolv_list *sent;
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
if (!sent)
- return;
+ return rp->status;
hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
sent->bdaddr_type, sent->peer_irk,
sent->local_irk);
+
+ return rp->status;
}
-static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_le_del_from_resolv_list *sent;
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
if (!sent)
- return;
+ return rp->status;
hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
sent->bdaddr_type);
+
+ return rp->status;
}
-static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
hci_bdaddr_list_clear(&hdev->le_resolv_list);
+
+ return rp->status;
}
-static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
+ struct hci_rp_le_read_resolv_list_size *rp = data;
- BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
+ bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
if (rp->status)
- return;
+ return rp->status;
hdev->le_resolv_list_size = rp->size;
+
+ return rp->status;
}
-static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 *sent, status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
+ __u8 *sent;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -1732,38 +2030,42 @@ static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
+ struct hci_rp_le_read_max_data_len *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
+
+ return rp->status;
}
-static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
- struct sk_buff *skb)
+static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_write_le_host_supported *sent;
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -1782,41 +2084,47 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_le_set_adv_param *cp;
- u8 status = *((u8 *) skb->data);
+ struct hci_ev_status *rp = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
if (!cp)
- return;
+ return rp->status;
hci_dev_lock(hdev);
hdev->adv_addr_type = cp->own_address_type;
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
+ struct hci_rp_le_set_ext_adv_params *rp = data;
struct hci_cp_le_set_ext_adv_params *cp;
struct adv_info *adv_instance;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
if (!cp)
- return;
+ return rp->status;
hci_dev_lock(hdev);
hdev->adv_addr_type = cp->own_addr_type;
@@ -1832,17 +2140,20 @@ static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_update_adv_data(hdev, cp->handle);
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_rp_read_rssi *rp = (void *) skb->data;
+ struct hci_rp_read_rssi *rp = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -1851,22 +2162,25 @@ static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
conn->rssi = rp->rssi;
hci_dev_unlock(hdev);
+
+ return rp->status;
}
-static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
struct hci_cp_read_tx_power *sent;
- struct hci_rp_read_tx_power *rp = (void *) skb->data;
+ struct hci_rp_read_tx_power *rp = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
- return;
+ return rp->status;
sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
if (!sent)
- return;
+ return rp->status;
hci_dev_lock(hdev);
@@ -1885,26 +2199,30 @@ static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
unlock:
hci_dev_unlock(hdev);
+ return rp->status;
}
-static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
+static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- u8 status = *((u8 *) skb->data);
+ struct hci_ev_status *rp = data;
u8 *mode;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
- if (status)
- return;
+ if (rp->status)
+ return rp->status;
mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
if (mode)
hdev->ssp_debug_mode = *mode;
+
+ return rp->status;
}
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
if (status) {
hci_conn_check_pending(hdev);
@@ -1919,7 +2237,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
struct hci_cp_create_conn *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
if (!cp)
@@ -1929,7 +2247,7 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
+ bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
if (status) {
if (conn && conn->state == BT_CONNECT) {
@@ -1958,7 +2276,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
struct hci_conn *acl, *sco;
__u16 handle;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
if (!status)
return;
@@ -1969,7 +2287,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
handle = __le16_to_cpu(cp->handle);
- BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
+ bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
hci_dev_lock(hdev);
@@ -1992,7 +2310,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
struct hci_cp_auth_requested *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
if (!status)
return;
@@ -2019,7 +2337,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
struct hci_cp_set_conn_encrypt *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
if (!status)
return;
@@ -2086,6 +2404,12 @@ static bool hci_resolve_next_name(struct hci_dev *hdev)
if (list_empty(&discov->resolve))
return false;
+ /* We should stop if we already spent too much time resolving names. */
+ if (time_after(jiffies, discov->name_resolve_timeout)) {
+ bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
+ return false;
+ }
+
e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
if (!e)
return false;
@@ -2132,13 +2456,10 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
return;
list_del(&e->list);
- if (name) {
- e->name_state = NAME_KNOWN;
- mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
- e->data.rssi, name, name_len);
- } else {
- e->name_state = NAME_NOT_KNOWN;
- }
+
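+ /* mgmt_remote_name() is now called even when resolution failed (name is
+ * NULL) so userspace is still notified that the lookup has finished.
+ */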
+ e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
+ mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
+ name, name_len);
if (hci_resolve_next_name(hdev))
return;
@@ -2152,7 +2473,7 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
struct hci_cp_remote_name_req *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
/* If successful wait for the name req complete event before
* checking for the need to do authentication */
@@ -2195,7 +2516,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
struct hci_cp_read_remote_features *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
if (!status)
return;
@@ -2222,7 +2543,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
struct hci_cp_read_remote_ext_features *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
if (!status)
return;
@@ -2250,7 +2571,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
struct hci_conn *acl, *sco;
__u16 handle;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
if (!status)
return;
@@ -2261,7 +2582,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
handle = __le16_to_cpu(cp->handle);
- BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
+ bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
hci_dev_lock(hdev);
@@ -2319,7 +2640,7 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
struct hci_cp_sniff_mode *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
if (!status)
return;
@@ -2346,7 +2667,7 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
struct hci_cp_exit_sniff_mode *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
if (!status)
return;
@@ -2371,9 +2692,16 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
struct hci_cp_disconnect *cp;
+ struct hci_conn_params *params;
struct hci_conn *conn;
+ bool mgmt_conn;
- if (!status)
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
+
+ /* Wait for HCI_EV_DISCONN_COMPLETE if status is 0x00 and not suspended;
+ * otherwise clean up the connection immediately.
+ */
+ if (!status && !hdev->suspended)
return;
cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
@@ -2383,23 +2711,60 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
- if (conn) {
+ if (!conn)
+ goto unlock;
+
+ if (status) {
mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
conn->dst_type, status);
if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
hdev->cur_adv_instance = conn->adv_instance;
- hci_req_reenable_advertising(hdev);
+ hci_enable_advertising(hdev);
}
- /* If the disconnection failed for any reason, the upper layer
- * does not retry to disconnect in current implementation.
- * Hence, we need to do some basic cleanup here and re-enable
- * advertising if necessary.
- */
- hci_conn_del(conn);
+ goto done;
+ }
+
+ mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
+
+ if (conn->type == ACL_LINK) {
+ if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
+ hci_remove_link_key(hdev, &conn->dst);
}
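+ /* Re-add the connection parameters to the pending LE connection list,
+ * depending on the auto-connect policy, so the device is reconnected
+ * when it is seen again.
+ */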
+ params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+ if (params) {
+ switch (params->auto_connect) {
+ case HCI_AUTO_CONN_LINK_LOSS:
+ if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
+ break;
+ fallthrough;
+
+ case HCI_AUTO_CONN_DIRECT:
+ case HCI_AUTO_CONN_ALWAYS:
+ list_del_init(&params->action);
+ list_add(&params->action, &hdev->pend_le_conns);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
+ cp->reason, mgmt_conn);
+
+ hci_disconn_cfm(conn, cp->reason);
+
+done:
+ /* If the disconnection failed for any reason, the upper layer
+ * does not retry to disconnect in current implementation.
+ * Hence, we need to do some basic cleanup here and re-enable
+ * advertising if necessary.
+ */
+ hci_conn_del(conn);
+unlock:
hci_dev_unlock(hdev);
}
@@ -2466,7 +2831,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
struct hci_cp_le_create_conn *cp;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
/* All connection failure handling is taken care of by the
* hci_le_conn_failed function which is triggered by the HCI
@@ -2491,7 +2856,7 @@ static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
{
struct hci_cp_le_ext_create_conn *cp;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
/* All connection failure handling is taken care of by the
* hci_le_conn_failed function which is triggered by the HCI
@@ -2517,7 +2882,7 @@ static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
struct hci_cp_le_read_remote_features *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
if (!status)
return;
@@ -2544,7 +2909,7 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
struct hci_cp_le_start_enc *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", status);
if (!status)
return;
@@ -2592,13 +2957,14 @@ static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
hci_dev_unlock(hdev);
}
-static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- __u8 status = *((__u8 *) skb->data);
+ struct hci_ev_status *ev = data;
struct discovery_state *discov = &hdev->discovery;
struct inquiry_entry *e;
- BT_DBG("%s status 0x%2.2x", hdev->name, status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_conn_check_pending(hdev);
@@ -2634,6 +3000,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (e && hci_resolve_name(hdev, e) == 0) {
e->name_state = NAME_PENDING;
hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
+ discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
} else {
/* When BR/EDR inquiry is active and no LE scanning is in
* progress, then change discovery state to indicate completion.
@@ -2651,15 +3018,20 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
+ struct sk_buff *skb)
{
+ struct hci_ev_inquiry_result *ev = edata;
struct inquiry_data data;
- struct inquiry_info *info = (void *) (skb->data + 1);
- int num_rsp = *((__u8 *) skb->data);
+ int i;
+
+ if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
+ flex_array_size(ev, info, ev->num)))
+ return;
- BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
+ bt_dev_dbg(hdev, "num %d", ev->num);
- if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
+ if (!ev->num)
return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
@@ -2667,7 +3039,8 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_lock(hdev);
- for (; num_rsp; num_rsp--, info++) {
+ for (i = 0; i < ev->num; i++) {
+ struct inquiry_info *info = &ev->info[i];
u32 flags;
bacpy(&data.bdaddr, &info->bdaddr);
@@ -2689,12 +3062,13 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
-static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_conn_complete *ev = (void *) skb->data;
+ struct hci_ev_conn_complete *ev = data;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -2813,16 +3187,16 @@ static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
-static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_conn_request *ev = (void *) skb->data;
+ struct hci_ev_conn_request *ev = data;
int mask = hdev->link_mode;
struct inquiry_entry *ie;
struct hci_conn *conn;
__u8 flags = 0;
- BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
- ev->link_type);
+ bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
&flags);
@@ -2924,15 +3298,16 @@ static u8 hci_to_mgmt_reason(u8 err)
}
}
-static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_disconn_complete *ev = (void *) skb->data;
+ struct hci_ev_disconn_complete *ev = data;
u8 reason;
struct hci_conn_params *params;
struct hci_conn *conn;
bool mgmt_connected;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -2977,7 +3352,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_AUTO_CONN_ALWAYS:
list_del_init(&params->action);
list_add(&params->action, &hdev->pend_le_conns);
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
break;
default:
@@ -2987,14 +3362,6 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_disconn_cfm(conn, ev->reason);
- /* The suspend notifier is waiting for all devices to disconnect so
- * clear the bit from pending tasks and inform the wait queue.
- */
- if (list_empty(&hdev->conn_hash.list) &&
- test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
- wake_up(&hdev->suspend_wait_q);
- }
-
/* Re-enable advertising if necessary, since it might
* have been disabled by the connection. From the
* HCI_LE_Set_Advertise_Enable command description in
@@ -3007,7 +3374,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
*/
if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
hdev->cur_adv_instance = conn->adv_instance;
- hci_req_reenable_advertising(hdev);
+ hci_enable_advertising(hdev);
}
hci_conn_del(conn);
@@ -3016,12 +3383,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_auth_complete *ev = (void *) skb->data;
+ struct hci_ev_auth_complete *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -3086,12 +3454,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_remote_name *ev = (void *) skb->data;
+ struct hci_ev_remote_name *ev = data;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_conn_check_pending(hdev);
@@ -3169,12 +3538,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_encrypt_change *ev = (void *) skb->data;
+ struct hci_ev_encrypt_change *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -3283,13 +3653,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
+static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
+ struct hci_ev_change_link_key_complete *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -3306,13 +3676,13 @@ static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
-static void hci_remote_features_evt(struct hci_dev *hdev,
+static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_remote_features *ev = (void *) skb->data;
+ struct hci_ev_remote_features *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -3370,366 +3740,227 @@ static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
}
}
-static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
- u16 *opcode, u8 *status,
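+/* Helper macros for declaring hci_cc_table entries: HCI_CC_VL() takes a
+ * minimum and maximum reply length, HCI_CC() a fixed length, and
+ * HCI_CC_STATUS() a reply that only carries a status byte.
+ */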
+#define HCI_CC_VL(_op, _func, _min, _max) \
+{ \
+ .op = _op, \
+ .func = _func, \
+ .min_len = _min, \
+ .max_len = _max, \
+}
+
+#define HCI_CC(_op, _func, _len) \
+ HCI_CC_VL(_op, _func, _len, _len)
+
+#define HCI_CC_STATUS(_op, _func) \
+ HCI_CC(_op, _func, sizeof(struct hci_ev_status))
+
+static const struct hci_cc {
+ u16 op;
+ u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
+ u16 min_len;
+ u16 max_len;
+} hci_cc_table[] = {
+ HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
+ HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
+ HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
+ HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
+ hci_cc_remote_name_req_cancel),
+ HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
+ sizeof(struct hci_rp_role_discovery)),
+ HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
+ sizeof(struct hci_rp_read_link_policy)),
+ HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
+ sizeof(struct hci_rp_write_link_policy)),
+ HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
+ sizeof(struct hci_rp_read_def_link_policy)),
+ HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
+ hci_cc_write_def_link_policy),
+ HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
+ HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
+ sizeof(struct hci_rp_read_stored_link_key)),
+ HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
+ sizeof(struct hci_rp_delete_stored_link_key)),
+ HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
+ HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
+ sizeof(struct hci_rp_read_local_name)),
+ HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
+ HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
+ HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
+ HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
+ HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
+ sizeof(struct hci_rp_read_class_of_dev)),
+ HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
+ HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
+ sizeof(struct hci_rp_read_voice_setting)),
+ HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
+ HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
+ sizeof(struct hci_rp_read_num_supported_iac)),
+ HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
+ HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
+ HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
+ sizeof(struct hci_rp_read_auth_payload_to)),
+ HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
+ sizeof(struct hci_rp_write_auth_payload_to)),
+ HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
+ sizeof(struct hci_rp_read_local_version)),
+ HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
+ sizeof(struct hci_rp_read_local_commands)),
+ HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
+ sizeof(struct hci_rp_read_local_features)),
+ HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
+ sizeof(struct hci_rp_read_local_ext_features)),
+ HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
+ sizeof(struct hci_rp_read_buffer_size)),
+ HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
+ sizeof(struct hci_rp_read_bd_addr)),
+ HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
+ sizeof(struct hci_rp_read_local_pairing_opts)),
+ HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
+ sizeof(struct hci_rp_read_page_scan_activity)),
+ HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
+ hci_cc_write_page_scan_activity),
+ HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
+ sizeof(struct hci_rp_read_page_scan_type)),
+ HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
+ HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
+ sizeof(struct hci_rp_read_data_block_size)),
+ HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
+ sizeof(struct hci_rp_read_flow_control_mode)),
+ HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
+ sizeof(struct hci_rp_read_local_amp_info)),
+ HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
+ sizeof(struct hci_rp_read_clock)),
+ HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
+ sizeof(struct hci_rp_read_inq_rsp_tx_power)),
+ HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
+ hci_cc_read_def_err_data_reporting,
+ sizeof(struct hci_rp_read_def_err_data_reporting)),
+ HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
+ hci_cc_write_def_err_data_reporting),
+ HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
+ sizeof(struct hci_rp_pin_code_reply)),
+ HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
+ sizeof(struct hci_rp_pin_code_neg_reply)),
+ HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
+ sizeof(struct hci_rp_read_local_oob_data)),
+ HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
+ sizeof(struct hci_rp_read_local_oob_ext_data)),
+ HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
+ sizeof(struct hci_rp_le_read_buffer_size)),
+ HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
+ sizeof(struct hci_rp_le_read_local_features)),
+ HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
+ sizeof(struct hci_rp_le_read_adv_tx_power)),
+ HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
+ sizeof(struct hci_rp_user_confirm_reply)),
+ HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
+ sizeof(struct hci_rp_user_confirm_reply)),
+ HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
+ sizeof(struct hci_rp_user_confirm_reply)),
+ HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
+ sizeof(struct hci_rp_user_confirm_reply)),
+ HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
+ HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
+ HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
+ HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
+ HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
+ hci_cc_le_read_accept_list_size,
+ sizeof(struct hci_rp_le_read_accept_list_size)),
+ HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
+ HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
+ hci_cc_le_add_to_accept_list),
+ HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
+ hci_cc_le_del_from_accept_list),
+ HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
+ sizeof(struct hci_rp_le_read_supported_states)),
+ HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
+ sizeof(struct hci_rp_le_read_def_data_len)),
+ HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
+ hci_cc_le_write_def_data_len),
+ HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
+ hci_cc_le_add_to_resolv_list),
+ HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
+ hci_cc_le_del_from_resolv_list),
+ HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
+ hci_cc_le_clear_resolv_list),
+ HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
+ sizeof(struct hci_rp_le_read_resolv_list_size)),
+ HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
+ hci_cc_le_set_addr_resolution_enable),
+ HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
+ sizeof(struct hci_rp_le_read_max_data_len)),
+ HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
+ hci_cc_write_le_host_supported),
+ HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
+ HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
+ sizeof(struct hci_rp_read_rssi)),
+ HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
+ sizeof(struct hci_rp_read_tx_power)),
+ HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
+ HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
+ hci_cc_le_set_ext_scan_param),
+ HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
+ hci_cc_le_set_ext_scan_enable),
+ HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
+ HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
+ hci_cc_le_read_num_adv_sets,
+ sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
+ HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
+ sizeof(struct hci_rp_le_set_ext_adv_params)),
+ HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
+ hci_cc_le_set_ext_adv_enable),
+ HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
+ hci_cc_le_set_adv_set_random_addr),
+ HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
+ HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
+ HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
+ sizeof(struct hci_rp_le_read_transmit_power)),
+ HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode)
+};
+
+static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
+ struct sk_buff *skb)
+{
+ void *data;
+
+ if (skb->len < cc->min_len) {
+ bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
+ cc->op, skb->len, cc->min_len);
+ return HCI_ERROR_UNSPECIFIED;
+ }
+
+	/* If the length exceeds max_len, only warn: it may still be possible
+	 * to partially parse the cc, so leave it to the callback to decide
+	 * whether that is acceptable.
+	 */
+ if (skb->len > cc->max_len)
+ bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
+ cc->op, skb->len, cc->max_len);
+
+ data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
+ if (!data)
+ return HCI_ERROR_UNSPECIFIED;
+
+ return cc->func(hdev, data, skb);
+}
+
+static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb, u16 *opcode, u8 *status,
hci_req_complete_t *req_complete,
hci_req_complete_skb_t *req_complete_skb)
{
- struct hci_ev_cmd_complete *ev = (void *) skb->data;
+ struct hci_ev_cmd_complete *ev = data;
+ int i;
*opcode = __le16_to_cpu(ev->opcode);
- *status = skb->data[sizeof(*ev)];
-
- skb_pull(skb, sizeof(*ev));
-
- switch (*opcode) {
- case HCI_OP_INQUIRY_CANCEL:
- hci_cc_inquiry_cancel(hdev, skb, status);
- break;
-
- case HCI_OP_PERIODIC_INQ:
- hci_cc_periodic_inq(hdev, skb);
- break;
-
- case HCI_OP_EXIT_PERIODIC_INQ:
- hci_cc_exit_periodic_inq(hdev, skb);
- break;
-
- case HCI_OP_REMOTE_NAME_REQ_CANCEL:
- hci_cc_remote_name_req_cancel(hdev, skb);
- break;
-
- case HCI_OP_ROLE_DISCOVERY:
- hci_cc_role_discovery(hdev, skb);
- break;
-
- case HCI_OP_READ_LINK_POLICY:
- hci_cc_read_link_policy(hdev, skb);
- break;
-
- case HCI_OP_WRITE_LINK_POLICY:
- hci_cc_write_link_policy(hdev, skb);
- break;
-
- case HCI_OP_READ_DEF_LINK_POLICY:
- hci_cc_read_def_link_policy(hdev, skb);
- break;
-
- case HCI_OP_WRITE_DEF_LINK_POLICY:
- hci_cc_write_def_link_policy(hdev, skb);
- break;
-
- case HCI_OP_RESET:
- hci_cc_reset(hdev, skb);
- break;
-
- case HCI_OP_READ_STORED_LINK_KEY:
- hci_cc_read_stored_link_key(hdev, skb);
- break;
-
- case HCI_OP_DELETE_STORED_LINK_KEY:
- hci_cc_delete_stored_link_key(hdev, skb);
- break;
-
- case HCI_OP_WRITE_LOCAL_NAME:
- hci_cc_write_local_name(hdev, skb);
- break;
-
- case HCI_OP_READ_LOCAL_NAME:
- hci_cc_read_local_name(hdev, skb);
- break;
-
- case HCI_OP_WRITE_AUTH_ENABLE:
- hci_cc_write_auth_enable(hdev, skb);
- break;
-
- case HCI_OP_WRITE_ENCRYPT_MODE:
- hci_cc_write_encrypt_mode(hdev, skb);
- break;
-
- case HCI_OP_WRITE_SCAN_ENABLE:
- hci_cc_write_scan_enable(hdev, skb);
- break;
-
- case HCI_OP_SET_EVENT_FLT:
- hci_cc_set_event_filter(hdev, skb);
- break;
-
- case HCI_OP_READ_CLASS_OF_DEV:
- hci_cc_read_class_of_dev(hdev, skb);
- break;
-
- case HCI_OP_WRITE_CLASS_OF_DEV:
- hci_cc_write_class_of_dev(hdev, skb);
- break;
-
- case HCI_OP_READ_VOICE_SETTING:
- hci_cc_read_voice_setting(hdev, skb);
- break;
-
- case HCI_OP_WRITE_VOICE_SETTING:
- hci_cc_write_voice_setting(hdev, skb);
- break;
-
- case HCI_OP_READ_NUM_SUPPORTED_IAC:
- hci_cc_read_num_supported_iac(hdev, skb);
- break;
-
- case HCI_OP_WRITE_SSP_MODE:
- hci_cc_write_ssp_mode(hdev, skb);
- break;
-
- case HCI_OP_WRITE_SC_SUPPORT:
- hci_cc_write_sc_support(hdev, skb);
- break;
-
- case HCI_OP_READ_AUTH_PAYLOAD_TO:
- hci_cc_read_auth_payload_timeout(hdev, skb);
- break;
-
- case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
- hci_cc_write_auth_payload_timeout(hdev, skb);
- break;
-
- case HCI_OP_READ_LOCAL_VERSION:
- hci_cc_read_local_version(hdev, skb);
- break;
-
- case HCI_OP_READ_LOCAL_COMMANDS:
- hci_cc_read_local_commands(hdev, skb);
- break;
-
- case HCI_OP_READ_LOCAL_FEATURES:
- hci_cc_read_local_features(hdev, skb);
- break;
-
- case HCI_OP_READ_LOCAL_EXT_FEATURES:
- hci_cc_read_local_ext_features(hdev, skb);
- break;
-
- case HCI_OP_READ_BUFFER_SIZE:
- hci_cc_read_buffer_size(hdev, skb);
- break;
-
- case HCI_OP_READ_BD_ADDR:
- hci_cc_read_bd_addr(hdev, skb);
- break;
-
- case HCI_OP_READ_LOCAL_PAIRING_OPTS:
- hci_cc_read_local_pairing_opts(hdev, skb);
- break;
-
- case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
- hci_cc_read_page_scan_activity(hdev, skb);
- break;
-
- case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
- hci_cc_write_page_scan_activity(hdev, skb);
- break;
-
- case HCI_OP_READ_PAGE_SCAN_TYPE:
- hci_cc_read_page_scan_type(hdev, skb);
- break;
-
- case HCI_OP_WRITE_PAGE_SCAN_TYPE:
- hci_cc_write_page_scan_type(hdev, skb);
- break;
-
- case HCI_OP_READ_DATA_BLOCK_SIZE:
- hci_cc_read_data_block_size(hdev, skb);
- break;
- case HCI_OP_READ_FLOW_CONTROL_MODE:
- hci_cc_read_flow_control_mode(hdev, skb);
- break;
-
- case HCI_OP_READ_LOCAL_AMP_INFO:
- hci_cc_read_local_amp_info(hdev, skb);
- break;
-
- case HCI_OP_READ_CLOCK:
- hci_cc_read_clock(hdev, skb);
- break;
-
- case HCI_OP_READ_INQ_RSP_TX_POWER:
- hci_cc_read_inq_rsp_tx_power(hdev, skb);
- break;
-
- case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
- hci_cc_read_def_err_data_reporting(hdev, skb);
- break;
-
- case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
- hci_cc_write_def_err_data_reporting(hdev, skb);
- break;
-
- case HCI_OP_PIN_CODE_REPLY:
- hci_cc_pin_code_reply(hdev, skb);
- break;
-
- case HCI_OP_PIN_CODE_NEG_REPLY:
- hci_cc_pin_code_neg_reply(hdev, skb);
- break;
-
- case HCI_OP_READ_LOCAL_OOB_DATA:
- hci_cc_read_local_oob_data(hdev, skb);
- break;
+ bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
- case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
- hci_cc_read_local_oob_ext_data(hdev, skb);
- break;
-
- case HCI_OP_LE_READ_BUFFER_SIZE:
- hci_cc_le_read_buffer_size(hdev, skb);
- break;
-
- case HCI_OP_LE_READ_LOCAL_FEATURES:
- hci_cc_le_read_local_features(hdev, skb);
- break;
-
- case HCI_OP_LE_READ_ADV_TX_POWER:
- hci_cc_le_read_adv_tx_power(hdev, skb);
- break;
-
- case HCI_OP_USER_CONFIRM_REPLY:
- hci_cc_user_confirm_reply(hdev, skb);
- break;
-
- case HCI_OP_USER_CONFIRM_NEG_REPLY:
- hci_cc_user_confirm_neg_reply(hdev, skb);
- break;
-
- case HCI_OP_USER_PASSKEY_REPLY:
- hci_cc_user_passkey_reply(hdev, skb);
- break;
-
- case HCI_OP_USER_PASSKEY_NEG_REPLY:
- hci_cc_user_passkey_neg_reply(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_RANDOM_ADDR:
- hci_cc_le_set_random_addr(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_ADV_ENABLE:
- hci_cc_le_set_adv_enable(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_SCAN_PARAM:
- hci_cc_le_set_scan_param(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_SCAN_ENABLE:
- hci_cc_le_set_scan_enable(hdev, skb);
- break;
-
- case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
- hci_cc_le_read_accept_list_size(hdev, skb);
- break;
-
- case HCI_OP_LE_CLEAR_ACCEPT_LIST:
- hci_cc_le_clear_accept_list(hdev, skb);
- break;
-
- case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
- hci_cc_le_add_to_accept_list(hdev, skb);
- break;
-
- case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
- hci_cc_le_del_from_accept_list(hdev, skb);
- break;
-
- case HCI_OP_LE_READ_SUPPORTED_STATES:
- hci_cc_le_read_supported_states(hdev, skb);
- break;
-
- case HCI_OP_LE_READ_DEF_DATA_LEN:
- hci_cc_le_read_def_data_len(hdev, skb);
- break;
-
- case HCI_OP_LE_WRITE_DEF_DATA_LEN:
- hci_cc_le_write_def_data_len(hdev, skb);
- break;
-
- case HCI_OP_LE_ADD_TO_RESOLV_LIST:
- hci_cc_le_add_to_resolv_list(hdev, skb);
- break;
-
- case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
- hci_cc_le_del_from_resolv_list(hdev, skb);
- break;
-
- case HCI_OP_LE_CLEAR_RESOLV_LIST:
- hci_cc_le_clear_resolv_list(hdev, skb);
- break;
-
- case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
- hci_cc_le_read_resolv_list_size(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
- hci_cc_le_set_addr_resolution_enable(hdev, skb);
- break;
-
- case HCI_OP_LE_READ_MAX_DATA_LEN:
- hci_cc_le_read_max_data_len(hdev, skb);
- break;
-
- case HCI_OP_WRITE_LE_HOST_SUPPORTED:
- hci_cc_write_le_host_supported(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_ADV_PARAM:
- hci_cc_set_adv_param(hdev, skb);
- break;
-
- case HCI_OP_READ_RSSI:
- hci_cc_read_rssi(hdev, skb);
- break;
-
- case HCI_OP_READ_TX_POWER:
- hci_cc_read_tx_power(hdev, skb);
- break;
-
- case HCI_OP_WRITE_SSP_DEBUG_MODE:
- hci_cc_write_ssp_debug_mode(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
- hci_cc_le_set_ext_scan_param(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
- hci_cc_le_set_ext_scan_enable(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_DEFAULT_PHY:
- hci_cc_le_set_default_phy(hdev, skb);
- break;
-
- case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
- hci_cc_le_read_num_adv_sets(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_EXT_ADV_PARAMS:
- hci_cc_set_ext_adv_param(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_EXT_ADV_ENABLE:
- hci_cc_le_set_ext_adv_enable(hdev, skb);
- break;
-
- case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
- hci_cc_le_set_adv_set_random_addr(hdev, skb);
- break;
-
- case HCI_OP_LE_READ_TRANSMIT_POWER:
- hci_cc_le_read_transmit_power(hdev, skb);
- break;
-
- default:
- BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
- break;
+ for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
+ if (hci_cc_table[i].op == *opcode) {
+ *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
+ break;
+ }
}
handle_cmd_cnt_and_timer(hdev, ev->ncmd);
@@ -3747,94 +3978,56 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
queue_work(hdev->workqueue, &hdev->cmd_work);
}
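For illustration only (not part of the patch): under the table-driven scheme above, a command-complete handler receives the length-checked reply through its void *data argument and returns a status byte, and it is registered together with the expected reply size so hci_cc_func() can validate skb->len before dispatch. The opcode, handler and reply struct below are hypothetical; a minimal sketch might look like:

static u8 hci_cc_read_foo(struct hci_dev *hdev, void *data,
			  struct sk_buff *skb)
{
	/* data already points at a reply of at least min_len bytes */
	struct hci_rp_read_foo *rp = data;	/* hypothetical reply struct */

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	return rp->status;
}

	/* ...and one fixed-length entry in hci_cc_table[]: */
	HCI_CC(HCI_OP_READ_FOO, hci_cc_read_foo,
	       sizeof(struct hci_rp_read_foo)),

hci_cmd_complete_evt() then walks hci_cc_table[] for a matching opcode instead of the old switch; replies whose length may legitimately vary would use HCI_CC_VL() with separate min and max lengths, and opcodes without an entry simply match nothing and fall through to the common completion handling.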
-static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
- u16 *opcode, u8 *status,
+#define HCI_CS(_op, _func) \
+{ \
+ .op = _op, \
+ .func = _func, \
+}
+
+static const struct hci_cs {
+ u16 op;
+ void (*func)(struct hci_dev *hdev, __u8 status);
+} hci_cs_table[] = {
+ HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
+ HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
+ HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
+ HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
+ HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
+ HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
+ HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
+ HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
+ HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
+ hci_cs_read_remote_ext_features),
+ HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
+ HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
+ hci_cs_enhanced_setup_sync_conn),
+ HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
+ HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
+ HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
+ HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
+ HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
+ HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
+ HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn)
+};
+
+static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb, u16 *opcode, u8 *status,
hci_req_complete_t *req_complete,
hci_req_complete_skb_t *req_complete_skb)
{
- struct hci_ev_cmd_status *ev = (void *) skb->data;
-
- skb_pull(skb, sizeof(*ev));
+ struct hci_ev_cmd_status *ev = data;
+ int i;
*opcode = __le16_to_cpu(ev->opcode);
*status = ev->status;
- switch (*opcode) {
- case HCI_OP_INQUIRY:
- hci_cs_inquiry(hdev, ev->status);
- break;
-
- case HCI_OP_CREATE_CONN:
- hci_cs_create_conn(hdev, ev->status);
- break;
-
- case HCI_OP_DISCONNECT:
- hci_cs_disconnect(hdev, ev->status);
- break;
-
- case HCI_OP_ADD_SCO:
- hci_cs_add_sco(hdev, ev->status);
- break;
-
- case HCI_OP_AUTH_REQUESTED:
- hci_cs_auth_requested(hdev, ev->status);
- break;
-
- case HCI_OP_SET_CONN_ENCRYPT:
- hci_cs_set_conn_encrypt(hdev, ev->status);
- break;
-
- case HCI_OP_REMOTE_NAME_REQ:
- hci_cs_remote_name_req(hdev, ev->status);
- break;
-
- case HCI_OP_READ_REMOTE_FEATURES:
- hci_cs_read_remote_features(hdev, ev->status);
- break;
-
- case HCI_OP_READ_REMOTE_EXT_FEATURES:
- hci_cs_read_remote_ext_features(hdev, ev->status);
- break;
-
- case HCI_OP_SETUP_SYNC_CONN:
- hci_cs_setup_sync_conn(hdev, ev->status);
- break;
-
- case HCI_OP_ENHANCED_SETUP_SYNC_CONN:
- hci_cs_enhanced_setup_sync_conn(hdev, ev->status);
- break;
-
- case HCI_OP_SNIFF_MODE:
- hci_cs_sniff_mode(hdev, ev->status);
- break;
-
- case HCI_OP_EXIT_SNIFF_MODE:
- hci_cs_exit_sniff_mode(hdev, ev->status);
- break;
-
- case HCI_OP_SWITCH_ROLE:
- hci_cs_switch_role(hdev, ev->status);
- break;
-
- case HCI_OP_LE_CREATE_CONN:
- hci_cs_le_create_conn(hdev, ev->status);
- break;
-
- case HCI_OP_LE_READ_REMOTE_FEATURES:
- hci_cs_le_read_remote_features(hdev, ev->status);
- break;
-
- case HCI_OP_LE_START_ENC:
- hci_cs_le_start_enc(hdev, ev->status);
- break;
-
- case HCI_OP_LE_EXT_CREATE_CONN:
- hci_cs_le_ext_create_conn(hdev, ev->status);
- break;
+ bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
- default:
- BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
- break;
+ for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
+ if (hci_cs_table[i].op == *opcode) {
+ hci_cs_table[i].func(hdev, ev->status);
+ break;
+ }
}
handle_cmd_cnt_and_timer(hdev, ev->ncmd);
@@ -3845,36 +4038,39 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
* (since for this kind of commands there will not be a command
* complete event).
*/
- if (ev->status ||
- (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
+ if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
req_complete_skb);
-
- if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
- bt_dev_err(hdev,
- "unexpected event for opcode 0x%4.4x", *opcode);
- return;
+ if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
+ bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
+ *opcode);
+ return;
+ }
}
if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
queue_work(hdev->workqueue, &hdev->cmd_work);
}
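Likewise for illustration (hypothetical opcode and handler): a Command Status event carries only a status byte, so command-status callbacks keep their old void (*)(struct hci_dev *, __u8) shape and converting one is just a matter of adding a table entry:

static void hci_cs_create_foo(struct hci_dev *hdev, __u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	/* illustrative only: clean up any pending work on failure */
	if (status)
		hci_conn_check_pending(hdev);
}

	/* ...registered in hci_cs_table[]: */
	HCI_CS(HCI_OP_CREATE_FOO, hci_cs_create_foo),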
-static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_hardware_error *ev = (void *) skb->data;
+ struct hci_ev_hardware_error *ev = data;
+
+ bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
hdev->hw_error_code = ev->code;
queue_work(hdev->req_workqueue, &hdev->error_reset);
}
-static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_role_change_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_role_change *ev = (void *) skb->data;
+ struct hci_ev_role_change *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -3891,25 +4087,24 @@ static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
-static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
+ struct hci_ev_num_comp_pkts *ev = data;
int i;
- if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
- bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
+ if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
+ flex_array_size(ev, handles, ev->num)))
return;
- }
- if (skb->len < sizeof(*ev) ||
- skb->len < struct_size(ev, handles, ev->num_hndl)) {
- BT_DBG("%s bad parameters", hdev->name);
+ if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
+ bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
return;
}
- BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
+ bt_dev_dbg(hdev, "num %d", ev->num);
- for (i = 0; i < ev->num_hndl; i++) {
+ for (i = 0; i < ev->num; i++) {
struct hci_comp_pkts_info *info = &ev->handles[i];
struct hci_conn *conn;
__u16 handle, count;
@@ -3979,24 +4174,24 @@ static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
return NULL;
}
-static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
+ struct hci_ev_num_comp_blocks *ev = data;
int i;
- if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
- bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
+ if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
+ flex_array_size(ev, handles, ev->num_hndl)))
return;
- }
- if (skb->len < sizeof(*ev) ||
- skb->len < struct_size(ev, handles, ev->num_hndl)) {
- BT_DBG("%s bad parameters", hdev->name);
+ if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
+ bt_dev_err(hdev, "wrong event for mode %d",
+ hdev->flow_ctl_mode);
return;
}
- BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
- ev->num_hndl);
+ bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
+ ev->num_hndl);
for (i = 0; i < ev->num_hndl; i++) {
struct hci_comp_blocks_info *info = &ev->handles[i];
@@ -4030,12 +4225,13 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
queue_work(hdev->workqueue, &hdev->tx_work);
}
-static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_mode_change *ev = (void *) skb->data;
+ struct hci_ev_mode_change *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -4058,12 +4254,13 @@ static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
-static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_pin_code_req *ev = (void *) skb->data;
+ struct hci_ev_pin_code_req *ev = data;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -4128,14 +4325,15 @@ static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
}
}
-static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_link_key_req *ev = (void *) skb->data;
+ struct hci_ev_link_key_req *ev = data;
struct hci_cp_link_key_reply cp;
struct hci_conn *conn;
struct link_key *key;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
if (!hci_dev_test_flag(hdev, HCI_MGMT))
return;
@@ -4144,13 +4342,11 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
key = hci_find_link_key(hdev, &ev->bdaddr);
if (!key) {
- BT_DBG("%s link key not found for %pMR", hdev->name,
- &ev->bdaddr);
+ bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
goto not_found;
}
- BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
- &ev->bdaddr);
+ bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (conn) {
@@ -4159,15 +4355,14 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
- BT_DBG("%s ignoring unauthenticated key", hdev->name);
+ bt_dev_dbg(hdev, "ignoring unauthenticated key");
goto not_found;
}
if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
(conn->pending_sec_level == BT_SECURITY_HIGH ||
conn->pending_sec_level == BT_SECURITY_FIPS)) {
- BT_DBG("%s ignoring key unauthenticated for high security",
- hdev->name);
+ bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
goto not_found;
}
@@ -4188,15 +4383,16 @@ not_found:
hci_dev_unlock(hdev);
}
-static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_link_key_notify *ev = (void *) skb->data;
+ struct hci_ev_link_key_notify *ev = data;
struct hci_conn *conn;
struct link_key *key;
bool persistent;
u8 pin_len = 0;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -4248,12 +4444,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_clock_offset *ev = (void *) skb->data;
+ struct hci_ev_clock_offset *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -4271,12 +4468,13 @@ static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
-static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_pkt_type_change *ev = (void *) skb->data;
+ struct hci_ev_pkt_type_change *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -4287,12 +4485,13 @@ static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
-static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
+ struct hci_ev_pscan_rep_mode *ev = data;
struct inquiry_entry *ie;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -4305,15 +4504,16 @@ static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
-static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
+static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
struct sk_buff *skb)
{
+ struct hci_ev_inquiry_result_rssi *ev = edata;
struct inquiry_data data;
- int num_rsp = *((__u8 *) skb->data);
+ int i;
- BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
+ bt_dev_dbg(hdev, "num_rsp %d", ev->num);
- if (!num_rsp)
+ if (!ev->num)
return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
@@ -4321,16 +4521,22 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
hci_dev_lock(hdev);
- if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
- struct inquiry_info_with_rssi_and_pscan_mode *info;
- info = (void *) (skb->data + 1);
-
- if (skb->len < num_rsp * sizeof(*info) + 1)
- goto unlock;
+ if (skb->len == array_size(ev->num,
+ sizeof(struct inquiry_info_rssi_pscan))) {
+ struct inquiry_info_rssi_pscan *info;
- for (; num_rsp; num_rsp--, info++) {
+ for (i = 0; i < ev->num; i++) {
u32 flags;
+ info = hci_ev_skb_pull(hdev, skb,
+ HCI_EV_INQUIRY_RESULT_WITH_RSSI,
+ sizeof(*info));
+ if (!info) {
+ bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
+ HCI_EV_INQUIRY_RESULT_WITH_RSSI);
+ return;
+ }
+
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
@@ -4346,15 +4552,22 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
info->dev_class, info->rssi,
flags, NULL, 0, NULL, 0);
}
- } else {
- struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
+ } else if (skb->len == array_size(ev->num,
+ sizeof(struct inquiry_info_rssi))) {
+ struct inquiry_info_rssi *info;
- if (skb->len < num_rsp * sizeof(*info) + 1)
- goto unlock;
-
- for (; num_rsp; num_rsp--, info++) {
+ for (i = 0; i < ev->num; i++) {
u32 flags;
+ info = hci_ev_skb_pull(hdev, skb,
+ HCI_EV_INQUIRY_RESULT_WITH_RSSI,
+ sizeof(*info));
+ if (!info) {
+ bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
+ HCI_EV_INQUIRY_RESULT_WITH_RSSI);
+ return;
+ }
+
bacpy(&data.bdaddr, &info->bdaddr);
data.pscan_rep_mode = info->pscan_rep_mode;
data.pscan_period_mode = info->pscan_period_mode;
@@ -4370,19 +4583,21 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
info->dev_class, info->rssi,
flags, NULL, 0, NULL, 0);
}
+ } else {
+ bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
+ HCI_EV_INQUIRY_RESULT_WITH_RSSI);
}
-unlock:
hci_dev_unlock(hdev);
}
-static void hci_remote_ext_features_evt(struct hci_dev *hdev,
+static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_remote_ext_features *ev = (void *) skb->data;
+ struct hci_ev_remote_ext_features *ev = data;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -4440,14 +4655,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
+static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
+ struct hci_ev_sync_conn_complete *ev = data;
struct hci_conn *conn;
- unsigned int notify_evt;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -4517,22 +4731,18 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
}
bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
-
- switch (ev->air_mode) {
- case 0x02:
- notify_evt = HCI_NOTIFY_ENABLE_SCO_CVSD;
- break;
- case 0x03:
- notify_evt = HCI_NOTIFY_ENABLE_SCO_TRANSP;
- break;
- }
-
 /* Notify only in the case of SCO over the HCI transport data path,
 * which is zero; a non-zero value means a non-HCI transport data path.
 */
- if (conn->codec.data_path == 0) {
- if (hdev->notify)
- hdev->notify(hdev, notify_evt);
+ if (conn->codec.data_path == 0 && hdev->notify) {
+ switch (ev->air_mode) {
+ case 0x02:
+ hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
+ break;
+ case 0x03:
+ hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
+ break;
+ }
}
hci_connect_cfm(conn, ev->status);
@@ -4560,17 +4770,21 @@ static inline size_t eir_get_length(u8 *eir, size_t eir_len)
return eir_len;
}
-static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
+static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
struct sk_buff *skb)
{
+ struct hci_ev_ext_inquiry_result *ev = edata;
struct inquiry_data data;
- struct extended_inquiry_info *info = (void *) (skb->data + 1);
- int num_rsp = *((__u8 *) skb->data);
size_t eir_len;
+ int i;
+
+ if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
+ flex_array_size(ev, info, ev->num)))
+ return;
- BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
+ bt_dev_dbg(hdev, "num %d", ev->num);
- if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
+ if (!ev->num)
return;
if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
@@ -4578,7 +4792,8 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
hci_dev_lock(hdev);
- for (; num_rsp; num_rsp--, info++) {
+ for (i = 0; i < ev->num; i++) {
+ struct extended_inquiry_info *info = &ev->info[i];
u32 flags;
bool name_known;
@@ -4610,14 +4825,14 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
-static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
+static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
+ struct hci_ev_key_refresh_complete *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
- __le16_to_cpu(ev->handle));
+ bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
+ __le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
@@ -4720,12 +4935,13 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
return 0x01;
}
-static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_io_capa_request *ev = (void *) skb->data;
+ struct hci_ev_io_capa_request *ev = data;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -4789,12 +5005,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_io_capa_reply *ev = (void *) skb->data;
+ struct hci_ev_io_capa_reply *ev = data;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -4809,14 +5026,14 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_user_confirm_request_evt(struct hci_dev *hdev,
+static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_user_confirm_req *ev = (void *) skb->data;
+ struct hci_ev_user_confirm_req *ev = data;
int loc_mitm, rem_mitm, confirm_hint = 0;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -4837,7 +5054,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
*/
if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
- BT_DBG("Rejecting request: remote device can't provide MITM");
+ bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
goto unlock;
@@ -4856,7 +5073,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
(loc_mitm || rem_mitm)) {
- BT_DBG("Confirming auto-accept as acceptor");
+ bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
confirm_hint = 1;
goto confirm;
}
@@ -4894,24 +5111,24 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_user_passkey_request_evt(struct hci_dev *hdev,
+static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_user_passkey_req *ev = (void *) skb->data;
+ struct hci_ev_user_passkey_req *ev = data;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
if (hci_dev_test_flag(hdev, HCI_MGMT))
mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
}
-static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
+static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
+ struct hci_ev_user_passkey_notify *ev = data;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
@@ -4926,12 +5143,13 @@ static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
conn->passkey_entered);
}
-static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_keypress_notify *ev = (void *) skb->data;
+ struct hci_ev_keypress_notify *ev = data;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (!conn)
@@ -4964,13 +5182,13 @@ static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn->passkey_entered);
}
-static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
+static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
+ struct hci_ev_simple_pair_complete *ev = data;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -4995,14 +5213,14 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_remote_host_features_evt(struct hci_dev *hdev,
+static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_remote_host_features *ev = (void *) skb->data;
+ struct hci_ev_remote_host_features *ev = data;
struct inquiry_entry *ie;
struct hci_conn *conn;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -5017,13 +5235,13 @@ static void hci_remote_host_features_evt(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
-static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
+static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
struct sk_buff *skb)
{
- struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
+ struct hci_ev_remote_oob_data_request *ev = edata;
struct oob_data *data;
- BT_DBG("%s", hdev->name);
+ bt_dev_dbg(hdev, "");
hci_dev_lock(hdev);
@@ -5072,14 +5290,13 @@ unlock:
}
#if IS_ENABLED(CONFIG_BT_HS)
-static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_channel_selected *ev = (void *)skb->data;
+ struct hci_ev_channel_selected *ev = data;
struct hci_conn *hcon;
- BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
-
- skb_pull(skb, sizeof(*ev));
+ bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
if (!hcon)
@@ -5088,14 +5305,14 @@ static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
amp_read_loc_assoc_final_data(hdev, hcon);
}
-static void hci_phy_link_complete_evt(struct hci_dev *hdev,
+static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_phy_link_complete *ev = (void *) skb->data;
+ struct hci_ev_phy_link_complete *ev = data;
struct hci_conn *hcon, *bredr_hcon;
- BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
- ev->status);
+ bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
+ ev->status);
hci_dev_lock(hdev);
@@ -5129,16 +5346,16 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_logical_link_complete *ev = (void *) skb->data;
+ struct hci_ev_logical_link_complete *ev = data;
struct hci_conn *hcon;
struct hci_chan *hchan;
struct amp_mgr *mgr;
- BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
- hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
- ev->status);
+ bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
+ le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
if (!hcon)
@@ -5168,14 +5385,14 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
}
-static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
+static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
+ struct hci_ev_disconn_logical_link_complete *ev = data;
struct hci_chan *hchan;
- BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
- le16_to_cpu(ev->handle), ev->status);
+ bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
+ le16_to_cpu(ev->handle), ev->status);
if (ev->status)
return;
@@ -5192,13 +5409,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
+static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
+ struct hci_ev_disconn_phy_link_complete *ev = data;
struct hci_conn *hcon;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
if (ev->status)
return;
@@ -5412,15 +5629,16 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
}
unlock:
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
hci_dev_unlock(hdev);
}
-static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_le_conn_complete *ev = (void *) skb->data;
+ struct hci_ev_le_conn_complete *ev = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
NULL, ev->role, le16_to_cpu(ev->handle),
@@ -5429,35 +5647,43 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
le16_to_cpu(ev->supervision_timeout));
}
-static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
+static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
+ struct hci_ev_le_enh_conn_complete *ev = data;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
&ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
le16_to_cpu(ev->interval),
le16_to_cpu(ev->latency),
le16_to_cpu(ev->supervision_timeout));
-
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
- hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
- hci_req_disable_address_resolution(hdev);
}
-static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
+ struct hci_evt_le_ext_adv_set_term *ev = data;
struct hci_conn *conn;
- struct adv_info *adv;
+ struct adv_info *adv, *n;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
adv = hci_find_adv_instance(hdev, ev->handle);
+	/* The Bluetooth Core 5.3 specification clearly states that this event
+	 * shall not be sent when the Host disables the advertising set. So in
+	 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
+	 *
+	 * When the Host disables an advertising set, all cleanup is done via
+	 * its command callback and does not need to be duplicated here.
+	 */
+ if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
+ bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
+ return;
+ }
+
if (ev->status) {
if (!adv)
return;
@@ -5466,6 +5692,13 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
hci_remove_adv_instance(hdev, ev->handle);
mgmt_advertising_removed(NULL, hdev, ev->handle);
+ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
+ if (adv->enabled)
+ return;
+ }
+
+ /* We are no longer advertising, clear HCI_LE_ADV */
+ hci_dev_clear_flag(hdev, HCI_LE_ADV);
return;
}
@@ -5493,13 +5726,13 @@ static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
}
-static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
+static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
+ struct hci_ev_le_conn_update_complete *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
if (ev->status)
return;
@@ -5520,7 +5753,7 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
bdaddr_t *addr,
u8 addr_type, bool addr_resolved,
- u8 adv_type, bdaddr_t *direct_rpa)
+ u8 adv_type)
{
struct hci_conn *conn;
struct hci_conn_params *params;
@@ -5529,8 +5762,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
return NULL;
- /* Ignore if the device is blocked */
- if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
+ /* Ignore if the device is blocked or hdev is suspended */
+ if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
+ hdev->suspended)
return NULL;
/* Most controller will fail if we try to create new connections
@@ -5574,7 +5808,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
- HCI_ROLE_MASTER, direct_rpa);
+ HCI_ROLE_MASTER);
if (!IS_ERR(conn)) {
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
* by higher layer that tried to connect, if no then
@@ -5697,7 +5931,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
* for advertising reports) and is already verified to be RPA above.
*/
conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
- type, direct_addr);
+ type);
if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
/* Store report for later inclusion by
* mgmt_device_connected
@@ -5814,27 +6048,38 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
clear_pending_adv_report(hdev);
}
-static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- u8 num_reports = skb->data[0];
- void *ptr = &skb->data[1];
+ struct hci_ev_le_advertising_report *ev = data;
+
+ if (!ev->num)
+ return;
hci_dev_lock(hdev);
- while (num_reports--) {
- struct hci_ev_le_advertising_info *ev = ptr;
+ while (ev->num--) {
+ struct hci_ev_le_advertising_info *info;
s8 rssi;
- if (ev->length <= HCI_MAX_AD_LENGTH) {
- rssi = ev->data[ev->length];
- process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
- ev->bdaddr_type, NULL, 0, rssi,
- ev->data, ev->length, false);
+ info = hci_le_ev_skb_pull(hdev, skb,
+ HCI_EV_LE_ADVERTISING_REPORT,
+ sizeof(*info));
+ if (!info)
+ break;
+
+ if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
+ info->length + 1))
+ break;
+
+ if (info->length <= HCI_MAX_AD_LENGTH) {
+ rssi = info->data[info->length];
+ process_adv_report(hdev, info->type, &info->bdaddr,
+ info->bdaddr_type, NULL, 0, rssi,
+ info->data, info->length, false);
} else {
bt_dev_err(hdev, "Dropping invalid advertising data");
}
-
- ptr += sizeof(*ev) + ev->length + 1;
}
hci_dev_unlock(hdev);
@@ -5884,40 +6129,50 @@ invalid:
return LE_ADV_INVALID;
}
-static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- u8 num_reports = skb->data[0];
- void *ptr = &skb->data[1];
+ struct hci_ev_le_ext_adv_report *ev = data;
+
+ if (!ev->num)
+ return;
hci_dev_lock(hdev);
- while (num_reports--) {
- struct hci_ev_le_ext_adv_report *ev = ptr;
+ while (ev->num--) {
+ struct hci_ev_le_ext_adv_info *info;
u8 legacy_evt_type;
u16 evt_type;
- evt_type = __le16_to_cpu(ev->evt_type);
+ info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
+ sizeof(*info));
+ if (!info)
+ break;
+
+ if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
+ info->length))
+ break;
+
+ evt_type = __le16_to_cpu(info->type);
legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
if (legacy_evt_type != LE_ADV_INVALID) {
- process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
- ev->bdaddr_type, NULL, 0, ev->rssi,
- ev->data, ev->length,
+ process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
+ info->bdaddr_type, NULL, 0,
+ info->rssi, info->data, info->length,
!(evt_type & LE_EXT_ADV_LEGACY_PDU));
}
-
- ptr += sizeof(*ev) + ev->length;
}
hci_dev_unlock(hdev);
}
-static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
+static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
+ struct hci_ev_le_remote_feat_complete *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
hci_dev_lock(hdev);
@@ -5953,15 +6208,16 @@ static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
-static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_le_ltk_req *ev = (void *) skb->data;
+ struct hci_ev_le_ltk_req *ev = data;
struct hci_cp_le_ltk_reply cp;
struct hci_cp_le_ltk_neg_reply neg;
struct hci_conn *conn;
struct smp_ltk *ltk;
- BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
+ bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
@@ -6029,14 +6285,16 @@ static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
&cp);
}
-static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
+static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
+ struct hci_ev_le_remote_conn_param_req *ev = data;
struct hci_cp_le_conn_param_req_reply cp;
struct hci_conn *hcon;
u16 handle, min, max, latency, timeout;
+ bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
+
handle = le16_to_cpu(ev->handle);
min = le16_to_cpu(ev->interval_min);
max = le16_to_cpu(ev->interval_max);
@@ -6087,32 +6345,40 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
-static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
+static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
- u8 num_reports = skb->data[0];
- struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
+ struct hci_ev_le_direct_adv_report *ev = data;
+ int i;
+
+ if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
+ flex_array_size(ev, info, ev->num)))
+ return;
- if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
+ if (!ev->num)
return;
hci_dev_lock(hdev);
- for (; num_reports; num_reports--, ev++)
- process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
- ev->bdaddr_type, &ev->direct_addr,
- ev->direct_addr_type, ev->rssi, NULL, 0,
+ for (i = 0; i < ev->num; i++) {
+ struct hci_ev_le_direct_adv_info *info = &ev->info[i];
+
+ process_adv_report(hdev, info->type, &info->bdaddr,
+ info->bdaddr_type, &info->direct_addr,
+ info->direct_addr_type, info->rssi, NULL, 0,
false);
+ }
hci_dev_unlock(hdev);
}
-static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb)
{
- struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
+ struct hci_ev_le_phy_update_complete *ev = data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
+ bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
if (ev->status)
return;
@@ -6130,60 +6396,113 @@ unlock:
hci_dev_unlock(hdev);
}
-static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct hci_ev_le_meta *le_ev = (void *) skb->data;
-
- skb_pull(skb, sizeof(*le_ev));
-
- switch (le_ev->subevent) {
- case HCI_EV_LE_CONN_COMPLETE:
- hci_le_conn_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_LE_CONN_UPDATE_COMPLETE:
- hci_le_conn_update_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_LE_ADVERTISING_REPORT:
- hci_le_adv_report_evt(hdev, skb);
- break;
-
- case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
- hci_le_remote_feat_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_LE_LTK_REQ:
- hci_le_ltk_request_evt(hdev, skb);
- break;
-
- case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
- hci_le_remote_conn_param_req_evt(hdev, skb);
- break;
-
- case HCI_EV_LE_DIRECT_ADV_REPORT:
- hci_le_direct_adv_report_evt(hdev, skb);
- break;
+#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
+[_op] = { \
+ .func = _func, \
+ .min_len = _min_len, \
+ .max_len = _max_len, \
+}
+
+#define HCI_LE_EV(_op, _func, _len) \
+ HCI_LE_EV_VL(_op, _func, _len, _len)
+
+#define HCI_LE_EV_STATUS(_op, _func) \
+ HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
+
+/* Entries in this table shall be positioned according to the subevent
+ * opcode they handle, so use of the macros above is recommended: they
+ * initialize each entry at its proper index using designated initializers,
+ * which means subevents without a callback function can simply be omitted.
+ */
+static const struct hci_le_ev {
+ void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
+ u16 min_len;
+ u16 max_len;
+} hci_le_ev_table[U8_MAX + 1] = {
+ /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
+ HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
+ sizeof(struct hci_ev_le_conn_complete)),
+ /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
+ HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
+ sizeof(struct hci_ev_le_advertising_report),
+ HCI_MAX_EVENT_SIZE),
+ /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
+ HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
+ hci_le_conn_update_complete_evt,
+ sizeof(struct hci_ev_le_conn_update_complete)),
+ /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
+ HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
+ hci_le_remote_feat_complete_evt,
+ sizeof(struct hci_ev_le_remote_feat_complete)),
+ /* [0x05 = HCI_EV_LE_LTK_REQ] */
+ HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
+ sizeof(struct hci_ev_le_ltk_req)),
+ /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
+ HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
+ hci_le_remote_conn_param_req_evt,
+ sizeof(struct hci_ev_le_remote_conn_param_req)),
+ /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
+ HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
+ hci_le_enh_conn_complete_evt,
+ sizeof(struct hci_ev_le_enh_conn_complete)),
+ /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
+ HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
+ sizeof(struct hci_ev_le_direct_adv_report),
+ HCI_MAX_EVENT_SIZE),
+ /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
+ HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
+ sizeof(struct hci_ev_le_phy_update_complete)),
+ /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
+ HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
+ sizeof(struct hci_ev_le_ext_adv_report),
+ HCI_MAX_EVENT_SIZE),
+ /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
+ HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
+ sizeof(struct hci_evt_le_ext_adv_set_term)),
+};
+
+static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb, u16 *opcode, u8 *status,
+ hci_req_complete_t *req_complete,
+ hci_req_complete_skb_t *req_complete_skb)
+{
+ struct hci_ev_le_meta *ev = data;
+ const struct hci_le_ev *subev;
+
+ bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
+
+ /* Only match event if command OGF is for LE */
+ if (hdev->sent_cmd &&
+ hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
+ hci_skb_event(hdev->sent_cmd) == ev->subevent) {
+ *opcode = hci_skb_opcode(hdev->sent_cmd);
+ hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
+ req_complete_skb);
+ }
- case HCI_EV_LE_PHY_UPDATE_COMPLETE:
- hci_le_phy_update_evt(hdev, skb);
- break;
+ subev = &hci_le_ev_table[ev->subevent];
+ if (!subev->func)
+ return;
- case HCI_EV_LE_EXT_ADV_REPORT:
- hci_le_ext_adv_report_evt(hdev, skb);
- break;
+ if (skb->len < subev->min_len) {
+ bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
+ ev->subevent, skb->len, subev->min_len);
+ return;
+ }
- case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
- hci_le_enh_conn_complete_evt(hdev, skb);
- break;
+ /* Just warn if the length is over max_len: it may still be
+ * possible to partially parse the event, so leave it to the
+ * callback to decide if that is acceptable.
+ */
+ if (skb->len > subev->max_len)
+ bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
+ ev->subevent, skb->len, subev->max_len);
- case HCI_EV_LE_EXT_ADV_SET_TERM:
- hci_le_ext_adv_term_evt(hdev, skb);
- break;
+ data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
+ if (!data)
+ return;
- default:
- break;
- }
+ subev->func(hdev, data, skb);
}
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
@@ -6195,13 +6514,9 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
if (!skb)
return false;
- if (skb->len < sizeof(*hdr)) {
- bt_dev_err(hdev, "too short HCI event");
+ hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
+ if (!hdr)
return false;
- }
-
- hdr = (void *) skb->data;
- skb_pull(skb, HCI_EVENT_HDR_SIZE);
if (event) {
if (hdr->evt != event)
@@ -6221,13 +6536,9 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
return false;
}
- if (skb->len < sizeof(*ev)) {
- bt_dev_err(hdev, "too short cmd_complete event");
+ ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
+ if (!ev)
return false;
- }
-
- ev = (void *) skb->data;
- skb_pull(skb, sizeof(*ev));
if (opcode != __le16_to_cpu(ev->opcode)) {
BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
@@ -6243,7 +6554,7 @@ static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
{
struct hci_ev_le_advertising_info *adv;
struct hci_ev_le_direct_adv_info *direct_adv;
- struct hci_ev_le_ext_adv_report *ext_adv;
+ struct hci_ev_le_ext_adv_info *ext_adv;
const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
const struct hci_ev_conn_request *conn_request = (void *)skb->data;
@@ -6307,25 +6618,252 @@ unlock:
hci_dev_unlock(hdev);
}
+#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
+[_op] = { \
+ .req = false, \
+ .func = _func, \
+ .min_len = _min_len, \
+ .max_len = _max_len, \
+}
+
+#define HCI_EV(_op, _func, _len) \
+ HCI_EV_VL(_op, _func, _len, _len)
+
+#define HCI_EV_STATUS(_op, _func) \
+ HCI_EV(_op, _func, sizeof(struct hci_ev_status))
+
+#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
+[_op] = { \
+ .req = true, \
+ .func_req = _func, \
+ .min_len = _min_len, \
+ .max_len = _max_len, \
+}
+
+#define HCI_EV_REQ(_op, _func, _len) \
+ HCI_EV_REQ_VL(_op, _func, _len, _len)
+
+/* Entries in this table shall have their position according to the event opcode
+ * they handle, so use of the macros above is recommended since they initialize
+ * each entry at its proper index using designated initializers; that way events
+ * without a callback function need no entry at all.
+ */
+static const struct hci_ev {
+ bool req;
+ union {
+ void (*func)(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb);
+ void (*func_req)(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb, u16 *opcode, u8 *status,
+ hci_req_complete_t *req_complete,
+ hci_req_complete_skb_t *req_complete_skb);
+ };
+ u16 min_len;
+ u16 max_len;
+} hci_ev_table[U8_MAX + 1] = {
+ /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
+ HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
+ /* [0x02 = HCI_EV_INQUIRY_RESULT] */
+ HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
+ sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
+ /* [0x03 = HCI_EV_CONN_COMPLETE] */
+ HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
+ sizeof(struct hci_ev_conn_complete)),
+ /* [0x04 = HCI_EV_CONN_REQUEST] */
+ HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
+ sizeof(struct hci_ev_conn_request)),
+ /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
+ HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
+ sizeof(struct hci_ev_disconn_complete)),
+ /* [0x06 = HCI_EV_AUTH_COMPLETE] */
+ HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
+ sizeof(struct hci_ev_auth_complete)),
+ /* [0x07 = HCI_EV_REMOTE_NAME] */
+ HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
+ sizeof(struct hci_ev_remote_name)),
+ /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
+ HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
+ sizeof(struct hci_ev_encrypt_change)),
+ /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
+ HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
+ hci_change_link_key_complete_evt,
+ sizeof(struct hci_ev_change_link_key_complete)),
+ /* [0x0b = HCI_EV_REMOTE_FEATURES] */
+ HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
+ sizeof(struct hci_ev_remote_features)),
+ /* [0x0e = HCI_EV_CMD_COMPLETE] */
+ HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
+ sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
+ /* [0x0f = HCI_EV_CMD_STATUS] */
+ HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
+ sizeof(struct hci_ev_cmd_status)),
+ /* [0x10 = HCI_EV_HARDWARE_ERROR] */
+ HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
+ sizeof(struct hci_ev_hardware_error)),
+ /* [0x12 = HCI_EV_ROLE_CHANGE] */
+ HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
+ sizeof(struct hci_ev_role_change)),
+ /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
+ HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
+ sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
+ /* [0x14 = HCI_EV_MODE_CHANGE] */
+ HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
+ sizeof(struct hci_ev_mode_change)),
+ /* [0x16 = HCI_EV_PIN_CODE_REQ] */
+ HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
+ sizeof(struct hci_ev_pin_code_req)),
+ /* [0x17 = HCI_EV_LINK_KEY_REQ] */
+ HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
+ sizeof(struct hci_ev_link_key_req)),
+ /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
+ HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
+ sizeof(struct hci_ev_link_key_notify)),
+ /* [0x1c = HCI_EV_CLOCK_OFFSET] */
+ HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
+ sizeof(struct hci_ev_clock_offset)),
+ /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
+ HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
+ sizeof(struct hci_ev_pkt_type_change)),
+ /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
+ HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
+ sizeof(struct hci_ev_pscan_rep_mode)),
+ /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
+ HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
+ hci_inquiry_result_with_rssi_evt,
+ sizeof(struct hci_ev_inquiry_result_rssi),
+ HCI_MAX_EVENT_SIZE),
+ /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
+ HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
+ sizeof(struct hci_ev_remote_ext_features)),
+ /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
+ HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
+ sizeof(struct hci_ev_sync_conn_complete)),
+ /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
+ HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
+ hci_extended_inquiry_result_evt,
+ sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
+ /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
+ HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
+ sizeof(struct hci_ev_key_refresh_complete)),
+ /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
+ HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
+ sizeof(struct hci_ev_io_capa_request)),
+ /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
+ HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
+ sizeof(struct hci_ev_io_capa_reply)),
+ /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
+ HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
+ sizeof(struct hci_ev_user_confirm_req)),
+ /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
+ HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
+ sizeof(struct hci_ev_user_passkey_req)),
+ /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
+ HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
+ sizeof(struct hci_ev_remote_oob_data_request)),
+ /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
+ HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
+ sizeof(struct hci_ev_simple_pair_complete)),
+ /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
+ HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
+ sizeof(struct hci_ev_user_passkey_notify)),
+ /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
+ HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
+ sizeof(struct hci_ev_keypress_notify)),
+ /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
+ HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
+ sizeof(struct hci_ev_remote_host_features)),
+ /* [0x3e = HCI_EV_LE_META] */
+ HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
+ sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
+#if IS_ENABLED(CONFIG_BT_HS)
+ /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
+ HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
+ sizeof(struct hci_ev_phy_link_complete)),
+ /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
+ HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
+ sizeof(struct hci_ev_channel_selected)),
+ /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
+ HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
+ hci_disconn_loglink_complete_evt,
+ sizeof(struct hci_ev_disconn_logical_link_complete)),
+ /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
+ HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
+ sizeof(struct hci_ev_logical_link_complete)),
+ /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
+ HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
+ hci_disconn_phylink_complete_evt,
+ sizeof(struct hci_ev_disconn_phy_link_complete)),
+#endif
+ /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
+ HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
+ sizeof(struct hci_ev_num_comp_blocks)),
+ /* [0xff = HCI_EV_VENDOR] */
+ HCI_EV(HCI_EV_VENDOR, msft_vendor_evt, 0),
+};
+
+static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
+ u16 *opcode, u8 *status,
+ hci_req_complete_t *req_complete,
+ hci_req_complete_skb_t *req_complete_skb)
+{
+ const struct hci_ev *ev = &hci_ev_table[event];
+ void *data;
+
+ if (!ev->func)
+ return;
+
+ if (skb->len < ev->min_len) {
+ bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
+ event, skb->len, ev->min_len);
+ return;
+ }
+
+ /* Just warn if the length is over max_len: it may still be
+ * possible to partially parse the event, so leave it to the
+ * callback to decide if that is acceptable.
+ */
+ if (skb->len > ev->max_len)
+ bt_dev_warn(hdev, "unexpected event 0x%2.2x length: %u > %u",
+ event, skb->len, ev->max_len);
+
+ data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
+ if (!data)
+ return;
+
+ if (ev->req)
+ ev->func_req(hdev, data, skb, opcode, status, req_complete,
+ req_complete_skb);
+ else
+ ev->func(hdev, data, skb);
+}
+
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
hci_req_complete_t req_complete = NULL;
hci_req_complete_skb_t req_complete_skb = NULL;
struct sk_buff *orig_skb = NULL;
- u8 status = 0, event = hdr->evt, req_evt = 0;
+ u8 status = 0, event, req_evt = 0;
u16 opcode = HCI_OP_NOP;
+ if (skb->len < sizeof(*hdr)) {
+ bt_dev_err(hdev, "Malformed HCI Event");
+ goto done;
+ }
+
+ event = hdr->evt;
if (!event) {
- bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
+ bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
+ event);
goto done;
}
- if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
- struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
- opcode = __le16_to_cpu(cmd_hdr->opcode);
- hci_req_cmd_complete(hdev, opcode, status, &req_complete,
- &req_complete_skb);
+ /* Only match event if command OGF is not for LE */
+ if (hdev->sent_cmd &&
+ hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
+ hci_skb_event(hdev->sent_cmd) == event) {
+ hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
+ status, &req_complete, &req_complete_skb);
req_evt = event;
}
@@ -6343,191 +6881,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
/* Store wake reason if we're suspended */
hci_store_wake_reason(hdev, event, skb);
- switch (event) {
- case HCI_EV_INQUIRY_COMPLETE:
- hci_inquiry_complete_evt(hdev, skb);
- break;
+ bt_dev_dbg(hdev, "event 0x%2.2x", event);
- case HCI_EV_INQUIRY_RESULT:
- hci_inquiry_result_evt(hdev, skb);
- break;
-
- case HCI_EV_CONN_COMPLETE:
- hci_conn_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_CONN_REQUEST:
- hci_conn_request_evt(hdev, skb);
- break;
-
- case HCI_EV_DISCONN_COMPLETE:
- hci_disconn_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_AUTH_COMPLETE:
- hci_auth_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_REMOTE_NAME:
- hci_remote_name_evt(hdev, skb);
- break;
-
- case HCI_EV_ENCRYPT_CHANGE:
- hci_encrypt_change_evt(hdev, skb);
- break;
-
- case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
- hci_change_link_key_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_REMOTE_FEATURES:
- hci_remote_features_evt(hdev, skb);
- break;
-
- case HCI_EV_CMD_COMPLETE:
- hci_cmd_complete_evt(hdev, skb, &opcode, &status,
- &req_complete, &req_complete_skb);
- break;
-
- case HCI_EV_CMD_STATUS:
- hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
- &req_complete_skb);
- break;
-
- case HCI_EV_HARDWARE_ERROR:
- hci_hardware_error_evt(hdev, skb);
- break;
-
- case HCI_EV_ROLE_CHANGE:
- hci_role_change_evt(hdev, skb);
- break;
-
- case HCI_EV_NUM_COMP_PKTS:
- hci_num_comp_pkts_evt(hdev, skb);
- break;
-
- case HCI_EV_MODE_CHANGE:
- hci_mode_change_evt(hdev, skb);
- break;
-
- case HCI_EV_PIN_CODE_REQ:
- hci_pin_code_request_evt(hdev, skb);
- break;
-
- case HCI_EV_LINK_KEY_REQ:
- hci_link_key_request_evt(hdev, skb);
- break;
-
- case HCI_EV_LINK_KEY_NOTIFY:
- hci_link_key_notify_evt(hdev, skb);
- break;
-
- case HCI_EV_CLOCK_OFFSET:
- hci_clock_offset_evt(hdev, skb);
- break;
-
- case HCI_EV_PKT_TYPE_CHANGE:
- hci_pkt_type_change_evt(hdev, skb);
- break;
-
- case HCI_EV_PSCAN_REP_MODE:
- hci_pscan_rep_mode_evt(hdev, skb);
- break;
-
- case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
- hci_inquiry_result_with_rssi_evt(hdev, skb);
- break;
-
- case HCI_EV_REMOTE_EXT_FEATURES:
- hci_remote_ext_features_evt(hdev, skb);
- break;
-
- case HCI_EV_SYNC_CONN_COMPLETE:
- hci_sync_conn_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_EXTENDED_INQUIRY_RESULT:
- hci_extended_inquiry_result_evt(hdev, skb);
- break;
-
- case HCI_EV_KEY_REFRESH_COMPLETE:
- hci_key_refresh_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_IO_CAPA_REQUEST:
- hci_io_capa_request_evt(hdev, skb);
- break;
-
- case HCI_EV_IO_CAPA_REPLY:
- hci_io_capa_reply_evt(hdev, skb);
- break;
-
- case HCI_EV_USER_CONFIRM_REQUEST:
- hci_user_confirm_request_evt(hdev, skb);
- break;
-
- case HCI_EV_USER_PASSKEY_REQUEST:
- hci_user_passkey_request_evt(hdev, skb);
- break;
-
- case HCI_EV_USER_PASSKEY_NOTIFY:
- hci_user_passkey_notify_evt(hdev, skb);
- break;
-
- case HCI_EV_KEYPRESS_NOTIFY:
- hci_keypress_notify_evt(hdev, skb);
- break;
-
- case HCI_EV_SIMPLE_PAIR_COMPLETE:
- hci_simple_pair_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_REMOTE_HOST_FEATURES:
- hci_remote_host_features_evt(hdev, skb);
- break;
-
- case HCI_EV_LE_META:
- hci_le_meta_evt(hdev, skb);
- break;
-
- case HCI_EV_REMOTE_OOB_DATA_REQUEST:
- hci_remote_oob_data_request_evt(hdev, skb);
- break;
-
-#if IS_ENABLED(CONFIG_BT_HS)
- case HCI_EV_CHANNEL_SELECTED:
- hci_chan_selected_evt(hdev, skb);
- break;
-
- case HCI_EV_PHY_LINK_COMPLETE:
- hci_phy_link_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_LOGICAL_LINK_COMPLETE:
- hci_loglink_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
- hci_disconn_loglink_complete_evt(hdev, skb);
- break;
-
- case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
- hci_disconn_phylink_complete_evt(hdev, skb);
- break;
-#endif
-
- case HCI_EV_NUM_COMP_BLOCKS:
- hci_num_comp_blocks_evt(hdev, skb);
- break;
-
- case HCI_EV_VENDOR:
- msft_vendor_evt(hdev, skb);
- break;
-
- default:
- BT_DBG("%s event 0x%2.2x", hdev->name, event);
- break;
- }
+ hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
+ &req_complete_skb);
if (req_complete) {
req_complete(hdev, status, opcode);
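The hci_event.c rework above replaces the big event switch with lookup tables (hci_ev_table, hci_le_ev_table) whose entries land at their opcode index through designated initializers and carry min/max length bounds that are checked before dispatch. The stand-alone sketch below illustrates the same pattern outside the kernel; the opcode value, handler name and the 11-byte length are illustrative assumptions, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

struct ev {
	const uint8_t *data;
	size_t len;
};

struct handler {
	void (*func)(const struct ev *ev);
	size_t min_len;
	size_t max_len;
};

static void conn_complete(const struct ev *ev)
{
	printf("conn complete, %zu bytes\n", ev->len);
}

/* Designated initializers place each handler at its opcode index; opcodes
 * without a handler stay zero-initialized and are skipped at dispatch time.
 */
static const struct handler table[0x100] = {
	[0x03] = { .func = conn_complete, .min_len = 11, .max_len = 11 },
};

static void dispatch(uint8_t opcode, const struct ev *ev)
{
	const struct handler *h = &table[opcode];

	if (!h->func)
		return;			/* no handler registered for this opcode */

	if (ev->len < h->min_len) {
		fprintf(stderr, "opcode 0x%02x too short\n", opcode);
		return;			/* refuse to parse a truncated event */
	}

	if (ev->len > h->max_len)
		fprintf(stderr, "opcode 0x%02x longer than expected\n", opcode);

	h->func(ev);			/* at least min_len bytes are guaranteed */
}

int main(void)
{
	uint8_t buf[11] = { 0 };
	struct ev ev = { .data = buf, .len = sizeof(buf) };

	dispatch(0x03, &ev);
	return 0;
}

Because the table is indexed by opcode, adding a handler is a single designated-initializer entry, and unknown opcodes fall through harmlessly to the zeroed default.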
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index 92611bfc0b9e..42c8047a9897 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -32,10 +32,6 @@
#include "msft.h"
#include "eir.h"
-#define HCI_REQ_DONE 0
-#define HCI_REQ_PEND 1
-#define HCI_REQ_CANCELED 2
-
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
skb_queue_head_init(&req->cmd_q);
@@ -101,8 +97,8 @@ int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
return req_run(req, NULL, complete);
}
-static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
- struct sk_buff *skb)
+void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ struct sk_buff *skb)
{
bt_dev_dbg(hdev, "result 0x%2.2x", result);
@@ -115,81 +111,6 @@ static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
}
}
-void hci_req_sync_cancel(struct hci_dev *hdev, int err)
-{
- bt_dev_dbg(hdev, "err 0x%2.2x", err);
-
- if (hdev->req_status == HCI_REQ_PEND) {
- hdev->req_result = err;
- hdev->req_status = HCI_REQ_CANCELED;
- wake_up_interruptible(&hdev->req_wait_q);
- }
-}
-
-struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u8 event, u32 timeout)
-{
- struct hci_request req;
- struct sk_buff *skb;
- int err = 0;
-
- bt_dev_dbg(hdev, "");
-
- hci_req_init(&req, hdev);
-
- hci_req_add_ev(&req, opcode, plen, param, event);
-
- hdev->req_status = HCI_REQ_PEND;
-
- err = hci_req_run_skb(&req, hci_req_sync_complete);
- if (err < 0)
- return ERR_PTR(err);
-
- err = wait_event_interruptible_timeout(hdev->req_wait_q,
- hdev->req_status != HCI_REQ_PEND, timeout);
-
- if (err == -ERESTARTSYS)
- return ERR_PTR(-EINTR);
-
- switch (hdev->req_status) {
- case HCI_REQ_DONE:
- err = -bt_to_errno(hdev->req_result);
- break;
-
- case HCI_REQ_CANCELED:
- err = -hdev->req_result;
- break;
-
- default:
- err = -ETIMEDOUT;
- break;
- }
-
- hdev->req_status = hdev->req_result = 0;
- skb = hdev->req_skb;
- hdev->req_skb = NULL;
-
- bt_dev_dbg(hdev, "end: err %d", err);
-
- if (err < 0) {
- kfree_skb(skb);
- return ERR_PTR(err);
- }
-
- if (!skb)
- return ERR_PTR(-ENODATA);
-
- return skb;
-}
-EXPORT_SYMBOL(__hci_cmd_sync_ev);
-
-struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
- const void *param, u32 timeout)
-{
- return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
-}
-EXPORT_SYMBOL(__hci_cmd_sync);
-
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
unsigned long opt),
@@ -436,82 +357,6 @@ static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
return false;
}
-/* This function controls the background scanning based on hdev->pend_le_conns
- * list. If there are pending LE connection we start the background scanning,
- * otherwise we stop it.
- *
- * This function requires the caller holds hdev->lock.
- */
-static void __hci_update_background_scan(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
-
- if (!test_bit(HCI_UP, &hdev->flags) ||
- test_bit(HCI_INIT, &hdev->flags) ||
- hci_dev_test_flag(hdev, HCI_SETUP) ||
- hci_dev_test_flag(hdev, HCI_CONFIG) ||
- hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
- hci_dev_test_flag(hdev, HCI_UNREGISTER))
- return;
-
- /* No point in doing scanning if LE support hasn't been enabled */
- if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
- return;
-
- /* If discovery is active don't interfere with it */
- if (hdev->discovery.state != DISCOVERY_STOPPED)
- return;
-
- /* Reset RSSI and UUID filters when starting background scanning
- * since these filters are meant for service discovery only.
- *
- * The Start Discovery and Start Service Discovery operations
- * ensure to set proper values for RSSI threshold and UUID
- * filter list. So it is safe to just reset them here.
- */
- hci_discovery_filter_clear(hdev);
-
- bt_dev_dbg(hdev, "ADV monitoring is %s",
- hci_is_adv_monitoring(hdev) ? "on" : "off");
-
- if (list_empty(&hdev->pend_le_conns) &&
- list_empty(&hdev->pend_le_reports) &&
- !hci_is_adv_monitoring(hdev)) {
- /* If there is no pending LE connections or devices
- * to be scanned for or no ADV monitors, we should stop the
- * background scanning.
- */
-
- /* If controller is not scanning we are done. */
- if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
- return;
-
- hci_req_add_le_scan_disable(req, false);
-
- bt_dev_dbg(hdev, "stopping background scanning");
- } else {
- /* If there is at least one pending LE connection, we should
- * keep the background scan running.
- */
-
- /* If controller is connecting, we should not start scanning
- * since some controllers are not able to scan and connect at
- * the same time.
- */
- if (hci_lookup_le_connect(hdev))
- return;
-
- /* If controller is currently scanning, we stop it to ensure we
- * don't miss any advertising (due to duplicates filter).
- */
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
- hci_req_add_le_scan_disable(req, false);
-
- hci_req_add_le_passive_scan(req);
- bt_dev_dbg(hdev, "starting background scanning");
- }
-}
-
void __hci_req_update_name(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
@@ -560,9 +405,6 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
return;
}
- if (hdev->suspended)
- set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
-
if (use_ext_scan(hdev)) {
struct hci_cp_le_set_ext_scan_enable cp;
@@ -579,9 +421,7 @@ void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
}
/* Disable address resolution */
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
- hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
+ if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
__u8 enable = 0x00;
hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
@@ -600,8 +440,7 @@ static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
- if (use_ll_privacy(req->hdev) &&
- hci_dev_test_flag(req->hdev, HCI_ENABLE_LL_PRIVACY)) {
+ if (use_ll_privacy(req->hdev)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
@@ -642,8 +481,8 @@ static int add_to_accept_list(struct hci_request *req,
}
/* During suspend, only wakeable devices can be in accept list */
- if (hdev->suspended && !hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
- params->current_flags))
+ if (hdev->suspended &&
+ !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
return 0;
*num_entries += 1;
@@ -654,8 +493,7 @@ static int add_to_accept_list(struct hci_request *req,
cp.bdaddr_type);
hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp);
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) {
+ if (use_ll_privacy(hdev)) {
struct smp_irk *irk;
irk = hci_find_irk_by_addr(hdev, &params->addr,
@@ -694,8 +532,7 @@ static u8 update_accept_list(struct hci_request *req)
*/
bool allow_rpa = hdev->suspended;
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
+ if (use_ll_privacy(hdev))
allow_rpa = true;
/* Go through the current accept list programmed into the
@@ -784,9 +621,7 @@ static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
return;
}
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
- addr_resolv) {
+ if (use_ll_privacy(hdev) && addr_resolv) {
u8 enable = 0x01;
hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
@@ -943,8 +778,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
if (hdev->suspended) {
window = hdev->le_scan_window_suspend;
interval = hdev->le_scan_int_suspend;
-
- set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
} else if (hci_is_le_conn_scanning(hdev)) {
window = hdev->le_scan_window_connect;
interval = hdev->le_scan_int_connect;
@@ -977,59 +810,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
addr_resolv);
}
-static void hci_req_clear_event_filter(struct hci_request *req)
-{
- struct hci_cp_set_event_filter f;
-
- if (!hci_dev_test_flag(req->hdev, HCI_BREDR_ENABLED))
- return;
-
- if (hci_dev_test_flag(req->hdev, HCI_EVENT_FILTER_CONFIGURED)) {
- memset(&f, 0, sizeof(f));
- f.flt_type = HCI_FLT_CLEAR_ALL;
- hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
- }
-}
-
-static void hci_req_set_event_filter(struct hci_request *req)
-{
- struct bdaddr_list_with_flags *b;
- struct hci_cp_set_event_filter f;
- struct hci_dev *hdev = req->hdev;
- u8 scan = SCAN_DISABLED;
- bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
-
- if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
- return;
-
- /* Always clear event filter when starting */
- hci_req_clear_event_filter(req);
-
- list_for_each_entry(b, &hdev->accept_list, list) {
- if (!hci_conn_test_flag(HCI_CONN_FLAG_REMOTE_WAKEUP,
- b->current_flags))
- continue;
-
- memset(&f, 0, sizeof(f));
- bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
- f.flt_type = HCI_FLT_CONN_SETUP;
- f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
- f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
-
- bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
- hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
- scan = SCAN_PAGE;
- }
-
- if (scan && !scanning) {
- set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
- } else if (!scan && scanning) {
- set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
- hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
- }
-}
-
static void cancel_adv_timeout(struct hci_dev *hdev)
{
if (hdev->adv_instance_timeout) {
@@ -1038,235 +818,6 @@ static void cancel_adv_timeout(struct hci_dev *hdev)
}
}
-/* This function requires the caller holds hdev->lock */
-void __hci_req_pause_adv_instances(struct hci_request *req)
-{
- bt_dev_dbg(req->hdev, "Pausing advertising instances");
-
- /* Call to disable any advertisements active on the controller.
- * This will succeed even if no advertisements are configured.
- */
- __hci_req_disable_advertising(req);
-
- /* If we are using software rotation, pause the loop */
- if (!ext_adv_capable(req->hdev))
- cancel_adv_timeout(req->hdev);
-}
-
-/* This function requires the caller holds hdev->lock */
-static void __hci_req_resume_adv_instances(struct hci_request *req)
-{
- struct adv_info *adv;
-
- bt_dev_dbg(req->hdev, "Resuming advertising instances");
-
- if (ext_adv_capable(req->hdev)) {
- /* Call for each tracked instance to be re-enabled */
- list_for_each_entry(adv, &req->hdev->adv_instances, list) {
- __hci_req_enable_ext_advertising(req,
- adv->instance);
- }
-
- } else {
- /* Schedule for most recent instance to be restarted and begin
- * the software rotation loop
- */
- __hci_req_schedule_adv_instance(req,
- req->hdev->cur_adv_instance,
- true);
- }
-}
-
-/* This function requires the caller holds hdev->lock */
-int hci_req_resume_adv_instances(struct hci_dev *hdev)
-{
- struct hci_request req;
-
- hci_req_init(&req, hdev);
- __hci_req_resume_adv_instances(&req);
-
- return hci_req_run(&req, NULL);
-}
-
-static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
-{
- bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
- status);
- if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
- test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
- clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
- clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
- wake_up(&hdev->suspend_wait_q);
- }
-
- if (test_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks)) {
- clear_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
- wake_up(&hdev->suspend_wait_q);
- }
-}
-
-static void hci_req_prepare_adv_monitor_suspend(struct hci_request *req,
- bool suspending)
-{
- struct hci_dev *hdev = req->hdev;
-
- switch (hci_get_adv_monitor_offload_ext(hdev)) {
- case HCI_ADV_MONITOR_EXT_MSFT:
- if (suspending)
- msft_suspend(hdev);
- else
- msft_resume(hdev);
- break;
- default:
- return;
- }
-
- /* No need to block when enabling since it's on resume path */
- if (hdev->suspended && suspending)
- set_bit(SUSPEND_SET_ADV_FILTER, hdev->suspend_tasks);
-}
-
-/* Call with hci_dev_lock */
-void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
-{
- int old_state;
- struct hci_conn *conn;
- struct hci_request req;
- u8 page_scan;
- int disconnect_counter;
-
- if (next == hdev->suspend_state) {
- bt_dev_dbg(hdev, "Same state before and after: %d", next);
- goto done;
- }
-
- hdev->suspend_state = next;
- hci_req_init(&req, hdev);
-
- if (next == BT_SUSPEND_DISCONNECT) {
- /* Mark device as suspended */
- hdev->suspended = true;
-
- /* Pause discovery if not already stopped */
- old_state = hdev->discovery.state;
- if (old_state != DISCOVERY_STOPPED) {
- set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
- hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
- queue_work(hdev->req_workqueue, &hdev->discov_update);
- }
-
- hdev->discovery_paused = true;
- hdev->discovery_old_state = old_state;
-
- /* Stop directed advertising */
- old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
- if (old_state) {
- set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
- cancel_delayed_work(&hdev->discov_off);
- queue_delayed_work(hdev->req_workqueue,
- &hdev->discov_off, 0);
- }
-
- /* Pause other advertisements */
- if (hdev->adv_instance_cnt)
- __hci_req_pause_adv_instances(&req);
-
- hdev->advertising_paused = true;
- hdev->advertising_old_state = old_state;
-
- /* Disable page scan if enabled */
- if (test_bit(HCI_PSCAN, &hdev->flags)) {
- page_scan = SCAN_DISABLED;
- hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1,
- &page_scan);
- set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
- }
-
- /* Disable LE passive scan if enabled */
- if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
- cancel_interleave_scan(hdev);
- hci_req_add_le_scan_disable(&req, false);
- }
-
- /* Disable advertisement filters */
- hci_req_prepare_adv_monitor_suspend(&req, true);
-
- /* Prevent disconnects from causing scanning to be re-enabled */
- hdev->scanning_paused = true;
-
- /* Run commands before disconnecting */
- hci_req_run(&req, suspend_req_complete);
-
- disconnect_counter = 0;
- /* Soft disconnect everything (power off) */
- list_for_each_entry(conn, &hdev->conn_hash.list, list) {
- hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
- disconnect_counter++;
- }
-
- if (disconnect_counter > 0) {
- bt_dev_dbg(hdev,
- "Had %d disconnects. Will wait on them",
- disconnect_counter);
- set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
- }
- } else if (next == BT_SUSPEND_CONFIGURE_WAKE) {
- /* Unpause to take care of updating scanning params */
- hdev->scanning_paused = false;
- /* Enable event filter for paired devices */
- hci_req_set_event_filter(&req);
- /* Enable passive scan at lower duty cycle */
- __hci_update_background_scan(&req);
- /* Pause scan changes again. */
- hdev->scanning_paused = true;
- hci_req_run(&req, suspend_req_complete);
- } else {
- hdev->suspended = false;
- hdev->scanning_paused = false;
-
- /* Clear any event filters and restore scan state */
- hci_req_clear_event_filter(&req);
- __hci_req_update_scan(&req);
-
- /* Reset passive/background scanning to normal */
- __hci_update_background_scan(&req);
- /* Enable all of the advertisement filters */
- hci_req_prepare_adv_monitor_suspend(&req, false);
-
- /* Unpause directed advertising */
- hdev->advertising_paused = false;
- if (hdev->advertising_old_state) {
- set_bit(SUSPEND_UNPAUSE_ADVERTISING,
- hdev->suspend_tasks);
- hci_dev_set_flag(hdev, HCI_ADVERTISING);
- queue_work(hdev->req_workqueue,
- &hdev->discoverable_update);
- hdev->advertising_old_state = 0;
- }
-
- /* Resume other advertisements */
- if (hdev->adv_instance_cnt)
- __hci_req_resume_adv_instances(&req);
-
- /* Unpause discovery */
- hdev->discovery_paused = false;
- if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
- hdev->discovery_old_state != DISCOVERY_STOPPING) {
- set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
- hci_discovery_set_state(hdev, DISCOVERY_STARTING);
- queue_work(hdev->req_workqueue, &hdev->discov_update);
- }
-
- hci_req_run(&req, suspend_req_complete);
- }
-
- hdev->suspend_state = next;
-
-done:
- clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
- wake_up(&hdev->suspend_wait_q);
-}
-
static bool adv_cur_instance_is_scannable(struct hci_dev *hdev)
{
return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance);
@@ -1548,8 +1099,7 @@ void hci_req_disable_address_resolution(struct hci_dev *hdev)
struct hci_request req;
__u8 enable = 0x00;
- if (!use_ll_privacy(hdev) &&
- !hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
+ if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
return;
hci_req_init(&req, hdev);
@@ -1692,8 +1242,7 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
/* If Controller supports LL Privacy use own address type is
* 0x03
*/
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
+ if (use_ll_privacy(hdev))
*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
else
*own_addr_type = ADDR_LE_DEV_RANDOM;
@@ -1871,7 +1420,8 @@ int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
- if (own_addr_type == ADDR_LE_DEV_RANDOM &&
+ if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
+ own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
bacmp(&random_addr, BDADDR_ANY)) {
struct hci_cp_le_set_adv_set_rand_addr cp;
@@ -2160,8 +1710,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,
/* If Controller supports LL Privacy use own address type is
* 0x03
*/
- if (use_ll_privacy(hdev) &&
- hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
+ if (use_ll_privacy(hdev))
*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
else
*own_addr_type = ADDR_LE_DEV_RANDOM;
@@ -2301,47 +1850,6 @@ static void scan_update_work(struct work_struct *work)
hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}
-static int connectable_update(struct hci_request *req, unsigned long opt)
-{
- struct hci_dev *hdev = req->hdev;
-
- hci_dev_lock(hdev);
-
- __hci_req_update_scan(req);
-
- /* If BR/EDR is not enabled and we disable advertising as a
- * by-product of disabling connectable, we need to update the
- * advertising flags.
- */
- if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
- __hci_req_update_adv_data(req, hdev->cur_adv_instance);
-
- /* Update the advertising parameters if necessary */
- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
- !list_empty(&hdev->adv_instances)) {
- if (ext_adv_capable(hdev))
- __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
- else
- __hci_req_enable_advertising(req);
- }
-
- __hci_update_background_scan(req);
-
- hci_dev_unlock(hdev);
-
- return 0;
-}
-
-static void connectable_update_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- connectable_update);
- u8 status;
-
- hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
- mgmt_set_connectable_complete(hdev, status);
-}
-
static u8 get_service_classes(struct hci_dev *hdev)
{
struct bt_uuid *uuid;
@@ -2445,16 +1953,6 @@ static int discoverable_update(struct hci_request *req, unsigned long opt)
return 0;
}
-static void discoverable_update_work(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- discoverable_update);
- u8 status;
-
- hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
- mgmt_set_discoverable_complete(hdev, status);
-}
-
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason)
{
@@ -2548,35 +2046,6 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason)
return 0;
}
-static int update_bg_scan(struct hci_request *req, unsigned long opt)
-{
- hci_dev_lock(req->hdev);
- __hci_update_background_scan(req);
- hci_dev_unlock(req->hdev);
- return 0;
-}
-
-static void bg_scan_update(struct work_struct *work)
-{
- struct hci_dev *hdev = container_of(work, struct hci_dev,
- bg_scan_update);
- struct hci_conn *conn;
- u8 status;
- int err;
-
- err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
- if (!err)
- return;
-
- hci_dev_lock(hdev);
-
- conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
- if (conn)
- hci_le_conn_failed(conn, status);
-
- hci_dev_unlock(hdev);
-}
-
static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
hci_req_add_le_scan_disable(req, false);
@@ -3163,10 +2632,7 @@ int __hci_req_hci_power_on(struct hci_dev *hdev)
void hci_request_setup(struct hci_dev *hdev)
{
INIT_WORK(&hdev->discov_update, discov_update);
- INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
INIT_WORK(&hdev->scan_update, scan_update_work);
- INIT_WORK(&hdev->connectable_update, connectable_update_work);
- INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
@@ -3176,13 +2642,10 @@ void hci_request_setup(struct hci_dev *hdev)
void hci_request_cancel_all(struct hci_dev *hdev)
{
- hci_req_sync_cancel(hdev, ENODEV);
+ __hci_cmd_sync_cancel(hdev, ENODEV);
cancel_work_sync(&hdev->discov_update);
- cancel_work_sync(&hdev->bg_scan_update);
cancel_work_sync(&hdev->scan_update);
- cancel_work_sync(&hdev->connectable_update);
- cancel_work_sync(&hdev->discoverable_update);
cancel_delayed_work_sync(&hdev->discov_off);
cancel_delayed_work_sync(&hdev->le_scan_disable);
cancel_delayed_work_sync(&hdev->le_scan_restart);
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index f31420f58525..7f8df258e295 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -22,9 +22,13 @@
#include <asm/unaligned.h>
+#define HCI_REQ_DONE 0
+#define HCI_REQ_PEND 1
+#define HCI_REQ_CANCELED 2
+
#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock)
#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
struct hci_request {
struct hci_dev *hdev;
struct sk_buff_head cmd_q;
@@ -40,6 +48,8 @@ void hci_req_purge(struct hci_request *req);
bool hci_req_status_pend(struct hci_dev *hdev);
int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
+void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ struct sk_buff *skb);
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
const void *param);
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
@@ -54,7 +64,6 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
unsigned long opt),
unsigned long opt, u32 timeout, u8 *hci_status);
-void hci_req_sync_cancel(struct hci_dev *hdev, int err);
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
const void *param);
@@ -71,8 +80,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req);
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
void hci_req_disable_address_resolution(struct hci_dev *hdev);
-void __hci_req_pause_adv_instances(struct hci_request *req);
-int hci_req_resume_adv_instances(struct hci_dev *hdev);
void hci_req_reenable_advertising(struct hci_dev *hdev);
void __hci_req_enable_advertising(struct hci_request *req);
void __hci_req_disable_advertising(struct hci_request *req);
@@ -117,10 +124,5 @@ int hci_abort_conn(struct hci_conn *conn, u8 reason);
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason);
-static inline void hci_update_background_scan(struct hci_dev *hdev)
-{
- queue_work(hdev->req_workqueue, &hdev->bg_scan_update);
-}
-
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);
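HCI_REQ_DONE, HCI_REQ_PEND and HCI_REQ_CANCELED move into hci_request.h because the new hci_sync.c reuses the same wait/complete handshake as hci_request.c: the submitter marks the request pending, queues the command and sleeps on a wait queue, while the completion callback records the result, flips the state to done (or canceled) and wakes the waiter. The sketch below reduces that handshake to POSIX threads purely for illustration; none of these names are kernel APIs.

#include <pthread.h>
#include <stdio.h>

#define REQ_DONE	0
#define REQ_PEND	1
#define REQ_CANCELED	2

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;
static int req_status = REQ_DONE;
static int req_result;

/* Completion side: plays the role of hci_req_sync_complete(). */
static void request_complete(int result)
{
	pthread_mutex_lock(&lock);
	if (req_status == REQ_PEND) {
		req_result = result;
		req_status = REQ_DONE;
		pthread_cond_signal(&wait_q);	/* wake the waiting submitter */
	}
	pthread_mutex_unlock(&lock);
}

static void *controller_thread(void *arg)
{
	(void)arg;
	request_complete(0);	/* pretend the command succeeded */
	return NULL;
}

int main(void)
{
	pthread_t t;

	/* Submitter side: mark the request pending, then wait for completion. */
	pthread_mutex_lock(&lock);
	req_status = REQ_PEND;
	pthread_mutex_unlock(&lock);

	pthread_create(&t, NULL, controller_thread, NULL);

	pthread_mutex_lock(&lock);
	while (req_status == REQ_PEND)
		pthread_cond_wait(&wait_q, &lock);
	printf("request finished, result %d\n", req_result);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}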
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index d0dad1fafe07..33b3c0ffc339 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -889,10 +889,6 @@ static int hci_sock_release(struct socket *sock)
}
sock_orphan(sk);
-
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
-
release_sock(sk);
sock_put(sk);
return 0;
@@ -1915,7 +1911,8 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
sockptr_t optval, unsigned int len)
{
struct sock *sk = sock->sk;
- int err = 0, opt = 0;
+ int err = 0;
+ u16 opt;
BT_DBG("sk %p, opt %d", sk, optname);
@@ -1941,7 +1938,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
goto done;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u16))) {
+ if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
err = -EFAULT;
break;
}
@@ -2058,6 +2055,12 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
return err;
}
+static void hci_sock_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+}
+
static const struct proto_ops hci_sock_ops = {
.family = PF_BLUETOOTH,
.owner = THIS_MODULE,
@@ -2111,6 +2114,7 @@ static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
sock->state = SS_UNCONNECTED;
sk->sk_state = BT_OPEN;
+ sk->sk_destruct = hci_sock_destruct;
bt_sock_link(&hci_sk_list, sk);
return 0;
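The new hci_sync.c that follows serializes deferred work: hci_cmd_sync_queue() appends an entry (func, data, destroy) to a mutex-protected list, and hci_cmd_sync_work() pops one entry at a time and runs its callback with the request lock held. A minimal userspace sketch of that queue-and-drain shape is given below; the names and the malloc-based list are illustrative assumptions, not the kernel implementation.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work_entry {
	struct work_entry *next;
	int (*func)(void *data);
	void (*destroy)(void *data, int err);
	void *data;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct work_entry *head;

/* Producer: append an entry, as hci_cmd_sync_queue() appends to its list
 * before kicking the worker.
 */
static int queue_entry(int (*func)(void *), void *data,
		       void (*destroy)(void *, int))
{
	struct work_entry *entry = malloc(sizeof(*entry));
	struct work_entry **pos;

	if (!entry)
		return -1;

	entry->func = func;
	entry->destroy = destroy;
	entry->data = data;
	entry->next = NULL;

	pthread_mutex_lock(&list_lock);
	for (pos = &head; *pos; pos = &(*pos)->next)
		;			/* walk to the tail */
	*pos = entry;
	pthread_mutex_unlock(&list_lock);

	return 0;
}

/* Consumer: pop and run one entry at a time, like hci_cmd_sync_work(). */
static void run_one(void)
{
	struct work_entry *entry;
	int err;

	pthread_mutex_lock(&list_lock);
	entry = head;
	if (entry)
		head = entry->next;
	pthread_mutex_unlock(&list_lock);

	if (!entry)
		return;

	err = entry->func(entry->data);
	if (entry->destroy)
		entry->destroy(entry->data, err);
	free(entry);
}

static int say_hello(void *data)
{
	printf("running: %s\n", (const char *)data);
	return 0;
}

int main(void)
{
	static char msg[] = "first queued callback";

	queue_entry(say_hello, msg, NULL);
	run_one();
	return 0;
}

Draining one entry per invocation keeps the callbacks strictly ordered, which is the point of funneling everything through a single work item.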
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
new file mode 100644
index 000000000000..0feb68f12545
--- /dev/null
+++ b/net/bluetooth/hci_sync.c
@@ -0,0 +1,5281 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BlueZ - Bluetooth protocol stack for Linux
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <linux/property.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/mgmt.h>
+
+#include "hci_request.h"
+#include "hci_debugfs.h"
+#include "smp.h"
+#include "eir.h"
+#include "msft.h"
+#include "aosp.h"
+#include "leds.h"
+
+static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
+ struct sk_buff *skb)
+{
+ bt_dev_dbg(hdev, "result 0x%2.2x", result);
+
+ if (hdev->req_status != HCI_REQ_PEND)
+ return;
+
+ hdev->req_result = result;
+ hdev->req_status = HCI_REQ_DONE;
+
+ if (skb) {
+ struct sock *sk = hci_skb_sk(skb);
+
+ /* Drop sk reference if set */
+ if (sk)
+ sock_put(sk);
+
+ hdev->req_skb = skb_get(skb);
+ }
+
+ wake_up_interruptible(&hdev->req_wait_q);
+}
+
+static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode,
+ u32 plen, const void *param,
+ struct sock *sk)
+{
+ int len = HCI_COMMAND_HDR_SIZE + plen;
+ struct hci_command_hdr *hdr;
+ struct sk_buff *skb;
+
+ skb = bt_skb_alloc(len, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
+ hdr->opcode = cpu_to_le16(opcode);
+ hdr->plen = plen;
+
+ if (plen)
+ skb_put_data(skb, param, plen);
+
+ bt_dev_dbg(hdev, "skb len %d", skb->len);
+
+ hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+ hci_skb_opcode(skb) = opcode;
+
+ /* Grab a reference if command needs to be associated with a sock (e.g.
+ * likely mgmt socket that initiated the command).
+ */
+ if (sk) {
+ hci_skb_sk(skb) = sk;
+ sock_hold(sk);
+ }
+
+ return skb;
+}
+
+static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
+ const void *param, u8 event, struct sock *sk)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+
+ bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
+
+ /* If an error occurred during request building, there is no point in
+ * queueing the HCI command. We can simply return.
+ */
+ if (req->err)
+ return;
+
+ skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
+ if (!skb) {
+ bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
+ opcode);
+ req->err = -ENOMEM;
+ return;
+ }
+
+ if (skb_queue_empty(&req->cmd_q))
+ bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
+
+ hci_skb_event(skb) = event;
+
+ skb_queue_tail(&req->cmd_q, skb);
+}
+
+static int hci_cmd_sync_run(struct hci_request *req)
+{
+ struct hci_dev *hdev = req->hdev;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
+
+ /* If an error occurred during request building, remove all HCI
+ * commands queued on the HCI request queue.
+ */
+ if (req->err) {
+ skb_queue_purge(&req->cmd_q);
+ return req->err;
+ }
+
+ /* Do not allow empty requests */
+ if (skb_queue_empty(&req->cmd_q))
+ return -ENODATA;
+
+ skb = skb_peek_tail(&req->cmd_q);
+ bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
+ bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
+
+ spin_lock_irqsave(&hdev->cmd_q.lock, flags);
+ skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
+ spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+
+ return 0;
+}
+
+/* This function requires the caller holds hdev->req_lock. */
+struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk)
+{
+ struct hci_request req;
+ struct sk_buff *skb;
+ int err = 0;
+
+ bt_dev_dbg(hdev, "Opcode 0x%4x", opcode);
+
+ hci_req_init(&req, hdev);
+
+ hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
+
+ hdev->req_status = HCI_REQ_PEND;
+
+ err = hci_cmd_sync_run(&req);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ err = wait_event_interruptible_timeout(hdev->req_wait_q,
+ hdev->req_status != HCI_REQ_PEND,
+ timeout);
+
+ if (err == -ERESTARTSYS)
+ return ERR_PTR(-EINTR);
+
+ switch (hdev->req_status) {
+ case HCI_REQ_DONE:
+ err = -bt_to_errno(hdev->req_result);
+ break;
+
+ case HCI_REQ_CANCELED:
+ err = -hdev->req_result;
+ break;
+
+ default:
+ err = -ETIMEDOUT;
+ break;
+ }
+
+ hdev->req_status = 0;
+ hdev->req_result = 0;
+ skb = hdev->req_skb;
+ hdev->req_skb = NULL;
+
+ bt_dev_dbg(hdev, "end: err %d", err);
+
+ if (err < 0) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+
+ return skb;
+}
+EXPORT_SYMBOL(__hci_cmd_sync_sk);
+
+/* This function requires the caller holds hdev->req_lock. */
+struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout)
+{
+ return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
+}
+EXPORT_SYMBOL(__hci_cmd_sync);
+
+/* Send HCI command and wait for command complete event */
+struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout)
+{
+ struct sk_buff *skb;
+
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return ERR_PTR(-ENETDOWN);
+
+ bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
+
+ hci_req_sync_lock(hdev);
+ skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
+ hci_req_sync_unlock(hdev);
+
+ return skb;
+}
+EXPORT_SYMBOL(hci_cmd_sync);
+
+/* This function requires the caller holds hdev->req_lock. */
+struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout)
+{
+ return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
+ NULL);
+}
+EXPORT_SYMBOL(__hci_cmd_sync_ev);
+
+/* This function requires the caller holds hdev->req_lock. */
+int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u8 event, u32 timeout,
+ struct sock *sk)
+{
+ struct sk_buff *skb;
+ u8 status;
+
+ skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ /* If the command returns only a status event, skb will be set to NULL
+ * as there are no parameters; in case of failure IS_ERR(skb) would be
+ * true and the actual error could be found with PTR_ERR(skb).
+ */
+ if (!skb)
+ return 0;
+
+ status = skb->data[0];
+
+ kfree_skb(skb);
+
+ return status;
+}
+EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
+
+int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+ const void *param, u32 timeout)
+{
+ return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
+ NULL);
+}
+EXPORT_SYMBOL(__hci_cmd_sync_status);
+
+static void hci_cmd_sync_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
+ struct hci_cmd_sync_work_entry *entry;
+ hci_cmd_sync_work_func_t func;
+ hci_cmd_sync_work_destroy_t destroy;
+ void *data;
+
+ bt_dev_dbg(hdev, "");
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
+ struct hci_cmd_sync_work_entry, list);
+ if (entry) {
+ list_del(&entry->list);
+ func = entry->func;
+ data = entry->data;
+ destroy = entry->destroy;
+ kfree(entry);
+ } else {
+ func = NULL;
+ data = NULL;
+ destroy = NULL;
+ }
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ if (func) {
+ int err;
+
+ hci_req_sync_lock(hdev);
+
+ err = func(hdev, data);
+
+ if (destroy)
+ destroy(hdev, data, err);
+
+ hci_req_sync_unlock(hdev);
+ }
+}
+
+static void hci_cmd_sync_cancel_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);
+
+ cancel_delayed_work_sync(&hdev->cmd_timer);
+ cancel_delayed_work_sync(&hdev->ncmd_timer);
+ atomic_set(&hdev->cmd_cnt, 1);
+
+ wake_up_interruptible(&hdev->req_wait_q);
+}
+
+void hci_cmd_sync_init(struct hci_dev *hdev)
+{
+ INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
+ INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
+ mutex_init(&hdev->cmd_sync_work_lock);
+
+ INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
+}
+
+void hci_cmd_sync_clear(struct hci_dev *hdev)
+{
+ struct hci_cmd_sync_work_entry *entry, *tmp;
+
+ cancel_work_sync(&hdev->cmd_sync_work);
+
+ list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+ if (entry->destroy)
+ entry->destroy(hdev, entry->data, -ECANCELED);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
+
+void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+{
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
+
+ if (hdev->req_status == HCI_REQ_PEND) {
+ hdev->req_result = err;
+ hdev->req_status = HCI_REQ_CANCELED;
+
+ cancel_delayed_work_sync(&hdev->cmd_timer);
+ cancel_delayed_work_sync(&hdev->ncmd_timer);
+ atomic_set(&hdev->cmd_cnt, 1);
+
+ wake_up_interruptible(&hdev->req_wait_q);
+ }
+}
+
+void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+{
+ bt_dev_dbg(hdev, "err 0x%2.2x", err);
+
+ if (hdev->req_status == HCI_REQ_PEND) {
+ hdev->req_result = err;
+ hdev->req_status = HCI_REQ_CANCELED;
+
+ queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
+ }
+}
+EXPORT_SYMBOL(hci_cmd_sync_cancel);
+
+int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+ void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+ struct hci_cmd_sync_work_entry *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->func = func;
+ entry->data = data;
+ entry->destroy = destroy;
+
+ mutex_lock(&hdev->cmd_sync_work_lock);
+ list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
+ mutex_unlock(&hdev->cmd_sync_work_lock);
+
+ queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(hci_cmd_sync_queue);
+
+int hci_update_eir_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_eir cp;
+
+ bt_dev_dbg(hdev, "");
+
+ if (!hdev_is_powered(hdev))
+ return 0;
+
+ if (!lmp_ext_inq_capable(hdev))
+ return 0;
+
+ if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ return 0;
+
+ if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ eir_create(hdev, cp.data);
+
+ if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
+ return 0;
+
+ memcpy(hdev->eir, cp.data, sizeof(cp.data));
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static u8 get_service_classes(struct hci_dev *hdev)
+{
+ struct bt_uuid *uuid;
+ u8 val = 0;
+
+ list_for_each_entry(uuid, &hdev->uuids, list)
+ val |= uuid->svc_hint;
+
+ return val;
+}
+
+int hci_update_class_sync(struct hci_dev *hdev)
+{
+ u8 cod[3];
+
+ bt_dev_dbg(hdev, "");
+
+ if (!hdev_is_powered(hdev))
+ return 0;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
+ return 0;
+
+ cod[0] = hdev->minor_class;
+ cod[1] = hdev->major_class;
+ cod[2] = get_service_classes(hdev);
+
+ if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
+ cod[1] |= 0x20;
+
+ if (memcmp(cod, hdev->dev_class, 3) == 0)
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
+ sizeof(cod), cod, HCI_CMD_TIMEOUT);
+}
+
+static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
+{
+ /* If there is no connection we are OK to advertise. */
+ if (hci_conn_num(hdev, LE_LINK) == 0)
+ return true;
+
+ /* Check le_states if there is any connection in peripheral role. */
+ if (hdev->conn_hash.le_num_peripheral > 0) {
+ /* Peripheral connection state and non connectable mode
+ * bit 20.
+ */
+ if (!connectable && !(hdev->le_states[2] & 0x10))
+ return false;
+
+ /* Peripheral connection state and connectable mode bit 38
+ * and scannable bit 21.
+ */
+ if (connectable && (!(hdev->le_states[4] & 0x40) ||
+ !(hdev->le_states[2] & 0x20)))
+ return false;
+ }
+
+ /* Check le_states if there is any connection in central role. */
+ if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
+ /* Central connection state and non connectable mode bit 18. */
+ if (!connectable && !(hdev->le_states[2] & 0x02))
+ return false;
+
+ /* Central connection state and connectable mode bit 35 and
+ * scannable 19.
+ */
+ if (connectable && (!(hdev->le_states[4] & 0x08) ||
+ !(hdev->le_states[2] & 0x08)))
+ return false;
+ }
+
+ return true;
+}
+
+static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
+{
+ /* If privacy is not enabled don't use RPA */
+ if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
+ return false;
+
+ /* If basic privacy mode is enabled use RPA */
+ if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
+ return true;
+
+ /* If limited privacy mode is enabled don't use RPA if we're
+ * both discoverable and bondable.
+ */
+ if ((flags & MGMT_ADV_FLAG_DISCOV) &&
+ hci_dev_test_flag(hdev, HCI_BONDABLE))
+ return false;
+
+ /* We're neither bondable nor discoverable in the limited
+ * privacy mode, therefore use RPA.
+ */
+ return true;
+}
+
+static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
+{
+ /* If we're advertising or initiating an LE connection we can't
+ * go ahead and change the random address at this time. This is
+ * because the eventual initiator address used for the
+ * subsequently created connection will be undefined (some
+ * controllers use the new address and others the one we had
+ * when the operation started).
+ *
+ * In this kind of scenario skip the update and let the random
+ * address be updated at the next cycle.
+ */
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
+ hci_lookup_le_connect(hdev)) {
+ bt_dev_dbg(hdev, "Deferring random address update");
+ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+ return 0;
+ }
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
+ 6, rpa, HCI_CMD_TIMEOUT);
+}
+
+int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
+ bool rpa, u8 *own_addr_type)
+{
+ int err;
+
+ /* If privacy is enabled use a resolvable private address. If
+ * the current RPA has expired or something other than the
+ * current RPA is in use, then generate a new one.
+ */
+ if (rpa) {
+ /* If the controller supports LL Privacy, use own address type
+ * 0x03 (resolvable private address).
+ */
+ if (use_ll_privacy(hdev))
+ *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
+ else
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+
+ /* Check if RPA is valid */
+ if (rpa_valid(hdev))
+ return 0;
+
+ err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
+ if (err < 0) {
+ bt_dev_err(hdev, "failed to generate new RPA");
+ return err;
+ }
+
+ err = hci_set_random_addr_sync(hdev, &hdev->rpa);
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ /* In case of required privacy without a resolvable private address,
+ * use a non-resolvable private address. This is useful for active
+ * scanning and non-connectable advertising.
+ */
+ if (require_privacy) {
+ bdaddr_t nrpa;
+
+ while (true) {
+ /* The non-resolvable private address is generated
+ * from random six bytes with the two most significant
+ * bits cleared.
+ */
+ get_random_bytes(&nrpa, 6);
+ nrpa.b[5] &= 0x3f;
+
+ /* The non-resolvable private address shall not be
+ * equal to the public address.
+ */
+ if (bacmp(&hdev->bdaddr, &nrpa))
+ break;
+ }
+
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+
+ return hci_set_random_addr_sync(hdev, &nrpa);
+ }
+
+ /* If forcing static address is in use or there is no public
+ * address use the static address as random address (but skip
+ * the HCI command if the current random address is already the
+ * static one).
+ *
+ * In case BR/EDR has been disabled on a dual-mode controller
+ * and a static address has been configured, then use that
+ * address instead of the public BR/EDR address.
+ */
+ if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+ (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
+ bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
+ if (bacmp(&hdev->static_addr, &hdev->random_addr))
+ return hci_set_random_addr_sync(hdev,
+ &hdev->static_addr);
+ return 0;
+ }
+
+ /* Neither privacy nor static address is being used so use a
+ * public address.
+ */
+ *own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+ return 0;
+}
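+
+/* Worked example for the NRPA masking above (illustrative only): if the
+ * randomly generated top byte nrpa.b[5] is 0xc7, then 0xc7 & 0x3f = 0x07, so
+ * the two most significant bits end up cleared as required for a
+ * non-resolvable private address.
+ */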
+
+static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct hci_cp_le_set_ext_adv_enable *cp;
+ struct hci_cp_ext_adv_set *set;
+ u8 data[sizeof(*cp) + sizeof(*set) * 1];
+ u8 size;
+
+ /* If request specifies an instance that doesn't exist, fail */
+ if (instance > 0) {
+ struct adv_info *adv;
+
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv)
+ return -EINVAL;
+
+ /* If not enabled there is nothing to do */
+ if (!adv->enabled)
+ return 0;
+ }
+
+ memset(data, 0, sizeof(data));
+
+ cp = (void *)data;
+ set = (void *)cp->data;
+
+ /* Instance 0x00 indicates all advertising instances will be disabled */
+ cp->num_of_sets = !!instance;
+ cp->enable = 0x00;
+
+ set->handle = instance;
+
+ size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
+ size, data, HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
+ bdaddr_t *random_addr)
+{
+ struct hci_cp_le_set_adv_set_rand_addr cp;
+ int err;
+
+ if (!instance) {
+ /* Instance 0x00 doesn't have an adv_info, instead it uses
+ * hdev->random_addr to track its address so whenever it needs
+ * to be updated this also sets the random address since
+ * hdev->random_addr is shared with the scan state machine.
+ */
+ err = hci_set_random_addr_sync(hdev, random_addr);
+ if (err)
+ return err;
+ }
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.handle = instance;
+ bacpy(&cp.bdaddr, random_addr);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct hci_cp_le_set_ext_adv_params cp;
+ bool connectable;
+ u32 flags;
+ bdaddr_t random_addr;
+ u8 own_addr_type;
+ int err;
+ struct adv_info *adv;
+ bool secondary_adv;
+
+ if (instance > 0) {
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv)
+ return -EINVAL;
+ } else {
+ adv = NULL;
+ }
+
+ /* Updating parameters of an active instance will return a
+ * Command Disallowed error, so we must first disable the
+ * instance if it is active.
+ */
+ if (adv && !adv->pending) {
+ err = hci_disable_ext_adv_instance_sync(hdev, instance);
+ if (err)
+ return err;
+ }
+
+ flags = hci_adv_instance_flags(hdev, instance);
+
+ /* If the "connectable" instance flag was not set, then choose between
+ * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+ */
+ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+ mgmt_get_connectable(hdev);
+
+ if (!is_advertising_allowed(hdev, connectable))
+ return -EPERM;
+
+ /* Set require_privacy to true only when non-connectable
+ * advertising is used. In that case it is fine to use a
+ * non-resolvable private address.
+ */
+ err = hci_get_random_address(hdev, !connectable,
+ adv_use_rpa(hdev, flags), adv,
+ &own_addr_type, &random_addr);
+ if (err < 0)
+ return err;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (adv) {
+ hci_cpu_to_le24(adv->min_interval, cp.min_interval);
+ hci_cpu_to_le24(adv->max_interval, cp.max_interval);
+ cp.tx_power = adv->tx_power;
+ } else {
+ hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
+ hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
+ cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
+ }
+
+ secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
+
+ if (connectable) {
+ if (secondary_adv)
+ cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
+ else
+ cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
+ } else if (hci_adv_instance_is_scannable(hdev, instance) ||
+ (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
+ if (secondary_adv)
+ cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
+ else
+ cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
+ } else {
+ if (secondary_adv)
+ cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
+ else
+ cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
+ }
+
+ /* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
+ * contains the peer’s Identity Address and the Peer_Address_Type
+ * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
+ * These parameters are used to locate the corresponding local IRK in
+ * the resolving list; this IRK is used to generate the controller's own
+ * address used in the advertisement.
+ */
+ if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
+ hci_copy_identity_address(hdev, &cp.peer_addr,
+ &cp.peer_addr_type);
+
+ cp.own_addr_type = own_addr_type;
+ cp.channel_map = hdev->le_adv_channel_map;
+ cp.handle = instance;
+
+ if (flags & MGMT_ADV_FLAG_SEC_2M) {
+ cp.primary_phy = HCI_ADV_PHY_1M;
+ cp.secondary_phy = HCI_ADV_PHY_2M;
+ } else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
+ cp.primary_phy = HCI_ADV_PHY_CODED;
+ cp.secondary_phy = HCI_ADV_PHY_CODED;
+ } else {
+ /* In all other cases use 1M */
+ cp.primary_phy = HCI_ADV_PHY_1M;
+ cp.secondary_phy = HCI_ADV_PHY_1M;
+ }
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+
+ if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
+ own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
+ bacmp(&random_addr, BDADDR_ANY)) {
+ /* Check if random address needs to be updated */
+ if (adv) {
+ if (!bacmp(&random_addr, &adv->random_addr))
+ return 0;
+ } else {
+ if (!bacmp(&random_addr, &hdev->random_addr))
+ return 0;
+ }
+
+ return hci_set_adv_set_random_addr_sync(hdev, instance,
+ &random_addr);
+ }
+
+ return 0;
+}
+
+static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct {
+ struct hci_cp_le_set_ext_scan_rsp_data cp;
+ u8 data[HCI_MAX_EXT_AD_LENGTH];
+ } pdu;
+ u8 len;
+
+ memset(&pdu, 0, sizeof(pdu));
+
+ len = eir_create_scan_rsp(hdev, instance, pdu.data);
+
+ if (hdev->scan_rsp_data_len == len &&
+ !memcmp(pdu.data, hdev->scan_rsp_data, len))
+ return 0;
+
+ memcpy(hdev->scan_rsp_data, pdu.data, len);
+ hdev->scan_rsp_data_len = len;
+
+ pdu.cp.handle = instance;
+ pdu.cp.length = len;
+ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
+ sizeof(pdu.cp) + len, &pdu.cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct hci_cp_le_set_scan_rsp_data cp;
+ u8 len;
+
+ memset(&cp, 0, sizeof(cp));
+
+ len = eir_create_scan_rsp(hdev, instance, cp.data);
+
+ if (hdev->scan_rsp_data_len == len &&
+ !memcmp(cp.data, hdev->scan_rsp_data, len))
+ return 0;
+
+ memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
+ hdev->scan_rsp_data_len = len;
+
+ cp.length = len;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return 0;
+
+ if (ext_adv_capable(hdev))
+ return hci_set_ext_scan_rsp_data_sync(hdev, instance);
+
+ return __hci_set_scan_rsp_data_sync(hdev, instance);
+}
+
+int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct hci_cp_le_set_ext_adv_enable *cp;
+ struct hci_cp_ext_adv_set *set;
+ u8 data[sizeof(*cp) + sizeof(*set) * 1];
+ struct adv_info *adv;
+
+ if (instance > 0) {
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv)
+ return -EINVAL;
+ /* If already enabled there is nothing to do */
+ if (adv->enabled)
+ return 0;
+ } else {
+ adv = NULL;
+ }
+
+ cp = (void *)data;
+ set = (void *)cp->data;
+
+ memset(cp, 0, sizeof(*cp));
+
+ cp->enable = 0x01;
+ cp->num_of_sets = 0x01;
+
+ memset(set, 0, sizeof(*set));
+
+ set->handle = instance;
+
+ /* Set duration per instance since controller is responsible for
+ * scheduling it.
+ */
+ if (adv && adv->timeout) {
+ u16 duration = adv->timeout * MSEC_PER_SEC;
+
+ /* Time = N * 10 ms */
+ set->duration = cpu_to_le16(duration / 10);
+ }
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
+ sizeof(*cp) +
+ sizeof(*set) * cp->num_of_sets,
+ data, HCI_CMD_TIMEOUT);
+}
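+
+/* Worked example for the duration conversion above (illustrative only): an
+ * instance with adv->timeout = 5 seconds gives duration = 5 * MSEC_PER_SEC =
+ * 5000 ms, and with the controller unit of Time = N * 10 ms this becomes
+ * set->duration = 5000 / 10 = 500 (0x01f4).
+ */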
+
+int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
+{
+ int err;
+
+ err = hci_setup_ext_adv_instance_sync(hdev, instance);
+ if (err)
+ return err;
+
+ err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
+ if (err)
+ return err;
+
+ return hci_enable_ext_advertising_sync(hdev, instance);
+}
+
+static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
+{
+ int err;
+
+ if (ext_adv_capable(hdev))
+ return hci_start_ext_adv_sync(hdev, instance);
+
+ err = hci_update_adv_data_sync(hdev, instance);
+ if (err)
+ return err;
+
+ err = hci_update_scan_rsp_data_sync(hdev, instance);
+ if (err)
+ return err;
+
+ return hci_enable_advertising_sync(hdev);
+}
+
+int hci_enable_advertising_sync(struct hci_dev *hdev)
+{
+ struct adv_info *adv_instance;
+ struct hci_cp_le_set_adv_param cp;
+ u8 own_addr_type, enable = 0x01;
+ bool connectable;
+ u16 adv_min_interval, adv_max_interval;
+ u32 flags;
+ u8 status;
+
+ if (ext_adv_capable(hdev))
+ return hci_enable_ext_advertising_sync(hdev,
+ hdev->cur_adv_instance);
+
+ flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
+ adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
+
+ /* If the "connectable" instance flag was not set, then choose between
+ * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+ */
+ connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+ mgmt_get_connectable(hdev);
+
+ if (!is_advertising_allowed(hdev, connectable))
+ return -EINVAL;
+
+ status = hci_disable_advertising_sync(hdev);
+ if (status)
+ return status;
+
+ /* Clear the HCI_LE_ADV bit temporarily so that the
+ * hci_update_random_address knows that it's safe to go ahead
+ * and write a new random address. The flag will be set back on
+ * as soon as the SET_ADV_ENABLE HCI command completes.
+ */
+ hci_dev_clear_flag(hdev, HCI_LE_ADV);
+
+ /* Set require_privacy to true only when non-connectable
+ * advertising is used. In that case it is fine to use a
+ * non-resolvable private address.
+ */
+ status = hci_update_random_address_sync(hdev, !connectable,
+ adv_use_rpa(hdev, flags),
+ &own_addr_type);
+ if (status)
+ return status;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (adv_instance) {
+ adv_min_interval = adv_instance->min_interval;
+ adv_max_interval = adv_instance->max_interval;
+ } else {
+ adv_min_interval = hdev->le_adv_min_interval;
+ adv_max_interval = hdev->le_adv_max_interval;
+ }
+
+ if (connectable) {
+ cp.type = LE_ADV_IND;
+ } else {
+ if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
+ cp.type = LE_ADV_SCAN_IND;
+ else
+ cp.type = LE_ADV_NONCONN_IND;
+
+ if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
+ hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
+ adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
+ adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
+ }
+ }
+
+ cp.min_interval = cpu_to_le16(adv_min_interval);
+ cp.max_interval = cpu_to_le16(adv_max_interval);
+ cp.own_address_type = own_addr_type;
+ cp.channel_map = hdev->le_adv_channel_map;
+
+ status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (status)
+ return status;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
+ sizeof(enable), &enable, HCI_CMD_TIMEOUT);
+}
+
+static int enable_advertising_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_enable_advertising_sync(hdev);
+}
+
+int hci_enable_advertising(struct hci_dev *hdev)
+{
+ if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ list_empty(&hdev->adv_instances))
+ return 0;
+
+ return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
+}
+
+int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ struct sock *sk)
+{
+ int err;
+
+ if (!ext_adv_capable(hdev))
+ return 0;
+
+ err = hci_disable_ext_adv_instance_sync(hdev, instance);
+ if (err)
+ return err;
+
+ /* If request specifies an instance that doesn't exist, fail */
+ if (instance > 0 && !hci_find_adv_instance(hdev, instance))
+ return -EINVAL;
+
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
+ sizeof(instance), &instance, 0,
+ HCI_CMD_TIMEOUT, sk);
+}
+
+static void cancel_adv_timeout(struct hci_dev *hdev)
+{
+ if (hdev->adv_instance_timeout) {
+ hdev->adv_instance_timeout = 0;
+ cancel_delayed_work(&hdev->adv_instance_expire);
+ }
+}
+
+static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct {
+ struct hci_cp_le_set_ext_adv_data cp;
+ u8 data[HCI_MAX_EXT_AD_LENGTH];
+ } pdu;
+ u8 len;
+
+ memset(&pdu, 0, sizeof(pdu));
+
+ len = eir_create_adv_data(hdev, instance, pdu.data);
+
+ /* There's nothing to do if the data hasn't changed */
+ if (hdev->adv_data_len == len &&
+ memcmp(pdu.data, hdev->adv_data, len) == 0)
+ return 0;
+
+ memcpy(hdev->adv_data, pdu.data, len);
+ hdev->adv_data_len = len;
+
+ pdu.cp.length = len;
+ pdu.cp.handle = instance;
+ pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+ pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
+ sizeof(pdu.cp) + len, &pdu.cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ struct hci_cp_le_set_adv_data cp;
+ u8 len;
+
+ memset(&cp, 0, sizeof(cp));
+
+ len = eir_create_adv_data(hdev, instance, cp.data);
+
+ /* There's nothing to do if the data hasn't changed */
+ if (hdev->adv_data_len == len &&
+ memcmp(cp.data, hdev->adv_data, len) == 0)
+ return 0;
+
+ memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+ hdev->adv_data_len = len;
+
+ cp.length = len;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return 0;
+
+ if (ext_adv_capable(hdev))
+ return hci_set_ext_adv_data_sync(hdev, instance);
+
+ return hci_set_adv_data_sync(hdev, instance);
+}
+
+int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+ bool force)
+{
+ struct adv_info *adv = NULL;
+ u16 timeout;
+
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
+ return -EPERM;
+
+ if (hdev->adv_instance_timeout)
+ return -EBUSY;
+
+ adv = hci_find_adv_instance(hdev, instance);
+ if (!adv)
+ return -ENOENT;
+
+ /* A zero timeout means unlimited advertising. As long as there is
+ * only one instance, duration should be ignored. We still set a timeout
+ * in case further instances are being added later on.
+ *
+ * If the remaining lifetime of the instance is more than the duration
+ * then the timeout corresponds to the duration, otherwise it will be
+ * reduced to the remaining instance lifetime.
+ */
+ if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
+ timeout = adv->duration;
+ else
+ timeout = adv->remaining_time;
+
+ /* The remaining time is being reduced unless the instance is being
+ * advertised without time limit.
+ */
+ if (adv->timeout)
+ adv->remaining_time = adv->remaining_time - timeout;
+
+ /* Only use work for scheduling instances with legacy advertising */
+ if (!ext_adv_capable(hdev)) {
+ hdev->adv_instance_timeout = timeout;
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->adv_instance_expire,
+ msecs_to_jiffies(timeout * 1000));
+ }
+
+ /* If we're just re-scheduling the same instance again then do not
+ * execute any HCI commands. This happens when a single instance is
+ * being advertised.
+ */
+ if (!force && hdev->cur_adv_instance == instance &&
+ hci_dev_test_flag(hdev, HCI_LE_ADV))
+ return 0;
+
+ hdev->cur_adv_instance = instance;
+
+ return hci_start_adv_sync(hdev, instance);
+}
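+
+/* Worked example for the timeout selection above (illustrative only),
+ * assuming a non-zero adv->timeout: with adv->duration = 10 s and
+ * adv->remaining_time = 4 s the instance is scheduled for 4 s and
+ * remaining_time drops to 0; with adv->remaining_time = 30 s it is scheduled
+ * for the full 10 s and 20 s remain.
+ */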
+
+static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
+{
+ int err;
+
+ if (!ext_adv_capable(hdev))
+ return 0;
+
+ /* Disable instance 0x00 to disable all instances */
+ err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
+ if (err)
+ return err;
+
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
+ 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
+}
+
+static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
+{
+ struct adv_info *adv, *n;
+
+ if (ext_adv_capable(hdev))
+ /* Remove all existing sets */
+ return hci_clear_adv_sets_sync(hdev, sk);
+
+ /* This is safe as long as no command is sent while the lock is
+ * held.
+ */
+ hci_dev_lock(hdev);
+
+ /* Cleanup non-ext instances */
+ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
+ u8 instance = adv->instance;
+ int err;
+
+ if (!(force || adv->timeout))
+ continue;
+
+ err = hci_remove_adv_instance(hdev, instance);
+ if (!err)
+ mgmt_advertising_removed(sk, hdev, instance);
+ }
+
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
+ struct sock *sk)
+{
+ int err;
+
+ /* If we use extended advertising, the instance has to be removed first. */
+ if (ext_adv_capable(hdev))
+ return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
+
+ /* This is safe as long as no command is sent while the lock is
+ * held.
+ */
+ hci_dev_lock(hdev);
+
+ err = hci_remove_adv_instance(hdev, instance);
+ if (!err)
+ mgmt_advertising_removed(sk, hdev, instance);
+
+ hci_dev_unlock(hdev);
+
+ return err;
+}
+
+/* For a single instance:
+ * - force == true: The instance will be removed even when its remaining
+ * lifetime is not zero.
+ * - force == false: the instance will be deactivated but kept stored unless
+ * the remaining lifetime is zero.
+ *
+ * For instance == 0x00:
+ * - force == true: All instances will be removed regardless of their timeout
+ * setting.
+ * - force == false: Only instances that have a timeout will be removed.
+ */
+int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
+ u8 instance, bool force)
+{
+ struct adv_info *next = NULL;
+ int err;
+
+ /* Cancel any timeout concerning the removed instance(s). */
+ if (!instance || hdev->cur_adv_instance == instance)
+ cancel_adv_timeout(hdev);
+
+ /* Get the next instance to advertise BEFORE we remove
+ * the current one. This can be the same instance again
+ * if there is only one instance.
+ */
+ if (hdev->cur_adv_instance == instance)
+ next = hci_get_next_instance(hdev, instance);
+
+ if (!instance) {
+ err = hci_clear_adv_sync(hdev, sk, force);
+ if (err)
+ return err;
+ } else {
+ struct adv_info *adv = hci_find_adv_instance(hdev, instance);
+
+ if (force || (adv && adv->timeout && !adv->remaining_time)) {
+ /* Don't advertise a removed instance. */
+ if (next && next->instance == instance)
+ next = NULL;
+
+ err = hci_remove_adv_sync(hdev, instance, sk);
+ if (err)
+ return err;
+ }
+ }
+
+ if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ return 0;
+
+ if (next && !ext_adv_capable(hdev))
+ hci_schedule_adv_instance_sync(hdev, next->instance, false);
+
+ return 0;
+}
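+
+/* Illustrative calls, not part of this change: following the rules above,
+ * hci_remove_advertising_sync(hdev, sk, 0x00, true) removes every instance,
+ * while hci_remove_advertising_sync(hdev, sk, 0x00, false) only removes the
+ * instances that have a timeout configured.
+ */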
+
+int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
+{
+ struct hci_cp_read_rssi cp;
+
+ cp.handle = handle;
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
+ sizeof(*cp), cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
+{
+ struct hci_cp_read_tx_power cp;
+
+ cp.handle = handle;
+ cp.type = type;
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_disable_advertising_sync(struct hci_dev *hdev)
+{
+ u8 enable = 0x00;
+
+ /* If controller is not advertising we are done. */
+ if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
+ return 0;
+
+ if (ext_adv_capable(hdev))
+ return hci_disable_ext_adv_instance_sync(hdev, 0x00);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
+ sizeof(enable), &enable, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
+ u8 filter_dup)
+{
+ struct hci_cp_le_set_ext_scan_enable cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = val;
+ cp.filter_dup = filter_dup;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
+ u8 filter_dup)
+{
+ struct hci_cp_le_set_scan_enable cp;
+
+ if (use_ext_scan(hdev))
+ return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.enable = val;
+ cp.filter_dup = filter_dup;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
+{
+ if (!use_ll_privacy(hdev))
+ return 0;
+
+ /* If the resolving state already matches the requested value we are done. */
+ if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
+ sizeof(val), &val, HCI_CMD_TIMEOUT);
+}
+
+static int hci_scan_disable_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ /* If controller is not scanning we are done. */
+ if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
+ return 0;
+
+ if (hdev->scanning_paused) {
+ bt_dev_dbg(hdev, "Scanning is paused for suspend");
+ return 0;
+ }
+
+ err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
+ if (err) {
+ bt_dev_err(hdev, "Unable to disable scanning: %d", err);
+ return err;
+ }
+
+ return err;
+}
+
+static bool scan_use_rpa(struct hci_dev *hdev)
+{
+ return hci_dev_test_flag(hdev, HCI_PRIVACY);
+}
+
+static void hci_start_interleave_scan(struct hci_dev *hdev)
+{
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
+ queue_delayed_work(hdev->req_workqueue,
+ &hdev->interleave_scan, 0);
+}
+
+static bool is_interleave_scanning(struct hci_dev *hdev)
+{
+ return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
+}
+
+static void cancel_interleave_scan(struct hci_dev *hdev)
+{
+ bt_dev_dbg(hdev, "cancelling interleave scan");
+
+ cancel_delayed_work_sync(&hdev->interleave_scan);
+
+ hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
+}
+
+/* Return true if this function started the interleave scan, otherwise
+ * return false.
+ */
+static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
+{
+ /* Do interleaved scan only if all of the following are true:
+ * - There is at least one ADV monitor
+ * - At least one pending LE connection or one device to be scanned for
+ * - Monitor offloading is not supported
+ * If so, we should alternate between allowlist scan and one without
+ * any filters to save power.
+ */
+ bool use_interleaving = hci_is_adv_monitoring(hdev) &&
+ !(list_empty(&hdev->pend_le_conns) &&
+ list_empty(&hdev->pend_le_reports)) &&
+ hci_get_adv_monitor_offload_ext(hdev) ==
+ HCI_ADV_MONITOR_EXT_NONE;
+ bool is_interleaving = is_interleave_scanning(hdev);
+
+ if (use_interleaving && !is_interleaving) {
+ hci_start_interleave_scan(hdev);
+ bt_dev_dbg(hdev, "starting interleave scan");
+ return true;
+ }
+
+ if (!use_interleaving && is_interleaving)
+ cancel_interleave_scan(hdev);
+
+ return false;
+}
+
+/* Removes a device from the resolving list if needed. */
+static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+ struct hci_cp_le_del_from_resolv_list cp;
+ struct bdaddr_list_with_irk *entry;
+
+ if (!use_ll_privacy(hdev))
+ return 0;
+
+ /* Check if the IRK has been programmed */
+ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
+ bdaddr_type);
+ if (!entry)
+ return 0;
+
+ cp.bdaddr_type = bdaddr_type;
+ bacpy(&cp.bdaddr, bdaddr);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
+ bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+ struct hci_cp_le_del_from_accept_list cp;
+ int err;
+
+ /* Check if device is on accept list before removing it */
+ if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
+ return 0;
+
+ cp.bdaddr_type = bdaddr_type;
+ bacpy(&cp.bdaddr, bdaddr);
+
+ /* Ignore errors when removing from the resolving list as it is likely
+ * that the device was never added.
+ */
+ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
+ return err;
+ }
+
+ bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
+ cp.bdaddr_type);
+
+ return 0;
+}
+
+/* Adds a device to the resolving list if needed.
+ * Setting params to NULL programs the local hdev->irk.
+ */
+static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
+ struct hci_conn_params *params)
+{
+ struct hci_cp_le_add_to_resolv_list cp;
+ struct smp_irk *irk;
+ struct bdaddr_list_with_irk *entry;
+
+ if (!use_ll_privacy(hdev))
+ return 0;
+
+ /* Attempt to program local identity address, type and irk if params is
+ * NULL.
+ */
+ if (!params) {
+ if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
+ return 0;
+
+ hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
+ memcpy(cp.peer_irk, hdev->irk, 16);
+ goto done;
+ }
+
+ irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
+ if (!irk)
+ return 0;
+
+ /* Check if the IRK has _not_ been programmed yet. */
+ entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
+ &params->addr,
+ params->addr_type);
+ if (entry)
+ return 0;
+
+ cp.bdaddr_type = params->addr_type;
+ bacpy(&cp.bdaddr, &params->addr);
+ memcpy(cp.peer_irk, irk->val, 16);
+
+done:
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY))
+ memcpy(cp.local_irk, hdev->irk, 16);
+ else
+ memset(cp.local_irk, 0, 16);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* Set Device Privacy Mode. */
+static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
+ struct hci_conn_params *params)
+{
+ struct hci_cp_le_set_privacy_mode cp;
+ struct smp_irk *irk;
+
+ /* If device privacy mode has already been set there is nothing to do */
+ if (params->privacy_mode == HCI_DEVICE_PRIVACY)
+ return 0;
+
+ /* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
+ * indicates that LL Privacy has been enabled and
+ * HCI_OP_LE_SET_PRIVACY_MODE is supported.
+ */
+ if (!test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, params->flags))
+ return 0;
+
+ irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
+ if (!irk)
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.bdaddr_type = irk->addr_type;
+ bacpy(&cp.bdaddr, &irk->bdaddr);
+ cp.mode = HCI_DEVICE_PRIVACY;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* Adds a device to the accept list if needed; if the device uses an RPA (has
+ * an IRK) this also attempts to program it in the resolving list and to
+ * properly set the privacy mode.
+ */
+static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
+ struct hci_conn_params *params,
+ u8 *num_entries)
+{
+ struct hci_cp_le_add_to_accept_list cp;
+ int err;
+
+ /* If the accept list is full, abort so the filter policy falls
+ * back to accepting all advertising.
+ */
+ if (*num_entries >= hdev->le_accept_list_size)
+ return -ENOSPC;
+
+ /* The accept list cannot be used with RPAs */
+ if (!use_ll_privacy(hdev) &&
+ hci_find_irk_by_addr(hdev, &params->addr, params->addr_type)) {
+ return -EINVAL;
+ }
+
+ /* During suspend, only wakeable devices can be in acceptlist */
+ if (hdev->suspended &&
+ !test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, params->flags))
+ return 0;
+
+ /* Attempt to program the device in the resolving list first, to avoid
+ * having to roll back in case it fails, since the resolving list is
+ * dynamic and can probably be smaller than the accept list.
+ */
+ err = hci_le_add_resolve_list_sync(hdev, params);
+ if (err) {
+ bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
+ return err;
+ }
+
+ /* Set Privacy Mode */
+ err = hci_le_set_privacy_mode_sync(hdev, params);
+ if (err) {
+ bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
+ return err;
+ }
+
+ /* Check if already in accept list */
+ if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
+ params->addr_type))
+ return 0;
+
+ *num_entries += 1;
+ cp.bdaddr_type = params->addr_type;
+ bacpy(&cp.bdaddr, &params->addr);
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err) {
+ bt_dev_err(hdev, "Unable to add to allow list: %d", err);
+ /* Rollback the device from the resolving list */
+ hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
+ return err;
+ }
+
+ bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
+ cp.bdaddr_type);
+
+ return 0;
+}
+
+/* This function disables/pauses all advertising instances */
+static int hci_pause_advertising_sync(struct hci_dev *hdev)
+{
+ int err;
+ int old_state;
+
+ /* If advertising has already been paused there is nothing to do. */
+ if (hdev->advertising_paused)
+ return 0;
+
+ bt_dev_dbg(hdev, "Pausing directed advertising");
+
+ /* Stop directed advertising */
+ old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
+ if (old_state) {
+ /* When discoverable timeout triggers, then just make sure
+ * the limited discoverable flag is cleared. Even in the case
+ * of a timeout triggered from general discoverable, it is
+ * safe to unconditionally clear the flag.
+ */
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+ hdev->discov_timeout = 0;
+ }
+
+ bt_dev_dbg(hdev, "Pausing advertising instances");
+
+ /* Call to disable any advertisements active on the controller.
+ * This will succeed even if no advertisements are configured.
+ */
+ err = hci_disable_advertising_sync(hdev);
+ if (err)
+ return err;
+
+ /* If we are using software rotation, pause the loop */
+ if (!ext_adv_capable(hdev))
+ cancel_adv_timeout(hdev);
+
+ hdev->advertising_paused = true;
+ hdev->advertising_old_state = old_state;
+
+ return 0;
+}
+
+/* This function enables all user advertising instances */
+static int hci_resume_advertising_sync(struct hci_dev *hdev)
+{
+ struct adv_info *adv, *tmp;
+ int err;
+
+ /* If advertising has not been paused there is nothing to do. */
+ if (!hdev->advertising_paused)
+ return 0;
+
+ /* Resume directed advertising */
+ hdev->advertising_paused = false;
+ if (hdev->advertising_old_state) {
+ hci_dev_set_flag(hdev, HCI_ADVERTISING);
+ hdev->advertising_old_state = 0;
+ }
+
+ bt_dev_dbg(hdev, "Resuming advertising instances");
+
+ if (ext_adv_capable(hdev)) {
+ /* Call for each tracked instance to be re-enabled */
+ list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
+ err = hci_enable_ext_advertising_sync(hdev,
+ adv->instance);
+ if (!err)
+ continue;
+
+ /* If the instance cannot be resumed remove it */
+ hci_remove_ext_adv_instance_sync(hdev, adv->instance,
+ NULL);
+ }
+ } else {
+ /* Schedule for most recent instance to be restarted and begin
+ * the software rotation loop
+ */
+ err = hci_schedule_adv_instance_sync(hdev,
+ hdev->cur_adv_instance,
+ true);
+ }
+
+ hdev->advertising_paused = false;
+
+ return err;
+}
+
+struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
+ bool extended, struct sock *sk)
+{
+ u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
+ HCI_OP_READ_LOCAL_OOB_DATA;
+
+ return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
+}
+
+/* Device must not be scanning when updating the accept list.
+ *
+ * Update is done using the following sequence:
+ *
+ * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
+ * Remove Devices From Accept List ->
+ * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
+ * Add Devices to Accept List ->
+ * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
+ * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
+ * Enable Scanning
+ *
+ * In case of failure advertising shall be restored to its original state and
+ * the return value disables use of the accept list, since either the accept
+ * list or the resolving list could not be programmed.
+ *
+ */
+static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+{
+ struct hci_conn_params *params;
+ struct bdaddr_list *b, *t;
+ u8 num_entries = 0;
+ bool pend_conn, pend_report;
+ int err;
+
+ /* Pause advertising if the resolving list can be used, as controllers
+ * cannot accept resolving list modifications while advertising.
+ */
+ if (use_ll_privacy(hdev)) {
+ err = hci_pause_advertising_sync(hdev);
+ if (err) {
+ bt_dev_err(hdev, "pause advertising failed: %d", err);
+ return 0x00;
+ }
+ }
+
+ /* Disable address resolution while reprogramming accept list since
+ * devices that do have an IRK will be programmed in the resolving list
+ * when LL Privacy is enabled.
+ */
+ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
+ if (err) {
+ bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
+ goto done;
+ }
+
+ /* Go through the current accept list programmed into the
+ * controller one by one and check if that address is still
+ * in the list of pending connections or list of devices to
+ * report. If not present in either list, then remove it from
+ * the controller.
+ */
+ list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
+ pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
+ &b->bdaddr,
+ b->bdaddr_type);
+ pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
+ &b->bdaddr,
+ b->bdaddr_type);
+
+ /* If the device is not likely to connect or report,
+ * remove it from the acceptlist.
+ */
+ if (!pend_conn && !pend_report) {
+ hci_le_del_accept_list_sync(hdev, &b->bdaddr,
+ b->bdaddr_type);
+ continue;
+ }
+
+ num_entries++;
+ }
+
+ /* Since all no longer valid accept list entries have been
+ * removed, walk through the list of pending connections
+ * and ensure that any new device gets programmed into
+ * the controller.
+ *
+ * If the list of devices is larger than the number of
+ * available accept list entries in the controller, then
+ * just abort and return a filter policy value that does not
+ * use the accept list.
+ */
+ list_for_each_entry(params, &hdev->pend_le_conns, action) {
+ err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
+ if (err)
+ goto done;
+ }
+
+ /* After adding all new pending connections, walk through
+ * the list of pending reports and also add these to the
+ * accept list if there is still space. Abort if space runs out.
+ */
+ list_for_each_entry(params, &hdev->pend_le_reports, action) {
+ err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
+ if (err)
+ goto done;
+ }
+
+ /* Use the allowlist unless the following conditions are all true:
+ * - We are not currently suspending
+ * - There is at least one ADV monitor registered and offloading is
+ * not supported
+ * - Interleaved scanning is not currently using the allowlist
+ */
+ if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
+ hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
+ hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
+ err = -EINVAL;
+
+done:
+ /* Enable address resolution when LL Privacy is enabled. */
+ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
+ if (err)
+ bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
+
+ /* Resume advertising if it was paused */
+ if (use_ll_privacy(hdev))
+ hci_resume_advertising_sync(hdev);
+
+ /* Select filter policy to use accept list */
+ return err ? 0x00 : 0x01;
+}
+
+/* Returns true if an LE connection is in the scanning state */
+static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
+{
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *c;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+ if (c->type == LE_LINK && c->state == BT_CONNECT &&
+ test_bit(HCI_CONN_SCANNING, &c->flags)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return false;
+}
+
+static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
+ u16 interval, u16 window,
+ u8 own_addr_type, u8 filter_policy)
+{
+ struct hci_cp_le_set_ext_scan_params *cp;
+ struct hci_cp_le_scan_phy_params *phy;
+ u8 data[sizeof(*cp) + sizeof(*phy) * 2];
+ u8 num_phy = 0;
+
+ cp = (void *)data;
+ phy = (void *)cp->data;
+
+ memset(data, 0, sizeof(data));
+
+ cp->own_addr_type = own_addr_type;
+ cp->filter_policy = filter_policy;
+
+ if (scan_1m(hdev) || scan_2m(hdev)) {
+ cp->scanning_phys |= LE_SCAN_PHY_1M;
+
+ phy->type = type;
+ phy->interval = cpu_to_le16(interval);
+ phy->window = cpu_to_le16(window);
+
+ num_phy++;
+ phy++;
+ }
+
+ if (scan_coded(hdev)) {
+ cp->scanning_phys |= LE_SCAN_PHY_CODED;
+
+ phy->type = type;
+ phy->interval = cpu_to_le16(interval);
+ phy->window = cpu_to_le16(window);
+
+ num_phy++;
+ phy++;
+ }
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
+ sizeof(*cp) + sizeof(*phy) * num_phy,
+ data, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
+ u16 interval, u16 window,
+ u8 own_addr_type, u8 filter_policy)
+{
+ struct hci_cp_le_set_scan_param cp;
+
+ if (use_ext_scan(hdev))
+ return hci_le_set_ext_scan_param_sync(hdev, type, interval,
+ window, own_addr_type,
+ filter_policy);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.type = type;
+ cp.interval = cpu_to_le16(interval);
+ cp.window = cpu_to_le16(window);
+ cp.own_address_type = own_addr_type;
+ cp.filter_policy = filter_policy;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
+ u16 window, u8 own_addr_type, u8 filter_policy,
+ u8 filter_dup)
+{
+ int err;
+
+ if (hdev->scanning_paused) {
+ bt_dev_dbg(hdev, "Scanning is paused for suspend");
+ return 0;
+ }
+
+ err = hci_le_set_scan_param_sync(hdev, type, interval, window,
+ own_addr_type, filter_policy);
+ if (err)
+ return err;
+
+ return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
+}
+
+static int hci_passive_scan_sync(struct hci_dev *hdev)
+{
+ u8 own_addr_type;
+ u8 filter_policy;
+ u16 window, interval;
+ int err;
+
+ if (hdev->scanning_paused) {
+ bt_dev_dbg(hdev, "Scanning is paused for suspend");
+ return 0;
+ }
+
+ err = hci_scan_disable_sync(hdev);
+ if (err) {
+ bt_dev_err(hdev, "disable scanning failed: %d", err);
+ return err;
+ }
+
+ /* Set require_privacy to false since no SCAN_REQ are sent
+ * during passive scanning. Not using a non-resolvable address
+ * here is important so that peer devices using direct
+ * advertising with our address will be correctly reported
+ * by the controller.
+ */
+ if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
+ &own_addr_type))
+ return 0;
+
+ if (hdev->enable_advmon_interleave_scan &&
+ hci_update_interleaved_scan_sync(hdev))
+ return 0;
+
+ bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
+
+ /* Adding or removing entries from the accept list must
+ * happen before enabling scanning. The controller does
+ * not allow accept list modification while scanning.
+ */
+ filter_policy = hci_update_accept_list_sync(hdev);
+
+ /* When the controller is using random resolvable addresses and,
+ * with that, LE privacy is enabled, controllers that support
+ * Extended Scanner Filter Policies can handle directed
+ * advertising.
+ *
+ * So instead of using filter policies 0x00 (no acceptlist)
+ * and 0x01 (acceptlist enabled) use the new filter policies
+ * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
+ */
+ if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
+ (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
+ filter_policy |= 0x02;
+
+ if (hdev->suspended) {
+ window = hdev->le_scan_window_suspend;
+ interval = hdev->le_scan_int_suspend;
+ } else if (hci_is_le_conn_scanning(hdev)) {
+ window = hdev->le_scan_window_connect;
+ interval = hdev->le_scan_int_connect;
+ } else if (hci_is_adv_monitoring(hdev)) {
+ window = hdev->le_scan_window_adv_monitor;
+ interval = hdev->le_scan_int_adv_monitor;
+ } else {
+ window = hdev->le_scan_window;
+ interval = hdev->le_scan_interval;
+ }
+
+ bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
+
+ return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
+ own_addr_type, filter_policy,
+ LE_SCAN_FILTER_DUP_ENABLE);
+}
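+
+/* Illustrative filter policy values, not part of this change: with the
+ * accept list selected (0x01), HCI_PRIVACY set and HCI_LE_EXT_SCAN_POLICY
+ * supported, 0x01 | 0x02 = 0x03 (acceptlist enabled, extended policy),
+ * while 0x00 | 0x02 = 0x02 (no acceptlist, extended policy).
+ */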
+
+/* This function controls the passive scanning based on the hdev->pend_le_conns
+ * list. If there are pending LE connections we start the background scanning,
+ * otherwise we stop it in the following sequence:
+ *
+ * If there are devices to scan:
+ *
+ * Disable Scanning -> Update Accept List ->
+ * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
+ * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
+ * Enable Scanning
+ *
+ * Otherwise:
+ *
+ * Disable Scanning
+ */
+int hci_update_passive_scan_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ if (!test_bit(HCI_UP, &hdev->flags) ||
+ test_bit(HCI_INIT, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_CONFIG) ||
+ hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
+ hci_dev_test_flag(hdev, HCI_UNREGISTER))
+ return 0;
+
+ /* No point in doing scanning if LE support hasn't been enabled */
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return 0;
+
+ /* If discovery is active don't interfere with it */
+ if (hdev->discovery.state != DISCOVERY_STOPPED)
+ return 0;
+
+ /* Reset RSSI and UUID filters when starting background scanning
+ * since these filters are meant for service discovery only.
+ *
+ * The Start Discovery and Start Service Discovery operations
+ * ensure to set proper values for RSSI threshold and UUID
+ * filter list. So it is safe to just reset them here.
+ */
+ hci_discovery_filter_clear(hdev);
+
+ bt_dev_dbg(hdev, "ADV monitoring is %s",
+ hci_is_adv_monitoring(hdev) ? "on" : "off");
+
+ if (list_empty(&hdev->pend_le_conns) &&
+ list_empty(&hdev->pend_le_reports) &&
+ !hci_is_adv_monitoring(hdev)) {
+ /* If there are no pending LE connections, no devices
+ * to be scanned for and no ADV monitors, we should stop the
+ * background scanning.
+ */
+
+ bt_dev_dbg(hdev, "stopping background scanning");
+
+ err = hci_scan_disable_sync(hdev);
+ if (err)
+ bt_dev_err(hdev, "stop background scanning failed: %d",
+ err);
+ } else {
+ /* If there is at least one pending LE connection, we should
+ * keep the background scan running.
+ */
+
+ /* If controller is connecting, we should not start scanning
+ * since some controllers are not able to scan and connect at
+ * the same time.
+ */
+ if (hci_lookup_le_connect(hdev))
+ return 0;
+
+ bt_dev_dbg(hdev, "start background scanning");
+
+ err = hci_passive_scan_sync(hdev);
+ if (err)
+ bt_dev_err(hdev, "start background scanning failed: %d",
+ err);
+ }
+
+ return err;
+}
+
+static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_update_passive_scan_sync(hdev);
+}
+
+int hci_update_passive_scan(struct hci_dev *hdev)
+{
+ /* Only queue if it would have any effect */
+ if (!test_bit(HCI_UP, &hdev->flags) ||
+ test_bit(HCI_INIT, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_SETUP) ||
+ hci_dev_test_flag(hdev, HCI_CONFIG) ||
+ hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
+ hci_dev_test_flag(hdev, HCI_UNREGISTER))
+ return 0;
+
+ return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
+}
+
+int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
+{
+ int err;
+
+ if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
+ return 0;
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
+ sizeof(val), &val, HCI_CMD_TIMEOUT);
+
+ if (!err) {
+ if (val) {
+ hdev->features[1][0] |= LMP_HOST_SC;
+ hci_dev_set_flag(hdev, HCI_SC_ENABLED);
+ } else {
+ hdev->features[1][0] &= ~LMP_HOST_SC;
+ hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
+ }
+ }
+
+ return err;
+}
+
+int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
+{
+ int err;
+
+ if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
+ lmp_host_ssp_capable(hdev))
+ return 0;
+
+ if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
+ __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
+ sizeof(mode), &mode, HCI_CMD_TIMEOUT);
+ }
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
+ sizeof(mode), &mode, HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+
+ return hci_write_sc_support_sync(hdev, 0x01);
+}
+
+int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
+{
+ struct hci_cp_write_le_host_supported cp;
+
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
+ !lmp_bredr_capable(hdev))
+ return 0;
+
+ /* Check first if we already have the right host state
+ * (host features set)
+ */
+ if (le == lmp_host_le_capable(hdev) &&
+ simul == lmp_host_le_br_capable(hdev))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.le = le;
+ cp.simul = simul;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_powered_update_adv_sync(struct hci_dev *hdev)
+{
+ struct adv_info *adv, *tmp;
+ int err;
+
+ if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+ return 0;
+
+ /* If RPA Resolution has not been enabled yet it means the
+ * resolving list is empty and we should attempt to program the
+ * local IRK in order to support using own_addr_type
+ * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
+ */
+ if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
+ hci_le_add_resolve_list_sync(hdev, NULL);
+ hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
+ }
+
+ /* Make sure the controller has a good default for
+ * advertising data. This also applies to the case
+ * where BR/EDR was toggled during the AUTO_OFF phase.
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+ list_empty(&hdev->adv_instances)) {
+ if (ext_adv_capable(hdev)) {
+ err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
+ if (!err)
+ hci_update_scan_rsp_data_sync(hdev, 0x00);
+ } else {
+ err = hci_update_adv_data_sync(hdev, 0x00);
+ if (!err)
+ hci_update_scan_rsp_data_sync(hdev, 0x00);
+ }
+
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ hci_enable_advertising_sync(hdev);
+ }
+
+ /* Call for each tracked instance to be scheduled */
+ list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
+ hci_schedule_adv_instance_sync(hdev, adv->instance, true);
+
+ return 0;
+}
+
+static int hci_write_auth_enable_sync(struct hci_dev *hdev)
+{
+ u8 link_sec;
+
+ link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
+ if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
+ sizeof(link_sec), &link_sec,
+ HCI_CMD_TIMEOUT);
+}
+
+int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
+{
+ struct hci_cp_write_page_scan_activity cp;
+ u8 type;
+ int err = 0;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (enable) {
+ type = PAGE_SCAN_TYPE_INTERLACED;
+
+ /* 160 msec page scan interval */
+ cp.interval = cpu_to_le16(0x0100);
+ } else {
+ type = hdev->def_page_scan_type;
+ cp.interval = cpu_to_le16(hdev->def_page_scan_int);
+ }
+
+ cp.window = cpu_to_le16(hdev->def_page_scan_window);
+
+ if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
+ __cpu_to_le16(hdev->page_scan_window) != cp.window) {
+ err = __hci_cmd_sync_status(hdev,
+ HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+ }
+
+ if (hdev->page_scan_type != type)
+ err = __hci_cmd_sync_status(hdev,
+ HCI_OP_WRITE_PAGE_SCAN_TYPE,
+ sizeof(type), &type,
+ HCI_CMD_TIMEOUT);
+
+ return err;
+}
+
+static bool disconnected_accept_list_entries(struct hci_dev *hdev)
+{
+ struct bdaddr_list *b;
+
+ list_for_each_entry(b, &hdev->accept_list, list) {
+ struct hci_conn *conn;
+
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
+ if (!conn)
+ return true;
+
+ if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
+ return true;
+ }
+
+ return false;
+}
+
+static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
+ sizeof(val), &val,
+ HCI_CMD_TIMEOUT);
+}
+
+int hci_update_scan_sync(struct hci_dev *hdev)
+{
+ u8 scan;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ if (!hdev_is_powered(hdev))
+ return 0;
+
+ if (mgmt_powering_down(hdev))
+ return 0;
+
+ if (hdev->scanning_paused)
+ return 0;
+
+ if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
+ disconnected_accept_list_entries(hdev))
+ scan = SCAN_PAGE;
+ else
+ scan = SCAN_DISABLED;
+
+ if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
+ scan |= SCAN_INQUIRY;
+
+ if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
+ test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
+ return 0;
+
+ return hci_write_scan_enable_sync(hdev, scan);
+}
+
+int hci_update_name_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_local_name cp;
+
+ memset(&cp, 0, sizeof(cp));
+
+ memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
+ sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+/* This function performs the powered update HCI command sequence after the HCI
+ * init sequence, which ends up resetting all states; the sequence is as follows:
+ *
+ * HCI_SSP_ENABLED(Enable SSP)
+ * HCI_LE_ENABLED(Enable LE)
+ * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
+ * Update adv data)
+ * Enable Authentication
+ * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
+ * Set Name -> Set EIR)
+ */
+int hci_powered_update_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ /* Register the available SMP channels (BR/EDR and LE) only when
+ * successfully powering on the controller. This late
+ * registration is required so that LE SMP can clearly decide if
+ * the public address or static address is used.
+ */
+ smp_register(hdev);
+
+ err = hci_write_ssp_mode_sync(hdev, 0x01);
+ if (err)
+ return err;
+
+ err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
+ if (err)
+ return err;
+
+ err = hci_powered_update_adv_sync(hdev);
+ if (err)
+ return err;
+
+ err = hci_write_auth_enable_sync(hdev);
+ if (err)
+ return err;
+
+ if (lmp_bredr_capable(hdev)) {
+ if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
+ hci_write_fast_connectable_sync(hdev, true);
+ else
+ hci_write_fast_connectable_sync(hdev, false);
+ hci_update_scan_sync(hdev);
+ hci_update_class_sync(hdev);
+ hci_update_name_sync(hdev);
+ hci_update_eir_sync(hdev);
+ }
+
+ return 0;
+}
+
+/**
+ * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
+ * (BD_ADDR) for a HCI device from
+ * a firmware node property.
+ * @hdev: The HCI device
+ *
+ * Search the firmware node for 'local-bd-address'.
+ *
+ * All-zero BD addresses are rejected, because those could be properties
+ * that exist in the firmware tables, but were not updated by the firmware. For
+ * example, the DTS could define 'local-bd-address' with an all-zero value.
+ */
+static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
+ bdaddr_t ba;
+ int ret;
+
+ ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
+ (u8 *)&ba, sizeof(ba));
+ if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
+ return;
+
+ bacpy(&hdev->public_addr, &ba);
+}
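+
+/* Illustrative firmware node fragment with hypothetical values, not part of
+ * this change: a devicetree entry such as
+ *
+ *	&bluetooth0 {
+ *		local-bd-address = [ 55 44 33 22 11 00 ];
+ *	};
+ *
+ * would populate hdev->public_addr here, whereas an all-zero array is
+ * rejected by the bacmp() check above.
+ */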
+
+struct hci_init_stage {
+ int (*func)(struct hci_dev *hdev);
+};
+
+/* Run a NULL-terminated table of init stage functions */
+static int hci_init_stage_sync(struct hci_dev *hdev,
+ const struct hci_init_stage *stage)
+{
+ size_t i;
+
+ for (i = 0; stage[i].func; i++) {
+ int err;
+
+ err = stage[i].func(hdev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Read Local Version */
+static int hci_read_local_version_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read BD Address */
+static int hci_read_bd_addr_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+#define HCI_INIT(_func) \
+{ \
+ .func = _func, \
+}
+
+static const struct hci_init_stage hci_init0[] = {
+ /* HCI_OP_READ_LOCAL_VERSION */
+ HCI_INIT(hci_read_local_version_sync),
+ /* HCI_OP_READ_BD_ADDR */
+ HCI_INIT(hci_read_bd_addr_sync),
+ {}
+};
+
+int hci_reset_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ set_bit(HCI_RESET, &hdev->flags);
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
+ HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int hci_init0_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ /* Reset */
+ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
+ err = hci_reset_sync(hdev);
+ if (err)
+ return err;
+ }
+
+ return hci_init_stage_sync(hdev, hci_init0);
+}
+
+static int hci_unconf_init_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
+ return 0;
+
+ err = hci_init0_sync(hdev);
+ if (err < 0)
+ return err;
+
+ if (hci_dev_test_flag(hdev, HCI_SETUP))
+ hci_debugfs_create_basic(hdev);
+
+ return 0;
+}
+
+/* Read Local Supported Features. */
+static int hci_read_local_features_sync(struct hci_dev *hdev)
+{
+ /* Not all AMP controllers support this command */
+ if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* BR Controller init stage 1 command sequence */
+static const struct hci_init_stage br_init1[] = {
+ /* HCI_OP_READ_LOCAL_FEATURES */
+ HCI_INIT(hci_read_local_features_sync),
+ /* HCI_OP_READ_LOCAL_VERSION */
+ HCI_INIT(hci_read_local_version_sync),
+ /* HCI_OP_READ_BD_ADDR */
+ HCI_INIT(hci_read_bd_addr_sync),
+ {}
+};
+
+/* Read Local Commands */
+static int hci_read_local_cmds_sync(struct hci_dev *hdev)
+{
+ /* All Bluetooth 1.2 and later controllers should support the
+ * HCI command for reading the local supported commands.
+ *
+ * Unfortunately some controllers indicate Bluetooth 1.2 support,
+ * but do not have support for this command. If that is the case,
+ * the driver can quirk the behavior and skip reading the local
+ * supported commands.
+ */
+ if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
+ !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
+ 0, NULL, HCI_CMD_TIMEOUT);
+
+ return 0;
+}
+
+/* Read Local AMP Info */
+static int hci_read_local_amp_info_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Data Blk size */
+static int hci_read_data_block_size_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Flow Control Mode */
+static int hci_read_flow_control_mode_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Location Data */
+static int hci_read_location_data_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* AMP Controller init stage 1 command sequence */
+static const struct hci_init_stage amp_init1[] = {
+ /* HCI_OP_READ_LOCAL_VERSION */
+ HCI_INIT(hci_read_local_version_sync),
+ /* HCI_OP_READ_LOCAL_COMMANDS */
+ HCI_INIT(hci_read_local_cmds_sync),
+ /* HCI_OP_READ_LOCAL_AMP_INFO */
+ HCI_INIT(hci_read_local_amp_info_sync),
+ /* HCI_OP_READ_DATA_BLOCK_SIZE */
+ HCI_INIT(hci_read_data_block_size_sync),
+ /* HCI_OP_READ_FLOW_CONTROL_MODE */
+ HCI_INIT(hci_read_flow_control_mode_sync),
+ /* HCI_OP_READ_LOCATION_DATA */
+ HCI_INIT(hci_read_location_data_sync),
+	{}
+};
+
+static int hci_init1_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ /* Reset */
+ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
+ err = hci_reset_sync(hdev);
+ if (err)
+ return err;
+ }
+
+ switch (hdev->dev_type) {
+ case HCI_PRIMARY:
+ hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
+ return hci_init_stage_sync(hdev, br_init1);
+ case HCI_AMP:
+ hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
+ return hci_init_stage_sync(hdev, amp_init1);
+ default:
+ bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
+ break;
+ }
+
+ return 0;
+}
+
+/* AMP Controller init stage 2 command sequence */
+static const struct hci_init_stage amp_init2[] = {
+ /* HCI_OP_READ_LOCAL_FEATURES */
+ HCI_INIT(hci_read_local_features_sync),
+	{}
+};
+
+/* Read Buffer Size (ACL mtu, max pkt, etc.) */
+static int hci_read_buffer_size_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Class of Device */
+static int hci_read_dev_class_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Local Name */
+static int hci_read_local_name_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Voice Setting */
+static int hci_read_voice_setting_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Number of Supported IAC */
+static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read Current IAC LAP */
+static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
+ u8 cond_type, bdaddr_t *bdaddr,
+ u8 auto_accept)
+{
+ struct hci_cp_set_event_filter cp;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.flt_type = flt_type;
+
+ if (flt_type != HCI_FLT_CLEAR_ALL) {
+ cp.cond_type = cond_type;
+ bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
+ cp.addr_conn_flt.auto_accept = auto_accept;
+ }
+
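+	/* Clear All Filters sends only the filter type byte; all other
+	 * filter types include the full condition in the parameters.
+	 */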
+ return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
+ flt_type == HCI_FLT_CLEAR_ALL ?
+ sizeof(cp.flt_type) : sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int hci_clear_event_filter_sync(struct hci_dev *hdev)
+{
+ if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
+ return 0;
+
+ return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
+ BDADDR_ANY, 0x00);
+}
+
+/* Connection accept timeout ~20 secs */
+static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
+{
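+	/* 0x7d00 slots * 0.625 ms per slot = 20 seconds */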
+ __le16 param = cpu_to_le16(0x7d00);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
+ sizeof(param), &param, HCI_CMD_TIMEOUT);
+}
+
+/* BR Controller init stage 2 command sequence */
+static const struct hci_init_stage br_init2[] = {
+ /* HCI_OP_READ_BUFFER_SIZE */
+ HCI_INIT(hci_read_buffer_size_sync),
+ /* HCI_OP_READ_CLASS_OF_DEV */
+ HCI_INIT(hci_read_dev_class_sync),
+ /* HCI_OP_READ_LOCAL_NAME */
+ HCI_INIT(hci_read_local_name_sync),
+ /* HCI_OP_READ_VOICE_SETTING */
+ HCI_INIT(hci_read_voice_setting_sync),
+ /* HCI_OP_READ_NUM_SUPPORTED_IAC */
+ HCI_INIT(hci_read_num_supported_iac_sync),
+ /* HCI_OP_READ_CURRENT_IAC_LAP */
+ HCI_INIT(hci_read_current_iac_lap_sync),
+ /* HCI_OP_SET_EVENT_FLT */
+ HCI_INIT(hci_clear_event_filter_sync),
+ /* HCI_OP_WRITE_CA_TIMEOUT */
+ HCI_INIT(hci_write_ca_timeout_sync),
+ {}
+};
+
+static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
+{
+ u8 mode = 0x01;
+
+ if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ return 0;
+
+	/* When SSP is available, then the host features page
+	 * should also be available. However some controllers
+	 * list the max_page as 0 as long as SSP has not been
+	 * enabled. To achieve proper debugging output, force
+	 * max_page to a minimum of 1.
+	 */
+ hdev->max_page = 0x01;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
+ sizeof(mode), &mode, HCI_CMD_TIMEOUT);
+}
+
+static int hci_write_eir_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_eir cp;
+
+ if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
+ return 0;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+ memset(&cp, 0, sizeof(cp));
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
+{
+ u8 mode;
+
+ if (!lmp_inq_rssi_capable(hdev) &&
+ !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
+ return 0;
+
+ /* If Extended Inquiry Result events are supported, then
+ * they are clearly preferred over Inquiry Result with RSSI
+ * events.
+ */
+ mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
+ sizeof(mode), &mode, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
+{
+ if (!lmp_inq_tx_pwr_capable(hdev))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
+{
+ struct hci_cp_read_local_ext_features cp;
+
+ if (!lmp_ext_feat_capable(hdev))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.page = page;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
+{
+ return hci_read_local_ext_features_sync(hdev, 0x01);
+}
+
+/* HCI Controller init stage 2 command sequence */
+static const struct hci_init_stage hci_init2[] = {
+ /* HCI_OP_READ_LOCAL_COMMANDS */
+ HCI_INIT(hci_read_local_cmds_sync),
+ /* HCI_OP_WRITE_SSP_MODE */
+ HCI_INIT(hci_write_ssp_mode_1_sync),
+ /* HCI_OP_WRITE_EIR */
+ HCI_INIT(hci_write_eir_sync),
+ /* HCI_OP_WRITE_INQUIRY_MODE */
+ HCI_INIT(hci_write_inquiry_mode_sync),
+ /* HCI_OP_READ_INQ_RSP_TX_POWER */
+ HCI_INIT(hci_read_inq_rsp_tx_power_sync),
+ /* HCI_OP_READ_LOCAL_EXT_FEATURES */
+ HCI_INIT(hci_read_local_ext_features_1_sync),
+ /* HCI_OP_WRITE_AUTH_ENABLE */
+ HCI_INIT(hci_write_auth_enable_sync),
+ {}
+};
+
+/* Read LE Buffer Size */
+static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Local Supported Features */
+static int hci_le_read_local_features_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Supported States */
+static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
+{
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* LE Controller init stage 2 command sequence */
+static const struct hci_init_stage le_init2[] = {
+ /* HCI_OP_LE_READ_BUFFER_SIZE */
+ HCI_INIT(hci_le_read_buffer_size_sync),
+ /* HCI_OP_LE_READ_LOCAL_FEATURES */
+ HCI_INIT(hci_le_read_local_features_sync),
+ /* HCI_OP_LE_READ_SUPPORTED_STATES */
+ HCI_INIT(hci_le_read_supported_states_sync),
+ {}
+};
+
+static int hci_init2_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ if (hdev->dev_type == HCI_AMP)
+ return hci_init_stage_sync(hdev, amp_init2);
+
+ if (lmp_bredr_capable(hdev)) {
+ err = hci_init_stage_sync(hdev, br_init2);
+ if (err)
+ return err;
+ } else {
+ hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
+ }
+
+ if (lmp_le_capable(hdev)) {
+ err = hci_init_stage_sync(hdev, le_init2);
+ if (err)
+ return err;
+ /* LE-only controllers have LE implicitly enabled */
+ if (!lmp_bredr_capable(hdev))
+ hci_dev_set_flag(hdev, HCI_LE_ENABLED);
+ }
+
+ return hci_init_stage_sync(hdev, hci_init2);
+}
+
+static int hci_set_event_mask_sync(struct hci_dev *hdev)
+{
+ /* The second byte is 0xff instead of 0x9f (two reserved bits
+ * disabled) since a Broadcom 1.2 dongle doesn't respond to the
+ * command otherwise.
+ */
+ u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
+
+	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
+	 * any event mask for pre-1.2 devices.
+ */
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return 0;
+
+ if (lmp_bredr_capable(hdev)) {
+ events[4] |= 0x01; /* Flow Specification Complete */
+
+ /* Don't set Disconnect Complete when suspended as that
+ * would wakeup the host when disconnecting due to
+ * suspend.
+ */
+ if (hdev->suspended)
+ events[0] &= 0xef;
+ } else {
+ /* Use a different default for LE-only devices */
+ memset(events, 0, sizeof(events));
+ events[1] |= 0x20; /* Command Complete */
+ events[1] |= 0x40; /* Command Status */
+ events[1] |= 0x80; /* Hardware Error */
+
+ /* If the controller supports the Disconnect command, enable
+ * the corresponding event. In addition enable packet flow
+ * control related events.
+ */
+ if (hdev->commands[0] & 0x20) {
+ /* Don't set Disconnect Complete when suspended as that
+ * would wakeup the host when disconnecting due to
+ * suspend.
+ */
+ if (!hdev->suspended)
+ events[0] |= 0x10; /* Disconnection Complete */
+ events[2] |= 0x04; /* Number of Completed Packets */
+ events[3] |= 0x02; /* Data Buffer Overflow */
+ }
+
+ /* If the controller supports the Read Remote Version
+ * Information command, enable the corresponding event.
+ */
+ if (hdev->commands[2] & 0x80)
+ events[1] |= 0x08; /* Read Remote Version Information
+ * Complete
+ */
+
+ if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
+ events[0] |= 0x80; /* Encryption Change */
+ events[5] |= 0x80; /* Encryption Key Refresh Complete */
+ }
+ }
+
+ if (lmp_inq_rssi_capable(hdev) ||
+ test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
+ events[4] |= 0x02; /* Inquiry Result with RSSI */
+
+ if (lmp_ext_feat_capable(hdev))
+ events[4] |= 0x04; /* Read Remote Extended Features Complete */
+
+ if (lmp_esco_capable(hdev)) {
+ events[5] |= 0x08; /* Synchronous Connection Complete */
+ events[5] |= 0x10; /* Synchronous Connection Changed */
+ }
+
+ if (lmp_sniffsubr_capable(hdev))
+ events[5] |= 0x20; /* Sniff Subrating */
+
+ if (lmp_pause_enc_capable(hdev))
+ events[5] |= 0x80; /* Encryption Key Refresh Complete */
+
+ if (lmp_ext_inq_capable(hdev))
+ events[5] |= 0x40; /* Extended Inquiry Result */
+
+ if (lmp_no_flush_capable(hdev))
+ events[7] |= 0x01; /* Enhanced Flush Complete */
+
+ if (lmp_lsto_capable(hdev))
+ events[6] |= 0x80; /* Link Supervision Timeout Changed */
+
+ if (lmp_ssp_capable(hdev)) {
+ events[6] |= 0x01; /* IO Capability Request */
+ events[6] |= 0x02; /* IO Capability Response */
+ events[6] |= 0x04; /* User Confirmation Request */
+ events[6] |= 0x08; /* User Passkey Request */
+ events[6] |= 0x10; /* Remote OOB Data Request */
+ events[6] |= 0x20; /* Simple Pairing Complete */
+ events[7] |= 0x04; /* User Passkey Notification */
+ events[7] |= 0x08; /* Keypress Notification */
+ events[7] |= 0x10; /* Remote Host Supported
+ * Features Notification
+ */
+ }
+
+ if (lmp_le_capable(hdev))
+ events[7] |= 0x20; /* LE Meta-Event */
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
+ sizeof(events), events, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_read_stored_link_key cp;
+
+ if (!(hdev->commands[6] & 0x20) ||
+ test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, BDADDR_ANY);
+ cp.read_all = 0x01;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_setup_link_policy_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_def_link_policy cp;
+ u16 link_policy = 0;
+
+ if (!(hdev->commands[5] & 0x10))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (lmp_rswitch_capable(hdev))
+ link_policy |= HCI_LP_RSWITCH;
+ if (lmp_hold_capable(hdev))
+ link_policy |= HCI_LP_HOLD;
+ if (lmp_sniff_capable(hdev))
+ link_policy |= HCI_LP_SNIFF;
+ if (lmp_park_capable(hdev))
+ link_policy |= HCI_LP_PARK;
+
+ cp.policy = cpu_to_le16(link_policy);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[8] & 0x01))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[18] & 0x04) ||
+ test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
+{
+ /* Some older Broadcom based Bluetooth 1.2 controllers do not
+ * support the Read Page Scan Type command. Check support for
+ * this command in the bit mask of supported commands.
+ */
+ if (!(hdev->commands[13] & 0x01))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read features beyond page 1 if available */
+static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
+{
+ u8 page;
+ int err;
+
+ if (!lmp_ext_feat_capable(hdev))
+ return 0;
+
+ for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
+ page++) {
+ err = hci_read_local_ext_features_sync(hdev, page);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* HCI Controller init stage 3 command sequence */
+static const struct hci_init_stage hci_init3[] = {
+ /* HCI_OP_SET_EVENT_MASK */
+ HCI_INIT(hci_set_event_mask_sync),
+ /* HCI_OP_READ_STORED_LINK_KEY */
+ HCI_INIT(hci_read_stored_link_key_sync),
+ /* HCI_OP_WRITE_DEF_LINK_POLICY */
+ HCI_INIT(hci_setup_link_policy_sync),
+ /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
+ HCI_INIT(hci_read_page_scan_activity_sync),
+ /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
+ HCI_INIT(hci_read_def_err_data_reporting_sync),
+ /* HCI_OP_READ_PAGE_SCAN_TYPE */
+ HCI_INIT(hci_read_page_scan_type_sync),
+ /* HCI_OP_READ_LOCAL_EXT_FEATURES */
+ HCI_INIT(hci_read_local_ext_features_all_sync),
+ {}
+};
+
+static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
+{
+ u8 events[8];
+
+ if (!lmp_le_capable(hdev))
+ return 0;
+
+ memset(events, 0, sizeof(events));
+
+ if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
+ events[0] |= 0x10; /* LE Long Term Key Request */
+
+ /* If controller supports the Connection Parameters Request
+ * Link Layer Procedure, enable the corresponding event.
+ */
+ if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
+ /* LE Remote Connection Parameter Request */
+ events[0] |= 0x20;
+
+ /* If the controller supports the Data Length Extension
+ * feature, enable the corresponding event.
+ */
+ if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
+ events[0] |= 0x40; /* LE Data Length Change */
+
+ /* If the controller supports LL Privacy feature, enable
+ * the corresponding event.
+ */
+ if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
+ events[1] |= 0x02; /* LE Enhanced Connection Complete */
+
+ /* If the controller supports Extended Scanner Filter
+ * Policies, enable the corresponding event.
+ */
+ if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
+ events[1] |= 0x04; /* LE Direct Advertising Report */
+
+ /* If the controller supports Channel Selection Algorithm #2
+ * feature, enable the corresponding event.
+ */
+ if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
+ events[2] |= 0x08; /* LE Channel Selection Algorithm */
+
+ /* If the controller supports the LE Set Scan Enable command,
+ * enable the corresponding advertising report event.
+ */
+ if (hdev->commands[26] & 0x08)
+ events[0] |= 0x02; /* LE Advertising Report */
+
+ /* If the controller supports the LE Create Connection
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[26] & 0x10)
+ events[0] |= 0x01; /* LE Connection Complete */
+
+ /* If the controller supports the LE Connection Update
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[27] & 0x04)
+ events[0] |= 0x04; /* LE Connection Update Complete */
+
+ /* If the controller supports the LE Read Remote Used Features
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[27] & 0x20)
+ /* LE Read Remote Used Features Complete */
+ events[0] |= 0x08;
+
+ /* If the controller supports the LE Read Local P-256
+ * Public Key command, enable the corresponding event.
+ */
+ if (hdev->commands[34] & 0x02)
+ /* LE Read Local P-256 Public Key Complete */
+ events[0] |= 0x80;
+
+ /* If the controller supports the LE Generate DHKey
+ * command, enable the corresponding event.
+ */
+ if (hdev->commands[34] & 0x04)
+ events[1] |= 0x01; /* LE Generate DHKey Complete */
+
+ /* If the controller supports the LE Set Default PHY or
+ * LE Set PHY commands, enable the corresponding event.
+ */
+ if (hdev->commands[35] & (0x20 | 0x40))
+ events[1] |= 0x08; /* LE PHY Update Complete */
+
+ /* If the controller supports LE Set Extended Scan Parameters
+ * and LE Set Extended Scan Enable commands, enable the
+ * corresponding event.
+ */
+ if (use_ext_scan(hdev))
+ events[1] |= 0x10; /* LE Extended Advertising Report */
+
+ /* If the controller supports the LE Extended Advertising
+ * command, enable the corresponding event.
+ */
+ if (ext_adv_capable(hdev))
+ events[2] |= 0x02; /* LE Advertising Set Terminated */
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
+ sizeof(events), events, HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Advertising Channel TX Power */
+static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
+{
+ if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
+ /* HCI TS spec forbids mixing of legacy and extended
+ * advertising commands wherein READ_ADV_TX_POWER is
+ * also included. So do not call it if extended adv
+		 * is supported, otherwise the controller will return
+ * COMMAND_DISALLOWED for extended commands.
+ */
+ return __hci_cmd_sync_status(hdev,
+ HCI_OP_LE_READ_ADV_TX_POWER,
+ 0, NULL, HCI_CMD_TIMEOUT);
+ }
+
+ return 0;
+}
+
+/* Read LE Min/Max Tx Power */
+static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[38] & 0x80) ||
+ test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Accept List Size */
+static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[26] & 0x40))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Clear LE Accept List */
+static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[26] & 0x80))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Resolving List Size */
+static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[34] & 0x40))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Clear LE Resolving List */
+static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[34] & 0x20))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Set RPA timeout */
+static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
+{
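+	/* RPA_Timeout is expressed in seconds */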
+ __le16 timeout = cpu_to_le16(hdev->rpa_timeout);
+
+ if (!(hdev->commands[35] & 0x04))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
+ sizeof(timeout), &timeout,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Maximum Data Length */
+static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Suggested Default Data Length */
+static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Read LE Number of Supported Advertising Sets */
+static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
+{
+ if (!ext_adv_capable(hdev))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev,
+ HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Write LE Host Supported */
+static int hci_set_le_support_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_le_host_supported cp;
+
+ /* LE-only devices do not support explicit enablement */
+ if (!lmp_bredr_capable(hdev))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
+ cp.le = 0x01;
+ cp.simul = 0x00;
+ }
+
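+	/* Only issue the command when the host LE setting actually changes */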
+ if (cp.le == lmp_host_le_capable(hdev))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* LE Controller init stage 3 command sequence */
+static const struct hci_init_stage le_init3[] = {
+ /* HCI_OP_LE_SET_EVENT_MASK */
+ HCI_INIT(hci_le_set_event_mask_sync),
+ /* HCI_OP_LE_READ_ADV_TX_POWER */
+ HCI_INIT(hci_le_read_adv_tx_power_sync),
+ /* HCI_OP_LE_READ_TRANSMIT_POWER */
+ HCI_INIT(hci_le_read_tx_power_sync),
+ /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
+ HCI_INIT(hci_le_read_accept_list_size_sync),
+ /* HCI_OP_LE_CLEAR_ACCEPT_LIST */
+ HCI_INIT(hci_le_clear_accept_list_sync),
+ /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
+ HCI_INIT(hci_le_read_resolv_list_size_sync),
+ /* HCI_OP_LE_CLEAR_RESOLV_LIST */
+ HCI_INIT(hci_le_clear_resolv_list_sync),
+ /* HCI_OP_LE_SET_RPA_TIMEOUT */
+ HCI_INIT(hci_le_set_rpa_timeout_sync),
+ /* HCI_OP_LE_READ_MAX_DATA_LEN */
+ HCI_INIT(hci_le_read_max_data_len_sync),
+ /* HCI_OP_LE_READ_DEF_DATA_LEN */
+ HCI_INIT(hci_le_read_def_data_len_sync),
+ /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
+ HCI_INIT(hci_le_read_num_support_adv_sets_sync),
+ /* HCI_OP_WRITE_LE_HOST_SUPPORTED */
+ HCI_INIT(hci_set_le_support_sync),
+ {}
+};
+
+static int hci_init3_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ err = hci_init_stage_sync(hdev, hci_init3);
+ if (err)
+ return err;
+
+ if (lmp_le_capable(hdev))
+ return hci_init_stage_sync(hdev, le_init3);
+
+ return 0;
+}
+
+static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_delete_stored_link_key cp;
+
+ /* Some Broadcom based Bluetooth controllers do not support the
+ * Delete Stored Link Key command. They are clearly indicating its
+ * absence in the bit mask of supported commands.
+ *
+ * Check the supported commands and only if the command is marked
+ * as supported send it. If not supported assume that the controller
+ * does not have actual support for stored link keys which makes this
+ * command redundant anyway.
+ *
+	 * Some controllers indicate that they support deleting stored
+	 * link keys, but they don't. The quirk lets a driver
+ * just disable this command.
+ */
+ if (!(hdev->commands[6] & 0x80) ||
+ test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, BDADDR_ANY);
+ cp.delete_all = 0x01;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
+{
+ u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ bool changed = false;
+
+ /* Set event mask page 2 if the HCI command for it is supported */
+ if (!(hdev->commands[22] & 0x04))
+ return 0;
+
+ /* If Connectionless Peripheral Broadcast central role is supported
+ * enable all necessary events for it.
+ */
+ if (lmp_cpb_central_capable(hdev)) {
+ events[1] |= 0x40; /* Triggered Clock Capture */
+ events[1] |= 0x80; /* Synchronization Train Complete */
+ events[2] |= 0x10; /* Peripheral Page Response Timeout */
+ events[2] |= 0x20; /* CPB Channel Map Change */
+ changed = true;
+ }
+
+ /* If Connectionless Peripheral Broadcast peripheral role is supported
+ * enable all necessary events for it.
+ */
+ if (lmp_cpb_peripheral_capable(hdev)) {
+ events[2] |= 0x01; /* Synchronization Train Received */
+ events[2] |= 0x02; /* CPB Receive */
+ events[2] |= 0x04; /* CPB Timeout */
+ events[2] |= 0x08; /* Truncated Page Complete */
+ changed = true;
+ }
+
+ /* Enable Authenticated Payload Timeout Expired event if supported */
+ if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
+ events[2] |= 0x80;
+ changed = true;
+ }
+
+ /* Some Broadcom based controllers indicate support for Set Event
+ * Mask Page 2 command, but then actually do not support it. Since
+ * the default value is all bits set to zero, the command is only
+ * required if the event mask has to be changed. In case no change
+ * to the event mask is needed, skip this command.
+ */
+ if (!changed)
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
+ sizeof(events), events, HCI_CMD_TIMEOUT);
+}
+
+/* Read local codec list if the HCI command is supported */
+static int hci_read_local_codecs_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[29] & 0x20))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL,
+ HCI_CMD_TIMEOUT);
+}
+
+/* Read local pairing options if the HCI command is supported */
+static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[41] & 0x08))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Get MWS transport configuration if the HCI command is supported */
+static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
+{
+ if (!(hdev->commands[30] & 0x08))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Check for Synchronization Train support */
+static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
+{
+ if (!lmp_sync_train_capable(hdev))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
+ 0, NULL, HCI_CMD_TIMEOUT);
+}
+
+/* Enable Secure Connections if supported and configured */
+static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
+{
+ u8 support = 0x01;
+
+ if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
+ !bredr_sc_enabled(hdev))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
+ sizeof(support), &support,
+ HCI_CMD_TIMEOUT);
+}
+
+/* If supported, set erroneous data reporting to match the wideband
+ * speech setting value
+ */
+static int hci_set_err_data_report_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_def_err_data_reporting cp;
+ bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
+
+ if (!(hdev->commands[18] & 0x08) ||
+ test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
+ return 0;
+
+ if (enabled == hdev->err_data_reporting)
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
+ ERR_DATA_REPORTING_DISABLED;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
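+/* HCI Controller init stage 4 command sequence */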
+static const struct hci_init_stage hci_init4[] = {
+ /* HCI_OP_DELETE_STORED_LINK_KEY */
+ HCI_INIT(hci_delete_stored_link_key_sync),
+ /* HCI_OP_SET_EVENT_MASK_PAGE_2 */
+ HCI_INIT(hci_set_event_mask_page_2_sync),
+ /* HCI_OP_READ_LOCAL_CODECS */
+ HCI_INIT(hci_read_local_codecs_sync),
+ /* HCI_OP_READ_LOCAL_PAIRING_OPTS */
+ HCI_INIT(hci_read_local_pairing_opts_sync),
+ /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
+ HCI_INIT(hci_get_mws_transport_config_sync),
+ /* HCI_OP_READ_SYNC_TRAIN_PARAMS */
+ HCI_INIT(hci_read_sync_train_params_sync),
+ /* HCI_OP_WRITE_SC_SUPPORT */
+ HCI_INIT(hci_write_sc_support_1_sync),
+ /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
+ HCI_INIT(hci_set_err_data_report_sync),
+ {}
+};
+
+/* Set Suggested Default Data Length to maximum if supported */
+static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_le_write_def_data_len cp;
+
+ if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
+ cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* Set Default PHY parameters if command is supported */
+static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_le_set_default_phy cp;
+
+ if (!(hdev->commands[35] & 0x20))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.all_phys = 0x00;
+ cp.tx_phys = hdev->le_tx_def_phys;
+ cp.rx_phys = hdev->le_rx_def_phys;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
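+/* LE Controller init stage 4 command sequence */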
+static const struct hci_init_stage le_init4[] = {
+ /* HCI_OP_LE_WRITE_DEF_DATA_LEN */
+ HCI_INIT(hci_le_set_write_def_data_len_sync),
+ /* HCI_OP_LE_SET_DEFAULT_PHY */
+ HCI_INIT(hci_le_set_default_phy_sync),
+ {}
+};
+
+static int hci_init4_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ err = hci_init_stage_sync(hdev, hci_init4);
+ if (err)
+ return err;
+
+ if (lmp_le_capable(hdev))
+ return hci_init_stage_sync(hdev, le_init4);
+
+ return 0;
+}
+
+static int hci_init_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ err = hci_init1_sync(hdev);
+ if (err < 0)
+ return err;
+
+ if (hci_dev_test_flag(hdev, HCI_SETUP))
+ hci_debugfs_create_basic(hdev);
+
+ err = hci_init2_sync(hdev);
+ if (err < 0)
+ return err;
+
+	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
+	 * dual-mode BR/EDR/LE controllers. AMP controllers only need the
+ * first two stages of init.
+ */
+ if (hdev->dev_type != HCI_PRIMARY)
+ return 0;
+
+ err = hci_init3_sync(hdev);
+ if (err < 0)
+ return err;
+
+ err = hci_init4_sync(hdev);
+ if (err < 0)
+ return err;
+
+ /* This function is only called when the controller is actually in
+ * configured state. When the controller is marked as unconfigured,
+ * this initialization procedure is not run.
+ *
+ * It means that it is possible that a controller runs through its
+ * setup phase and then discovers missing settings. If that is the
+ * case, then this function will not be called. It then will only
+ * be called during the config phase.
+ *
+ * So only when in setup phase or config phase, create the debugfs
+ * entries and register the SMP channels.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+ !hci_dev_test_flag(hdev, HCI_CONFIG))
+ return 0;
+
+ hci_debugfs_create_common(hdev);
+
+ if (lmp_bredr_capable(hdev))
+ hci_debugfs_create_bredr(hdev);
+
+ if (lmp_le_capable(hdev))
+ hci_debugfs_create_le(hdev);
+
+ return 0;
+}
+
+int hci_dev_open_sync(struct hci_dev *hdev)
+{
+ int ret = 0;
+
+ bt_dev_dbg(hdev, "");
+
+ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+ !hci_dev_test_flag(hdev, HCI_CONFIG)) {
+ /* Check for rfkill but allow the HCI setup stage to
+ * proceed (which in itself doesn't cause any RF activity).
+ */
+ if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
+ ret = -ERFKILL;
+ goto done;
+ }
+
+ /* Check for valid public address or a configured static
+ * random address, but let the HCI setup proceed to
+ * be able to determine if there is a public address
+ * or not.
+ *
+ * In case of user channel usage, it is not important
+ * if a public address or static random address is
+ * available.
+ *
+ * This check is only valid for BR/EDR controllers
+ * since AMP controllers do not have an address.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hdev->dev_type == HCI_PRIMARY &&
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ !bacmp(&hdev->static_addr, BDADDR_ANY)) {
+ ret = -EADDRNOTAVAIL;
+ goto done;
+ }
+ }
+
+ if (test_bit(HCI_UP, &hdev->flags)) {
+ ret = -EALREADY;
+ goto done;
+ }
+
+ if (hdev->open(hdev)) {
+ ret = -EIO;
+ goto done;
+ }
+
+ set_bit(HCI_RUNNING, &hdev->flags);
+ hci_sock_dev_event(hdev, HCI_DEV_OPEN);
+
+ atomic_set(&hdev->cmd_cnt, 1);
+ set_bit(HCI_INIT, &hdev->flags);
+
+ if (hci_dev_test_flag(hdev, HCI_SETUP) ||
+ test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
+ bool invalid_bdaddr;
+
+ hci_sock_dev_event(hdev, HCI_DEV_SETUP);
+
+ if (hdev->setup)
+ ret = hdev->setup(hdev);
+
+ /* The transport driver can set the quirk to mark the
+ * BD_ADDR invalid before creating the HCI device or in
+ * its setup callback.
+ */
+ invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
+ &hdev->quirks);
+
+ if (ret)
+ goto setup_failed;
+
+ if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
+ if (!bacmp(&hdev->public_addr, BDADDR_ANY))
+ hci_dev_get_bd_addr_from_property(hdev);
+
+ if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
+ hdev->set_bdaddr) {
+ ret = hdev->set_bdaddr(hdev,
+ &hdev->public_addr);
+
+ /* If setting of the BD_ADDR from the device
+ * property succeeds, then treat the address
+ * as valid even if the invalid BD_ADDR
+ * quirk indicates otherwise.
+ */
+ if (!ret)
+ invalid_bdaddr = false;
+ }
+ }
+
+setup_failed:
+ /* The transport driver can set these quirks before
+ * creating the HCI device or in its setup callback.
+ *
+ * For the invalid BD_ADDR quirk it is possible that
+ * it becomes a valid address if the bootloader does
+ * provide it (see above).
+ *
+ * In case any of them is set, the controller has to
+ * start up as unconfigured.
+ */
+ if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
+ invalid_bdaddr)
+ hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
+
+ /* For an unconfigured controller it is required to
+ * read at least the version information provided by
+ * the Read Local Version Information command.
+ *
+ * If the set_bdaddr driver callback is provided, then
+ * also the original Bluetooth public device address
+ * will be read using the Read BD Address command.
+ */
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+ ret = hci_unconf_init_sync(hdev);
+ }
+
+ if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
+ /* If public address change is configured, ensure that
+ * the address gets programmed. If the driver does not
+ * support changing the public address, fail the power
+ * on procedure.
+ */
+ if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
+ hdev->set_bdaddr)
+ ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
+ else
+ ret = -EADDRNOTAVAIL;
+ }
+
+ if (!ret) {
+ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+ ret = hci_init_sync(hdev);
+ if (!ret && hdev->post_init)
+ ret = hdev->post_init(hdev);
+ }
+ }
+
+ /* If the HCI Reset command is clearing all diagnostic settings,
+	 * then they need to be reprogrammed after the init procedure
+	 * has completed.
+ */
+ if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
+ ret = hdev->set_diag(hdev, true);
+
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+ msft_do_open(hdev);
+ aosp_do_open(hdev);
+ }
+
+ clear_bit(HCI_INIT, &hdev->flags);
+
+ if (!ret) {
+ hci_dev_hold(hdev);
+ hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+ hci_adv_instances_set_rpa_expired(hdev, true);
+ set_bit(HCI_UP, &hdev->flags);
+ hci_sock_dev_event(hdev, HCI_DEV_UP);
+ hci_leds_update_powered(hdev, true);
+ if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
+ !hci_dev_test_flag(hdev, HCI_CONFIG) &&
+ !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_dev_test_flag(hdev, HCI_MGMT) &&
+ hdev->dev_type == HCI_PRIMARY) {
+ ret = hci_powered_update_sync(hdev);
+ }
+ } else {
+ /* Init failed, cleanup */
+ flush_work(&hdev->tx_work);
+
+		/* Since hci_rx_work() may queue new cmd_work, it should
+		 * be flushed first to avoid an unexpected call of
+		 * hci_cmd_work()
+ */
+ flush_work(&hdev->rx_work);
+ flush_work(&hdev->cmd_work);
+
+ skb_queue_purge(&hdev->cmd_q);
+ skb_queue_purge(&hdev->rx_q);
+
+ if (hdev->flush)
+ hdev->flush(hdev);
+
+ if (hdev->sent_cmd) {
+ kfree_skb(hdev->sent_cmd);
+ hdev->sent_cmd = NULL;
+ }
+
+ clear_bit(HCI_RUNNING, &hdev->flags);
+ hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+
+ hdev->close(hdev);
+ hdev->flags &= BIT(HCI_RAW);
+ }
+
+done:
+ return ret;
+}
+
+/* This function requires the caller holds hdev->lock */
+static void hci_pend_le_actions_clear(struct hci_dev *hdev)
+{
+ struct hci_conn_params *p;
+
+ list_for_each_entry(p, &hdev->le_conn_params, list) {
+ if (p->conn) {
+ hci_conn_drop(p->conn);
+ hci_conn_put(p->conn);
+ p->conn = NULL;
+ }
+ list_del_init(&p->action);
+ }
+
+ BT_DBG("All LE pending actions cleared");
+}
+
+int hci_dev_close_sync(struct hci_dev *hdev)
+{
+ bool auto_off;
+ int err = 0;
+
+ bt_dev_dbg(hdev, "");
+
+ cancel_delayed_work(&hdev->power_off);
+ cancel_delayed_work(&hdev->ncmd_timer);
+
+ hci_request_cancel_all(hdev);
+
+ if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ test_bit(HCI_UP, &hdev->flags)) {
+ /* Execute vendor specific shutdown routine */
+ if (hdev->shutdown)
+ err = hdev->shutdown(hdev);
+ }
+
+ if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
+ cancel_delayed_work_sync(&hdev->cmd_timer);
+ return err;
+ }
+
+ hci_leds_update_powered(hdev, false);
+
+ /* Flush RX and TX works */
+ flush_work(&hdev->tx_work);
+ flush_work(&hdev->rx_work);
+
+ if (hdev->discov_timeout > 0) {
+ hdev->discov_timeout = 0;
+ hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+ hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+ }
+
+ if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
+ cancel_delayed_work(&hdev->service_cache);
+
+ if (hci_dev_test_flag(hdev, HCI_MGMT)) {
+ struct adv_info *adv_instance;
+
+ cancel_delayed_work_sync(&hdev->rpa_expired);
+
+ list_for_each_entry(adv_instance, &hdev->adv_instances, list)
+ cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
+ }
+
+ /* Avoid potential lockdep warnings from the *_flush() calls by
+ * ensuring the workqueue is empty up front.
+ */
+ drain_workqueue(hdev->workqueue);
+
+ hci_dev_lock(hdev);
+
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
+ auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
+
+ if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
+ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_dev_test_flag(hdev, HCI_MGMT))
+ __mgmt_power_off(hdev);
+
+ hci_inquiry_cache_flush(hdev);
+ hci_pend_le_actions_clear(hdev);
+ hci_conn_hash_flush(hdev);
+ hci_dev_unlock(hdev);
+
+ smp_unregister(hdev);
+
+ hci_sock_dev_event(hdev, HCI_DEV_DOWN);
+
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+ aosp_do_close(hdev);
+ msft_do_close(hdev);
+ }
+
+ if (hdev->flush)
+ hdev->flush(hdev);
+
+ /* Reset device */
+ skb_queue_purge(&hdev->cmd_q);
+ atomic_set(&hdev->cmd_cnt, 1);
+ if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
+ !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
+ set_bit(HCI_INIT, &hdev->flags);
+ hci_reset_sync(hdev);
+ clear_bit(HCI_INIT, &hdev->flags);
+ }
+
+ /* flush cmd work */
+ flush_work(&hdev->cmd_work);
+
+ /* Drop queues */
+ skb_queue_purge(&hdev->rx_q);
+ skb_queue_purge(&hdev->cmd_q);
+ skb_queue_purge(&hdev->raw_q);
+
+ /* Drop last sent command */
+ if (hdev->sent_cmd) {
+ cancel_delayed_work_sync(&hdev->cmd_timer);
+ kfree_skb(hdev->sent_cmd);
+ hdev->sent_cmd = NULL;
+ }
+
+ clear_bit(HCI_RUNNING, &hdev->flags);
+ hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
+
+ /* After this point our queues are empty and no tasks are scheduled. */
+ hdev->close(hdev);
+
+ /* Clear flags */
+ hdev->flags &= BIT(HCI_RAW);
+ hci_dev_clear_volatile_flags(hdev);
+
+ /* Controller radio is available but is currently powered down */
+ hdev->amp_status = AMP_STATUS_POWERED_DOWN;
+
+ memset(hdev->eir, 0, sizeof(hdev->eir));
+ memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+ bacpy(&hdev->random_addr, BDADDR_ANY);
+
+ hci_dev_put(hdev);
+ return err;
+}
+
+/* This function performs the power on HCI command sequence as follows:
+ *
+ * If the controller is already up (HCI_UP) it performs the
+ * hci_powered_update_sync sequence, otherwise it runs hci_dev_open_sync,
+ * which is followed by hci_powered_update_sync once the init sequence
+ * has completed.
+ */
+static int hci_power_on_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ if (test_bit(HCI_UP, &hdev->flags) &&
+ hci_dev_test_flag(hdev, HCI_MGMT) &&
+ hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
+ cancel_delayed_work(&hdev->power_off);
+ return hci_powered_update_sync(hdev);
+ }
+
+ err = hci_dev_open_sync(hdev);
+ if (err < 0)
+ return err;
+
+ /* During the HCI setup phase, a few error conditions are
+ * ignored and they need to be checked now. If they are still
+	 * valid, it is important to turn the device back off.
+ */
+ if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
+ hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
+ (hdev->dev_type == HCI_PRIMARY &&
+ !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+ !bacmp(&hdev->static_addr, BDADDR_ANY))) {
+ hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
+ hci_dev_close_sync(hdev);
+ } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
+ queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
+ HCI_AUTO_OFF_TIMEOUT);
+ }
+
+ if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
+ /* For unconfigured devices, set the HCI_RAW flag
+ * so that userspace can easily identify them.
+ */
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+ set_bit(HCI_RAW, &hdev->flags);
+
+ /* For fully configured devices, this will send
+ * the Index Added event. For unconfigured devices,
+		 * it will send the Unconfigured Index Added event.
+ *
+ * Devices with HCI_QUIRK_RAW_DEVICE are ignored
+		 * and no event will be sent.
+ */
+ mgmt_index_added(hdev);
+ } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
+		/* Now that the controller is configured, it is
+		 * important to clear the HCI_RAW flag.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+ clear_bit(HCI_RAW, &hdev->flags);
+
+ /* Powering on the controller with HCI_CONFIG set only
+ * happens with the transition from unconfigured to
+ * configured. This will send the Index Added event.
+ */
+ mgmt_index_added(hdev);
+ }
+
+ return 0;
+}
+
+static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
+{
+ struct hci_cp_remote_name_req_cancel cp;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, addr);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_stop_discovery_sync(struct hci_dev *hdev)
+{
+ struct discovery_state *d = &hdev->discovery;
+ struct inquiry_entry *e;
+ int err;
+
+ bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
+
+ if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
+ if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+ err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
+ 0, NULL, HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+ }
+
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
+ cancel_delayed_work(&hdev->le_scan_disable);
+ cancel_delayed_work(&hdev->le_scan_restart);
+
+ err = hci_scan_disable_sync(hdev);
+ if (err)
+ return err;
+ }
+
+ } else {
+ err = hci_scan_disable_sync(hdev);
+ if (err)
+ return err;
+ }
+
+ /* Resume advertising if it was paused */
+ if (use_ll_privacy(hdev))
+ hci_resume_advertising_sync(hdev);
+
+ /* No further actions needed for LE-only discovery */
+ if (d->type == DISCOV_TYPE_LE)
+ return 0;
+
+ if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
+ e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+ NAME_PENDING);
+ if (!e)
+ return 0;
+
+ return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
+ }
+
+ return 0;
+}
+
+static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle,
+ u8 reason)
+{
+ struct hci_cp_disconn_phy_link cp;
+
+ memset(&cp, 0, sizeof(cp));
+ cp.phy_handle = HCI_PHY_HANDLE(handle);
+ cp.reason = reason;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 reason)
+{
+ struct hci_cp_disconnect cp;
+
+ if (conn->type == AMP_LINK)
+ return hci_disconnect_phy_link_sync(hdev, conn->handle, reason);
+
+ memset(&cp, 0, sizeof(cp));
+ cp.handle = cpu_to_le16(conn->handle);
+ cp.reason = reason;
+
+ /* Wait for HCI_EV_DISCONN_COMPLETE not HCI_EV_CMD_STATUS when not
+ * suspending.
+ */
+ if (!hdev->suspended)
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
+ sizeof(cp), &cp,
+ HCI_EV_DISCONN_COMPLETE,
+ HCI_CMD_TIMEOUT, NULL);
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
+ struct hci_conn *conn)
+{
+ if (test_bit(HCI_CONN_SCANNING, &conn->flags))
+ return 0;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
+ 6, &conn->dst, HCI_CMD_TIMEOUT);
+}
+
+static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ if (conn->type == LE_LINK)
+ return hci_le_connect_cancel_sync(hdev, conn);
+
+ if (hdev->hci_ver < BLUETOOTH_VER_1_2)
+ return 0;
+
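+	/* Create Connection Cancel takes the 6-byte BD_ADDR of the peer */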
+ return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
+ 6, &conn->dst, HCI_CMD_TIMEOUT);
+}
+
+static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 reason)
+{
+ struct hci_cp_reject_sync_conn_req cp;
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.reason = reason;
+
+ /* SCO rejection has its own limited set of
+ * allowed error values (0x0D-0x0F).
+ */
+ if (reason < 0x0d || reason > 0x0f)
+ cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 reason)
+{
+ struct hci_cp_reject_conn_req cp;
+
+ if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
+ return hci_reject_sco_sync(hdev, conn, reason);
+
+ memset(&cp, 0, sizeof(cp));
+ bacpy(&cp.bdaddr, &conn->dst);
+ cp.reason = reason;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 reason)
+{
+ switch (conn->state) {
+ case BT_CONNECTED:
+ case BT_CONFIG:
+ return hci_disconnect_sync(hdev, conn, reason);
+ case BT_CONNECT:
+ return hci_connect_cancel_sync(hdev, conn);
+ case BT_CONNECT2:
+ return hci_reject_conn_sync(hdev, conn, reason);
+ default:
+ conn->state = BT_CLOSED;
+ break;
+ }
+
+ return 0;
+}
+
+static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
+{
+ struct hci_conn *conn, *tmp;
+ int err;
+
+ list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
+ err = hci_abort_conn_sync(hdev, conn, reason);
+ if (err)
+ return err;
+ }
+
+	return 0;
+}
+
+/* This function performs the power off HCI command sequence as follows:
+ *
+ * Clear Advertising
+ * Stop Discovery
+ * Disconnect all connections
+ * hci_dev_close_sync
+ */
+static int hci_power_off_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ /* If controller is already down there is nothing to do */
+ if (!test_bit(HCI_UP, &hdev->flags))
+ return 0;
+
+ if (test_bit(HCI_ISCAN, &hdev->flags) ||
+ test_bit(HCI_PSCAN, &hdev->flags)) {
+ err = hci_write_scan_enable_sync(hdev, 0x00);
+ if (err)
+ return err;
+ }
+
+ err = hci_clear_adv_sync(hdev, NULL, false);
+ if (err)
+ return err;
+
+ err = hci_stop_discovery_sync(hdev);
+ if (err)
+ return err;
+
+ /* Terminated due to Power Off */
+ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
+ if (err)
+ return err;
+
+ return hci_dev_close_sync(hdev);
+}
+
+int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
+{
+ if (val)
+ return hci_power_on_sync(hdev);
+
+ return hci_power_off_sync(hdev);
+}
+
+static int hci_write_iac_sync(struct hci_dev *hdev)
+{
+ struct hci_cp_write_current_iac_lap cp;
+
+ if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
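+	/* IAC LAPs are transmitted little-endian: GIAC is 0x9e8b33 and
+	 * LIAC is 0x9e8b00.
+	 */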
+ if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
+ /* Limited discoverable mode */
+ cp.num_iac = min_t(u8, hdev->num_iac, 2);
+ cp.iac_lap[0] = 0x00; /* LIAC */
+ cp.iac_lap[1] = 0x8b;
+ cp.iac_lap[2] = 0x9e;
+ cp.iac_lap[3] = 0x33; /* GIAC */
+ cp.iac_lap[4] = 0x8b;
+ cp.iac_lap[5] = 0x9e;
+ } else {
+ /* General discoverable mode */
+ cp.num_iac = 1;
+ cp.iac_lap[0] = 0x33; /* GIAC */
+ cp.iac_lap[1] = 0x8b;
+ cp.iac_lap[2] = 0x9e;
+ }
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
+ (cp.num_iac * 3) + 1, &cp,
+ HCI_CMD_TIMEOUT);
+}
+
+int hci_update_discoverable_sync(struct hci_dev *hdev)
+{
+ int err = 0;
+
+ if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+ err = hci_write_iac_sync(hdev);
+ if (err)
+ return err;
+
+ err = hci_update_scan_sync(hdev);
+ if (err)
+ return err;
+
+ err = hci_update_class_sync(hdev);
+ if (err)
+ return err;
+ }
+
+ /* Advertising instances don't use the global discoverable setting, so
+ * only update AD if advertising was enabled using Set Advertising.
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
+ err = hci_update_adv_data_sync(hdev, 0x00);
+ if (err)
+ return err;
+
+ /* Discoverable mode affects the local advertising
+ * address in limited privacy mode.
+ */
+ if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
+ if (ext_adv_capable(hdev))
+ err = hci_start_ext_adv_sync(hdev, 0x00);
+ else
+ err = hci_enable_advertising_sync(hdev);
+ }
+ }
+
+ return err;
+}
+
+static int update_discoverable_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_update_discoverable_sync(hdev);
+}
+
+int hci_update_discoverable(struct hci_dev *hdev)
+{
+ /* Only queue if it would have any effect */
+ if (hdev_is_powered(hdev) &&
+ hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+ hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
+ hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
+ return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
+ NULL);
+
+ return 0;
+}
+
+int hci_update_connectable_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ err = hci_update_scan_sync(hdev);
+ if (err)
+ return err;
+
+ /* If BR/EDR is not enabled and we disable advertising as a
+ * by-product of disabling connectable, we need to update the
+ * advertising flags.
+ */
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
+
+ /* Update the advertising parameters if necessary */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+ !list_empty(&hdev->adv_instances)) {
+ if (ext_adv_capable(hdev))
+ err = hci_start_ext_adv_sync(hdev,
+ hdev->cur_adv_instance);
+ else
+ err = hci_enable_advertising_sync(hdev);
+
+ if (err)
+ return err;
+ }
+
+ return hci_update_passive_scan_sync(hdev);
+}
+
+static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
+{
+ const u8 giac[3] = { 0x33, 0x8b, 0x9e };
+ const u8 liac[3] = { 0x00, 0x8b, 0x9e };
+ struct hci_cp_inquiry cp;
+
+ bt_dev_dbg(hdev, "");
+
+ if (hci_dev_test_flag(hdev, HCI_INQUIRY))
+ return 0;
+
+ hci_dev_lock(hdev);
+ hci_inquiry_cache_flush(hdev);
+ hci_dev_unlock(hdev);
+
+ memset(&cp, 0, sizeof(cp));
+
+ if (hdev->discovery.limited)
+ memcpy(&cp.lap, liac, sizeof(cp.lap));
+ else
+ memcpy(&cp.lap, giac, sizeof(cp.lap));
+
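+	/* Inquiry_Length is expressed in units of 1.28 seconds */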
+ cp.length = length;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
+{
+ u8 own_addr_type;
+ /* Accept list is not used for discovery */
+ u8 filter_policy = 0x00;
+ /* Default is to enable duplicates filter */
+ u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
+ /* If controller is scanning, it means the passive scanning is
+ * running. Thus, we should temporarily stop it in order to set the
+ * discovery scanning parameters.
+ */
+ err = hci_scan_disable_sync(hdev);
+ if (err) {
+ bt_dev_err(hdev, "Unable to disable scanning: %d", err);
+ return err;
+ }
+
+ cancel_interleave_scan(hdev);
+
+	/* Pause advertising since active scanning disables address resolution,
+	 * which advertising depends on in order to generate its RPAs.
+ */
+ if (use_ll_privacy(hdev)) {
+ err = hci_pause_advertising_sync(hdev);
+ if (err) {
+ bt_dev_err(hdev, "pause advertising failed: %d", err);
+ goto failed;
+ }
+ }
+
+ /* Disable address resolution while doing active scanning since the
+ * accept list shall not be used and all reports shall reach the host
+ * anyway.
+ */
+ err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
+ if (err) {
+ bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
+ err);
+ goto failed;
+ }
+
+ /* All active scans will be done with either a resolvable private
+ * address (when privacy feature has been enabled) or non-resolvable
+ * private address.
+ */
+ err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
+ &own_addr_type);
+ if (err < 0)
+ own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+ if (hci_is_adv_monitoring(hdev)) {
+ /* The duplicate filter should be disabled when an advertisement
+ * monitor is active, otherwise AdvMon can only receive one
+ * advertisement per peer during active scanning, and might
+ * wrongly report those peers as lost.
+ *
+ * Note that different controllers have different meanings of
+ * |duplicate|. Some of them consider packets with the same
+ * address as duplicates, and others consider packets with the
+ * same address and the same RSSI as duplicates. Although in the
+ * latter case we don't need to disable the duplicate filter,
+ * active scanning usually only runs for a short period of
+ * time, so the power impact should be negligible.
+ */
+ filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
+ }
+
+ err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
+ hdev->le_scan_window_discovery,
+ own_addr_type, filter_policy, filter_dup);
+ if (!err)
+ return err;
+
+failed:
+ /* Resume advertising if it was paused */
+ if (use_ll_privacy(hdev))
+ hci_resume_advertising_sync(hdev);
+
+ /* Resume passive scanning */
+ hci_update_passive_scan_sync(hdev);
+ return err;
+}
+
+static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ bt_dev_dbg(hdev, "");
+
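+ /* Double the LE scan interval to leave the controller enough idle time
+ * for the BR/EDR inquiry that runs alongside (see the simultaneous
+ * discovery handling in hci_start_discovery_sync()). */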
+ err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
+ if (err)
+ return err;
+
+ return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+}
+
+int hci_start_discovery_sync(struct hci_dev *hdev)
+{
+ unsigned long timeout;
+ int err;
+
+ bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
+
+ switch (hdev->discovery.type) {
+ case DISCOV_TYPE_BREDR:
+ return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+ case DISCOV_TYPE_INTERLEAVED:
+ /* When running simultaneous discovery, the LE scanning time
+ * should occupy the whole discovery time since BR/EDR inquiry
+ * and LE scanning are scheduled by the controller.
+ *
+ * For interleaving discovery in comparison, BR/EDR inquiry
+ * and LE scanning are done sequentially with separate
+ * timeouts.
+ */
+ if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+ &hdev->quirks)) {
+ timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+ /* During simultaneous discovery, we double LE scan
+ * interval. We must leave some time for the controller
+ * to do BR/EDR inquiry.
+ */
+ err = hci_start_interleaved_discovery_sync(hdev);
+ break;
+ }
+
+ timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
+ err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
+ break;
+ case DISCOV_TYPE_LE:
+ timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+ err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (err)
+ return err;
+
+ bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
+
+ /* When service discovery is used and the controller has a
+ * strict duplicate filter, it is important to remember the
+ * start and duration of the scan. This is required for
+ * restarting scanning during the discovery phase.
+ */
+ if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
+ hdev->discovery.result_filtering) {
+ hdev->discovery.scan_start = jiffies;
+ hdev->discovery.scan_duration = timeout;
+ }
+
+ queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
+ timeout);
+ return 0;
+}
+
+static void hci_suspend_monitor_sync(struct hci_dev *hdev)
+{
+ switch (hci_get_adv_monitor_offload_ext(hdev)) {
+ case HCI_ADV_MONITOR_EXT_MSFT:
+ msft_suspend_sync(hdev);
+ break;
+ default:
+ return;
+ }
+}
+
+/* This function disables discovery and marks it as paused */
+static int hci_pause_discovery_sync(struct hci_dev *hdev)
+{
+ int old_state = hdev->discovery.state;
+ int err;
+
+ /* If discovery is already stopped/stopping/paused there is nothing to do */
+ if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
+ hdev->discovery_paused)
+ return 0;
+
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+ err = hci_stop_discovery_sync(hdev);
+ if (err)
+ return err;
+
+ hdev->discovery_paused = true;
+ hdev->discovery_old_state = old_state;
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
+ return 0;
+}
+
+static int hci_update_event_filter_sync(struct hci_dev *hdev)
+{
+ struct bdaddr_list_with_flags *b;
+ u8 scan = SCAN_DISABLED;
+ bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
+ int err;
+
+ if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+ return 0;
+
+ /* Always clear event filter when starting */
+ hci_clear_event_filter_sync(hdev);
+
+ list_for_each_entry(b, &hdev->accept_list, list) {
+ if (!test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, b->flags))
+ continue;
+
+ bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
+
+ err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
+ HCI_CONN_SETUP_ALLOW_BDADDR,
+ &b->bdaddr,
+ HCI_CONN_SETUP_AUTO_ON);
+ if (err)
+ bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
+ &b->bdaddr);
+ else
+ scan = SCAN_PAGE;
+ }
+
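+ /* Only write the page scan setting when it actually changes */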
+ if (scan && !scanning)
+ hci_write_scan_enable_sync(hdev, scan);
+ else if (!scan && scanning)
+ hci_write_scan_enable_sync(hdev, scan);
+
+ return 0;
+}
+
+/* This function performs the HCI suspend procedures in the following order:
+ *
+ * Pause discovery (active scanning/inquiry)
+ * Pause Directed Advertising/Advertising
+ * Disconnect all connections
+ * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
+ * otherwise:
+ * Update event mask (only set events that are allowed to wake up the host)
+ * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
+ * Update passive scanning (lower duty cycle)
+ * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
+ */
+int hci_suspend_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ /* If already marked as suspended there is nothing to do */
+ if (hdev->suspended)
+ return 0;
+
+ /* Mark device as suspended */
+ hdev->suspended = true;
+
+ /* Pause discovery if not already stopped */
+ hci_pause_discovery_sync(hdev);
+
+ /* Pause other advertisements */
+ hci_pause_advertising_sync(hdev);
+
+ /* Disable page scan if enabled */
+ if (test_bit(HCI_PSCAN, &hdev->flags))
+ hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
+
+ /* Suspend monitor filters */
+ hci_suspend_monitor_sync(hdev);
+
+ /* Prevent disconnects from causing scanning to be re-enabled */
+ hdev->scanning_paused = true;
+
+ /* Soft disconnect everything (power off) */
+ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
+ if (err) {
+ /* Set state to BT_RUNNING so resume doesn't notify */
+ hdev->suspend_state = BT_RUNNING;
+ hci_resume_sync(hdev);
+ return err;
+ }
+
+ /* Only configure accept list if disconnect succeeded and wake
+ * isn't being prevented.
+ */
+ if (!hdev->wakeup || !hdev->wakeup(hdev)) {
+ hdev->suspend_state = BT_SUSPEND_DISCONNECT;
+ return 0;
+ }
+
+ /* Unpause to take care of updating scanning params */
+ hdev->scanning_paused = false;
+
+ /* Update event mask so only the allowed events can wake up the host */
+ hci_set_event_mask_sync(hdev);
+
+ /* Enable event filter for devices marked for remote wakeup */
+ hci_update_event_filter_sync(hdev);
+
+ /* Update LE passive scan if enabled */
+ hci_update_passive_scan_sync(hdev);
+
+ /* Pause scan changes again. */
+ hdev->scanning_paused = true;
+
+ hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
+
+ return 0;
+}
+
+/* This function resumes discovery */
+static int hci_resume_discovery_sync(struct hci_dev *hdev)
+{
+ int err;
+
+ /* If discovery wasn't paused there is nothing to do */
+ if (!hdev->discovery_paused)
+ return 0;
+
+ hdev->discovery_paused = false;
+
+ hci_discovery_set_state(hdev, DISCOVERY_STARTING);
+
+ err = hci_start_discovery_sync(hdev);
+
+ hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
+ DISCOVERY_FINDING);
+
+ return err;
+}
+
+static void hci_resume_monitor_sync(struct hci_dev *hdev)
+{
+ switch (hci_get_adv_monitor_offload_ext(hdev)) {
+ case HCI_ADV_MONITOR_EXT_MSFT:
+ msft_resume_sync(hdev);
+ break;
+ default:
+ return;
+ }
+}
+
+/* This function performs the HCI resume procedures in the following order:
+ *
+ * Restore event mask
+ * Clear event filter
+ * Update passive scanning (normal duty cycle)
+ * Resume Directed Advertising/Advertising
+ * Resume discovery (active scanning/inquiry)
+ */
+int hci_resume_sync(struct hci_dev *hdev)
+{
+ /* If not marked as suspended there is nothing to do */
+ if (!hdev->suspended)
+ return 0;
+
+ hdev->suspended = false;
+ hdev->scanning_paused = false;
+
+ /* Restore event mask */
+ hci_set_event_mask_sync(hdev);
+
+ /* Clear any event filters and restore scan state */
+ hci_clear_event_filter_sync(hdev);
+ hci_update_scan_sync(hdev);
+
+ /* Reset passive scanning to normal */
+ hci_update_passive_scan_sync(hdev);
+
+ /* Resume monitor filters */
+ hci_resume_monitor_sync(hdev);
+
+ /* Resume other advertisements */
+ hci_resume_advertising_sync(hdev);
+
+ /* Resume discovery */
+ hci_resume_discovery_sync(hdev);
+
+ return 0;
+}
+
+static bool conn_use_rpa(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ return hci_dev_test_flag(hdev, HCI_PRIVACY);
+}
+
+static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
+ struct hci_conn *conn)
+{
+ struct hci_cp_le_set_ext_adv_params cp;
+ int err;
+ bdaddr_t random_addr;
+ u8 own_addr_type;
+
+ err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
+ &own_addr_type);
+ if (err)
+ return err;
+
+ /* Set require_privacy to false so that the remote device has a
+ * chance of identifying us.
+ */
+ err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
+ &own_addr_type, &random_addr);
+ if (err)
+ return err;
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
+ cp.own_addr_type = own_addr_type;
+ cp.channel_map = hdev->le_adv_channel_map;
+ cp.tx_power = HCI_TX_POWER_INVALID;
+ cp.primary_phy = HCI_ADV_PHY_1M;
+ cp.secondary_phy = HCI_ADV_PHY_1M;
+ cp.handle = 0x00; /* Use instance 0 for directed adv */
+ cp.own_addr_type = own_addr_type;
+ cp.peer_addr_type = conn->dst_type;
+ bacpy(&cp.peer_addr, &conn->dst);
+
+ /* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53, the
+ * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
+ * does not support advertising data. When the advertising set already
+ * contains some, the controller shall return the error code 'Invalid
+ * HCI Command Parameters (0x12)'.
+ * So the adv set for handle 0x00 has to be removed, since we use
+ * instance 0 for directed advertising.
+ */
+ err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
+ if (err)
+ return err;
+
+ err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (err)
+ return err;
+
+ /* Check if the random address needs to be updated */
+ if (own_addr_type == ADDR_LE_DEV_RANDOM &&
+ bacmp(&random_addr, BDADDR_ANY) &&
+ bacmp(&random_addr, &hdev->random_addr)) {
+ err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
+ &random_addr);
+ if (err)
+ return err;
+ }
+
+ return hci_enable_ext_advertising_sync(hdev, 0x00);
+}
+
+static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
+ struct hci_conn *conn)
+{
+ struct hci_cp_le_set_adv_param cp;
+ u8 status;
+ u8 own_addr_type;
+ u8 enable;
+
+ if (ext_adv_capable(hdev))
+ return hci_le_ext_directed_advertising_sync(hdev, conn);
+
+ /* Clear the HCI_LE_ADV bit temporarily so that the
+ * hci_update_random_address_sync() knows that it's safe to go ahead
+ * and write a new random address. The flag will be set back on
+ * as soon as the SET_ADV_ENABLE HCI command completes.
+ */
+ hci_dev_clear_flag(hdev, HCI_LE_ADV);
+
+ /* Set require_privacy to false so that the remote device has a
+ * chance of identifying us.
+ */
+ status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
+ &own_addr_type);
+ if (status)
+ return status;
+
+ memset(&cp, 0, sizeof(cp));
+
+ /* Some controllers might reject the command if the intervals are not
+ * within range for undirected advertising.
+ * BCM20702A0 is known to be affected by this.
+ */
+ cp.min_interval = cpu_to_le16(0x0020);
+ cp.max_interval = cpu_to_le16(0x0020);
+
+ cp.type = LE_ADV_DIRECT_IND;
+ cp.own_address_type = own_addr_type;
+ cp.direct_addr_type = conn->dst_type;
+ bacpy(&cp.direct_addr, &conn->dst);
+ cp.channel_map = hdev->le_adv_channel_map;
+
+ status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+ if (status)
+ return status;
+
+ enable = 0x01;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
+ sizeof(enable), &enable, HCI_CMD_TIMEOUT);
+}
+
+static void set_ext_conn_params(struct hci_conn *conn,
+ struct hci_cp_le_ext_conn_param *p)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ memset(p, 0, sizeof(*p));
+
+ p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
+ p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
+ p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
+ p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
+ p->conn_latency = cpu_to_le16(conn->le_conn_latency);
+ p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
+ p->min_ce_len = cpu_to_le16(0x0000);
+ p->max_ce_len = cpu_to_le16(0x0000);
+}
+
+int hci_le_ext_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ u8 own_addr_type)
+{
+ struct hci_cp_le_ext_create_conn *cp;
+ struct hci_cp_le_ext_conn_param *p;
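+ /* Room for the base command plus one parameter block per PHY (1M, 2M
+ * and Coded) */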
+ u8 data[sizeof(*cp) + sizeof(*p) * 3];
+ u32 plen;
+
+ cp = (void *)data;
+ p = (void *)cp->data;
+
+ memset(cp, 0, sizeof(*cp));
+
+ bacpy(&cp->peer_addr, &conn->dst);
+ cp->peer_addr_type = conn->dst_type;
+ cp->own_addr_type = own_addr_type;
+
+ plen = sizeof(*cp);
+
+ if (scan_1m(hdev)) {
+ cp->phys |= LE_SCAN_PHY_1M;
+ set_ext_conn_params(conn, p);
+
+ p++;
+ plen += sizeof(*p);
+ }
+
+ if (scan_2m(hdev)) {
+ cp->phys |= LE_SCAN_PHY_2M;
+ set_ext_conn_params(conn, p);
+
+ p++;
+ plen += sizeof(*p);
+ }
+
+ if (scan_coded(hdev)) {
+ cp->phys |= LE_SCAN_PHY_CODED;
+ set_ext_conn_params(conn, p);
+
+ plen += sizeof(*p);
+ }
+
+ return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
+ plen, data,
+ HCI_EV_LE_ENHANCED_CONN_COMPLETE,
+ HCI_CMD_TIMEOUT, NULL);
+}
+
+int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+ struct hci_cp_le_create_conn cp;
+ struct hci_conn_params *params;
+ u8 own_addr_type;
+ int err;
+
+ /* If requested to connect as peripheral use directed advertising */
+ if (conn->role == HCI_ROLE_SLAVE) {
+ /* If we're actively scanning and simultaneous roles are not
+ * enabled, simply reject the attempt.
+ */
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
+ hdev->le_scan_type == LE_SCAN_ACTIVE &&
+ !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
+ hci_conn_del(conn);
+ return -EBUSY;
+ }
+
+ /* Pause advertising while doing directed advertising. */
+ hci_pause_advertising_sync(hdev);
+
+ err = hci_le_directed_advertising_sync(hdev, conn);
+ goto done;
+ }
+
+ /* Disable advertising if simultaneous roles is not in use. */
+ if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
+ hci_pause_advertising_sync(hdev);
+
+ params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+ if (params) {
+ conn->le_conn_min_interval = params->conn_min_interval;
+ conn->le_conn_max_interval = params->conn_max_interval;
+ conn->le_conn_latency = params->conn_latency;
+ conn->le_supv_timeout = params->supervision_timeout;
+ } else {
+ conn->le_conn_min_interval = hdev->le_conn_min_interval;
+ conn->le_conn_max_interval = hdev->le_conn_max_interval;
+ conn->le_conn_latency = hdev->le_conn_latency;
+ conn->le_supv_timeout = hdev->le_supv_timeout;
+ }
+
+ /* If controller is scanning, we stop it since some controllers are
+ * not able to scan and connect at the same time. Also set the
+ * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
+ * handler for scan disabling knows to set the correct discovery
+ * state.
+ */
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
+ hci_scan_disable_sync(hdev);
+ hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
+ }
+
+ /* Update random address, but set require_privacy to false so
+ * that we never connect with a non-resolvable address.
+ */
+ err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
+ &own_addr_type);
+ if (err)
+ goto done;
+
+ if (use_ext_conn(hdev)) {
+ err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
+ goto done;
+ }
+
+ memset(&cp, 0, sizeof(cp));
+
+ cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
+ cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
+
+ bacpy(&cp.peer_addr, &conn->dst);
+ cp.peer_addr_type = conn->dst_type;
+ cp.own_address_type = own_addr_type;
+ cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
+ cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
+ cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
+ cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
+ cp.min_ce_len = cpu_to_le16(0x0000);
+ cp.max_ce_len = cpu_to_le16(0x0000);
+
+ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
+ sizeof(cp), &cp, HCI_EV_LE_CONN_COMPLETE,
+ HCI_CMD_TIMEOUT, NULL);
+
+done:
+ /* Re-enable advertising after the connection attempt is finished. */
+ hci_resume_advertising_sync(hdev);
+ return err;
+}
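As a usage sketch (assuming only the hci_cmd_sync_queue() API introduced above; the example_* names are hypothetical, not part of the patch), a caller provides a work callback that may block on HCI traffic and an optional completion callback that receives its result:

/* Work callback: runs from hdev's cmd_sync work and may block on HCI I/O. */
static int example_connectable_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_connectable_sync(hdev);
}

/* Completion callback: receives the work callback's return value in err. */
static void example_connectable_done(struct hci_dev *hdev, void *data, int err)
{
	bt_dev_dbg(hdev, "connectable update finished: %d", err);
}

/* Queue the operation from process context; NULL callbacks are allowed,
 * as in hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL, NULL).
 */
err = hci_cmd_sync_queue(hdev, example_connectable_sync, NULL,
			 example_connectable_done);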
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 7827639ecf5c..4e3e0451b08c 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -86,6 +86,8 @@ static void bt_host_release(struct device *dev)
if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
hci_release_dev(hdev);
+ else
+ kfree(hdev);
module_put(THIS_MODULE);
}
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 595fb3c9d6c3..369ed92dac99 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -20,6 +20,7 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <linux/compat.h>
#include <linux/export.h>
#include <linux/file.h>
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 4f8f37599962..e817ff0607a0 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7905,7 +7905,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
hcon = hci_connect_le(hdev, dst, dst_type, false,
chan->sec_level,
HCI_LE_CONN_TIMEOUT,
- HCI_ROLE_SLAVE, NULL);
+ HCI_ROLE_SLAVE);
else
hcon = hci_connect_le_scan(hdev, dst, dst_type,
chan->sec_level,
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 160c016a5dfb..ca8f07f3542b 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/export.h>
+#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bluetooth/bluetooth.h>
@@ -161,7 +162,11 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
break;
}
- if (chan->psm && bdaddr_type_is_le(chan->src_type))
+ /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of an LE address when
+ * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set.
+ */
+ if (chan->psm && bdaddr_type_is_le(chan->src_type) &&
+ chan->mode != L2CAP_MODE_EXT_FLOWCTL)
chan->mode = L2CAP_MODE_LE_FLOWCTL;
chan->state = BT_BOUND;
@@ -172,6 +177,21 @@ done:
return err;
}
+static void l2cap_sock_init_pid(struct sock *sk)
+{
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+ /* Only L2CAP_MODE_EXT_FLOWCTL ever needs to access the PID in order to
+ * group the channels being requested.
+ */
+ if (chan->mode != L2CAP_MODE_EXT_FLOWCTL)
+ return;
+
+ spin_lock(&sk->sk_peer_lock);
+ sk->sk_peer_pid = get_pid(task_tgid(current));
+ spin_unlock(&sk->sk_peer_lock);
+}
+
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
@@ -240,9 +260,15 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
return -EINVAL;
}
- if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode)
+ /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of an LE address when
+ * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set.
+ */
+ if (chan->psm && bdaddr_type_is_le(chan->src_type) &&
+ chan->mode != L2CAP_MODE_EXT_FLOWCTL)
chan->mode = L2CAP_MODE_LE_FLOWCTL;
+ l2cap_sock_init_pid(sk);
+
err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
&la.l2_bdaddr, la.l2_bdaddr_type);
if (err)
@@ -298,6 +324,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
goto done;
}
+ l2cap_sock_init_pid(sk);
+
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
@@ -876,6 +904,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
struct l2cap_conn *conn;
int len, err = 0;
u32 opt;
+ u16 mtu;
+ u8 mode;
BT_DBG("sk %p", sk);
@@ -1058,16 +1088,16 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u16))) {
+ if (copy_from_sockptr(&mtu, optval, sizeof(u16))) {
err = -EFAULT;
break;
}
if (chan->mode == L2CAP_MODE_EXT_FLOWCTL &&
sk->sk_state == BT_CONNECTED)
- err = l2cap_chan_reconfigure(chan, opt);
+ err = l2cap_chan_reconfigure(chan, mtu);
else
- chan->imtu = opt;
+ chan->imtu = mtu;
break;
@@ -1089,14 +1119,14 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
break;
}
- if (copy_from_sockptr(&opt, optval, sizeof(u8))) {
+ if (copy_from_sockptr(&mode, optval, sizeof(u8))) {
err = -EFAULT;
break;
}
- BT_DBG("opt %u", opt);
+ BT_DBG("mode %u", mode);
- err = l2cap_set_mode(chan, opt);
+ err = l2cap_set_mode(chan, mode);
if (err)
break;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3e5283607b97..37087cf7dc5a 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -39,6 +39,7 @@
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
+#include "aosp.h"
#define MGMT_VERSION 1
#define MGMT_REVISION 21
@@ -276,10 +277,39 @@ static const u8 mgmt_status_table[] = {
MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
};
-static u8 mgmt_status(u8 hci_status)
+static u8 mgmt_errno_status(int err)
{
- if (hci_status < ARRAY_SIZE(mgmt_status_table))
- return mgmt_status_table[hci_status];
+ switch (err) {
+ case 0:
+ return MGMT_STATUS_SUCCESS;
+ case -EPERM:
+ return MGMT_STATUS_REJECTED;
+ case -EINVAL:
+ return MGMT_STATUS_INVALID_PARAMS;
+ case -EOPNOTSUPP:
+ return MGMT_STATUS_NOT_SUPPORTED;
+ case -EBUSY:
+ return MGMT_STATUS_BUSY;
+ case -ETIMEDOUT:
+ return MGMT_STATUS_AUTH_FAILED;
+ case -ENOMEM:
+ return MGMT_STATUS_NO_RESOURCES;
+ case -EISCONN:
+ return MGMT_STATUS_ALREADY_CONNECTED;
+ case -ENOTCONN:
+ return MGMT_STATUS_DISCONNECTED;
+ }
+
+ return MGMT_STATUS_FAILED;
+}
+
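+/* Map a negative errno or a raw HCI status code onto an MGMT status */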
+static u8 mgmt_status(int err)
+{
+ if (err < 0)
+ return mgmt_errno_status(err);
+
+ if (err < ARRAY_SIZE(mgmt_status_table))
+ return mgmt_status_table[err];
return MGMT_STATUS_FAILED;
}
@@ -305,6 +335,12 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
HCI_SOCK_TRUSTED, skip_sk);
}
+static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
+{
+ return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
+ skip_sk);
+}
+
static u8 le_addr_type(u8 mgmt_addr_type)
{
if (mgmt_addr_type == BDADDR_LE_PUBLIC)
@@ -810,12 +846,7 @@ static u32 get_supported_settings(struct hci_dev *hdev)
settings |= MGMT_SETTING_SECURE_CONN;
settings |= MGMT_SETTING_PRIVACY;
settings |= MGMT_SETTING_STATIC_ADDRESS;
-
- /* When the experimental feature for LL Privacy support is
- * enabled, then advertising is no longer supported.
- */
- if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- settings |= MGMT_SETTING_ADVERTISING;
+ settings |= MGMT_SETTING_ADVERTISING;
}
if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
@@ -903,13 +934,6 @@ static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
-static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
- struct hci_dev *hdev,
- const void *data)
-{
- return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
-}
-
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
struct mgmt_pending_cmd *cmd;
@@ -951,32 +975,41 @@ bool mgmt_get_connectable(struct hci_dev *hdev)
return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
+static int service_cache_sync(struct hci_dev *hdev, void *data)
+{
+ hci_update_eir_sync(hdev);
+ hci_update_class_sync(hdev);
+
+ return 0;
+}
+
static void service_cache_off(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
service_cache.work);
- struct hci_request req;
if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
return;
- hci_req_init(&req, hdev);
-
- hci_dev_lock(hdev);
-
- __hci_req_update_eir(&req);
- __hci_req_update_class(&req);
-
- hci_dev_unlock(hdev);
+ hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
+}
- hci_req_run(&req, NULL);
+static int rpa_expired_sync(struct hci_dev *hdev, void *data)
+{
+ /* The generation of a new RPA and programming it into the
+ * controller happens in the hci_enable_advertising_sync()
+ * function.
+ */
+ if (ext_adv_capable(hdev))
+ return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
+ else
+ return hci_enable_advertising_sync(hdev);
}
static void rpa_expired(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
rpa_expired.work);
- struct hci_request req;
bt_dev_dbg(hdev, "");
@@ -985,16 +1018,7 @@ static void rpa_expired(struct work_struct *work)
if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
return;
- /* The generation of a new RPA and programming it into the
- * controller happens in the hci_req_enable_advertising()
- * function.
- */
- hci_req_init(&req, hdev);
- if (ext_adv_capable(hdev))
- __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
- else
- __hci_req_enable_advertising(&req);
- hci_req_run(&req, NULL);
+ hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
@@ -1131,16 +1155,6 @@ static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
sizeof(settings));
}
-static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
-{
- bt_dev_dbg(hdev, "status 0x%02x", status);
-
- if (hci_conn_count(hdev) == 0) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- }
-}
-
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
struct mgmt_ev_advertising_added ev;
@@ -1168,38 +1182,77 @@ static void cancel_adv_timeout(struct hci_dev *hdev)
}
}
-static int clean_up_hci_state(struct hci_dev *hdev)
+/* This function requires the caller to hold hdev->lock */
+static void restart_le_actions(struct hci_dev *hdev)
{
- struct hci_request req;
- struct hci_conn *conn;
- bool discov_stopped;
- int err;
+ struct hci_conn_params *p;
- hci_req_init(&req, hdev);
+ list_for_each_entry(p, &hdev->le_conn_params, list) {
+ /* Needed for the AUTO_OFF case where the device might not
+ * "really" have been powered off.
+ */
+ list_del_init(&p->action);
- if (test_bit(HCI_ISCAN, &hdev->flags) ||
- test_bit(HCI_PSCAN, &hdev->flags)) {
- u8 scan = 0x00;
- hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
+ switch (p->auto_connect) {
+ case HCI_AUTO_CONN_DIRECT:
+ case HCI_AUTO_CONN_ALWAYS:
+ list_add(&p->action, &hdev->pend_le_conns);
+ break;
+ case HCI_AUTO_CONN_REPORT:
+ list_add(&p->action, &hdev->pend_le_reports);
+ break;
+ default:
+ break;
+ }
}
+}
- hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
+static int new_settings(struct hci_dev *hdev, struct sock *skip)
+{
+ __le32 ev = cpu_to_le32(get_current_settings(hdev));
- if (hci_dev_test_flag(hdev, HCI_LE_ADV))
- __hci_req_disable_advertising(&req);
+ return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
+ sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
+}
- discov_stopped = hci_req_stop_discovery(&req);
+static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ if (!err) {
+ if (cp->val) {
+ hci_dev_lock(hdev);
+ restart_le_actions(hdev);
+ hci_update_passive_scan(hdev);
+ hci_dev_unlock(hdev);
+ }
- list_for_each_entry(conn, &hdev->conn_hash.list, list) {
- /* 0x15 == Terminated due to Power Off */
- __hci_abort_conn(&req, conn, 0x15);
+ send_settings_rsp(cmd->sk, cmd->opcode, hdev);
+
+ /* Only call new_settings() for power on, as power off is deferred
+ * to the hdev->power_off work which does call hci_dev_do_close().
+ */
+ if (cp->val)
+ new_settings(hdev, cmd->sk);
+ } else {
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
+ mgmt_status(err));
}
- err = hci_req_run(&req, clean_up_hci_complete);
- if (!err && discov_stopped)
- hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+ mgmt_pending_free(cmd);
+}
- return err;
+static int set_powered_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+
+ BT_DBG("%s", hdev->name);
+
+ return hci_set_powered_sync(hdev, cp->val);
}
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -1228,43 +1281,20 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- if (cp->val) {
- queue_work(hdev->req_workqueue, &hdev->power_on);
- err = 0;
- } else {
- /* Disconnect connections, stop scans, etc */
- err = clean_up_hci_state(hdev);
- if (!err)
- queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
- HCI_POWER_OFF_TIMEOUT);
-
- /* ENODATA means there were no HCI commands queued */
- if (err == -ENODATA) {
- cancel_delayed_work(&hdev->power_off);
- queue_work(hdev->req_workqueue, &hdev->power_off.work);
- err = 0;
- }
- }
+ err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
+ mgmt_set_powered_complete);
failed:
hci_dev_unlock(hdev);
return err;
}
-static int new_settings(struct hci_dev *hdev, struct sock *skip)
-{
- __le32 ev = cpu_to_le32(get_current_settings(hdev));
-
- return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
- sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
-}
-
int mgmt_new_settings(struct hci_dev *hdev)
{
return new_settings(hdev, NULL);
@@ -1346,23 +1376,20 @@ static u8 mgmt_le_support(struct hci_dev *hdev)
return MGMT_STATUS_SUCCESS;
}
-void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
+static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
+ int err)
{
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ bt_dev_dbg(hdev, "err %d", err);
hci_dev_lock(hdev);
- cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
- if (!cmd)
- goto unlock;
-
- if (status) {
- u8 mgmt_err = mgmt_status(status);
+ if (err) {
+ u8 mgmt_err = mgmt_status(err);
mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
- goto remove_cmd;
+ goto done;
}
if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
@@ -1374,13 +1401,18 @@ void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
new_settings(hdev, cmd->sk);
-remove_cmd:
- mgmt_pending_remove(cmd);
-
-unlock:
+done:
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
}
+static int set_discoverable_sync(struct hci_dev *hdev, void *data)
+{
+ BT_DBG("%s", hdev->name);
+
+ return hci_update_discoverable_sync(hdev);
+}
+
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -1479,7 +1511,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1503,39 +1535,34 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
else
hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
- queue_work(hdev->req_workqueue, &hdev->discoverable_update);
- err = 0;
+ err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
+ mgmt_set_discoverable_complete);
failed:
hci_dev_unlock(hdev);
return err;
}
-void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
+static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
+ int err)
{
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ bt_dev_dbg(hdev, "err %d", err);
hci_dev_lock(hdev);
- cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
- if (!cmd)
- goto unlock;
-
- if (status) {
- u8 mgmt_err = mgmt_status(status);
+ if (err) {
+ u8 mgmt_err = mgmt_status(err);
mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
- goto remove_cmd;
+ goto done;
}
send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
new_settings(hdev, cmd->sk);
-remove_cmd:
- mgmt_pending_remove(cmd);
-
-unlock:
+done:
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
}
@@ -1561,13 +1588,20 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
if (changed) {
hci_req_update_scan(hdev);
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
return new_settings(hdev, sk);
}
return 0;
}
+static int set_connectable_sync(struct hci_dev *hdev, void *data)
+{
+ BT_DBG("%s", hdev->name);
+
+ return hci_update_connectable_sync(hdev);
+}
+
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -1600,7 +1634,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
@@ -1617,8 +1651,8 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
}
- queue_work(hdev->req_workqueue, &hdev->connectable_update);
- err = 0;
+ err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
+ mgmt_set_connectable_complete);
failed:
hci_dev_unlock(hdev);
@@ -1653,12 +1687,7 @@ static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
/* In limited privacy mode the change of bondable mode
* may affect the local advertising address.
*/
- if (hdev_is_powered(hdev) &&
- hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
- hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
- hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
- queue_work(hdev->req_workqueue,
- &hdev->discoverable_update);
+ hci_update_discoverable(hdev);
err = new_settings(hdev, sk);
}
@@ -1737,6 +1766,69 @@ failed:
return err;
}
+static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct cmd_lookup match = { NULL, hdev };
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+ u8 enable = cp->val;
+ bool changed;
+
+ if (err) {
+ u8 mgmt_err = mgmt_status(err);
+
+ if (enable && hci_dev_test_and_clear_flag(hdev,
+ HCI_SSP_ENABLED)) {
+ hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
+ new_settings(hdev, NULL);
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
+ &mgmt_err);
+ return;
+ }
+
+ if (enable) {
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
+ } else {
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
+
+ if (!changed)
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_HS_ENABLED);
+ else
+ hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
+ }
+
+ mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
+
+ if (changed)
+ new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
+
+ hci_update_eir_sync(hdev);
+}
+
+static int set_ssp_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+ bool changed = false;
+ int err;
+
+ if (cp->val)
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
+
+ err = hci_write_ssp_mode_sync(hdev, cp->val);
+
+ if (!err && changed)
+ hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
+
+ return err;
+}
+
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
@@ -1798,19 +1890,18 @@ static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto failed;
- }
-
- if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
- hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
- sizeof(cp->val), &cp->val);
+ else
+ err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
+ set_ssp_complete);
- err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
if (err < 0) {
- mgmt_pending_remove(cmd);
- goto failed;
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
+ MGMT_STATUS_FAILED);
+
+ if (cmd)
+ mgmt_pending_remove(cmd);
}
failed:
@@ -1879,18 +1970,17 @@ unlock:
return err;
}
-static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
struct cmd_lookup match = { NULL, hdev };
+ u8 status = mgmt_status(err);
- hci_dev_lock(hdev);
+ bt_dev_dbg(hdev, "err %d", err);
if (status) {
- u8 mgmt_err = mgmt_status(status);
-
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
- &mgmt_err);
- goto unlock;
+ &status);
+ return;
}
mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
@@ -1899,39 +1989,54 @@ static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
if (match.sk)
sock_put(match.sk);
+}
+
+static int set_le_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+ u8 val = !!cp->val;
+ int err;
+
+ if (!val) {
+ if (hci_dev_test_flag(hdev, HCI_LE_ADV))
+ hci_disable_advertising_sync(hdev);
+
+ if (ext_adv_capable(hdev))
+ hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
+ } else {
+ hci_dev_set_flag(hdev, HCI_LE_ENABLED);
+ }
+
+ err = hci_write_le_host_supported_sync(hdev, val, 0);
/* Make sure the controller has a good default for
* advertising data. Restrict the update to when LE
* has actually been enabled. During power on, the
* update in powered_update_hci will take care of it.
*/
- if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
- struct hci_request req;
- hci_req_init(&req, hdev);
+ if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
if (ext_adv_capable(hdev)) {
- int err;
+ int status;
- err = __hci_req_setup_ext_adv_instance(&req, 0x00);
- if (!err)
- __hci_req_update_scan_rsp_data(&req, 0x00);
+ status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
+ if (!status)
+ hci_update_scan_rsp_data_sync(hdev, 0x00);
} else {
- __hci_req_update_adv_data(&req, 0x00);
- __hci_req_update_scan_rsp_data(&req, 0x00);
+ hci_update_adv_data_sync(hdev, 0x00);
+ hci_update_scan_rsp_data_sync(hdev, 0x00);
}
- hci_req_run(&req, NULL);
- hci_update_background_scan(hdev);
+
+ hci_update_passive_scan(hdev);
}
-unlock:
- hci_dev_unlock(hdev);
+ return err;
}
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
- struct hci_cp_write_le_host_supported hci_cp;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
u8 val, enabled;
@@ -2001,33 +2106,20 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- hci_req_init(&req, hdev);
-
- memset(&hci_cp, 0, sizeof(hci_cp));
+ else
+ err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
+ set_le_complete);
- if (val) {
- hci_cp.le = val;
- hci_cp.simul = 0x00;
- } else {
- if (hci_dev_test_flag(hdev, HCI_LE_ADV))
- __hci_req_disable_advertising(&req);
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
+ MGMT_STATUS_FAILED);
- if (ext_adv_capable(hdev))
- __hci_req_clear_ext_adv_sets(&req);
+ if (cmd)
+ mgmt_pending_remove(cmd);
}
- hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
- &hci_cp);
-
- err = hci_req_run(&req, le_enable_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
unlock:
hci_dev_unlock(hdev);
return err;
@@ -2075,37 +2167,33 @@ static u8 get_uuid_size(const u8 *uuid)
return 16;
}
-static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
+static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
- hci_dev_lock(hdev);
-
- cmd = pending_find(mgmt_op, hdev);
- if (!cmd)
- goto unlock;
+ bt_dev_dbg(hdev, "err %d", err);
mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status), hdev->dev_class, 3);
+ mgmt_status(err), hdev->dev_class, 3);
- mgmt_pending_remove(cmd);
-
-unlock:
- hci_dev_unlock(hdev);
+ mgmt_pending_free(cmd);
}
-static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ int err;
- mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
+ err = hci_update_class_sync(hdev);
+ if (err)
+ return err;
+
+ return hci_update_eir_sync(hdev);
}
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_cp_add_uuid *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
struct bt_uuid *uuid;
int err;
@@ -2131,28 +2219,17 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
list_add_tail(&uuid->list, &hdev->uuids);
- hci_req_init(&req, hdev);
-
- __hci_req_update_class(&req);
- __hci_req_update_eir(&req);
-
- err = hci_req_run(&req, add_uuid_complete);
- if (err < 0) {
- if (err != -ENODATA)
- goto failed;
-
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
- hdev->dev_class, 3);
- goto failed;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- err = 0;
+ err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
+ if (err < 0) {
+ mgmt_pending_free(cmd);
+ goto failed;
+ }
failed:
hci_dev_unlock(hdev);
@@ -2173,11 +2250,15 @@ static bool enable_service_cache(struct hci_dev *hdev)
return false;
}
-static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ int err;
+
+ err = hci_update_class_sync(hdev);
+ if (err)
+ return err;
- mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
+ return hci_update_eir_sync(hdev);
}
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -2187,7 +2268,6 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_pending_cmd *cmd;
struct bt_uuid *match, *tmp;
u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- struct hci_request req;
int err, found;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -2231,39 +2311,35 @@ static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
}
update_class:
- hci_req_init(&req, hdev);
-
- __hci_req_update_class(&req);
- __hci_req_update_eir(&req);
-
- err = hci_req_run(&req, remove_uuid_complete);
- if (err < 0) {
- if (err != -ENODATA)
- goto unlock;
-
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
- hdev->dev_class, 3);
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
- err = 0;
+ err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
+ mgmt_class_complete);
+ if (err < 0)
+ mgmt_pending_free(cmd);
unlock:
hci_dev_unlock(hdev);
return err;
}
-static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static int set_class_sync(struct hci_dev *hdev, void *data)
{
- bt_dev_dbg(hdev, "status 0x%02x", status);
+ int err = 0;
- mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
+ if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
+ cancel_delayed_work_sync(&hdev->service_cache);
+ err = hci_update_eir_sync(hdev);
+ }
+
+ if (err)
+ return err;
+
+ return hci_update_class_sync(hdev);
}
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -2271,7 +2347,6 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_cp_set_dev_class *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -2303,34 +2378,16 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- hci_req_init(&req, hdev);
-
- if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
- hci_dev_unlock(hdev);
- cancel_delayed_work_sync(&hdev->service_cache);
- hci_dev_lock(hdev);
- __hci_req_update_eir(&req);
- }
-
- __hci_req_update_class(&req);
-
- err = hci_req_run(&req, set_class_complete);
- if (err < 0) {
- if (err != -ENODATA)
- goto unlock;
-
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
- hdev->dev_class, 3);
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
- err = 0;
+ err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
+ mgmt_class_complete);
+ if (err < 0)
+ mgmt_pending_free(cmd);
unlock:
hci_dev_unlock(hdev);
@@ -3228,65 +3285,70 @@ static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
-static void adv_expire(struct hci_dev *hdev, u32 flags)
+static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
struct adv_info *adv_instance;
- struct hci_request req;
- int err;
adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
if (!adv_instance)
- return;
+ return 0;
/* stop if current instance doesn't need to be changed */
if (!(adv_instance->flags & flags))
- return;
+ return 0;
cancel_adv_timeout(hdev);
adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
if (!adv_instance)
- return;
+ return 0;
- hci_req_init(&req, hdev);
- err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
- true);
- if (err)
- return;
+ hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
- hci_req_run(&req, NULL);
+ return 0;
}
-static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static int name_changed_sync(struct hci_dev *hdev, void *data)
{
- struct mgmt_cp_set_local_name *cp;
- struct mgmt_pending_cmd *cmd;
-
- bt_dev_dbg(hdev, "status 0x%02x", status);
-
- hci_dev_lock(hdev);
+ return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
+}
- cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
- if (!cmd)
- goto unlock;
+static void set_name_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_set_local_name *cp = cmd->param;
+ u8 status = mgmt_status(err);
- cp = cmd->param;
+ bt_dev_dbg(hdev, "err %d", err);
if (status) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
- mgmt_status(status));
+ status);
} else {
mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
cp, sizeof(*cp));
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
- adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
+ hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
}
mgmt_pending_remove(cmd);
+}
-unlock:
- hci_dev_unlock(hdev);
+static int set_name_sync(struct hci_dev *hdev, void *data)
+{
+ if (lmp_bredr_capable(hdev)) {
+ hci_update_name_sync(hdev);
+ hci_update_eir_sync(hdev);
+ }
+
+ /* The name is stored in the scan response data and so
+ * there is no need to update the advertising data here.
+ */
+ if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
+
+ return 0;
}
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -3294,7 +3356,6 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_cp_set_local_name *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -3330,35 +3391,34 @@ static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto failed;
- }
+ else
+ err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
+ set_name_complete);
- memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+ MGMT_STATUS_FAILED);
- hci_req_init(&req, hdev);
+ if (cmd)
+ mgmt_pending_remove(cmd);
- if (lmp_bredr_capable(hdev)) {
- __hci_req_update_name(&req);
- __hci_req_update_eir(&req);
+ goto failed;
}
- /* The name is stored in the scan response data and so
- * no need to update the advertising data here.
- */
- if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
- __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
-
- err = hci_req_run(&req, set_name_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
+ memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
failed:
hci_dev_unlock(hdev);
return err;
}
+static int appearance_changed_sync(struct hci_dev *hdev, void *data)
+{
+ return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
+}
+
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -3380,7 +3440,8 @@ static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
hdev->appearance = appearance;
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
- adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
+ hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
+ NULL);
ext_info_changed(hdev, sk);
}
@@ -3426,23 +3487,26 @@ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
sizeof(ev), skip);
}
-static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
+static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
+ struct sk_buff *skb = cmd->skb;
+ u8 status = mgmt_status(err);
- bt_dev_dbg(hdev, "status 0x%02x", status);
-
- hci_dev_lock(hdev);
+ if (!status) {
+ if (!skb)
+ status = MGMT_STATUS_FAILED;
+ else if (IS_ERR(skb))
+ status = mgmt_status(PTR_ERR(skb));
+ else
+ status = mgmt_status(skb->data[0]);
+ }
- cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
- if (!cmd)
- goto unlock;
+ bt_dev_dbg(hdev, "status %d", status);
if (status) {
mgmt_cmd_status(cmd->sk, hdev->id,
- MGMT_OP_SET_PHY_CONFIGURATION,
- mgmt_status(status));
+ MGMT_OP_SET_PHY_CONFIGURATION, status);
} else {
mgmt_cmd_complete(cmd->sk, hdev->id,
MGMT_OP_SET_PHY_CONFIGURATION, 0,
@@ -3451,19 +3515,56 @@ static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
mgmt_phy_configuration_changed(hdev, cmd->sk);
}
+ if (skb && !IS_ERR(skb))
+ kfree_skb(skb);
+
mgmt_pending_remove(cmd);
+}
-unlock:
- hci_dev_unlock(hdev);
+static int set_default_phy_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_set_phy_configuration *cp = cmd->param;
+ struct hci_cp_le_set_default_phy cp_phy;
+ u32 selected_phys = __le32_to_cpu(cp->selected_phys);
+
+ memset(&cp_phy, 0, sizeof(cp_phy));
+
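+ /* all_phys: bit 0 = no TX PHY preference, bit 1 = no RX PHY preference */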
+ if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
+ cp_phy.all_phys |= 0x01;
+
+ if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
+ cp_phy.all_phys |= 0x02;
+
+ if (selected_phys & MGMT_PHY_LE_1M_TX)
+ cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
+
+ if (selected_phys & MGMT_PHY_LE_2M_TX)
+ cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
+
+ if (selected_phys & MGMT_PHY_LE_CODED_TX)
+ cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
+
+ if (selected_phys & MGMT_PHY_LE_1M_RX)
+ cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
+
+ if (selected_phys & MGMT_PHY_LE_2M_RX)
+ cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
+
+ if (selected_phys & MGMT_PHY_LE_CODED_RX)
+ cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
+
+ cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
+ sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
+
+ return 0;
}
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
struct mgmt_cp_set_phy_configuration *cp = data;
- struct hci_cp_le_set_default_phy cp_phy;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
u16 pkt_type = (HCI_DH1 | HCI_DM1);
bool changed = false;
@@ -3567,44 +3668,20 @@ static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- hci_req_init(&req, hdev);
-
- memset(&cp_phy, 0, sizeof(cp_phy));
-
- if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
- cp_phy.all_phys |= 0x01;
-
- if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
- cp_phy.all_phys |= 0x02;
-
- if (selected_phys & MGMT_PHY_LE_1M_TX)
- cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
-
- if (selected_phys & MGMT_PHY_LE_2M_TX)
- cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
-
- if (selected_phys & MGMT_PHY_LE_CODED_TX)
- cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
-
- if (selected_phys & MGMT_PHY_LE_1M_RX)
- cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
-
- if (selected_phys & MGMT_PHY_LE_2M_RX)
- cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
-
- if (selected_phys & MGMT_PHY_LE_CODED_RX)
- cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
+ else
+ err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
+ set_default_phy_complete);
- hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_PHY_CONFIGURATION,
+ MGMT_STATUS_FAILED);
- err = hci_req_run_skb(&req, set_default_phy_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
+ if (cmd)
+ mgmt_pending_remove(cmd);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -3805,7 +3882,7 @@ static const u8 offload_codecs_uuid[16] = {
};
/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
-static const u8 simult_central_periph_uuid[16] = {
+static const u8 le_simultaneous_roles_uuid[16] = {
0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};
@@ -3838,21 +3915,18 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
}
#endif
- if (hdev) {
- if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
- (hdev->le_states[4] & 0x08) && /* Central */
- (hdev->le_states[4] & 0x40) && /* Peripheral */
- (hdev->le_states[3] & 0x10)) /* Simultaneous */
+ if (hdev && hci_dev_le_state_simultaneous(hdev)) {
+ if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
flags = BIT(0);
else
flags = 0;
- memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
+ memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
rp->features[idx].flags = cpu_to_le32(flags);
idx++;
}
- if (hdev && use_ll_privacy(hdev)) {
+ if (hdev && ll_privacy_capable(hdev)) {
if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
flags = BIT(0) | BIT(1);
else
@@ -3863,7 +3937,8 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
idx++;
}
- if (hdev && hdev->set_quality_report) {
+ if (hdev && (aosp_has_quality_report(hdev) ||
+ hdev->set_quality_report)) {
if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
flags = BIT(0);
else
@@ -3906,36 +3981,27 @@ static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
memcpy(ev.uuid, rpa_resolution_uuid, 16);
ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
+ if (enabled && privacy_mode_capable(hdev))
+ set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
+ else
+ clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
+
return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
&ev, sizeof(ev),
HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
-#ifdef CONFIG_BT_FEATURE_DEBUG
-static int exp_debug_feature_changed(bool enabled, struct sock *skip)
-{
- struct mgmt_ev_exp_feature_changed ev;
-
- memset(&ev, 0, sizeof(ev));
- memcpy(ev.uuid, debug_uuid, 16);
- ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
-
- return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
- &ev, sizeof(ev),
- HCI_MGMT_EXP_FEATURE_EVENTS, skip);
-}
-#endif
-
-static int exp_quality_report_feature_changed(bool enabled, struct sock *skip)
+static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
+ bool enabled, struct sock *skip)
{
struct mgmt_ev_exp_feature_changed ev;
memset(&ev, 0, sizeof(ev));
- memcpy(ev.uuid, quality_report_uuid, 16);
+ memcpy(ev.uuid, uuid, 16);
ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
- return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
+ return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
&ev, sizeof(ev),
HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
@@ -3962,17 +4028,18 @@ static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
bt_dbg_set(false);
if (changed)
- exp_debug_feature_changed(false, sk);
+ exp_feature_changed(NULL, ZERO_KEY, false, sk);
}
#endif
if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
- bool changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
-
- hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
+ bool changed;
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_ENABLE_LL_PRIVACY);
if (changed)
- exp_ll_privacy_feature_changed(false, hdev, sk);
+ exp_feature_changed(hdev, rpa_resolution_uuid, false,
+ sk);
}
hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
@@ -4023,7 +4090,7 @@ static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
&rp, sizeof(rp));
if (changed)
- exp_debug_feature_changed(val, sk);
+ exp_feature_changed(hdev, debug_uuid, val, sk);
return err;
}
@@ -4065,15 +4132,15 @@ static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
val = !!cp->param[0];
if (val) {
- changed = !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
- hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
+ changed = !hci_dev_test_and_set_flag(hdev,
+ HCI_ENABLE_LL_PRIVACY);
hci_dev_clear_flag(hdev, HCI_ADVERTISING);
/* Enable LL privacy + supported settings changed */
flags = BIT(0) | BIT(1);
} else {
- changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
- hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);
+ changed = hci_dev_test_and_clear_flag(hdev,
+ HCI_ENABLE_LL_PRIVACY);
/* Disable LL privacy + supported settings changed */
flags = BIT(1);
@@ -4125,7 +4192,7 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
val = !!cp->param[0];
changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
- if (!hdev->set_quality_report) {
+ if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_NOT_SUPPORTED);
@@ -4133,13 +4200,18 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
}
if (changed) {
- err = hdev->set_quality_report(hdev, val);
+ if (hdev->set_quality_report)
+ err = hdev->set_quality_report(hdev, val);
+ else
+ err = aosp_set_quality_report(hdev, val);
+
if (err) {
err = mgmt_cmd_status(sk, hdev->id,
MGMT_OP_SET_EXP_FEATURE,
MGMT_STATUS_FAILED);
goto unlock_quality_report;
}
+
if (val)
hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
else
@@ -4151,31 +4223,18 @@ static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
memcpy(rp.uuid, quality_report_uuid, 16);
rp.flags = cpu_to_le32(val ? BIT(0) : 0);
hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
- err = mgmt_cmd_complete(sk, hdev->id,
- MGMT_OP_SET_EXP_FEATURE, 0,
+
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
&rp, sizeof(rp));
if (changed)
- exp_quality_report_feature_changed(val, sk);
+ exp_feature_changed(hdev, quality_report_uuid, val, sk);
unlock_quality_report:
hci_req_sync_unlock(hdev);
return err;
}
-static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip)
-{
- struct mgmt_ev_exp_feature_changed ev;
-
- memset(&ev, 0, sizeof(ev));
- memcpy(ev.uuid, offload_codecs_uuid, 16);
- ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
-
- return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
- &ev, sizeof(ev),
- HCI_MGMT_EXP_FEATURE_EVENTS, skip);
-}
-
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
struct mgmt_cp_set_exp_feature *cp,
u16 data_len)
@@ -4229,7 +4288,65 @@ static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
&rp, sizeof(rp));
if (changed)
- exp_offload_codec_feature_changed(val, sk);
+ exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
+
+ return err;
+}
+
+static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
+ struct mgmt_cp_set_exp_feature *cp,
+ u16 data_len)
+{
+ bool val, changed;
+ int err;
+ struct mgmt_rp_set_exp_feature rp;
+
+ /* Command requires a valid controller index */
+ if (!hdev)
+ return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_INDEX);
+
+ /* Parameters are limited to a single octet */
+ if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ /* Only boolean on/off is supported */
+ if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ val = !!cp->param[0];
+ changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
+
+ if (!hci_dev_le_state_simultaneous(hdev)) {
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE,
+ MGMT_STATUS_NOT_SUPPORTED);
+ }
+
+ if (changed) {
+ if (val)
+ hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
+ else
+ hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
+ }
+
+ bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
+ val, changed);
+
+ memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
+ rp.flags = cpu_to_le32(val ? BIT(0) : 0);
+ hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_SET_EXP_FEATURE, 0,
+ &rp, sizeof(rp));
+
+ if (changed)
+ exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
return err;
}
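set_le_simultaneous_roles_func() follows the same validation ladder as the other experimental-feature setters: require a controller index, require exactly one extra octet, accept only 0x00/0x01, then check controller capability before toggling the flag. A hedged sketch of just the parameter checks; the header length constant is made up:

#include <stdio.h>
#include <stdint.h>

#define SET_EXP_FEATURE_HDR 17	/* illustrative fixed header length */

/* 0 on a valid single-octet boolean payload, -1 otherwise. */
static int validate_bool_param(const uint8_t *param, uint16_t data_len)
{
	if (data_len != SET_EXP_FEATURE_HDR + 1)
		return -1;	/* INVALID_PARAMS: wrong length */
	if (param[0] != 0x00 && param[0] != 0x01)
		return -1;	/* INVALID_PARAMS: not a boolean */
	return 0;
}

int main(void)
{
	uint8_t on = 0x01, junk = 0x42;

	printf("%d %d\n",
	       validate_bool_param(&on, SET_EXP_FEATURE_HDR + 1),
	       validate_bool_param(&junk, SET_EXP_FEATURE_HDR + 1));
	return 0;
}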
@@ -4246,6 +4363,7 @@ static const struct mgmt_exp_feature {
EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
EXP_FEAT(quality_report_uuid, set_quality_report_func),
EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
+ EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
/* end with a null feature */
EXP_FEAT(NULL, NULL)
@@ -4269,8 +4387,6 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
MGMT_STATUS_NOT_SUPPORTED);
}
-#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
-
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
@@ -4278,7 +4394,7 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
struct mgmt_rp_get_device_flags rp;
struct bdaddr_list_with_flags *br_params;
struct hci_conn_params *params;
- u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
+ u32 supported_flags;
u32 current_flags = 0;
u8 status = MGMT_STATUS_INVALID_PARAMS;
@@ -4287,6 +4403,9 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
hci_dev_lock(hdev);
+ bitmap_to_arr32(&supported_flags, hdev->conn_flags,
+ __HCI_CONN_NUM_FLAGS);
+
memset(&rp, 0, sizeof(rp));
if (cp->addr.type == BDADDR_BREDR) {
@@ -4296,7 +4415,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
if (!br_params)
goto done;
- current_flags = br_params->current_flags;
+ bitmap_to_arr32(&current_flags, br_params->flags,
+ __HCI_CONN_NUM_FLAGS);
} else {
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
le_addr_type(cp->addr.type));
@@ -4304,7 +4424,8 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
if (!params)
goto done;
- current_flags = params->current_flags;
+ bitmap_to_arr32(&current_flags, params->flags,
+ __HCI_CONN_NUM_FLAGS);
}
bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
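Get/Set Device Flags now keeps per-device flags in a bitmap and converts to the 32-bit wire format with bitmap_to_arr32()/bitmap_from_u64(). A user-space approximation of that round trip, assuming the flag count fits in one 32-bit word:

#include <stdio.h>
#include <stdint.h>

#define CONN_NUM_FLAGS 3	/* illustrative flag count */

static uint32_t flags_to_u32(const unsigned long *bitmap)
{
	uint32_t out = 0;
	unsigned int i;

	for (i = 0; i < CONN_NUM_FLAGS; i++)
		if (bitmap[0] & (1UL << i))
			out |= 1u << i;
	return out;
}

static void u32_to_flags(unsigned long *bitmap, uint32_t val)
{
	bitmap[0] = val;	/* all flags fit in the first word */
}

int main(void)
{
	unsigned long flags[1] = { 0 };

	u32_to_flags(flags, 0x5);			/* set flag 0 and flag 2 */
	printf("0x%x\n", (unsigned int)flags_to_u32(flags));	/* prints 0x5 */
	return 0;
}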
@@ -4342,13 +4463,16 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
struct bdaddr_list_with_flags *br_params;
struct hci_conn_params *params;
u8 status = MGMT_STATUS_INVALID_PARAMS;
- u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
+ u32 supported_flags;
u32 current_flags = __le32_to_cpu(cp->current_flags);
bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
&cp->addr.bdaddr, cp->addr.type,
__le32_to_cpu(current_flags));
+ bitmap_to_arr32(&supported_flags, hdev->conn_flags,
+ __HCI_CONN_NUM_FLAGS);
+
if ((supported_flags | current_flags) != supported_flags) {
bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
current_flags, supported_flags);
@@ -4363,7 +4487,7 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
cp->addr.type);
if (br_params) {
- br_params->current_flags = current_flags;
+ bitmap_from_u64(br_params->flags, current_flags);
status = MGMT_STATUS_SUCCESS;
} else {
bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
@@ -4373,8 +4497,15 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
le_addr_type(cp->addr.type));
if (params) {
- params->current_flags = current_flags;
+ bitmap_from_u64(params->flags, current_flags);
status = MGMT_STATUS_SUCCESS;
+
+ /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
+ * has been set.
+ */
+ if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY,
+ params->flags))
+ hci_update_passive_scan(hdev);
} else {
bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
&cp->addr.bdaddr,
@@ -4496,7 +4627,7 @@ int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
hdev->adv_monitors_cnt++;
if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
monitor->state = ADV_MONITOR_STATE_REGISTERED;
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
}
err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
@@ -4722,7 +4853,7 @@ int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
rp.monitor_handle = cp->monitor_handle;
if (!status)
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
mgmt_status(status), &rp, sizeof(rp));
@@ -4801,28 +4932,33 @@ unlock:
status);
}
-static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
+static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
struct mgmt_rp_read_local_oob_data mgmt_rp;
size_t rp_size = sizeof(mgmt_rp);
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
+ struct sk_buff *skb = cmd->skb;
+ u8 status = mgmt_status(err);
- bt_dev_dbg(hdev, "status %u", status);
+ if (!status) {
+ if (!skb)
+ status = MGMT_STATUS_FAILED;
+ else if (IS_ERR(skb))
+ status = mgmt_status(PTR_ERR(skb));
+ else
+ status = mgmt_status(skb->data[0]);
+ }
- cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
- if (!cmd)
- return;
+ bt_dev_dbg(hdev, "status %d", status);
- if (status || !skb) {
- mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
- status ? mgmt_status(status) : MGMT_STATUS_FAILED);
+ if (status) {
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
goto remove;
}
memset(&mgmt_rp, 0, sizeof(mgmt_rp));
- if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
+ if (!bredr_sc_enabled(hdev)) {
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
if (skb->len < sizeof(*rp)) {
@@ -4857,14 +4993,31 @@ static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
remove:
- mgmt_pending_remove(cmd);
+ if (skb && !IS_ERR(skb))
+ kfree_skb(skb);
+
+ mgmt_pending_free(cmd);
+}
+
+static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+
+ if (bredr_sc_enabled(hdev))
+ cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
+ else
+ cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
+
+ if (IS_ERR(cmd->skb))
+ return PTR_ERR(cmd->skb);
+ else
+ return 0;
}
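read_local_oob_data_sync() relies on the returned skb either being a real buffer or carrying an errno encoded in the pointer (the ERR_PTR/IS_ERR/PTR_ERR convention), which the completion handler then unpacks. A small user-space model of that encoding, for illustration only:

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

static void *err_ptr(long err)      { return (void *)err; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

/* Stand-in for the sync read: either a real buffer or an encoded error. */
static void *read_oob(int fail)
{
	static int payload = 42;

	return fail ? err_ptr(-EIO) : &payload;
}

int main(void)
{
	void *skb = read_oob(1);

	if (is_err(skb))
		printf("read failed: %ld\n", ptr_err(skb));	/* -5 (EIO) */
	else
		printf("payload %d\n", *(int *)skb);
	return 0;
}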
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -4889,22 +5042,20 @@ static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
- if (!cmd) {
+ cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- hci_req_init(&req, hdev);
-
- if (bredr_sc_enabled(hdev))
- hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
else
- hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+ err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
+ read_local_oob_data_complete);
- err = hci_req_run_skb(&req, read_local_oob_data_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
+ if (err < 0) {
+ err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
+ MGMT_STATUS_FAILED);
+
+ if (cmd)
+ mgmt_pending_free(cmd);
+ }
unlock:
hci_dev_unlock(hdev);
@@ -5077,13 +5228,6 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
}
hci_dev_unlock(hdev);
-
- /* Handle suspend notifier */
- if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
- hdev->suspend_tasks)) {
- bt_dev_dbg(hdev, "Unpaused discovery");
- wake_up(&hdev->suspend_wait_q);
- }
}
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
@@ -5113,6 +5257,25 @@ static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
return true;
}
+static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
+ cmd->param, 1);
+ mgmt_pending_free(cmd);
+
+ hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
+ DISCOVERY_FINDING);
+}
+
+static int start_discovery_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_start_discovery_sync(hdev);
+}
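start_discovery_complete() replies to the pending command and then moves the discovery state machine: FINDING on success, back to STOPPED on error, after the caller had already set STARTING. In sketch form:

#include <stdio.h>

enum discovery_state { STOPPED, STARTING, FINDING };

/* Mirrors the transition made in start_discovery_complete(). */
static enum discovery_state discovery_done(int err)
{
	return err ? STOPPED : FINDING;
}

int main(void)
{
	enum discovery_state st = STARTING;

	st = discovery_done(0);
	printf("after success: %d (FINDING)\n", st);
	st = discovery_done(-5);
	printf("after error:   %d (STOPPED)\n", st);
	return 0;
}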
+
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
u16 op, void *data, u16 len)
{
@@ -5164,17 +5327,20 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
else
hdev->discovery.limited = false;
- cmd = mgmt_pending_add(sk, op, hdev, data, len);
+ cmd = mgmt_pending_new(sk, op, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- cmd->cmd_complete = generic_cmd_complete;
+ err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
+ start_discovery_complete);
+ if (err < 0) {
+ mgmt_pending_free(cmd);
+ goto failed;
+ }
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
- queue_work(hdev->req_workqueue, &hdev->discov_update);
- err = 0;
failed:
hci_dev_unlock(hdev);
@@ -5196,13 +5362,6 @@ static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
data, len);
}
-static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
- u8 status)
-{
- return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
- cmd->param, 1);
-}
-
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -5271,15 +5430,13 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
+ cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY,
hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto failed;
}
- cmd->cmd_complete = service_discovery_cmd_complete;
-
/* Clear the discovery filter first to free any previously
* allocated memory for the UUID list.
*/
@@ -5303,9 +5460,14 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
}
}
+ err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
+ start_discovery_complete);
+ if (err < 0) {
+ mgmt_pending_free(cmd);
+ goto failed;
+ }
+
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
- queue_work(hdev->req_workqueue, &hdev->discov_update);
- err = 0;
failed:
hci_dev_unlock(hdev);
@@ -5327,12 +5489,25 @@ void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
}
hci_dev_unlock(hdev);
+}
- /* Handle suspend notifier */
- if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
- bt_dev_dbg(hdev, "Paused discovery");
- wake_up(&hdev->suspend_wait_q);
- }
+static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
+ cmd->param, 1);
+ mgmt_pending_free(cmd);
+
+ if (!err)
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+}
+
+static int stop_discovery_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_stop_discovery_sync(hdev);
}
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -5360,17 +5535,20 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
- cmd->cmd_complete = generic_cmd_complete;
+ err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
+ stop_discovery_complete);
+ if (err < 0) {
+ mgmt_pending_free(cmd);
+ goto unlock;
+ }
hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
- queue_work(hdev->req_workqueue, &hdev->discov_update);
- err = 0;
unlock:
hci_dev_unlock(hdev);
@@ -5491,11 +5669,15 @@ done:
return err;
}
+static int set_device_id_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_update_eir_sync(hdev);
+}
+
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
struct mgmt_cp_set_device_id *cp = data;
- struct hci_request req;
int err;
__u16 source;
@@ -5517,38 +5699,32 @@ static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
NULL, 0);
- hci_req_init(&req, hdev);
- __hci_req_update_eir(&req);
- hci_req_run(&req, NULL);
+ hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
hci_dev_unlock(hdev);
return err;
}
-static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
- bt_dev_dbg(hdev, "status %u", status);
+ if (err)
+ bt_dev_err(hdev, "failed to re-configure advertising %d", err);
+ else
+ bt_dev_dbg(hdev, "status %d", err);
}
-static void set_advertising_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
struct cmd_lookup match = { NULL, hdev };
- struct hci_request req;
u8 instance;
struct adv_info *adv_instance;
- int err;
-
- hci_dev_lock(hdev);
+ u8 status = mgmt_status(err);
if (status) {
- u8 mgmt_err = mgmt_status(status);
-
mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
- cmd_status_rsp, &mgmt_err);
- goto unlock;
+ cmd_status_rsp, &status);
+ return;
}
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
@@ -5564,46 +5740,60 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status,
if (match.sk)
sock_put(match.sk);
- /* Handle suspend notifier */
- if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
- hdev->suspend_tasks)) {
- bt_dev_dbg(hdev, "Paused advertising");
- wake_up(&hdev->suspend_wait_q);
- } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
- hdev->suspend_tasks)) {
- bt_dev_dbg(hdev, "Unpaused advertising");
- wake_up(&hdev->suspend_wait_q);
- }
-
/* If "Set Advertising" was just disabled and instance advertising was
* set up earlier, then re-enable multi-instance advertising.
*/
if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
list_empty(&hdev->adv_instances))
- goto unlock;
+ return;
instance = hdev->cur_adv_instance;
if (!instance) {
adv_instance = list_first_entry_or_null(&hdev->adv_instances,
struct adv_info, list);
if (!adv_instance)
- goto unlock;
+ return;
instance = adv_instance->instance;
}
- hci_req_init(&req, hdev);
+ err = hci_schedule_adv_instance_sync(hdev, instance, true);
- err = __hci_req_schedule_adv_instance(&req, instance, true);
+ enable_advertising_instance(hdev, err);
+}
- if (!err)
- err = hci_req_run(&req, enable_advertising_instance);
+static int set_adv_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+ u8 val = !!cp->val;
- if (err)
- bt_dev_err(hdev, "failed to re-configure advertising");
+ if (cp->val == 0x02)
+ hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
+ else
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
-unlock:
- hci_dev_unlock(hdev);
+ cancel_adv_timeout(hdev);
+
+ if (val) {
+ /* Switch to instance "0" for the Set Advertising setting.
+ * We cannot use update_[adv|scan_rsp]_data() here as the
+ * HCI_ADVERTISING flag is not yet set.
+ */
+ hdev->cur_adv_instance = 0x00;
+
+ if (ext_adv_capable(hdev)) {
+ hci_start_ext_adv_sync(hdev, 0x00);
+ } else {
+ hci_update_adv_data_sync(hdev, 0x00);
+ hci_update_scan_rsp_data_sync(hdev, 0x00);
+ hci_enable_advertising_sync(hdev);
+ }
+ } else {
+ hci_disable_advertising_sync(hdev);
+ }
+
+ return 0;
}
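set_adv_sync() interprets the mgmt value as a tri-state: 0x00 disables advertising, 0x01 enables it, 0x02 enables it and marks it connectable, always resetting to instance 0 first. A toy model of that branching; the printfs stand in for the HCI sync calls:

#include <stdio.h>

static void set_advertising(unsigned char val)
{
	int connectable = (val == 0x02);

	printf("connectable flag %s\n", connectable ? "set" : "cleared");

	if (val)
		printf("reset to instance 0, push adv data, enable advertising\n");
	else
		printf("disable advertising\n");
}

int main(void)
{
	set_advertising(0x02);	/* on + connectable */
	set_advertising(0x00);	/* off */
	return 0;
}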
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -5611,7 +5801,6 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
u8 val, status;
int err;
@@ -5622,13 +5811,6 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
status);
- /* Enabling the experimental LL Privay support disables support for
- * advertising.
- */
- if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
- MGMT_STATUS_NOT_SUPPORTED);
-
if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -5684,40 +5866,13 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
}
cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
- if (!cmd) {
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- hci_req_init(&req, hdev);
-
- if (cp->val == 0x02)
- hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
else
- hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
-
- cancel_adv_timeout(hdev);
-
- if (val) {
- /* Switch to instance "0" for the Set Advertising setting.
- * We cannot use update_[adv|scan_rsp]_data() here as the
- * HCI_ADVERTISING flag is not yet set.
- */
- hdev->cur_adv_instance = 0x00;
+ err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
+ set_advertising_complete);
- if (ext_adv_capable(hdev)) {
- __hci_req_start_ext_adv(&req, 0x00);
- } else {
- __hci_req_update_adv_data(&req, 0x00);
- __hci_req_update_scan_rsp_data(&req, 0x00);
- __hci_req_enable_advertising(&req);
- }
- } else {
- __hci_req_disable_advertising(&req);
- }
-
- err = hci_req_run(&req, set_advertising_complete);
- if (err < 0)
+ if (err < 0 && cmd)
mgmt_pending_remove(cmd);
unlock:
@@ -5810,38 +5965,23 @@ static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
* loaded.
*/
if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
- hdev->discovery.state == DISCOVERY_STOPPED) {
- struct hci_request req;
-
- hci_req_init(&req, hdev);
-
- hci_req_add_le_scan_disable(&req, false);
- hci_req_add_le_passive_scan(&req);
-
- hci_req_run(&req, NULL);
- }
+ hdev->discovery.state == DISCOVERY_STOPPED)
+ hci_update_passive_scan(hdev);
hci_dev_unlock(hdev);
return err;
}
-static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
- struct mgmt_pending_cmd *cmd;
-
- bt_dev_dbg(hdev, "status 0x%02x", status);
-
- hci_dev_lock(hdev);
+ struct mgmt_pending_cmd *cmd = data;
- cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
- if (!cmd)
- goto unlock;
+ bt_dev_dbg(hdev, "err %d", err);
- if (status) {
+ if (err) {
mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- mgmt_status(status));
+ mgmt_status(err));
} else {
struct mgmt_mode *cp = cmd->param;
@@ -5854,10 +5994,15 @@ static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
new_settings(hdev, cmd->sk);
}
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
+}
-unlock:
- hci_dev_unlock(hdev);
+static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+
+ return hci_write_fast_connectable_sync(hdev, cp->val);
}
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
@@ -5865,58 +6010,49 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
{
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
hdev->hci_ver < BLUETOOTH_VER_1_2)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_FAST_CONNECTABLE,
MGMT_STATUS_NOT_SUPPORTED);
if (cp->val != 0x00 && cp->val != 0x01)
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ return mgmt_cmd_status(sk, hdev->id,
+ MGMT_OP_SET_FAST_CONNECTABLE,
MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
- if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
-
if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
- err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
- hdev);
+ err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
goto unlock;
}
if (!hdev_is_powered(hdev)) {
hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
- err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
- hdev);
+ err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
new_settings(hdev, sk);
goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
- data, len);
- if (!cmd) {
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
+ len);
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- hci_req_init(&req, hdev);
-
- __hci_req_write_fast_connectable(&req, cp->val);
+ else
+ err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
+ fast_connectable_complete);
- err = hci_req_run(&req, fast_connectable_complete);
if (err < 0) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
- MGMT_STATUS_FAILED);
- mgmt_pending_remove(cmd);
+ mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
+ MGMT_STATUS_FAILED);
+
+ if (cmd)
+ mgmt_pending_free(cmd);
}
unlock:
@@ -5925,20 +6061,14 @@ unlock:
return err;
}
-static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
- bt_dev_dbg(hdev, "status 0x%02x", status);
-
- hci_dev_lock(hdev);
-
- cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
- if (!cmd)
- goto unlock;
+ bt_dev_dbg(hdev, "err %d", err);
- if (status) {
- u8 mgmt_err = mgmt_status(status);
+ if (err) {
+ u8 mgmt_err = mgmt_status(err);
/* We need to restore the flag if related HCI commands
* failed.
@@ -5951,17 +6081,31 @@ static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
new_settings(hdev, cmd->sk);
}
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
+}
-unlock:
- hci_dev_unlock(hdev);
+static int set_bredr_sync(struct hci_dev *hdev, void *data)
+{
+ int status;
+
+ status = hci_write_fast_connectable_sync(hdev, false);
+
+ if (!status)
+ status = hci_update_scan_sync(hdev);
+
+ /* Since only the advertising data flags will change, there
+ * is no need to update the scan response data.
+ */
+ if (!status)
+ status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
+
+ return status;
}
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -6033,15 +6177,19 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
}
}
- if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
- MGMT_STATUS_BUSY);
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
- if (!cmd) {
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
+ if (!cmd)
err = -ENOMEM;
+ else
+ err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
+ set_bredr_complete);
+
+ if (err < 0) {
+ mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
+ MGMT_STATUS_FAILED);
+ if (cmd)
+ mgmt_pending_free(cmd);
+
goto unlock;
}
@@ -6050,42 +6198,23 @@ static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
*/
hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
- hci_req_init(&req, hdev);
-
- __hci_req_write_fast_connectable(&req, false);
- __hci_req_update_scan(&req);
-
- /* Since only the advertising data flags will change, there
- * is no need to update the scan response data.
- */
- __hci_req_update_adv_data(&req, hdev->cur_adv_instance);
-
- err = hci_req_run(&req, set_bredr_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
-
unlock:
hci_dev_unlock(hdev);
return err;
}
-static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
struct mgmt_mode *cp;
- bt_dev_dbg(hdev, "status %u", status);
-
- hci_dev_lock(hdev);
+ bt_dev_dbg(hdev, "err %d", err);
- cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
- if (!cmd)
- goto unlock;
+ if (err) {
+ u8 mgmt_err = mgmt_status(err);
- if (status) {
- mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status));
- goto remove;
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
+ goto done;
}
cp = cmd->param;
@@ -6105,13 +6234,23 @@ static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
break;
}
- send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
+ send_settings_rsp(cmd->sk, cmd->opcode, hdev);
new_settings(hdev, cmd->sk);
-remove:
- mgmt_pending_remove(cmd);
-unlock:
- hci_dev_unlock(hdev);
+done:
+ mgmt_pending_free(cmd);
+}
+
+static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_mode *cp = cmd->param;
+ u8 val = !!cp->val;
+
+ /* Force write of val */
+ hci_dev_set_flag(hdev, HCI_SC_ENABLED);
+
+ return hci_write_sc_support_sync(hdev, val);
}
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
@@ -6119,7 +6258,6 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
{
struct mgmt_mode *cp = data;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
u8 val;
int err;
@@ -6138,7 +6276,7 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
- MGMT_STATUS_INVALID_PARAMS);
+ MGMT_STATUS_INVALID_PARAMS);
hci_dev_lock(hdev);
@@ -6169,12 +6307,6 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
- if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
- MGMT_STATUS_BUSY);
- goto failed;
- }
-
val = !!cp->val;
if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
@@ -6183,18 +6315,18 @@ static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
goto failed;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
- if (!cmd) {
+ cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
+ if (!cmd)
err = -ENOMEM;
- goto failed;
- }
+ else
+ err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
+ set_secure_conn_complete);
- hci_req_init(&req, hdev);
- hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
- err = hci_req_run(&req, sc_enable_complete);
if (err < 0) {
- mgmt_pending_remove(cmd);
- goto failed;
+ mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
+ MGMT_STATUS_FAILED);
+ if (cmd)
+ mgmt_pending_free(cmd);
}
failed:
@@ -6508,14 +6640,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
return err;
}
-static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
+static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
+ struct mgmt_pending_cmd *cmd = data;
struct hci_conn *conn = cmd->user_data;
+ struct mgmt_cp_get_conn_info *cp = cmd->param;
struct mgmt_rp_get_conn_info rp;
- int err;
+ u8 status;
+
+ bt_dev_dbg(hdev, "err %d", err);
- memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
+ memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
+ status = mgmt_status(err);
if (status == MGMT_STATUS_SUCCESS) {
rp.rssi = conn->rssi;
rp.tx_power = conn->tx_power;
@@ -6526,67 +6663,58 @@ static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
rp.max_tx_power = HCI_TX_POWER_INVALID;
}
- err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
- status, &rp, sizeof(rp));
+ mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
+ &rp, sizeof(rp));
- hci_conn_drop(conn);
- hci_conn_put(conn);
+ if (conn) {
+ hci_conn_drop(conn);
+ hci_conn_put(conn);
+ }
- return err;
+ mgmt_pending_free(cmd);
}
-static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
- u16 opcode)
+static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
- struct hci_cp_read_rssi *cp;
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_get_conn_info *cp = cmd->param;
struct hci_conn *conn;
- u16 handle;
- u8 status;
-
- bt_dev_dbg(hdev, "status 0x%02x", hci_status);
+ int err;
+ __le16 handle;
- hci_dev_lock(hdev);
+ /* Make sure we are still connected */
+ if (cp->addr.type == BDADDR_BREDR)
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
+ else
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
- /* Commands sent in request are either Read RSSI or Read Transmit Power
- * Level so we check which one was last sent to retrieve connection
- * handle. Both commands have handle as first parameter so it's safe to
- * cast data on the same command struct.
- *
- * First command sent is always Read RSSI and we fail only if it fails.
- * In other case we simply override error to indicate success as we
- * already remembered if TX power value is actually valid.
- */
- cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
- if (!cp) {
- cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
- status = MGMT_STATUS_SUCCESS;
- } else {
- status = mgmt_status(hci_status);
+ if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
+ if (cmd->user_data) {
+ hci_conn_drop(cmd->user_data);
+ hci_conn_put(cmd->user_data);
+ cmd->user_data = NULL;
+ }
+ return MGMT_STATUS_NOT_CONNECTED;
}
- if (!cp) {
- bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
- goto unlock;
- }
+ handle = cpu_to_le16(conn->handle);
- handle = __le16_to_cpu(cp->handle);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- if (!conn) {
- bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
- handle);
- goto unlock;
- }
+ /* Refresh RSSI each time */
+ err = hci_read_rssi_sync(hdev, handle);
- cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
- if (!cmd)
- goto unlock;
+ /* For LE links TX power does not change, so we don't need to
+ * query for it once the value is known.
+ */
+ if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
+ conn->tx_power == HCI_TX_POWER_INVALID))
+ err = hci_read_tx_power_sync(hdev, handle, 0x00);
- cmd->cmd_complete(cmd, status);
- mgmt_pending_remove(cmd);
+ /* Max TX power needs to be read only once per connection */
+ if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
+ err = hci_read_tx_power_sync(hdev, handle, 0x01);
-unlock:
- hci_dev_unlock(hdev);
+ return err;
}
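get_conn_info_sync() always refreshes RSSI but reads TX power only while it is still unknown (LE links keep a constant TX power) and max TX power only once per connection. The same read-once pattern as a standalone sketch, with placeholder read_*() helpers:

#include <stdio.h>

#define TX_POWER_INVALID 127	/* sentinel: not read yet */

struct conn { int rssi, tx_power, max_tx_power, is_le; };

static int read_rssi(struct conn *c)     { c->rssi = -60;        return 0; }
static int read_tx_power(struct conn *c) { c->tx_power = 4;      return 0; }
static int read_max_tx(struct conn *c)   { c->max_tx_power = 10; return 0; }

static int refresh_conn_info(struct conn *c)
{
	int err = read_rssi(c);				/* always refreshed */

	if (!err && (!c->is_le || c->tx_power == TX_POWER_INVALID))
		err = read_tx_power(c);			/* LE: only while unknown */

	if (!err && c->max_tx_power == TX_POWER_INVALID)
		err = read_max_tx(c);			/* once per connection */

	return err;
}

int main(void)
{
	struct conn c = { 0, TX_POWER_INVALID, TX_POWER_INVALID, 1 };

	refresh_conn_info(&c);
	printf("rssi %d tx %d max %d\n", c.rssi, c.tx_power, c.max_tx_power);
	return 0;
}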
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
@@ -6631,12 +6759,6 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}
- if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
- err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
- MGMT_STATUS_BUSY, &rp, sizeof(rp));
- goto unlock;
- }
-
/* To avoid client trying to guess when to poll again for information we
* calculate conn info age as random value between min/max set in hdev.
*/
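The comment above explains why the cache lifetime is randomised between hdev's min/max: a fixed interval would let clients time their polling to always force a controller query. A worked example of the age check with made-up numbers:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static int conn_info_age_ms(int min_ms, int max_ms)
{
	return min_ms + rand() % (max_ms - min_ms + 1);
}

int main(void)
{
	long long last_refresh = 0;	/* 0: never read from the controller */
	long long now_ms = 5000;	/* pretend 5s of uptime */
	int age;

	srand((unsigned int)time(NULL));
	age = conn_info_age_ms(1000, 3000);

	/* Query again if the cached data is older than the random age,
	 * or if it has never been read at all. */
	if (now_ms > last_refresh + age || last_refresh == 0)
		printf("refresh RSSI/TX power (age %d ms)\n", age);
	else
		printf("serve cached values\n");
	return 0;
}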
@@ -6650,49 +6772,28 @@ static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
if (time_after(jiffies, conn->conn_info_timestamp +
msecs_to_jiffies(conn_info_age)) ||
!conn->conn_info_timestamp) {
- struct hci_request req;
- struct hci_cp_read_tx_power req_txp_cp;
- struct hci_cp_read_rssi req_rssi_cp;
struct mgmt_pending_cmd *cmd;
- hci_req_init(&req, hdev);
- req_rssi_cp.handle = cpu_to_le16(conn->handle);
- hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
- &req_rssi_cp);
-
- /* For LE links TX power does not change thus we don't need to
- * query for it once value is known.
- */
- if (!bdaddr_type_is_le(cp->addr.type) ||
- conn->tx_power == HCI_TX_POWER_INVALID) {
- req_txp_cp.handle = cpu_to_le16(conn->handle);
- req_txp_cp.type = 0x00;
- hci_req_add(&req, HCI_OP_READ_TX_POWER,
- sizeof(req_txp_cp), &req_txp_cp);
- }
+ cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
+ len);
+ if (!cmd)
+ err = -ENOMEM;
+ else
+ err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
+ cmd, get_conn_info_complete);
- /* Max TX power needs to be read only once per connection */
- if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
- req_txp_cp.handle = cpu_to_le16(conn->handle);
- req_txp_cp.type = 0x01;
- hci_req_add(&req, HCI_OP_READ_TX_POWER,
- sizeof(req_txp_cp), &req_txp_cp);
- }
+ if (err < 0) {
+ mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
+ MGMT_STATUS_FAILED, &rp, sizeof(rp));
- err = hci_req_run(&req, conn_info_refresh_complete);
- if (err < 0)
- goto unlock;
+ if (cmd)
+ mgmt_pending_free(cmd);
- cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
- data, len);
- if (!cmd) {
- err = -ENOMEM;
goto unlock;
}
hci_conn_hold(conn);
cmd->user_data = hci_conn_get(conn);
- cmd->cmd_complete = conn_info_cmd_complete;
conn->conn_info_timestamp = jiffies;
} else {
@@ -6710,82 +6811,76 @@ unlock:
return err;
}
-static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
+static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
- struct hci_conn *conn = cmd->user_data;
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_get_clock_info *cp = cmd->param;
struct mgmt_rp_get_clock_info rp;
- struct hci_dev *hdev;
- int err;
+ struct hci_conn *conn = cmd->user_data;
+ u8 status = mgmt_status(err);
+
+ bt_dev_dbg(hdev, "err %d", err);
memset(&rp, 0, sizeof(rp));
- memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
+ bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+ rp.addr.type = cp->addr.type;
- if (status)
+ if (err)
goto complete;
- hdev = hci_dev_get(cmd->index);
- if (hdev) {
- rp.local_clock = cpu_to_le32(hdev->clock);
- hci_dev_put(hdev);
- }
+ rp.local_clock = cpu_to_le32(hdev->clock);
if (conn) {
rp.piconet_clock = cpu_to_le32(conn->clock);
rp.accuracy = cpu_to_le16(conn->clock_accuracy);
- }
-
-complete:
- err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
- sizeof(rp));
-
- if (conn) {
hci_conn_drop(conn);
hci_conn_put(conn);
}
- return err;
+complete:
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
+ sizeof(rp));
+
+ mgmt_pending_free(cmd);
}
-static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
- struct hci_cp_read_clock *hci_cp;
- struct mgmt_pending_cmd *cmd;
- struct hci_conn *conn;
-
- bt_dev_dbg(hdev, "status %u", status);
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_get_clock_info *cp = cmd->param;
+ struct hci_cp_read_clock hci_cp;
+ struct hci_conn *conn = cmd->user_data;
+ int err;
- hci_dev_lock(hdev);
+ memset(&hci_cp, 0, sizeof(hci_cp));
+ err = hci_read_clock_sync(hdev, &hci_cp);
- hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
- if (!hci_cp)
- goto unlock;
+ if (conn) {
+ /* Make sure connection still exists */
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
- if (hci_cp->which) {
- u16 handle = __le16_to_cpu(hci_cp->handle);
- conn = hci_conn_hash_lookup_handle(hdev, handle);
- } else {
- conn = NULL;
+ if (conn && conn == cmd->user_data &&
+ conn->state == BT_CONNECTED) {
+ hci_cp.handle = cpu_to_le16(conn->handle);
+ hci_cp.which = 0x01; /* Piconet clock */
+ err = hci_read_clock_sync(hdev, &hci_cp);
+ } else if (cmd->user_data) {
+ hci_conn_drop(cmd->user_data);
+ hci_conn_put(cmd->user_data);
+ cmd->user_data = NULL;
+ }
}
- cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
- if (!cmd)
- goto unlock;
-
- cmd->cmd_complete(cmd, mgmt_status(status));
- mgmt_pending_remove(cmd);
-
-unlock:
- hci_dev_unlock(hdev);
+ return err;
}
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+ u16 len)
{
struct mgmt_cp_get_clock_info *cp = data;
struct mgmt_rp_get_clock_info rp;
- struct hci_cp_read_clock hci_cp;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
struct hci_conn *conn;
int err;
@@ -6823,31 +6918,25 @@ static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
conn = NULL;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
- if (!cmd) {
+ cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
+ if (!cmd)
err = -ENOMEM;
- goto unlock;
- }
-
- cmd->cmd_complete = clock_info_cmd_complete;
+ else
+ err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
+ get_clock_info_complete);
- hci_req_init(&req, hdev);
+ if (err < 0) {
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
+ MGMT_STATUS_FAILED, &rp, sizeof(rp));
- memset(&hci_cp, 0, sizeof(hci_cp));
- hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
+ if (cmd)
+ mgmt_pending_free(cmd);
- if (conn) {
+ } else if (conn) {
hci_conn_hold(conn);
cmd->user_data = hci_conn_get(conn);
-
- hci_cp.handle = cpu_to_le16(conn->handle);
- hci_cp.which = 0x01; /* Piconet clock */
- hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
}
- err = hci_req_run(&req, get_clock_info_complete);
- if (err < 0)
- mgmt_pending_remove(cmd);
unlock:
hci_dev_unlock(hdev);
@@ -6928,6 +7017,11 @@ static void device_added(struct sock *sk, struct hci_dev *hdev,
mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}
+static int add_device_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_update_passive_scan_sync(hdev);
+}
+
static int add_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -6936,6 +7030,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
struct hci_conn_params *params;
int err;
u32 current_flags = 0;
+ u32 supported_flags;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -7007,15 +7102,20 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
addr_type);
if (params)
- current_flags = params->current_flags;
+ bitmap_to_arr32(&current_flags, params->flags,
+ __HCI_CONN_NUM_FLAGS);
}
- hci_update_background_scan(hdev);
+ err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
+ if (err < 0)
+ goto unlock;
added:
device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
+ bitmap_to_arr32(&supported_flags, hdev->conn_flags,
+ __HCI_CONN_NUM_FLAGS);
device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
- SUPPORTED_DEVICE_FLAGS(), current_flags);
+ supported_flags, current_flags);
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
MGMT_STATUS_SUCCESS, &cp->addr,
@@ -7037,6 +7137,11 @@ static void device_removed(struct sock *sk, struct hci_dev *hdev,
mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}
+static int remove_device_sync(struct hci_dev *hdev, void *data)
+{
+ return hci_update_passive_scan_sync(hdev);
+}
+
static int remove_device(struct sock *sk, struct hci_dev *hdev,
void *data, u16 len)
{
@@ -7116,7 +7221,6 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
list_del(&params->action);
list_del(&params->list);
kfree(params);
- hci_update_background_scan(hdev);
device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
} else {
@@ -7153,10 +7257,10 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
}
bt_dev_dbg(hdev, "All LE connection parameters were removed");
-
- hci_update_background_scan(hdev);
}
+ hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
+
complete:
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
MGMT_STATUS_SUCCESS, &cp->addr,
@@ -7359,21 +7463,27 @@ unlock:
return err;
}
-static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
- u16 opcode, struct sk_buff *skb)
+static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
+ int err)
{
const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
u8 *h192, *r192, *h256, *r256;
- struct mgmt_pending_cmd *cmd;
+ struct mgmt_pending_cmd *cmd = data;
+ struct sk_buff *skb = cmd->skb;
+ u8 status = mgmt_status(err);
u16 eir_len;
- int err;
- bt_dev_dbg(hdev, "status %u", status);
+ if (!status) {
+ if (!skb)
+ status = MGMT_STATUS_FAILED;
+ else if (IS_ERR(skb))
+ status = mgmt_status(PTR_ERR(skb));
+ else
+ status = mgmt_status(skb->data[0]);
+ }
- cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
- if (!cmd)
- return;
+ bt_dev_dbg(hdev, "status %u", status);
mgmt_cp = cmd->param;
@@ -7385,7 +7495,7 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
r192 = NULL;
h256 = NULL;
r256 = NULL;
- } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
+ } else if (!bredr_sc_enabled(hdev)) {
struct hci_rp_read_local_oob_data *rp;
if (skb->len != sizeof(*rp)) {
@@ -7466,6 +7576,9 @@ send_rsp:
mgmt_rp, sizeof(*mgmt_rp) + eir_len,
HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
+ if (skb && !IS_ERR(skb))
+ kfree_skb(skb);
+
kfree(mgmt_rp);
mgmt_pending_remove(cmd);
}
@@ -7474,7 +7587,6 @@ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
struct mgmt_cp_read_local_oob_ext_data *cp)
{
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
@@ -7482,14 +7594,9 @@ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
if (!cmd)
return -ENOMEM;
- hci_req_init(&req, hdev);
-
- if (bredr_sc_enabled(hdev))
- hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
- else
- hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+ err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
+ read_local_oob_ext_data_complete);
- err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
if (err < 0) {
mgmt_pending_remove(cmd);
return err;
@@ -7713,13 +7820,6 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
MGMT_STATUS_REJECTED);
- /* Enabling the experimental LL Privay support disables support for
- * advertising.
- */
- if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
- MGMT_STATUS_NOT_SUPPORTED);
-
hci_dev_lock(hdev);
rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
@@ -7876,58 +7976,66 @@ static bool adv_busy(struct hci_dev *hdev)
pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
}
-static void add_advertising_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
+ int err)
{
- struct mgmt_pending_cmd *cmd;
- struct mgmt_cp_add_advertising *cp;
- struct mgmt_rp_add_advertising rp;
- struct adv_info *adv_instance, *n;
- u8 instance;
+ struct adv_info *adv, *n;
- bt_dev_dbg(hdev, "status %u", status);
+ bt_dev_dbg(hdev, "err %d", err);
hci_dev_lock(hdev);
- cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
- if (!cmd)
- cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);
+ list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
+ u8 instance;
- list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
- if (!adv_instance->pending)
+ if (!adv->pending)
continue;
- if (!status) {
- adv_instance->pending = false;
+ if (!err) {
+ adv->pending = false;
continue;
}
- instance = adv_instance->instance;
+ instance = adv->instance;
if (hdev->cur_adv_instance == instance)
cancel_adv_timeout(hdev);
hci_remove_adv_instance(hdev, instance);
- mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
+ mgmt_advertising_removed(sk, hdev, instance);
}
- if (!cmd)
- goto unlock;
+ hci_dev_unlock(hdev);
+}
+
+static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_advertising *cp = cmd->param;
+ struct mgmt_rp_add_advertising rp;
+
+ memset(&rp, 0, sizeof(rp));
- cp = cmd->param;
rp.instance = cp->instance;
- if (status)
+ if (err)
mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status));
+ mgmt_status(err));
else
mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status), &rp, sizeof(rp));
+ mgmt_status(err), &rp, sizeof(rp));
- mgmt_pending_remove(cmd);
+ add_adv_complete(hdev, cmd->sk, cp->instance, err);
-unlock:
- hci_dev_unlock(hdev);
+ mgmt_pending_free(cmd);
+}
+
+static int add_advertising_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_advertising *cp = cmd->param;
+
+ return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
@@ -7943,7 +8051,6 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
struct adv_info *next_instance;
int err;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
bt_dev_dbg(hdev, "sock %p", sk);
@@ -7952,13 +8059,6 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
status);
- /* Enabling the experimental LL Privay support disables support for
- * advertising.
- */
- if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
- MGMT_STATUS_NOT_SUPPORTED);
-
if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -8051,25 +8151,19 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
/* We're good to go, update advertising data, parameters, and start
* advertising.
*/
- cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
+ cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
data_len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
- hci_req_init(&req, hdev);
-
- err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
+ cp->instance = schedule_instance;
- if (!err)
- err = hci_req_run(&req, add_advertising_complete);
-
- if (err < 0) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
- MGMT_STATUS_FAILED);
- mgmt_pending_remove(cmd);
- }
+ err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
+ add_advertising_complete);
+ if (err < 0)
+ mgmt_pending_free(cmd);
unlock:
hci_dev_unlock(hdev);
@@ -8077,30 +8171,25 @@ unlock:
return err;
}
-static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
+ int err)
{
- struct mgmt_pending_cmd *cmd;
- struct mgmt_cp_add_ext_adv_params *cp;
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
struct mgmt_rp_add_ext_adv_params rp;
- struct adv_info *adv_instance;
+ struct adv_info *adv;
u32 flags;
BT_DBG("%s", hdev->name);
hci_dev_lock(hdev);
- cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
- if (!cmd)
- goto unlock;
-
- cp = cmd->param;
- adv_instance = hci_find_adv_instance(hdev, cp->instance);
- if (!adv_instance)
+ adv = hci_find_adv_instance(hdev, cp->instance);
+ if (!adv)
goto unlock;
rp.instance = cp->instance;
- rp.tx_power = adv_instance->tx_power;
+ rp.tx_power = adv->tx_power;
/* While we're at it, inform userspace of the available space for this
* advertisement, given the flags that will be used.
@@ -8109,39 +8198,44 @@ static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
- if (status) {
+ if (err) {
/* If this advertisement was previously advertising and we
* failed to update it, we signal that it has been removed and
* delete its structure
*/
- if (!adv_instance->pending)
+ if (!adv->pending)
mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
hci_remove_adv_instance(hdev, cp->instance);
mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status));
-
+ mgmt_status(err));
} else {
mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
- mgmt_status(status), &rp, sizeof(rp));
+ mgmt_status(err), &rp, sizeof(rp));
}
unlock:
if (cmd)
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
hci_dev_unlock(hdev);
}
+static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
+
+ return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
+}
+
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
struct mgmt_cp_add_ext_adv_params *cp = data;
struct mgmt_rp_add_ext_adv_params rp;
struct mgmt_pending_cmd *cmd = NULL;
- struct adv_info *adv_instance;
- struct hci_request req;
u32 flags, min_interval, max_interval;
u16 timeout, duration;
u8 status;
@@ -8223,29 +8317,18 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
/* Submit request for advertising params if ext adv available */
if (ext_adv_capable(hdev)) {
- hci_req_init(&req, hdev);
- adv_instance = hci_find_adv_instance(hdev, cp->instance);
-
- /* Updating parameters of an active instance will return a
- * Command Disallowed error, so we must first disable the
- * instance if it is active.
- */
- if (!adv_instance->pending)
- __hci_req_disable_ext_adv_instance(&req, cp->instance);
-
- __hci_req_setup_ext_adv_instance(&req, cp->instance);
-
- err = hci_req_run(&req, add_ext_adv_params_complete);
-
- if (!err)
- cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
- hdev, data, data_len);
+ cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
+ data, data_len);
if (!cmd) {
err = -ENOMEM;
hci_remove_adv_instance(hdev, cp->instance);
goto unlock;
}
+ err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
+ add_ext_adv_params_complete);
+ if (err < 0)
+ mgmt_pending_free(cmd);
} else {
rp.instance = cp->instance;
rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
@@ -8262,6 +8345,49 @@ unlock:
return err;
}
+static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
+ struct mgmt_rp_add_advertising rp;
+
+ add_adv_complete(hdev, cmd->sk, cp->instance, err);
+
+ memset(&rp, 0, sizeof(rp));
+
+ rp.instance = cp->instance;
+
+ if (err)
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(err));
+ else
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(err), &rp, sizeof(rp));
+
+ mgmt_pending_free(cmd);
+}
+
+static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
+ int err;
+
+ if (ext_adv_capable(hdev)) {
+ err = hci_update_adv_data_sync(hdev, cp->instance);
+ if (err)
+ return err;
+
+ err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
+ if (err)
+ return err;
+
+ return hci_enable_ext_advertising_sync(hdev, cp->instance);
+ }
+
+ return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
+}
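add_ext_adv_data_sync() is a straight sequence: update advertising data, then scan response data, then enable the extended instance, bailing out at the first failure (and falling back to the software scheduler on non-extended controllers). The error-propagation shape, reduced to placeholder steps:

#include <stdio.h>

static int update_adv_data(void) { return 0; }
static int update_scan_rsp(void) { return 0; }
static int enable_ext_adv(void)  { return 0; }

/* Stop at the first step that fails; 0 means the whole chain succeeded. */
static int configure_instance(void)
{
	int err;

	err = update_adv_data();
	if (err)
		return err;

	err = update_scan_rsp();
	if (err)
		return err;

	return enable_ext_adv();
}

int main(void)
{
	printf("configure: %d\n", configure_instance());
	return 0;
}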
+
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
@@ -8272,7 +8398,6 @@ static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
struct adv_info *adv_instance;
int err = 0;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
BT_DBG("%s", hdev->name);
@@ -8314,78 +8439,52 @@ static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
cp->data, cp->scan_rsp_len,
cp->data + cp->adv_data_len);
- /* We're good to go, update advertising data, parameters, and start
- * advertising.
- */
-
- hci_req_init(&req, hdev);
-
- hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
-
- if (ext_adv_capable(hdev)) {
- __hci_req_update_adv_data(&req, cp->instance);
- __hci_req_update_scan_rsp_data(&req, cp->instance);
- __hci_req_enable_ext_advertising(&req, cp->instance);
-
- } else {
- /* If using software rotation, determine next instance to use */
-
- if (hdev->cur_adv_instance == cp->instance) {
- /* If the currently advertised instance is being changed
- * then cancel the current advertising and schedule the
- * next instance. If there is only one instance then the
- * overridden advertising data will be visible right
- * away
- */
- cancel_adv_timeout(hdev);
-
- next_instance = hci_get_next_instance(hdev,
- cp->instance);
- if (next_instance)
- schedule_instance = next_instance->instance;
- } else if (!hdev->adv_instance_timeout) {
- /* Immediately advertise the new instance if no other
- * instance is currently being advertised.
- */
- schedule_instance = cp->instance;
- }
+ /* If using software rotation, determine next instance to use */
+ if (hdev->cur_adv_instance == cp->instance) {
+ /* If the currently advertised instance is being changed
+ * then cancel the current advertising and schedule the
+ * next instance. If there is only one instance then the
+ * overridden advertising data will be visible right
+ * away
+ */
+ cancel_adv_timeout(hdev);
- /* If the HCI_ADVERTISING flag is set or there is no instance to
- * be advertised then we have no HCI communication to make.
- * Simply return.
+ next_instance = hci_get_next_instance(hdev, cp->instance);
+ if (next_instance)
+ schedule_instance = next_instance->instance;
+ } else if (!hdev->adv_instance_timeout) {
+ /* Immediately advertise the new instance if no other
+ * instance is currently being advertised.
*/
- if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
- !schedule_instance) {
- if (adv_instance->pending) {
- mgmt_advertising_added(sk, hdev, cp->instance);
- adv_instance->pending = false;
- }
- rp.instance = cp->instance;
- err = mgmt_cmd_complete(sk, hdev->id,
- MGMT_OP_ADD_EXT_ADV_DATA,
- MGMT_STATUS_SUCCESS, &rp,
- sizeof(rp));
- goto unlock;
- }
+ schedule_instance = cp->instance;
+ }
- err = __hci_req_schedule_adv_instance(&req, schedule_instance,
- true);
+ /* If the HCI_ADVERTISING flag is set or there is no instance to
+ * be advertised then we have no HCI communication to make.
+ * Simply return.
+ */
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
+ if (adv_instance->pending) {
+ mgmt_advertising_added(sk, hdev, cp->instance);
+ adv_instance->pending = false;
+ }
+ rp.instance = cp->instance;
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
+ MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
+ goto unlock;
}
- cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
+ cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
data_len);
if (!cmd) {
err = -ENOMEM;
goto clear_new_instance;
}
- if (!err)
- err = hci_req_run(&req, add_advertising_complete);
-
+ err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
+ add_ext_adv_data_complete);
if (err < 0) {
- err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
- MGMT_STATUS_FAILED);
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
goto clear_new_instance;
}
@@ -8408,54 +8507,53 @@ unlock:
return err;
}
-static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
- u16 opcode)
+static void remove_advertising_complete(struct hci_dev *hdev, void *data,
+ int err)
{
- struct mgmt_pending_cmd *cmd;
- struct mgmt_cp_remove_advertising *cp;
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_remove_advertising *cp = cmd->param;
struct mgmt_rp_remove_advertising rp;
- bt_dev_dbg(hdev, "status %u", status);
+ bt_dev_dbg(hdev, "err %d", err);
- hci_dev_lock(hdev);
+ memset(&rp, 0, sizeof(rp));
+ rp.instance = cp->instance;
- /* A failure status here only means that we failed to disable
- * advertising. Otherwise, the advertising instance has been removed,
- * so report success.
- */
- cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
- if (!cmd)
- goto unlock;
+ if (err)
+ mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
+ mgmt_status(err));
+ else
+ mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
+ MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
- cp = cmd->param;
- rp.instance = cp->instance;
+ mgmt_pending_free(cmd);
+}
- mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
- &rp, sizeof(rp));
- mgmt_pending_remove(cmd);
+static int remove_advertising_sync(struct hci_dev *hdev, void *data)
+{
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_remove_advertising *cp = cmd->param;
+ int err;
-unlock:
- hci_dev_unlock(hdev);
+ err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
+ if (err)
+ return err;
+
+ if (list_empty(&hdev->adv_instances))
+ err = hci_disable_advertising_sync(hdev);
+
+ return err;
}
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
void *data, u16 data_len)
{
struct mgmt_cp_remove_advertising *cp = data;
- struct mgmt_rp_remove_advertising rp;
struct mgmt_pending_cmd *cmd;
- struct hci_request req;
int err;
bt_dev_dbg(hdev, "sock %p", sk);
- /* Enabling the experimental LL Privay support disables support for
- * advertising.
- */
- if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
- MGMT_STATUS_NOT_SUPPORTED);
-
hci_dev_lock(hdev);
if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
@@ -8479,44 +8577,17 @@ static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
goto unlock;
}
- hci_req_init(&req, hdev);
-
- /* If we use extended advertising, instance is disabled and removed */
- if (ext_adv_capable(hdev)) {
- __hci_req_disable_ext_adv_instance(&req, cp->instance);
- __hci_req_remove_ext_adv_instance(&req, cp->instance);
- }
-
- hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
-
- if (list_empty(&hdev->adv_instances))
- __hci_req_disable_advertising(&req);
-
- /* If no HCI commands have been collected so far or the HCI_ADVERTISING
- * flag is set or the device isn't powered then we have no HCI
- * communication to make. Simply return.
- */
- if (skb_queue_empty(&req.cmd_q) ||
- !hdev_is_powered(hdev) ||
- hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
- hci_req_purge(&req);
- rp.instance = cp->instance;
- err = mgmt_cmd_complete(sk, hdev->id,
- MGMT_OP_REMOVE_ADVERTISING,
- MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
- goto unlock;
- }
-
- cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
+ cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
data_len);
if (!cmd) {
err = -ENOMEM;
goto unlock;
}
- err = hci_req_run(&req, remove_advertising_complete);
+ err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
+ remove_advertising_complete);
if (err < 0)
- mgmt_pending_remove(cmd);
+ mgmt_pending_free(cmd);
unlock:
hci_dev_unlock(hdev);
@@ -8758,31 +8829,6 @@ void mgmt_index_removed(struct hci_dev *hdev)
HCI_MGMT_EXT_INDEX_EVENTS);
}
-/* This function requires the caller holds hdev->lock */
-static void restart_le_actions(struct hci_dev *hdev)
-{
- struct hci_conn_params *p;
-
- list_for_each_entry(p, &hdev->le_conn_params, list) {
- /* Needed for AUTO_OFF case where might not "really"
- * have been powered off.
- */
- list_del_init(&p->action);
-
- switch (p->auto_connect) {
- case HCI_AUTO_CONN_DIRECT:
- case HCI_AUTO_CONN_ALWAYS:
- list_add(&p->action, &hdev->pend_le_conns);
- break;
- case HCI_AUTO_CONN_REPORT:
- list_add(&p->action, &hdev->pend_le_reports);
- break;
- default:
- break;
- }
- }
-}
-
void mgmt_power_on(struct hci_dev *hdev, int err)
{
struct cmd_lookup match = { NULL, hdev };
@@ -8793,7 +8839,7 @@ void mgmt_power_on(struct hci_dev *hdev, int err)
if (!err) {
restart_le_actions(hdev);
- hci_update_background_scan(hdev);
+ hci_update_passive_scan(hdev);
}
mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
@@ -9008,11 +9054,19 @@ void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
u8 *name, u8 name_len)
{
- char buf[512];
- struct mgmt_ev_device_connected *ev = (void *) buf;
+ struct sk_buff *skb;
+ struct mgmt_ev_device_connected *ev;
u16 eir_len = 0;
u32 flags = 0;
+ if (conn->le_adv_data_len > 0)
+ skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
+ sizeof(*ev) + conn->le_adv_data_len);
+ else
+ skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
+ sizeof(*ev) + 2 + name_len + 5);
+
+ ev = skb_put(skb, sizeof(*ev));
bacpy(&ev->addr.bdaddr, &conn->dst);
ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
@@ -9026,24 +9080,26 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
* adding any BR/EDR data to the LE adv.
*/
if (conn->le_adv_data_len > 0) {
- memcpy(&ev->eir[eir_len],
- conn->le_adv_data, conn->le_adv_data_len);
+ skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
eir_len = conn->le_adv_data_len;
} else {
- if (name_len > 0)
+ if (name_len > 0) {
eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
name, name_len);
+ skb_put(skb, eir_len);
+ }
- if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
+ if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) {
eir_len = eir_append_data(ev->eir, eir_len,
EIR_CLASS_OF_DEV,
conn->dev_class, 3);
+ skb_put(skb, 5);
+ }
}
ev->eir_len = cpu_to_le16(eir_len);
- mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
- sizeof(*ev) + eir_len, NULL);
+ mgmt_event_skb(skb, NULL);
}
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
@@ -9349,74 +9405,6 @@ void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
sock_put(match.sk);
}
-static void clear_eir(struct hci_request *req)
-{
- struct hci_dev *hdev = req->hdev;
- struct hci_cp_write_eir cp;
-
- if (!lmp_ext_inq_capable(hdev))
- return;
-
- memset(hdev->eir, 0, sizeof(hdev->eir));
-
- memset(&cp, 0, sizeof(cp));
-
- hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
-}
-
-void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
-{
- struct cmd_lookup match = { NULL, hdev };
- struct hci_request req;
- bool changed = false;
-
- if (status) {
- u8 mgmt_err = mgmt_status(status);
-
- if (enable && hci_dev_test_and_clear_flag(hdev,
- HCI_SSP_ENABLED)) {
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
- new_settings(hdev, NULL);
- }
-
- mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
- &mgmt_err);
- return;
- }
-
- if (enable) {
- changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
- } else {
- changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
- if (!changed)
- changed = hci_dev_test_and_clear_flag(hdev,
- HCI_HS_ENABLED);
- else
- hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
- }
-
- mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
-
- if (changed)
- new_settings(hdev, match.sk);
-
- if (match.sk)
- sock_put(match.sk);
-
- hci_req_init(&req, hdev);
-
- if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
- if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
- hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
- sizeof(enable), &enable);
- __hci_req_update_eir(&req);
- } else {
- clear_eir(&req);
- }
-
- hci_req_run(&req, NULL);
-}
-
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
struct cmd_lookup *match = data;
@@ -9605,9 +9593,8 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
- char buf[512];
- struct mgmt_ev_device_found *ev = (void *)buf;
- size_t ev_size;
+ struct sk_buff *skb;
+ struct mgmt_ev_device_found *ev;
/* Don't send events for a non-kernel initiated discovery. With
* LE one exception is if we have pend_le_reports > 0 in which
@@ -9642,13 +9629,13 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
}
}
- /* Make sure that the buffer is big enough. The 5 extra bytes
- * are for the potential CoD field.
- */
- if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
+ /* Allocate skb. The 5 extra bytes are for the potential CoD field */
+ skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
+ sizeof(*ev) + eir_len + scan_rsp_len + 5);
+ if (!skb)
return;
- memset(buf, 0, sizeof(buf));
+ ev = skb_put(skb, sizeof(*ev));
/* In case of device discovery with BR/EDR devices (pre 1.2), the
* RSSI value was reported as 0 when not available. This behavior
@@ -9669,44 +9656,57 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
if (eir_len > 0)
/* Copy EIR or advertising data into event */
- memcpy(ev->eir, eir, eir_len);
+ skb_put_data(skb, eir, eir_len);
- if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
- NULL))
- eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
- dev_class, 3);
+ if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
+ u8 eir_cod[5];
+
+ eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
+ dev_class, 3);
+ skb_put_data(skb, eir_cod, sizeof(eir_cod));
+ }
if (scan_rsp_len > 0)
/* Append scan response data to event */
- memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
+ skb_put_data(skb, scan_rsp, scan_rsp_len);
ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
- ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
- mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
+ mgmt_event_skb(skb, NULL);
}
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
+ struct sk_buff *skb;
struct mgmt_ev_device_found *ev;
- char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
u16 eir_len;
+ u32 flags;
- ev = (struct mgmt_ev_device_found *) buf;
-
- memset(buf, 0, sizeof(buf));
+ if (name_len)
+ skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
+ sizeof(*ev) + 2 + name_len);
+ else
+ skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, sizeof(*ev));
+ ev = skb_put(skb, sizeof(*ev));
bacpy(&ev->addr.bdaddr, bdaddr);
ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
- eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
- name_len);
+ if (name) {
+ eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
+ name_len);
+ flags = 0;
+ skb_put(skb, eir_len);
+ } else {
+ eir_len = 0;
+ flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
+ }
ev->eir_len = cpu_to_le16(eir_len);
+ ev->flags = cpu_to_le32(flags);
- mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
+ mgmt_event_skb(skb, NULL);
}
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
index 0d0a6d77b9e8..edee60bbc7b4 100644
--- a/net/bluetooth/mgmt_util.c
+++ b/net/bluetooth/mgmt_util.c
@@ -56,40 +56,72 @@ static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie,
return skb;
}
-int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
- void *data, u16 data_len, int flag, struct sock *skip_sk)
+struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode,
+ unsigned int size)
{
struct sk_buff *skb;
- struct mgmt_hdr *hdr;
- skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
+ skb = alloc_skb(sizeof(struct mgmt_hdr) + size, GFP_KERNEL);
if (!skb)
- return -ENOMEM;
+ return skb;
- hdr = skb_put(skb, sizeof(*hdr));
- hdr->opcode = cpu_to_le16(event);
- if (hdev)
- hdr->index = cpu_to_le16(hdev->id);
- else
- hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
- hdr->len = cpu_to_le16(data_len);
+ skb_reserve(skb, sizeof(struct mgmt_hdr));
+ bt_cb(skb)->mgmt.hdev = hdev;
+ bt_cb(skb)->mgmt.opcode = opcode;
- if (data)
- skb_put_data(skb, data, data_len);
+ return skb;
+}
+
+int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag,
+ struct sock *skip_sk)
+{
+ struct hci_dev *hdev;
+ struct mgmt_hdr *hdr;
+ int len;
+
+ if (!skb)
+ return -EINVAL;
+
+ len = skb->len;
+ hdev = bt_cb(skb)->mgmt.hdev;
/* Time stamp */
__net_timestamp(skb);
- hci_send_to_channel(channel, skb, flag, skip_sk);
-
+ /* Send just the data, without headers, to the monitor */
if (channel == HCI_CHANNEL_CONTROL)
- hci_send_monitor_ctrl_event(hdev, event, data, data_len,
+ hci_send_monitor_ctrl_event(hdev, bt_cb(skb)->mgmt.opcode,
+ skb->data, skb->len,
skb_get_ktime(skb), flag, skip_sk);
+ hdr = skb_push(skb, sizeof(*hdr));
+ hdr->opcode = cpu_to_le16(bt_cb(skb)->mgmt.opcode);
+ if (hdev)
+ hdr->index = cpu_to_le16(hdev->id);
+ else
+ hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
+ hdr->len = cpu_to_le16(len);
+
+ hci_send_to_channel(channel, skb, flag, skip_sk);
+
kfree_skb(skb);
return 0;
}
+int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
+ void *data, u16 data_len, int flag, struct sock *skip_sk)
+{
+ struct sk_buff *skb;
+
+ skb = mgmt_alloc_skb(hdev, event, data_len);
+ if (!skb)
+ return -ENOMEM;
+
+ if (data)
+ skb_put_data(skb, data, data_len);
+
+ return mgmt_send_event_skb(channel, skb, flag, skip_sk);
+}
+
int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
struct sk_buff *skb, *mskb;
@@ -227,7 +259,7 @@ void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
}
}
-struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
struct hci_dev *hdev,
void *data, u16 len)
{
@@ -251,6 +283,19 @@ struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
cmd->sk = sk;
sock_hold(sk);
+ return cmd;
+}
+
+struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
+ struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_pending_cmd *cmd;
+
+ cmd = mgmt_pending_new(sk, opcode, hdev, data, len);
+ if (!cmd)
+ return NULL;
+
list_add(&cmd->list, &hdev->mgmt_pending);
return cmd;
diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h
index 6559f189213c..98e40395a383 100644
--- a/net/bluetooth/mgmt_util.h
+++ b/net/bluetooth/mgmt_util.h
@@ -27,10 +27,15 @@ struct mgmt_pending_cmd {
void *param;
size_t param_len;
struct sock *sk;
+ struct sk_buff *skb;
void *user_data;
int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
};
+struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode,
+ unsigned int size);
+int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag,
+ struct sock *skip_sk);
int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel,
void *data, u16 data_len, int flag, struct sock *skip_sk);
int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status);
@@ -49,5 +54,8 @@ void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
struct hci_dev *hdev,
void *data, u16 len);
+struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
+ struct hci_dev *hdev,
+ void *data, u16 len);
void mgmt_pending_free(struct mgmt_pending_cmd *cmd);
void mgmt_pending_remove(struct mgmt_pending_cmd *cmd);
diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c
index 255cffa554ee..6a943634b31a 100644
--- a/net/bluetooth/msft.c
+++ b/net/bluetooth/msft.c
@@ -93,7 +93,7 @@ struct msft_data {
struct list_head handle_map;
__u16 pending_add_handle;
__u16 pending_remove_handle;
- __u8 reregistering;
+ __u8 resuming;
__u8 suspending;
__u8 filter_enabled;
};
@@ -156,7 +156,6 @@ failed:
return false;
}
-/* This function requires the caller holds hdev->lock */
static void reregister_monitor(struct hci_dev *hdev, int handle)
{
struct adv_monitor *monitor;
@@ -166,9 +165,9 @@ static void reregister_monitor(struct hci_dev *hdev, int handle)
while (1) {
monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
if (!monitor) {
- /* All monitors have been reregistered */
- msft->reregistering = false;
- hci_update_background_scan(hdev);
+ /* All monitors have been resumed */
+ msft->resuming = false;
+ hci_update_passive_scan(hdev);
return;
}
@@ -185,67 +184,317 @@ static void reregister_monitor(struct hci_dev *hdev, int handle)
}
}
-/* This function requires the caller holds hdev->lock */
-static void remove_monitor_on_suspend(struct hci_dev *hdev, int handle)
+/* is_mgmt = true matches the handle exposed to userspace via mgmt.
+ * is_mgmt = false matches the handle used by the msft controller.
+ * This function requires the caller holds hdev->lock
+ */
+static struct msft_monitor_advertisement_handle_data *msft_find_handle_data
+ (struct hci_dev *hdev, u16 handle, bool is_mgmt)
+{
+ struct msft_monitor_advertisement_handle_data *entry;
+ struct msft_data *msft = hdev->msft_data;
+
+ list_for_each_entry(entry, &msft->handle_map, list) {
+ if (is_mgmt && entry->mgmt_handle == handle)
+ return entry;
+ if (!is_mgmt && entry->msft_handle == handle)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev,
+ u8 status, u16 opcode,
+ struct sk_buff *skb)
+{
+ struct msft_rp_le_monitor_advertisement *rp;
+ struct adv_monitor *monitor;
+ struct msft_monitor_advertisement_handle_data *handle_data;
+ struct msft_data *msft = hdev->msft_data;
+
+ hci_dev_lock(hdev);
+
+ monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle);
+ if (!monitor) {
+ bt_dev_err(hdev, "msft add advmon: monitor %u is not found!",
+ msft->pending_add_handle);
+ status = HCI_ERROR_UNSPECIFIED;
+ goto unlock;
+ }
+
+ if (status)
+ goto unlock;
+
+ rp = (struct msft_rp_le_monitor_advertisement *)skb->data;
+ if (skb->len < sizeof(*rp)) {
+ status = HCI_ERROR_UNSPECIFIED;
+ goto unlock;
+ }
+
+ handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL);
+ if (!handle_data) {
+ status = HCI_ERROR_UNSPECIFIED;
+ goto unlock;
+ }
+
+ handle_data->mgmt_handle = monitor->handle;
+ handle_data->msft_handle = rp->handle;
+ INIT_LIST_HEAD(&handle_data->list);
+ list_add(&handle_data->list, &msft->handle_map);
+
+ monitor->state = ADV_MONITOR_STATE_OFFLOADED;
+
+unlock:
+ if (status && monitor)
+ hci_free_adv_monitor(hdev, monitor);
+
+ hci_dev_unlock(hdev);
+
+ if (!msft->resuming)
+ hci_add_adv_patterns_monitor_complete(hdev, status);
+}
+
+static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
+ u8 status, u16 opcode,
+ struct sk_buff *skb)
{
+ struct msft_cp_le_cancel_monitor_advertisement *cp;
+ struct msft_rp_le_cancel_monitor_advertisement *rp;
struct adv_monitor *monitor;
+ struct msft_monitor_advertisement_handle_data *handle_data;
struct msft_data *msft = hdev->msft_data;
int err;
+ bool pending;
- while (1) {
- monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
- if (!monitor) {
- /* All monitors have been removed */
- msft->suspending = false;
- hci_update_background_scan(hdev);
+ if (status)
+ goto done;
+
+ rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data;
+ if (skb->len < sizeof(*rp)) {
+ status = HCI_ERROR_UNSPECIFIED;
+ goto done;
+ }
+
+ hci_dev_lock(hdev);
+
+ cp = hci_sent_cmd_data(hdev, hdev->msft_opcode);
+ handle_data = msft_find_handle_data(hdev, cp->handle, false);
+
+ if (handle_data) {
+ monitor = idr_find(&hdev->adv_monitors_idr,
+ handle_data->mgmt_handle);
+
+ if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED)
+ monitor->state = ADV_MONITOR_STATE_REGISTERED;
+
+ /* Do not free the monitor if it is being removed due to
+ * suspend. It will be re-monitored on resume.
+ */
+ if (monitor && !msft->suspending)
+ hci_free_adv_monitor(hdev, monitor);
+
+ list_del(&handle_data->list);
+ kfree(handle_data);
+ }
+
+ /* If removing all monitors is required, we need to continue the process
+ * here because it was paused earlier while waiting for the
+ * response from the controller.
+ */
+ if (msft->pending_remove_handle == 0) {
+ pending = hci_remove_all_adv_monitor(hdev, &err);
+ if (pending) {
+ hci_dev_unlock(hdev);
return;
}
- msft->pending_remove_handle = (u16)handle;
- err = __msft_remove_monitor(hdev, monitor, handle);
+ if (err)
+ status = HCI_ERROR_UNSPECIFIED;
+ }
- /* If success, return and wait for monitor removed callback */
- if (!err)
- return;
+ hci_dev_unlock(hdev);
+
+done:
+ if (!msft->suspending)
+ hci_remove_adv_monitor_complete(hdev, status);
+}
+
+static int msft_remove_monitor_sync(struct hci_dev *hdev,
+ struct adv_monitor *monitor)
+{
+ struct msft_cp_le_cancel_monitor_advertisement cp;
+ struct msft_monitor_advertisement_handle_data *handle_data;
+ struct sk_buff *skb;
+ u8 status;
+
+ handle_data = msft_find_handle_data(hdev, monitor->handle, true);
+
+ /* If no matched handle, just remove without telling controller */
+ if (!handle_data)
+ return -ENOENT;
+
+ cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT;
+ cp.handle = handle_data->msft_handle;
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp,
+ HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ status = skb->data[0];
+ skb_pull(skb, 1);
+
+ msft_le_cancel_monitor_advertisement_cb(hdev, status, hdev->msft_opcode,
+ skb);
+
+ return status;
+}
+
+/* This function requires the caller holds hci_req_sync_lock */
+int msft_suspend_sync(struct hci_dev *hdev)
+{
+ struct msft_data *msft = hdev->msft_data;
+ struct adv_monitor *monitor;
+ int handle = 0;
+
+ if (!msft || !msft_monitor_supported(hdev))
+ return 0;
+
+ msft->suspending = true;
+
+ while (1) {
+ monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
+ if (!monitor)
+ break;
+
+ msft_remove_monitor_sync(hdev, monitor);
- /* Otherwise free the monitor and keep removing */
- hci_free_adv_monitor(hdev, monitor);
handle++;
}
+
+ /* All monitors have been removed */
+ msft->suspending = false;
+
+ return 0;
}
-/* This function requires the caller holds hdev->lock */
-void msft_suspend(struct hci_dev *hdev)
+static bool msft_monitor_rssi_valid(struct adv_monitor *monitor)
{
- struct msft_data *msft = hdev->msft_data;
+ struct adv_rssi_thresholds *r = &monitor->rssi;
- if (!msft)
- return;
+ if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
+ r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX ||
+ r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
+ r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX)
+ return false;
- if (msft_monitor_supported(hdev)) {
- msft->suspending = true;
- /* Quitely remove all monitors on suspend to avoid waking up
- * the system.
- */
- remove_monitor_on_suspend(hdev, 0);
+ /* High_threshold_timeout is not supported,
+ * once high_threshold is reached, events are immediately reported.
+ */
+ if (r->high_threshold_timeout != 0)
+ return false;
+
+ if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX)
+ return false;
+
+ /* Sampling periods from 0x00 to 0xFF are all allowed */
+ return true;
+}
+
+static bool msft_monitor_pattern_valid(struct adv_monitor *monitor)
+{
+ return msft_monitor_rssi_valid(monitor);
+ /* No additional check needed for pattern-based monitor */
+}
+
+static int msft_add_monitor_sync(struct hci_dev *hdev,
+ struct adv_monitor *monitor)
+{
+ struct msft_cp_le_monitor_advertisement *cp;
+ struct msft_le_monitor_advertisement_pattern_data *pattern_data;
+ struct msft_le_monitor_advertisement_pattern *pattern;
+ struct adv_pattern *entry;
+ size_t total_size = sizeof(*cp) + sizeof(*pattern_data);
+ ptrdiff_t offset = 0;
+ u8 pattern_count = 0;
+ struct sk_buff *skb;
+ u8 status;
+
+ if (!msft_monitor_pattern_valid(monitor))
+ return -EINVAL;
+
+ list_for_each_entry(entry, &monitor->patterns, list) {
+ pattern_count++;
+ total_size += sizeof(*pattern) + entry->length;
}
+
+ cp = kmalloc(total_size, GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT;
+ cp->rssi_high = monitor->rssi.high_threshold;
+ cp->rssi_low = monitor->rssi.low_threshold;
+ cp->rssi_low_interval = (u8)monitor->rssi.low_threshold_timeout;
+ cp->rssi_sampling_period = monitor->rssi.sampling_period;
+
+ cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN;
+
+ pattern_data = (void *)cp->data;
+ pattern_data->count = pattern_count;
+
+ list_for_each_entry(entry, &monitor->patterns, list) {
+ pattern = (void *)(pattern_data->data + offset);
+ /* the length also includes data_type and offset */
+ pattern->length = entry->length + 2;
+ pattern->data_type = entry->ad_type;
+ pattern->start_byte = entry->offset;
+ memcpy(pattern->pattern, entry->value, entry->length);
+ offset += sizeof(*pattern) + entry->length;
+ }
+
+ skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp,
+ HCI_CMD_TIMEOUT);
+ kfree(cp);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ status = skb->data[0];
+ skb_pull(skb, 1);
+
+ msft_le_monitor_advertisement_cb(hdev, status, hdev->msft_opcode, skb);
+
+ return status;
}
-/* This function requires the caller holds hdev->lock */
-void msft_resume(struct hci_dev *hdev)
+/* This function requires the caller holds hci_req_sync_lock */
+int msft_resume_sync(struct hci_dev *hdev)
{
struct msft_data *msft = hdev->msft_data;
+ struct adv_monitor *monitor;
+ int handle = 0;
- if (!msft)
- return;
+ if (!msft || !msft_monitor_supported(hdev))
+ return 0;
- if (msft_monitor_supported(hdev)) {
- msft->reregistering = true;
- /* Monitors are removed on suspend, so we need to add all
- * monitors on resume.
- */
- reregister_monitor(hdev, 0);
+ msft->resuming = true;
+
+ while (1) {
+ monitor = idr_get_next(&hdev->adv_monitors_idr, &handle);
+ if (!monitor)
+ break;
+
+ msft_add_monitor_sync(hdev, monitor);
+
+ handle++;
}
+
+ /* All monitors have been resumed */
+ msft->resuming = false;
+
+ return 0;
}
void msft_do_open(struct hci_dev *hdev)
@@ -275,7 +524,7 @@ void msft_do_open(struct hci_dev *hdev)
}
if (msft_monitor_supported(hdev)) {
- msft->reregistering = true;
+ msft->resuming = true;
msft_set_filter_enable(hdev, true);
/* Monitors get removed on power off, so we need to explicitly
* tell the controller to re-monitor.
@@ -341,7 +590,7 @@ void msft_unregister(struct hci_dev *hdev)
kfree(msft);
}
-void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb)
+void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb)
{
struct msft_data *msft = hdev->msft_data;
u8 event;
@@ -381,151 +630,6 @@ __u64 msft_get_features(struct hci_dev *hdev)
return msft ? msft->features : 0;
}
-/* is_mgmt = true matches the handle exposed to userspace via mgmt.
- * is_mgmt = false matches the handle used by the msft controller.
- * This function requires the caller holds hdev->lock
- */
-static struct msft_monitor_advertisement_handle_data *msft_find_handle_data
- (struct hci_dev *hdev, u16 handle, bool is_mgmt)
-{
- struct msft_monitor_advertisement_handle_data *entry;
- struct msft_data *msft = hdev->msft_data;
-
- list_for_each_entry(entry, &msft->handle_map, list) {
- if (is_mgmt && entry->mgmt_handle == handle)
- return entry;
- if (!is_mgmt && entry->msft_handle == handle)
- return entry;
- }
-
- return NULL;
-}
-
-static void msft_le_monitor_advertisement_cb(struct hci_dev *hdev,
- u8 status, u16 opcode,
- struct sk_buff *skb)
-{
- struct msft_rp_le_monitor_advertisement *rp;
- struct adv_monitor *monitor;
- struct msft_monitor_advertisement_handle_data *handle_data;
- struct msft_data *msft = hdev->msft_data;
-
- hci_dev_lock(hdev);
-
- monitor = idr_find(&hdev->adv_monitors_idr, msft->pending_add_handle);
- if (!monitor) {
- bt_dev_err(hdev, "msft add advmon: monitor %u is not found!",
- msft->pending_add_handle);
- status = HCI_ERROR_UNSPECIFIED;
- goto unlock;
- }
-
- if (status)
- goto unlock;
-
- rp = (struct msft_rp_le_monitor_advertisement *)skb->data;
- if (skb->len < sizeof(*rp)) {
- status = HCI_ERROR_UNSPECIFIED;
- goto unlock;
- }
-
- handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL);
- if (!handle_data) {
- status = HCI_ERROR_UNSPECIFIED;
- goto unlock;
- }
-
- handle_data->mgmt_handle = monitor->handle;
- handle_data->msft_handle = rp->handle;
- INIT_LIST_HEAD(&handle_data->list);
- list_add(&handle_data->list, &msft->handle_map);
-
- monitor->state = ADV_MONITOR_STATE_OFFLOADED;
-
-unlock:
- if (status && monitor)
- hci_free_adv_monitor(hdev, monitor);
-
- /* If in restart/reregister sequence, keep registering. */
- if (msft->reregistering)
- reregister_monitor(hdev, msft->pending_add_handle + 1);
-
- hci_dev_unlock(hdev);
-
- if (!msft->reregistering)
- hci_add_adv_patterns_monitor_complete(hdev, status);
-}
-
-static void msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev,
- u8 status, u16 opcode,
- struct sk_buff *skb)
-{
- struct msft_cp_le_cancel_monitor_advertisement *cp;
- struct msft_rp_le_cancel_monitor_advertisement *rp;
- struct adv_monitor *monitor;
- struct msft_monitor_advertisement_handle_data *handle_data;
- struct msft_data *msft = hdev->msft_data;
- int err;
- bool pending;
-
- if (status)
- goto done;
-
- rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data;
- if (skb->len < sizeof(*rp)) {
- status = HCI_ERROR_UNSPECIFIED;
- goto done;
- }
-
- hci_dev_lock(hdev);
-
- cp = hci_sent_cmd_data(hdev, hdev->msft_opcode);
- handle_data = msft_find_handle_data(hdev, cp->handle, false);
-
- if (handle_data) {
- monitor = idr_find(&hdev->adv_monitors_idr,
- handle_data->mgmt_handle);
-
- if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED)
- monitor->state = ADV_MONITOR_STATE_REGISTERED;
-
- /* Do not free the monitor if it is being removed due to
- * suspend. It will be re-monitored on resume.
- */
- if (monitor && !msft->suspending)
- hci_free_adv_monitor(hdev, monitor);
-
- list_del(&handle_data->list);
- kfree(handle_data);
- }
-
- /* If in suspend/remove sequence, keep removing. */
- if (msft->suspending)
- remove_monitor_on_suspend(hdev,
- msft->pending_remove_handle + 1);
-
- /* If remove all monitors is required, we need to continue the process
- * here because the earlier it was paused when waiting for the
- * response from controller.
- */
- if (msft->pending_remove_handle == 0) {
- pending = hci_remove_all_adv_monitor(hdev, &err);
- if (pending) {
- hci_dev_unlock(hdev);
- return;
- }
-
- if (err)
- status = HCI_ERROR_UNSPECIFIED;
- }
-
- hci_dev_unlock(hdev);
-
-done:
- if (!msft->suspending)
- hci_remove_adv_monitor_complete(hdev, status);
-}
-
static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev,
u8 status, u16 opcode,
struct sk_buff *skb)
@@ -560,35 +664,6 @@ static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
-static bool msft_monitor_rssi_valid(struct adv_monitor *monitor)
-{
- struct adv_rssi_thresholds *r = &monitor->rssi;
-
- if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
- r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX ||
- r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN ||
- r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX)
- return false;
-
- /* High_threshold_timeout is not supported,
- * once high_threshold is reached, events are immediately reported.
- */
- if (r->high_threshold_timeout != 0)
- return false;
-
- if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX)
- return false;
-
- /* Sampling period from 0x00 to 0xFF are all allowed */
- return true;
-}
-
-static bool msft_monitor_pattern_valid(struct adv_monitor *monitor)
-{
- return msft_monitor_rssi_valid(monitor);
- /* No additional check needed for pattern-based monitor */
-}
-
/* This function requires the caller holds hdev->lock */
static int __msft_add_monitor_pattern(struct hci_dev *hdev,
struct adv_monitor *monitor)
@@ -656,7 +731,7 @@ int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor)
if (!msft)
return -EOPNOTSUPP;
- if (msft->reregistering || msft->suspending)
+ if (msft->resuming || msft->suspending)
return -EBUSY;
return __msft_add_monitor_pattern(hdev, monitor);
@@ -700,7 +775,7 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
if (!msft)
return -EOPNOTSUPP;
- if (msft->reregistering || msft->suspending)
+ if (msft->resuming || msft->suspending)
return -EBUSY;
return __msft_remove_monitor(hdev, monitor, handle);
diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h
index 59c6e081c789..afcaf7d3b1cb 100644
--- a/net/bluetooth/msft.h
+++ b/net/bluetooth/msft.h
@@ -17,15 +17,15 @@ void msft_register(struct hci_dev *hdev);
void msft_unregister(struct hci_dev *hdev);
void msft_do_open(struct hci_dev *hdev);
void msft_do_close(struct hci_dev *hdev);
-void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb);
+void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb);
__u64 msft_get_features(struct hci_dev *hdev);
int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor);
int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
u16 handle);
void msft_req_add_set_filter_enable(struct hci_request *req, bool enable);
int msft_set_filter_enable(struct hci_dev *hdev, bool enable);
-void msft_suspend(struct hci_dev *hdev);
-void msft_resume(struct hci_dev *hdev);
+int msft_suspend_sync(struct hci_dev *hdev);
+int msft_resume_sync(struct hci_dev *hdev);
bool msft_curve_validity(struct hci_dev *hdev);
#else
@@ -39,7 +39,8 @@ static inline void msft_register(struct hci_dev *hdev) {}
static inline void msft_unregister(struct hci_dev *hdev) {}
static inline void msft_do_open(struct hci_dev *hdev) {}
static inline void msft_do_close(struct hci_dev *hdev) {}
-static inline void msft_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb) {}
+static inline void msft_vendor_evt(struct hci_dev *hdev, void *data,
+ struct sk_buff *skb) {}
static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; }
static inline int msft_add_monitor_pattern(struct hci_dev *hdev,
struct adv_monitor *monitor)
@@ -61,8 +62,15 @@ static inline int msft_set_filter_enable(struct hci_dev *hdev, bool enable)
return -EOPNOTSUPP;
}
-static inline void msft_suspend(struct hci_dev *hdev) {}
-static inline void msft_resume(struct hci_dev *hdev) {}
+static inline int msft_suspend_sync(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int msft_resume_sync(struct hci_dev *hdev)
+{
+ return -EOPNOTSUPP;
+}
static inline bool msft_curve_validity(struct hci_dev *hdev)
{
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index c1183fef1f21..a52ad81596b7 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -274,7 +274,7 @@ static void destroy_nbp(struct net_bridge_port *p)
p->br = NULL;
p->dev = NULL;
- dev_put(dev);
+ dev_put_track(dev, &p->dev_tracker);
kobject_put(&p->kobj);
}
@@ -397,10 +397,10 @@ static int find_portno(struct net_bridge *br)
if (!inuse)
return -ENOMEM;
- set_bit(0, inuse); /* zero is reserved */
- list_for_each_entry(p, &br->port_list, list) {
- set_bit(p->port_no, inuse);
- }
+ __set_bit(0, inuse); /* zero is reserved */
+ list_for_each_entry(p, &br->port_list, list)
+ __set_bit(p->port_no, inuse);
+
index = find_first_zero_bit(inuse, BR_MAX_PORTS);
bitmap_free(inuse);
@@ -423,7 +423,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
return ERR_PTR(-ENOMEM);
p->br = br;
- dev_hold(dev);
+ dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
p->dev = dev;
p->path_cost = port_cost(dev);
p->priority = 0x8000 >> BR_PORT_BITS;
@@ -434,7 +434,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
br_stp_port_timer_init(p);
err = br_multicast_add_port(p);
if (err) {
- dev_put(dev);
+ dev_put_track(dev, &p->dev_tracker);
kfree(p);
p = ERR_PTR(err);
}
@@ -525,8 +525,8 @@ static void br_set_gso_limits(struct net_bridge *br)
gso_max_size = min(gso_max_size, p->dev->gso_max_size);
gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
}
- br->dev->gso_max_size = gso_max_size;
- br->dev->gso_max_segs = gso_max_segs;
+ netif_set_gso_max_size(br->dev, gso_max_size);
+ netif_set_gso_max_segs(br->dev, gso_max_segs);
}
/*
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 891cfcf45644..f213ed108361 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -8,6 +8,7 @@
*/
#include <linux/capability.h>
+#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/if_bridge.h>
#include <linux/netdevice.h>
@@ -102,37 +103,56 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
return ret;
}
+#define BR_UARGS_MAX 4
+static int br_dev_read_uargs(unsigned long *args, size_t nr_args,
+ void __user **argp, void __user *data)
+{
+ int ret;
+
+ if (nr_args < 2 || nr_args > BR_UARGS_MAX)
+ return -EINVAL;
+
+ if (in_compat_syscall()) {
+ unsigned int cargs[BR_UARGS_MAX];
+ int i;
+
+ ret = copy_from_user(cargs, data, nr_args * sizeof(*cargs));
+ if (ret)
+ goto fault;
+
+ for (i = 0; i < nr_args; ++i)
+ args[i] = cargs[i];
+
+ *argp = compat_ptr(args[1]);
+ } else {
+ ret = copy_from_user(args, data, nr_args * sizeof(*args));
+ if (ret)
+ goto fault;
+ *argp = (void __user *)args[1];
+ }
+
+ return 0;
+fault:
+ return -EFAULT;
+}
+
/*
* Legacy ioctl's through SIOCDEVPRIVATE
* This interface is deprecated because it was too difficult
* to do the translation for 32/64bit ioctl compatibility.
*/
-int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
+int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p = NULL;
unsigned long args[4];
void __user *argp;
- int ret = -EOPNOTSUPP;
-
- if (in_compat_syscall()) {
- unsigned int cargs[4];
-
- if (copy_from_user(cargs, data, sizeof(cargs)))
- return -EFAULT;
-
- args[0] = cargs[0];
- args[1] = cargs[1];
- args[2] = cargs[2];
- args[3] = cargs[3];
-
- argp = compat_ptr(args[1]);
- } else {
- if (copy_from_user(args, data, sizeof(args)))
- return -EFAULT;
+ int ret;
- argp = (void __user *)args[1];
- }
+ ret = br_dev_read_uargs(args, ARRAY_SIZE(args), &argp, data);
+ if (ret)
+ return ret;
switch (args[0]) {
case BRCTL_ADD_IF:
@@ -301,6 +321,9 @@ int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user
case BRCTL_GET_FDB_ENTRIES:
return get_fdb_entries(br, argp, args[2], args[3]);
+
+ default:
+ ret = -EOPNOTSUPP;
}
if (!ret) {
@@ -313,12 +336,15 @@ int br_dev_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user
return ret;
}
-static int old_deviceless(struct net *net, void __user *uarg)
+static int old_deviceless(struct net *net, void __user *data)
{
unsigned long args[3];
+ void __user *argp;
+ int ret;
- if (copy_from_user(args, uarg, sizeof(args)))
- return -EFAULT;
+ ret = br_dev_read_uargs(args, ARRAY_SIZE(args), &argp, data);
+ if (ret)
+ return ret;
switch (args[0]) {
case BRCTL_GET_VERSION:
@@ -337,7 +363,7 @@ static int old_deviceless(struct net *net, void __user *uarg)
args[2] = get_bridge_ifindices(net, indices, args[2]);
- ret = copy_to_user((void __user *)args[1], indices,
+ ret = copy_to_user(argp, indices,
array_size(args[2], sizeof(int)))
? -EFAULT : args[2];
@@ -353,7 +379,7 @@ static int old_deviceless(struct net *net, void __user *uarg)
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- if (copy_from_user(buf, (void __user *)args[1], IFNAMSIZ))
+ if (copy_from_user(buf, argp, IFNAMSIZ))
return -EFAULT;
buf[IFNAMSIZ-1] = 0;
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index b5af68c105a8..4fd882686b04 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -743,6 +743,9 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu)
mtu = nf_bridge->frag_max_size;
+ nf_bridge_update_protocol(skb);
+ nf_bridge_push_encap_header(skb);
+
if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) {
nf_bridge_info_free(skb);
return br_dev_queue_push_xmit(net, sk, skb);
@@ -760,8 +763,6 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
IPCB(skb)->frag_max_size = nf_bridge->frag_max_size;
- nf_bridge_update_protocol(skb);
-
data = this_cpu_ptr(&brnf_frag_data_storage);
if (skb_vlan_tag_present(skb)) {
@@ -789,8 +790,6 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff
IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size;
- nf_bridge_update_protocol(skb);
-
data = this_cpu_ptr(&brnf_frag_data_storage);
data->encap_size = nf_bridge_encap_header_len(skb);
data->size = ETH_HLEN + data->encap_size;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index e8c6ee322c71..2661dda1a92b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -346,6 +346,7 @@ struct net_bridge_mdb_entry {
struct net_bridge_port {
struct net_bridge *br;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
struct list_head list;
unsigned long flags;
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 7b0c19772111..3f7ca88c2aa3 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -36,15 +36,14 @@ static ssize_t store_bridge_parm(struct device *d,
struct net_bridge *br = to_bridge(d);
struct netlink_ext_ack extack = {0};
unsigned long val;
- char *endp;
int err;
if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
return -EPERM;
- val = simple_strtoul(buf, &endp, 0);
- if (endp == buf)
- return -EINVAL;
+ err = kstrtoul(buf, 0, &val);
+ if (err != 0)
+ return err;
if (!rtnl_trylock())
return restart_syscall();
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 49e105e0a447..84ba456a78cc 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -1063,7 +1063,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
if (br_vlan_delete(br, old_pvid))
br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
- set_bit(0, changed);
+ __set_bit(0, changed);
}
list_for_each_entry(p, &br->port_list, list) {
@@ -1085,7 +1085,7 @@ int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
if (nbp_vlan_delete(p, old_pvid))
br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
- set_bit(p->port_no, changed);
+ __set_bit(p->port_no, changed);
}
br->default_pvid = pvid;
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index 97805ec424c1..c1ef9cc89b78 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -100,6 +100,25 @@ static const struct nft_expr_ops nft_meta_bridge_get_ops = {
.dump = nft_meta_get_dump,
};
+static bool nft_meta_bridge_set_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr)
+{
+ int i;
+
+ for (i = 0; i < NFT_REG32_NUM; i++) {
+ if (!track->regs[i].selector)
+ continue;
+
+ if (track->regs[i].selector->ops != &nft_meta_bridge_get_ops)
+ continue;
+
+ track->regs[i].selector = NULL;
+ track->regs[i].bitwise = NULL;
+ }
+
+ return false;
+}
+
static const struct nft_expr_ops nft_meta_bridge_set_ops = {
.type = &nft_meta_bridge_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
@@ -107,6 +126,7 @@ static const struct nft_expr_ops nft_meta_bridge_set_ops = {
.init = nft_meta_set_init,
.destroy = nft_meta_set_destroy,
.dump = nft_meta_set_dump,
+ .reduce = nft_meta_bridge_set_reduce,
.validate = nft_meta_set_validate,
};
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index e12fd3cad619..2b8892d502f7 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+#include <linux/filter.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 40cd57ad0a0f..aee11c74d3c8 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -128,7 +128,6 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
if (pkt != NULL)
cfpkt_destroy(pkt);
layr->incomplete_frm = NULL;
- expectlen = 0;
spin_unlock(&layr->sync);
return -EPROTO;
}
diff --git a/net/can/isotp.c b/net/can/isotp.c
index df6968b28bf4..02cbcb2ecf0d 100644
--- a/net/can/isotp.c
+++ b/net/can/isotp.c
@@ -119,8 +119,8 @@ enum {
};
struct tpcon {
- int idx;
- int len;
+ unsigned int idx;
+ unsigned int len;
u32 state;
u8 bs;
u8 sn;
diff --git a/net/core/Makefile b/net/core/Makefile
index 4268846f2f47..a8e4f737692b 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -11,7 +11,9 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
obj-y += dev.o dev_addr_lists.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o \
sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \
- fib_notifier.o xdp.o flow_offload.o
+ fib_notifier.o xdp.o flow_offload.o gro.o
+
+obj-$(CONFIG_NETDEV_ADDR_LIST_TEST) += dev_addr_lists_test.o
obj-y += net-sysfs.o
obj-$(CONFIG_PAGE_POOL) += page_pool.o
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 68d2cbf8331a..d9c37fd10809 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -13,6 +13,7 @@
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
+#include <linux/rcupdate_trace.h>
DEFINE_BPF_STORAGE_CACHE(sk_cache);
@@ -22,7 +23,8 @@ bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
struct bpf_local_storage *sk_storage;
struct bpf_local_storage_map *smap;
- sk_storage = rcu_dereference(sk->sk_bpf_storage);
+ sk_storage =
+ rcu_dereference_check(sk->sk_bpf_storage, bpf_rcu_lock_held());
if (!sk_storage)
return NULL;
@@ -258,6 +260,7 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
{
struct bpf_local_storage_data *sdata;
+ WARN_ON_ONCE(!bpf_rcu_lock_held());
if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
return (unsigned long)NULL;
@@ -288,6 +291,7 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
+ WARN_ON_ONCE(!bpf_rcu_lock_held());
if (!sk || !sk_fullsock(sk))
return -EINVAL;
@@ -416,6 +420,7 @@ static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
void *, value, u64, flags)
{
+ WARN_ON_ONCE(!bpf_rcu_lock_held());
if (in_hardirq() || in_nmi())
return (unsigned long)NULL;
@@ -425,6 +430,7 @@ BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
struct sock *, sk)
{
+ WARN_ON_ONCE(!bpf_rcu_lock_held());
if (in_hardirq() || in_nmi())
return -EPERM;
@@ -929,7 +935,7 @@ static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
PTR_TO_BTF_ID_OR_NULL },
{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
- PTR_TO_RDWR_BUF_OR_NULL },
+ PTR_TO_BUF | PTR_MAYBE_NULL },
},
.seq_info = &iter_seq_info,
};
diff --git a/net/core/dev.c b/net/core/dev.c
index c4708e2487fb..84a0d9542fe9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -153,16 +153,10 @@
#include "net-sysfs.h"
-#define MAX_GRO_SKBS 8
-
-/* This should be increased if a protocol with a bigger head is added. */
-#define GRO_MAX_HEAD (MAX_HEADER + 128)
static DEFINE_SPINLOCK(ptype_lock);
-static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly; /* Taps */
-static struct list_head offload_base __read_mostly;
static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
@@ -371,12 +365,12 @@ static void list_netdevice(struct net_device *dev)
ASSERT_RTNL();
- write_lock_bh(&dev_base_lock);
+ write_lock(&dev_base_lock);
list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
netdev_name_node_add(net, dev->name_node);
hlist_add_head_rcu(&dev->index_hlist,
dev_index_hash(net, dev->ifindex));
- write_unlock_bh(&dev_base_lock);
+ write_unlock(&dev_base_lock);
dev_base_seq_inc(net);
}
@@ -389,11 +383,11 @@ static void unlist_netdevice(struct net_device *dev)
ASSERT_RTNL();
/* Unlink dev from the device chain */
- write_lock_bh(&dev_base_lock);
+ write_lock(&dev_base_lock);
list_del_rcu(&dev->dev_list);
netdev_name_node_del(dev->name_node);
hlist_del_rcu(&dev->index_hlist);
- write_unlock_bh(&dev_base_lock);
+ write_unlock(&dev_base_lock);
dev_base_seq_inc(dev_net(dev));
}
@@ -604,84 +598,6 @@ void dev_remove_pack(struct packet_type *pt)
EXPORT_SYMBOL(dev_remove_pack);
-/**
- * dev_add_offload - register offload handlers
- * @po: protocol offload declaration
- *
- * Add protocol offload handlers to the networking stack. The passed
- * &proto_offload is linked into kernel lists and may not be freed until
- * it has been removed from the kernel lists.
- *
- * This call does not sleep therefore it can not
- * guarantee all CPU's that are in middle of receiving packets
- * will see the new offload handlers (until the next received packet).
- */
-void dev_add_offload(struct packet_offload *po)
-{
- struct packet_offload *elem;
-
- spin_lock(&offload_lock);
- list_for_each_entry(elem, &offload_base, list) {
- if (po->priority < elem->priority)
- break;
- }
- list_add_rcu(&po->list, elem->list.prev);
- spin_unlock(&offload_lock);
-}
-EXPORT_SYMBOL(dev_add_offload);
-
-/**
- * __dev_remove_offload - remove offload handler
- * @po: packet offload declaration
- *
- * Remove a protocol offload handler that was previously added to the
- * kernel offload handlers by dev_add_offload(). The passed &offload_type
- * is removed from the kernel lists and can be freed or reused once this
- * function returns.
- *
- * The packet type might still be in use by receivers
- * and must not be freed until after all the CPU's have gone
- * through a quiescent state.
- */
-static void __dev_remove_offload(struct packet_offload *po)
-{
- struct list_head *head = &offload_base;
- struct packet_offload *po1;
-
- spin_lock(&offload_lock);
-
- list_for_each_entry(po1, head, list) {
- if (po == po1) {
- list_del_rcu(&po->list);
- goto out;
- }
- }
-
- pr_warn("dev_remove_offload: %p not found\n", po);
-out:
- spin_unlock(&offload_lock);
-}
-
-/**
- * dev_remove_offload - remove packet offload handler
- * @po: packet offload declaration
- *
- * Remove a packet offload handler that was previously added to the kernel
- * offload handlers by dev_add_offload(). The passed &offload_type is
- * removed from the kernel lists and can be freed or reused once this
- * function returns.
- *
- * This call sleeps to guarantee that no CPU is looking at the packet
- * type after return.
- */
-void dev_remove_offload(struct packet_offload *po)
-{
- __dev_remove_offload(po);
-
- synchronize_net();
-}
-EXPORT_SYMBOL(dev_remove_offload);
-
/*******************************************************************************
*
* Device Interface Subroutines
@@ -1272,15 +1188,15 @@ rollback:
netdev_adjacent_rename_links(dev, oldname);
- write_lock_bh(&dev_base_lock);
+ write_lock(&dev_base_lock);
netdev_name_node_del(dev->name_node);
- write_unlock_bh(&dev_base_lock);
+ write_unlock(&dev_base_lock);
synchronize_rcu();
- write_lock_bh(&dev_base_lock);
+ write_lock(&dev_base_lock);
netdev_name_node_add(net, dev->name_node);
- write_unlock_bh(&dev_base_lock);
+ write_unlock(&dev_base_lock);
ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
ret = notifier_to_errno(ret);
@@ -1461,6 +1377,7 @@ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
int ret;
ASSERT_RTNL();
+ dev_addr_check(dev);
if (!netif_device_present(dev)) {
/* may be detached because parent is runtime-suspended */
@@ -3315,40 +3232,6 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
return __vlan_get_protocol(skb, type, depth);
}
-/**
- * skb_mac_gso_segment - mac layer segmentation handler.
- * @skb: buffer to segment
- * @features: features for the output path (see dev->features)
- */
-struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
- netdev_features_t features)
-{
- struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
- struct packet_offload *ptype;
- int vlan_depth = skb->mac_len;
- __be16 type = skb_network_protocol(skb, &vlan_depth);
-
- if (unlikely(!type))
- return ERR_PTR(-EINVAL);
-
- __skb_pull(skb, vlan_depth);
-
- rcu_read_lock();
- list_for_each_entry_rcu(ptype, &offload_base, list) {
- if (ptype->type == type && ptype->callbacks.gso_segment) {
- segs = ptype->callbacks.gso_segment(skb, features);
- break;
- }
- }
- rcu_read_unlock();
-
- __skb_push(skb, skb->data - skb_mac_header(skb));
-
- return segs;
-}
-EXPORT_SYMBOL(skb_mac_gso_segment);
-
-
/* openvswitch calls this on rx path, so we need a different check.
*/
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
@@ -3513,7 +3396,7 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
{
u16 gso_segs = skb_shinfo(skb)->gso_segs;
- if (gso_segs > dev->gso_max_segs)
+ if (gso_segs > READ_ONCE(dev->gso_max_segs))
return features & ~NETIF_F_GSO_MASK;
if (!skb_shinfo(skb)->gso_type) {
@@ -3836,8 +3719,12 @@ no_lock_out:
* separate lock before trying to get qdisc main lock.
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
+ * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
+ * and then other tasks will only enqueue packets. The packets will be
+ * sent after the qdisc owner is scheduled again. To prevent this
+ * scenario, the task always serializes on the lock.
*/
- contended = qdisc_is_running(q);
+ contended = qdisc_is_running(q) || IS_ENABLED(CONFIG_PREEMPT_RT);
if (unlikely(contended))
spin_lock(&q->busylock);
@@ -4323,8 +4210,6 @@ int dev_weight_rx_bias __read_mostly = 1; /* bias for backlog weight */
int dev_weight_tx_bias __read_mostly = 1; /* bias for output_queue quota */
int dev_rx_weight __read_mostly = 64;
int dev_tx_weight __read_mostly = 64;
-/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
-int gro_normal_batch __read_mostly = 8;
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
@@ -4827,7 +4712,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
case XDP_PASS:
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(skb->dev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(skb->dev, xdp_prog, act);
@@ -5014,7 +4899,8 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
trace_consume_skb(skb);
else
- trace_kfree_skb(skb, net_tx_action);
+ trace_kfree_skb(skb, net_tx_action,
+ SKB_DROP_REASON_NOT_SPECIFIED);
if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
__kfree_skb(skb);
@@ -5667,7 +5553,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
return ret;
}
-static void netif_receive_skb_list_internal(struct list_head *head)
+void netif_receive_skb_list_internal(struct list_head *head)
{
struct sk_buff *skb, *next;
struct list_head sublist;
@@ -5845,550 +5731,6 @@ static void flush_all_backlogs(void)
cpus_read_unlock();
}
-/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
-static void gro_normal_list(struct napi_struct *napi)
-{
- if (!napi->rx_count)
- return;
- netif_receive_skb_list_internal(&napi->rx_list);
- INIT_LIST_HEAD(&napi->rx_list);
- napi->rx_count = 0;
-}
-
-/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
- * pass the whole batch up to the stack.
- */
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
-{
- list_add_tail(&skb->list, &napi->rx_list);
- napi->rx_count += segs;
- if (napi->rx_count >= gro_normal_batch)
- gro_normal_list(napi);
-}
-
-static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
-{
- struct packet_offload *ptype;
- __be16 type = skb->protocol;
- struct list_head *head = &offload_base;
- int err = -ENOENT;
-
- BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
-
- if (NAPI_GRO_CB(skb)->count == 1) {
- skb_shinfo(skb)->gso_size = 0;
- goto out;
- }
-
- rcu_read_lock();
- list_for_each_entry_rcu(ptype, head, list) {
- if (ptype->type != type || !ptype->callbacks.gro_complete)
- continue;
-
- err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
- ipv6_gro_complete, inet_gro_complete,
- skb, 0);
- break;
- }
- rcu_read_unlock();
-
- if (err) {
- WARN_ON(&ptype->list == head);
- kfree_skb(skb);
- return;
- }
-
-out:
- gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
-}
-
-static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
- bool flush_old)
-{
- struct list_head *head = &napi->gro_hash[index].list;
- struct sk_buff *skb, *p;
-
- list_for_each_entry_safe_reverse(skb, p, head, list) {
- if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
- return;
- skb_list_del_init(skb);
- napi_gro_complete(napi, skb);
- napi->gro_hash[index].count--;
- }
-
- if (!napi->gro_hash[index].count)
- __clear_bit(index, &napi->gro_bitmask);
-}
-
-/* napi->gro_hash[].list contains packets ordered by age.
- * youngest packets at the head of it.
- * Complete skbs in reverse order to reduce latencies.
- */
-void napi_gro_flush(struct napi_struct *napi, bool flush_old)
-{
- unsigned long bitmask = napi->gro_bitmask;
- unsigned int i, base = ~0U;
-
- while ((i = ffs(bitmask)) != 0) {
- bitmask >>= i;
- base += i;
- __napi_gro_flush_chain(napi, base, flush_old);
- }
-}
-EXPORT_SYMBOL(napi_gro_flush);
-
-static void gro_list_prepare(const struct list_head *head,
- const struct sk_buff *skb)
-{
- unsigned int maclen = skb->dev->hard_header_len;
- u32 hash = skb_get_hash_raw(skb);
- struct sk_buff *p;
-
- list_for_each_entry(p, head, list) {
- unsigned long diffs;
-
- NAPI_GRO_CB(p)->flush = 0;
-
- if (hash != skb_get_hash_raw(p)) {
- NAPI_GRO_CB(p)->same_flow = 0;
- continue;
- }
-
- diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
- diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
- if (skb_vlan_tag_present(p))
- diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
- diffs |= skb_metadata_differs(p, skb);
- if (maclen == ETH_HLEN)
- diffs |= compare_ether_header(skb_mac_header(p),
- skb_mac_header(skb));
- else if (!diffs)
- diffs = memcmp(skb_mac_header(p),
- skb_mac_header(skb),
- maclen);
-
- /* in most common scenarions 'slow_gro' is 0
- * otherwise we are already on some slower paths
- * either skip all the infrequent tests altogether or
- * avoid trying too hard to skip each of them individually
- */
- if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
-#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- struct tc_skb_ext *skb_ext;
- struct tc_skb_ext *p_ext;
-#endif
-
- diffs |= p->sk != skb->sk;
- diffs |= skb_metadata_dst_cmp(p, skb);
- diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
-
-#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- skb_ext = skb_ext_find(skb, TC_SKB_EXT);
- p_ext = skb_ext_find(p, TC_SKB_EXT);
-
- diffs |= (!!p_ext) ^ (!!skb_ext);
- if (!diffs && unlikely(skb_ext))
- diffs |= p_ext->chain ^ skb_ext->chain;
-#endif
- }
-
- NAPI_GRO_CB(p)->same_flow = !diffs;
- }
-}
-
-static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
-{
- const struct skb_shared_info *pinfo = skb_shinfo(skb);
- const skb_frag_t *frag0 = &pinfo->frags[0];
-
- NAPI_GRO_CB(skb)->data_offset = 0;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
-
- if (!skb_headlen(skb) && pinfo->nr_frags &&
- !PageHighMem(skb_frag_page(frag0)) &&
- (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
- NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
- NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
- skb_frag_size(frag0),
- skb->end - skb->tail);
- }
-}
-
-static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
-{
- struct skb_shared_info *pinfo = skb_shinfo(skb);
-
- BUG_ON(skb->end - skb->tail < grow);
-
- memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
-
- skb->data_len -= grow;
- skb->tail += grow;
-
- skb_frag_off_add(&pinfo->frags[0], grow);
- skb_frag_size_sub(&pinfo->frags[0], grow);
-
- if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
- skb_frag_unref(skb, 0);
- memmove(pinfo->frags, pinfo->frags + 1,
- --pinfo->nr_frags * sizeof(pinfo->frags[0]));
- }
-}
-
-static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
-{
- struct sk_buff *oldest;
-
- oldest = list_last_entry(head, struct sk_buff, list);
-
- /* We are called with head length >= MAX_GRO_SKBS, so this is
- * impossible.
- */
- if (WARN_ON_ONCE(!oldest))
- return;
-
- /* Do not adjust napi->gro_hash[].count, caller is adding a new
- * SKB to the chain.
- */
- skb_list_del_init(oldest);
- napi_gro_complete(napi, oldest);
-}
-
-static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
-{
- u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
- struct gro_list *gro_list = &napi->gro_hash[bucket];
- struct list_head *head = &offload_base;
- struct packet_offload *ptype;
- __be16 type = skb->protocol;
- struct sk_buff *pp = NULL;
- enum gro_result ret;
- int same_flow;
- int grow;
-
- if (netif_elide_gro(skb->dev))
- goto normal;
-
- gro_list_prepare(&gro_list->list, skb);
-
- rcu_read_lock();
- list_for_each_entry_rcu(ptype, head, list) {
- if (ptype->type != type || !ptype->callbacks.gro_receive)
- continue;
-
- skb_set_network_header(skb, skb_gro_offset(skb));
- skb_reset_mac_len(skb);
- NAPI_GRO_CB(skb)->same_flow = 0;
- NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
- NAPI_GRO_CB(skb)->free = 0;
- NAPI_GRO_CB(skb)->encap_mark = 0;
- NAPI_GRO_CB(skb)->recursion_counter = 0;
- NAPI_GRO_CB(skb)->is_fou = 0;
- NAPI_GRO_CB(skb)->is_atomic = 1;
- NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
-
- /* Setup for GRO checksum validation */
- switch (skb->ip_summed) {
- case CHECKSUM_COMPLETE:
- NAPI_GRO_CB(skb)->csum = skb->csum;
- NAPI_GRO_CB(skb)->csum_valid = 1;
- NAPI_GRO_CB(skb)->csum_cnt = 0;
- break;
- case CHECKSUM_UNNECESSARY:
- NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
- NAPI_GRO_CB(skb)->csum_valid = 0;
- break;
- default:
- NAPI_GRO_CB(skb)->csum_cnt = 0;
- NAPI_GRO_CB(skb)->csum_valid = 0;
- }
-
- pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
- ipv6_gro_receive, inet_gro_receive,
- &gro_list->list, skb);
- break;
- }
- rcu_read_unlock();
-
- if (&ptype->list == head)
- goto normal;
-
- if (PTR_ERR(pp) == -EINPROGRESS) {
- ret = GRO_CONSUMED;
- goto ok;
- }
-
- same_flow = NAPI_GRO_CB(skb)->same_flow;
- ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
-
- if (pp) {
- skb_list_del_init(pp);
- napi_gro_complete(napi, pp);
- gro_list->count--;
- }
-
- if (same_flow)
- goto ok;
-
- if (NAPI_GRO_CB(skb)->flush)
- goto normal;
-
- if (unlikely(gro_list->count >= MAX_GRO_SKBS))
- gro_flush_oldest(napi, &gro_list->list);
- else
- gro_list->count++;
-
- NAPI_GRO_CB(skb)->count = 1;
- NAPI_GRO_CB(skb)->age = jiffies;
- NAPI_GRO_CB(skb)->last = skb;
- skb_shinfo(skb)->gso_size = skb_gro_len(skb);
- list_add(&skb->list, &gro_list->list);
- ret = GRO_HELD;
-
-pull:
- grow = skb_gro_offset(skb) - skb_headlen(skb);
- if (grow > 0)
- gro_pull_from_frag0(skb, grow);
-ok:
- if (gro_list->count) {
- if (!test_bit(bucket, &napi->gro_bitmask))
- __set_bit(bucket, &napi->gro_bitmask);
- } else if (test_bit(bucket, &napi->gro_bitmask)) {
- __clear_bit(bucket, &napi->gro_bitmask);
- }
-
- return ret;
-
-normal:
- ret = GRO_NORMAL;
- goto pull;
-}
-
-struct packet_offload *gro_find_receive_by_type(__be16 type)
-{
- struct list_head *offload_head = &offload_base;
- struct packet_offload *ptype;
-
- list_for_each_entry_rcu(ptype, offload_head, list) {
- if (ptype->type != type || !ptype->callbacks.gro_receive)
- continue;
- return ptype;
- }
- return NULL;
-}
-EXPORT_SYMBOL(gro_find_receive_by_type);
-
-struct packet_offload *gro_find_complete_by_type(__be16 type)
-{
- struct list_head *offload_head = &offload_base;
- struct packet_offload *ptype;
-
- list_for_each_entry_rcu(ptype, offload_head, list) {
- if (ptype->type != type || !ptype->callbacks.gro_complete)
- continue;
- return ptype;
- }
- return NULL;
-}
-EXPORT_SYMBOL(gro_find_complete_by_type);
-
-static gro_result_t napi_skb_finish(struct napi_struct *napi,
- struct sk_buff *skb,
- gro_result_t ret)
-{
- switch (ret) {
- case GRO_NORMAL:
- gro_normal_one(napi, skb, 1);
- break;
-
- case GRO_MERGED_FREE:
- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
- napi_skb_free_stolen_head(skb);
- else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
- __kfree_skb(skb);
- else
- __kfree_skb_defer(skb);
- break;
-
- case GRO_HELD:
- case GRO_MERGED:
- case GRO_CONSUMED:
- break;
- }
-
- return ret;
-}
-
-gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
-{
- gro_result_t ret;
-
- skb_mark_napi_id(skb, napi);
- trace_napi_gro_receive_entry(skb);
-
- skb_gro_reset_offset(skb, 0);
-
- ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
- trace_napi_gro_receive_exit(ret);
-
- return ret;
-}
-EXPORT_SYMBOL(napi_gro_receive);
-
-static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
-{
- if (unlikely(skb->pfmemalloc)) {
- consume_skb(skb);
- return;
- }
- __skb_pull(skb, skb_headlen(skb));
- /* restore the reserve we had after netdev_alloc_skb_ip_align() */
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
- __vlan_hwaccel_clear_tag(skb);
- skb->dev = napi->dev;
- skb->skb_iif = 0;
-
- /* eth_type_trans() assumes pkt_type is PACKET_HOST */
- skb->pkt_type = PACKET_HOST;
-
- skb->encapsulation = 0;
- skb_shinfo(skb)->gso_type = 0;
- skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
- if (unlikely(skb->slow_gro)) {
- skb_orphan(skb);
- skb_ext_reset(skb);
- nf_reset_ct(skb);
- skb->slow_gro = 0;
- }
-
- napi->skb = skb;
-}
-
-struct sk_buff *napi_get_frags(struct napi_struct *napi)
-{
- struct sk_buff *skb = napi->skb;
-
- if (!skb) {
- skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
- if (skb) {
- napi->skb = skb;
- skb_mark_napi_id(skb, napi);
- }
- }
- return skb;
-}
-EXPORT_SYMBOL(napi_get_frags);
-
-static gro_result_t napi_frags_finish(struct napi_struct *napi,
- struct sk_buff *skb,
- gro_result_t ret)
-{
- switch (ret) {
- case GRO_NORMAL:
- case GRO_HELD:
- __skb_push(skb, ETH_HLEN);
- skb->protocol = eth_type_trans(skb, skb->dev);
- if (ret == GRO_NORMAL)
- gro_normal_one(napi, skb, 1);
- break;
-
- case GRO_MERGED_FREE:
- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
- napi_skb_free_stolen_head(skb);
- else
- napi_reuse_skb(napi, skb);
- break;
-
- case GRO_MERGED:
- case GRO_CONSUMED:
- break;
- }
-
- return ret;
-}
-
-/* Upper GRO stack assumes network header starts at gro_offset=0
- * Drivers could call both napi_gro_frags() and napi_gro_receive()
- * We copy ethernet header into skb->data to have a common layout.
- */
-static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
-{
- struct sk_buff *skb = napi->skb;
- const struct ethhdr *eth;
- unsigned int hlen = sizeof(*eth);
-
- napi->skb = NULL;
-
- skb_reset_mac_header(skb);
- skb_gro_reset_offset(skb, hlen);
-
- if (unlikely(skb_gro_header_hard(skb, hlen))) {
- eth = skb_gro_header_slow(skb, hlen, 0);
- if (unlikely(!eth)) {
- net_warn_ratelimited("%s: dropping impossible skb from %s\n",
- __func__, napi->dev->name);
- napi_reuse_skb(napi, skb);
- return NULL;
- }
- } else {
- eth = (const struct ethhdr *)skb->data;
- gro_pull_from_frag0(skb, hlen);
- NAPI_GRO_CB(skb)->frag0 += hlen;
- NAPI_GRO_CB(skb)->frag0_len -= hlen;
- }
- __skb_pull(skb, hlen);
-
- /*
- * This works because the only protocols we care about don't require
- * special handling.
- * We'll fix it up properly in napi_frags_finish()
- */
- skb->protocol = eth->h_proto;
-
- return skb;
-}
-
-gro_result_t napi_gro_frags(struct napi_struct *napi)
-{
- gro_result_t ret;
- struct sk_buff *skb = napi_frags_skb(napi);
-
- trace_napi_gro_frags_entry(skb);
-
- ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
- trace_napi_gro_frags_exit(ret);
-
- return ret;
-}
-EXPORT_SYMBOL(napi_gro_frags);
-
-/* Compute the checksum from gro_offset and return the folded value
- * after adding in any pseudo checksum.
- */
-__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
-{
- __wsum wsum;
- __sum16 sum;
-
- wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
-
- /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
- sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
- /* See comments in __skb_checksum_complete(). */
- if (likely(!sum)) {
- if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
- !skb->csum_complete_sw)
- netdev_rx_csum_fault(skb->dev, skb);
- }
-
- NAPI_GRO_CB(skb)->csum = wsum;
- NAPI_GRO_CB(skb)->csum_valid = 1;
-
- return sum;
-}
-EXPORT_SYMBOL(__skb_gro_checksum_complete);
-
static void net_rps_send_ipi(struct softnet_data *remsd)
{
#ifdef CONFIG_RPS
@@ -7200,6 +6542,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
struct netdev_adjacent {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
/* upper master flag, there can only be one master device per list */
bool master;
@@ -7964,7 +7307,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
adj->ref_nr = 1;
adj->private = private;
adj->ignore = false;
- dev_hold(adj_dev);
+ dev_hold_track(adj_dev, &adj->dev_tracker, GFP_KERNEL);
pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
dev->name, adj_dev->name, adj->ref_nr, adj_dev->name);
@@ -7993,8 +7336,8 @@ remove_symlinks:
if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
+ dev_put_track(adj_dev, &adj->dev_tracker);
kfree(adj);
- dev_put(adj_dev);
return ret;
}
@@ -8035,7 +7378,7 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
list_del_rcu(&adj->list);
pr_debug("adjacency: dev_put for %s, because link removed from %s to %s\n",
adj_dev->name, dev->name, adj_dev->name);
- dev_put(adj_dev);
+ dev_put_track(adj_dev, &adj->dev_tracker);
kfree_rcu(adj, rcu);
}
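
The hunks above convert plain dev_hold()/dev_put() pairs to the tracked variants so that a leaked reference can be attributed to its owner through the ref_tracker attached to the struct net_device. A minimal sketch of the pattern for code keeping a long-lived netdev pointer; the foo structure and functions are illustrative, not part of this patch:

	struct foo {
		struct net_device *dev;
		netdevice_tracker dev_tracker;	/* records where the reference was taken */
	};

	static void foo_bind(struct foo *f, struct net_device *dev)
	{
		f->dev = dev;
		/* tracked hold: logged in dev->refcnt_tracker */
		dev_hold_track(dev, &f->dev_tracker, GFP_KERNEL);
	}

	static void foo_unbind(struct foo *f)
	{
		/* must release through the same tracker that took the hold */
		dev_put_track(f->dev, &f->dev_tracker);
		f->dev = NULL;
	}
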
@@ -9224,35 +8567,17 @@ bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b)
EXPORT_SYMBOL(netdev_port_same_parent_id);
/**
- * dev_change_proto_down - update protocol port state information
+ * dev_change_proto_down - set carrier according to proto_down.
+ *
* @dev: device
* @proto_down: new value
- *
- * This info can be used by switch drivers to set the phys state of the
- * port.
*/
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
- const struct net_device_ops *ops = dev->netdev_ops;
-
- if (!ops->ndo_change_proto_down)
+ if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN))
return -EOPNOTSUPP;
if (!netif_device_present(dev))
return -ENODEV;
- return ops->ndo_change_proto_down(dev, proto_down);
-}
-EXPORT_SYMBOL(dev_change_proto_down);
-
-/**
- * dev_change_proto_down_generic - generic implementation for
- * ndo_change_proto_down that sets carrier according to
- * proto_down.
- *
- * @dev: device
- * @proto_down: new value
- */
-int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
-{
if (proto_down)
netif_carrier_off(dev);
else
@@ -9260,7 +8585,7 @@ int dev_change_proto_down_generic(struct net_device *dev, bool proto_down)
dev->proto_down = proto_down;
return 0;
}
-EXPORT_SYMBOL(dev_change_proto_down_generic);
+EXPORT_SYMBOL(dev_change_proto_down);
/**
* dev_change_proto_down_reason - proto down reason
@@ -10545,6 +9870,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
netdev_unregister_timeout_secs * HZ)) {
pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
dev->name, refcnt);
+ ref_tracker_dir_print(&dev->refcnt_tracker, 10);
warning_time = jiffies;
}
}
@@ -10835,6 +10161,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev = PTR_ALIGN(p, NETDEV_ALIGN);
dev->padded = (char *)dev - (char *)p;
+ ref_tracker_dir_init(&dev->refcnt_tracker, 128);
#ifdef CONFIG_PCPU_DEV_REFCNT
dev->pcpu_refcnt = alloc_percpu(int);
if (!dev->pcpu_refcnt)
@@ -10854,6 +10181,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->gso_max_size = GSO_MAX_SIZE;
dev->gso_max_segs = GSO_MAX_SEGS;
+ dev->gro_max_size = GRO_MAX_SIZE;
dev->upper_level = 1;
dev->lower_level = 1;
#ifdef CONFIG_LOCKDEP
@@ -10951,6 +10279,7 @@ void free_netdev(struct net_device *dev)
list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
netif_napi_del(p);
+ ref_tracker_dir_exit(&dev->refcnt_tracker);
#ifdef CONFIG_PCPU_DEV_REFCNT
free_percpu(dev->pcpu_refcnt);
dev->pcpu_refcnt = NULL;
@@ -11643,8 +10972,6 @@ static int __init net_dev_init(void)
for (i = 0; i < PTYPE_HASH_SIZE; i++)
INIT_LIST_HEAD(&ptype_base[i]);
- INIT_LIST_HEAD(&offload_base);
-
if (register_pernet_subsys(&netdev_net_ops))
goto out;
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index f0cb38344126..bead38ca50bd 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -16,6 +16,35 @@
* General list handling functions
*/
+static int __hw_addr_insert(struct netdev_hw_addr_list *list,
+ struct netdev_hw_addr *new, int addr_len)
+{
+ struct rb_node **ins_point = &list->tree.rb_node, *parent = NULL;
+ struct netdev_hw_addr *ha;
+
+ while (*ins_point) {
+ int diff;
+
+ ha = rb_entry(*ins_point, struct netdev_hw_addr, node);
+ diff = memcmp(new->addr, ha->addr, addr_len);
+ if (diff == 0)
+ diff = memcmp(&new->type, &ha->type, sizeof(new->type));
+
+ parent = *ins_point;
+ if (diff < 0)
+ ins_point = &parent->rb_left;
+ else if (diff > 0)
+ ins_point = &parent->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node_rcu(&new->node, parent, ins_point);
+ rb_insert_color(&new->node, &list->tree);
+
+ return 0;
+}
+
static struct netdev_hw_addr*
__hw_addr_create(const unsigned char *addr, int addr_len,
unsigned char addr_type, bool global, bool sync)
@@ -50,11 +79,6 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
if (addr_len > MAX_ADDR_LEN)
return -EINVAL;
- ha = list_first_entry(&list->list, struct netdev_hw_addr, list);
- if (ha && !memcmp(addr, ha->addr, addr_len) &&
- (!addr_type || addr_type == ha->type))
- goto found_it;
-
while (*ins_point) {
int diff;
@@ -69,7 +93,6 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
} else if (diff > 0) {
ins_point = &parent->rb_right;
} else {
-found_it:
if (exclusive)
return -EEXIST;
if (global) {
@@ -94,16 +117,8 @@ found_it:
if (!ha)
return -ENOMEM;
- /* The first address in dev->dev_addrs is pointed to by dev->dev_addr
- * and mutated freely by device drivers and netdev ops, so if we insert
- * it into the tree we'll end up with an invalid rbtree.
- */
- if (list->count > 0) {
- rb_link_node(&ha->node, parent, ins_point);
- rb_insert_color(&ha->node, &list->tree);
- } else {
- RB_CLEAR_NODE(&ha->node);
- }
+ rb_link_node(&ha->node, parent, ins_point);
+ rb_insert_color(&ha->node, &list->tree);
list_add_tail_rcu(&ha->list, &list->list);
list->count++;
@@ -138,8 +153,7 @@ static int __hw_addr_del_entry(struct netdev_hw_addr_list *list,
if (--ha->refcount)
return 0;
- if (!RB_EMPTY_NODE(&ha->node))
- rb_erase(&ha->node, &list->tree);
+ rb_erase(&ha->node, &list->tree);
list_del_rcu(&ha->list);
kfree_rcu(ha, rcu_head);
@@ -151,18 +165,8 @@ static struct netdev_hw_addr *__hw_addr_lookup(struct netdev_hw_addr_list *list,
const unsigned char *addr, int addr_len,
unsigned char addr_type)
{
- struct netdev_hw_addr *ha;
struct rb_node *node;
- /* The first address isn't inserted into the tree because in the dev->dev_addrs
- * list it's the address pointed to by dev->dev_addr which is freely mutated
- * in place, so we need to check it separately.
- */
- ha = list_first_entry(&list->list, struct netdev_hw_addr, list);
- if (ha && !memcmp(addr, ha->addr, addr_len) &&
- (!addr_type || addr_type == ha->type))
- return ha;
-
node = list->tree.rb_node;
while (node) {
@@ -498,6 +502,21 @@ EXPORT_SYMBOL(__hw_addr_init);
* Device addresses handling functions
*/
+/* Check that netdev->dev_addr is not written to directly as this would
+ * break the rbtree layout. All changes should go thru dev_addr_set() and co.
+ * Remove this check in mid-2024.
+ */
+void dev_addr_check(struct net_device *dev)
+{
+ if (!memcmp(dev->dev_addr, dev->dev_addr_shadow, MAX_ADDR_LEN))
+ return;
+
+ netdev_warn(dev, "Current addr: %*ph\n", MAX_ADDR_LEN, dev->dev_addr);
+ netdev_warn(dev, "Expected addr: %*ph\n",
+ MAX_ADDR_LEN, dev->dev_addr_shadow);
+ netdev_WARN(dev, "Incorrect netdev->dev_addr\n");
+}
+
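
With dev_addr_check() comparing against the shadow copy, any driver that still memcpy()s into dev->dev_addr directly will trip the warning above. A hedged sketch of the expected driver pattern after this change; foo_hw_read_mac() is a made-up helper standing in for however the hardware address is obtained:

	static int foo_init_mac(struct net_device *netdev)
	{
		u8 addr[ETH_ALEN];

		foo_hw_read_mac(netdev, addr);		/* hypothetical */

		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;

		/* goes through the helpers, keeping dev_addr_shadow and the
		 * rbtree consistent instead of writing dev->dev_addr directly
		 */
		eth_hw_addr_set(netdev, addr);
		return 0;
	}
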
/**
* dev_addr_flush - Flush device address list
* @dev: device
@@ -509,11 +528,11 @@ EXPORT_SYMBOL(__hw_addr_init);
void dev_addr_flush(struct net_device *dev)
{
/* rtnl_mutex must be held here */
+ dev_addr_check(dev);
__hw_addr_flush(&dev->dev_addrs);
dev->dev_addr = NULL;
}
-EXPORT_SYMBOL(dev_addr_flush);
/**
* dev_addr_init - Init device address list
@@ -547,7 +566,21 @@ int dev_addr_init(struct net_device *dev)
}
return err;
}
-EXPORT_SYMBOL(dev_addr_init);
+
+void dev_addr_mod(struct net_device *dev, unsigned int offset,
+ const void *addr, size_t len)
+{
+ struct netdev_hw_addr *ha;
+
+ dev_addr_check(dev);
+
+ ha = container_of(dev->dev_addr, struct netdev_hw_addr, addr[0]);
+ rb_erase(&ha->node, &dev->dev_addrs.tree);
+ memcpy(&ha->addr[offset], addr, len);
+ memcpy(&dev->dev_addr_shadow[offset], addr, len);
+ WARN_ON(__hw_addr_insert(&dev->dev_addrs, ha, dev->addr_len));
+}
+EXPORT_SYMBOL(dev_addr_mod);
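
dev_addr_mod() is the low-level primitive the helpers build on: it unlinks the primary address from the rbtree, patches the requested bytes in place and in the shadow copy, then re-inserts the node. A short sketch of a partial update under RTNL; the per-port byte is an illustrative use, not taken from this patch:

	static void foo_set_port_id(struct net_device *dev, u8 port_id)
	{
		ASSERT_RTNL();
		/* rewrite only the last byte of the MAC address */
		dev_addr_mod(dev, dev->addr_len - 1, &port_id, 1);
	}
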
/**
* dev_addr_add - Add a device address
diff --git a/net/core/dev_addr_lists_test.c b/net/core/dev_addr_lists_test.c
new file mode 100644
index 000000000000..049cfbc58aa9
--- /dev/null
+++ b/net/core/dev_addr_lists_test.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <kunit/test.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+static const struct net_device_ops dummy_netdev_ops = {
+};
+
+struct dev_addr_test_priv {
+ u32 addr_seen;
+};
+
+static int dev_addr_test_sync(struct net_device *netdev, const unsigned char *a)
+{
+ struct dev_addr_test_priv *datp = netdev_priv(netdev);
+
+ if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN))
+ datp->addr_seen |= 1 << a[0];
+ return 0;
+}
+
+static int dev_addr_test_unsync(struct net_device *netdev,
+ const unsigned char *a)
+{
+ struct dev_addr_test_priv *datp = netdev_priv(netdev);
+
+ if (a[0] < 31 && !memchr_inv(a, a[0], ETH_ALEN))
+ datp->addr_seen &= ~(1 << a[0]);
+ return 0;
+}
+
+static int dev_addr_test_init(struct kunit *test)
+{
+ struct dev_addr_test_priv *datp;
+ struct net_device *netdev;
+ int err;
+
+ netdev = alloc_etherdev(sizeof(*datp));
+ KUNIT_ASSERT_TRUE(test, !!netdev);
+
+ test->priv = netdev;
+ netdev->netdev_ops = &dummy_netdev_ops;
+
+ err = register_netdev(netdev);
+ if (err) {
+ free_netdev(netdev);
+ KUNIT_FAIL(test, "Can't register netdev %d", err);
+ }
+
+ rtnl_lock();
+ return 0;
+}
+
+static void dev_addr_test_exit(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+
+ rtnl_unlock();
+ unregister_netdev(netdev);
+ free_netdev(netdev);
+}
+
+static void dev_addr_test_basic(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ u8 addr[ETH_ALEN];
+
+ KUNIT_EXPECT_TRUE(test, !!netdev->dev_addr);
+
+ memset(addr, 2, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+ KUNIT_EXPECT_EQ(test, 0, memcmp(netdev->dev_addr, addr, sizeof(addr)));
+
+ memset(addr, 3, sizeof(addr));
+ dev_addr_set(netdev, addr);
+ KUNIT_EXPECT_EQ(test, 0, memcmp(netdev->dev_addr, addr, sizeof(addr)));
+}
+
+static void dev_addr_test_sync_one(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ struct dev_addr_test_priv *datp;
+ u8 addr[ETH_ALEN];
+
+ datp = netdev_priv(netdev);
+
+ memset(addr, 1, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ KUNIT_EXPECT_EQ(test, 2, datp->addr_seen);
+
+ memset(addr, 2, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+
+ datp->addr_seen = 0;
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ /* It's not going to sync anything because the main address is
+ * considered synced and we overwrite in place.
+ */
+ KUNIT_EXPECT_EQ(test, 0, datp->addr_seen);
+}
+
+static void dev_addr_test_add_del(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ struct dev_addr_test_priv *datp;
+ u8 addr[ETH_ALEN];
+ int i;
+
+ datp = netdev_priv(netdev);
+
+ for (i = 1; i < 4; i++) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ }
+ /* Add 3 again */
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ KUNIT_EXPECT_EQ(test, 0xf, datp->addr_seen);
+
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ KUNIT_EXPECT_EQ(test, 0xf, datp->addr_seen);
+
+ for (i = 1; i < 4; i++) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ }
+
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ KUNIT_EXPECT_EQ(test, 1, datp->addr_seen);
+}
+
+static void dev_addr_test_del_main(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ u8 addr[ETH_ALEN];
+
+ memset(addr, 1, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+
+ KUNIT_EXPECT_EQ(test, -ENOENT, dev_addr_del(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_del(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ KUNIT_EXPECT_EQ(test, -ENOENT, dev_addr_del(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+}
+
+static void dev_addr_test_add_set(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ struct dev_addr_test_priv *datp;
+ u8 addr[ETH_ALEN];
+ int i;
+
+ datp = netdev_priv(netdev);
+
+ /* There is no external API like dev_addr_add_excl(),
+ * so shuffle the tree a little bit and exploit aliasing.
+ */
+ for (i = 1; i < 16; i++) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ }
+
+ memset(addr, i, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+ KUNIT_EXPECT_EQ(test, 0, dev_addr_add(netdev, addr,
+ NETDEV_HW_ADDR_T_LAN));
+ memset(addr, 0, sizeof(addr));
+ eth_hw_addr_set(netdev, addr);
+
+ __hw_addr_sync_dev(&netdev->dev_addrs, netdev, dev_addr_test_sync,
+ dev_addr_test_unsync);
+ KUNIT_EXPECT_EQ(test, 0xffff, datp->addr_seen);
+}
+
+static void dev_addr_test_add_excl(struct kunit *test)
+{
+ struct net_device *netdev = test->priv;
+ u8 addr[ETH_ALEN];
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, 0, dev_uc_add_excl(netdev, addr));
+ }
+ KUNIT_EXPECT_EQ(test, -EEXIST, dev_uc_add_excl(netdev, addr));
+
+ for (i = 0; i < 10; i += 2) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, 0, dev_uc_del(netdev, addr));
+ }
+ for (i = 1; i < 10; i += 2) {
+ memset(addr, i, sizeof(addr));
+ KUNIT_EXPECT_EQ(test, -EEXIST, dev_uc_add_excl(netdev, addr));
+ }
+}
+
+static struct kunit_case dev_addr_test_cases[] = {
+ KUNIT_CASE(dev_addr_test_basic),
+ KUNIT_CASE(dev_addr_test_sync_one),
+ KUNIT_CASE(dev_addr_test_add_del),
+ KUNIT_CASE(dev_addr_test_del_main),
+ KUNIT_CASE(dev_addr_test_add_set),
+ KUNIT_CASE(dev_addr_test_add_excl),
+ {}
+};
+
+static struct kunit_suite dev_addr_test_suite = {
+ .name = "dev-addr-list-test",
+ .test_cases = dev_addr_test_cases,
+ .init = dev_addr_test_init,
+ .exit = dev_addr_test_exit,
+};
+kunit_test_suite(dev_addr_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index cbab5fec64b1..1b807d119da5 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -192,7 +192,7 @@ static int net_hwtstamp_validate(struct ifreq *ifr)
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
- if (cfg.flags) /* reserved for future extensions */
+ if (cfg.flags & ~HWTSTAMP_FLAG_MASK)
return -EINVAL;
tx_type = cfg.tx_type;
@@ -313,6 +313,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data,
int err;
struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
const struct net_device_ops *ops;
+ netdevice_tracker dev_tracker;
if (!dev)
return -ENODEV;
@@ -381,10 +382,10 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, void __user *data,
return -ENODEV;
if (!netif_is_bridge_master(dev))
return -EOPNOTSUPP;
- dev_hold(dev);
+ dev_hold_track(dev, &dev_tracker, GFP_KERNEL);
rtnl_unlock();
err = br_ioctl_call(net, netdev_priv(dev), cmd, ifr, NULL);
- dev_put(dev);
+ dev_put_track(dev, &dev_tracker);
rtnl_lock();
return err;
diff --git a/net/core/devlink.c b/net/core/devlink.c
index c06c9ba6e8c5..fcd9f6d85cf1 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -7,6 +7,7 @@
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
*/
+#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -69,6 +70,35 @@ struct devlink {
char priv[] __aligned(NETDEV_ALIGN);
};
+/**
+ * struct devlink_resource - devlink resource
+ * @name: name of the resource
+ * @id: id, per devlink instance
+ * @size: size of the resource
+ * @size_new: updated size of the resource, reload is needed
+ * @size_valid: set if the total size of the resource, including its
+ * children, is valid
+ * @parent: parent resource
+ * @size_params: size parameters
+ * @list: parent list
+ * @resource_list: list of child resources
+ * @occ_get: occupancy getter callback
+ * @occ_get_priv: occupancy getter callback priv
+ */
+struct devlink_resource {
+ const char *name;
+ u64 id;
+ u64 size;
+ u64 size_new;
+ bool size_valid;
+ struct devlink_resource *parent;
+ struct devlink_resource_size_params size_params;
+ struct list_head list;
+ struct list_head resource_list;
+ devlink_resource_occ_get_t *occ_get;
+ void *occ_get_priv;
+};
+
void *devlink_priv(struct devlink *devlink)
{
return &devlink->priv;
@@ -4432,6 +4462,21 @@ static const struct devlink_param devlink_param_generic[] = {
.name = DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME,
.type = DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE,
},
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ .name = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME,
+ .type = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE,
+ },
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ .name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME,
+ .type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE,
+ },
};
static int devlink_param_generic_verify(const struct devlink_param *param)
@@ -8840,8 +8885,6 @@ static const struct genl_small_ops devlink_nl_ops[] = {
GENL_DONT_VALIDATE_DUMP_STRICT,
.dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
- DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR,
@@ -9905,34 +9948,38 @@ out:
}
EXPORT_SYMBOL_GPL(devlink_resource_register);
+static void devlink_resource_unregister(struct devlink *devlink,
+ struct devlink_resource *resource)
+{
+ struct devlink_resource *tmp, *child_resource;
+
+ list_for_each_entry_safe(child_resource, tmp, &resource->resource_list,
+ list) {
+ devlink_resource_unregister(devlink, child_resource);
+ list_del(&child_resource->list);
+ kfree(child_resource);
+ }
+}
+
/**
* devlink_resources_unregister - free all resources
*
* @devlink: devlink
- * @resource: resource
*/
-void devlink_resources_unregister(struct devlink *devlink,
- struct devlink_resource *resource)
+void devlink_resources_unregister(struct devlink *devlink)
{
struct devlink_resource *tmp, *child_resource;
- struct list_head *resource_list;
- if (resource)
- resource_list = &resource->resource_list;
- else
- resource_list = &devlink->resource_list;
-
- if (!resource)
- mutex_lock(&devlink->lock);
+ mutex_lock(&devlink->lock);
- list_for_each_entry_safe(child_resource, tmp, resource_list, list) {
- devlink_resources_unregister(devlink, child_resource);
+ list_for_each_entry_safe(child_resource, tmp, &devlink->resource_list,
+ list) {
+ devlink_resource_unregister(devlink, child_resource);
list_del(&child_resource->list);
kfree(child_resource);
}
- if (!resource)
- mutex_unlock(&devlink->lock);
+ mutex_unlock(&devlink->lock);
}
EXPORT_SYMBOL_GPL(devlink_resources_unregister);
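
With the recursion moved into the static devlink_resource_unregister() helper, the exported function now always frees the whole list under devlink->lock and the resource argument is gone. A hedged sketch of the updated driver teardown call site (the surrounding function is illustrative):

	static void foo_devlink_fini(struct devlink *devlink)
	{
		/* previously: devlink_resources_unregister(devlink, NULL); */
		devlink_resources_unregister(devlink);
		devlink_unregister(devlink);
	}
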
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 49442cae6f69..7b288a121a41 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -110,7 +110,8 @@ static u32 net_dm_queue_len = 1000;
struct net_dm_alert_ops {
void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
- void *location);
+ void *location,
+ enum skb_drop_reason reason);
void (*napi_poll_probe)(void *ignore, struct napi_struct *napi,
int work, int budget);
void (*work_item_func)(struct work_struct *work);
@@ -262,7 +263,9 @@ out:
spin_unlock_irqrestore(&data->lock, flags);
}
-static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
+static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb,
+ void *location,
+ enum skb_drop_reason reason)
{
trace_drop_common(skb, location);
}
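
Probes attached to the kfree_skb tracepoint gain a drop-reason argument with this series, so any external probe has to adopt the new prototype just as drop_monitor does here. A minimal sketch of such a probe; registration is shown in a comment and error handling is omitted:

	static void my_kfree_skb_probe(void *ignore, struct sk_buff *skb,
				       void *location,
				       enum skb_drop_reason reason)
	{
		if (reason != SKB_DROP_REASON_NOT_SPECIFIED)
			pr_debug("skb dropped at %pS, reason %d\n",
				 location, reason);
	}

	/* e.g. from module init:
	 *	register_trace_kfree_skb(my_kfree_skb_probe, NULL);
	 */
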
@@ -490,7 +493,8 @@ static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
struct sk_buff *skb,
- void *location)
+ void *location,
+ enum skb_drop_reason reason)
{
ktime_t tstamp = ktime_get_real();
struct per_cpu_dm_data *data;
@@ -850,7 +854,7 @@ net_dm_hw_metadata_copy(const struct devlink_trap_metadata *metadata)
}
hw_metadata->input_dev = metadata->input_dev;
- dev_hold(hw_metadata->input_dev);
+ dev_hold_track(hw_metadata->input_dev, &hw_metadata->dev_tracker, GFP_ATOMIC);
return hw_metadata;
@@ -864,9 +868,9 @@ free_hw_metadata:
}
static void
-net_dm_hw_metadata_free(const struct devlink_trap_metadata *hw_metadata)
+net_dm_hw_metadata_free(struct devlink_trap_metadata *hw_metadata)
{
- dev_put(hw_metadata->input_dev);
+ dev_put_track(hw_metadata->input_dev, &hw_metadata->dev_tracker);
kfree(hw_metadata->fa_cookie);
kfree(hw_metadata->trap_name);
kfree(hw_metadata->trap_group_name);
diff --git a/net/core/dst.c b/net/core/dst.c
index 497ef9b3fc6a..d16c2c9bfebd 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -49,7 +49,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
unsigned short flags)
{
dst->dev = dev;
- dev_hold(dev);
+ dev_hold_track(dev, &dst->dev_tracker, GFP_ATOMIC);
dst->ops = ops;
dst_init_metrics(dst, dst_default_metrics.metrics, true);
dst->expires = 0UL;
@@ -117,7 +117,7 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
if (dst->ops->destroy)
dst->ops->destroy(dst);
- dev_put(dst->dev);
+ dev_put_track(dst->dev, &dst->dev_tracker);
lwtstate_put(dst->lwtstate);
@@ -159,8 +159,8 @@ void dst_dev_put(struct dst_entry *dst)
dst->input = dst_discard;
dst->output = dst_discard_out;
dst->dev = blackhole_netdev;
- dev_hold(dst->dev);
- dev_put(dev);
+ dev_replace_track(dev, blackhole_netdev, &dst->dev_tracker,
+ GFP_ATOMIC);
}
EXPORT_SYMBOL(dst_dev_put);
diff --git a/net/core/failover.c b/net/core/failover.c
index b5cd3c727285..dcaa92a85ea2 100644
--- a/net/core/failover.c
+++ b/net/core/failover.c
@@ -252,7 +252,7 @@ struct failover *failover_register(struct net_device *dev,
return ERR_PTR(-ENOMEM);
rcu_assign_pointer(failover->ops, ops);
- dev_hold(dev);
+ dev_hold_track(dev, &failover->dev_tracker, GFP_KERNEL);
dev->priv_flags |= IFF_FAILOVER;
rcu_assign_pointer(failover->failover_dev, dev);
@@ -285,7 +285,7 @@ void failover_unregister(struct failover *failover)
failover_dev->name);
failover_dev->priv_flags &= ~IFF_FAILOVER;
- dev_put(failover_dev);
+ dev_put_track(failover_dev, &failover->dev_tracker);
spin_lock(&failover_lock);
list_del(&failover->list);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1bb567a3b329..75282222e0b4 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -750,6 +750,27 @@ static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
return 0;
}
+static const struct nla_policy fib_rule_policy[FRA_MAX + 1] = {
+ [FRA_UNSPEC] = { .strict_start_type = FRA_DPORT_RANGE + 1 },
+ [FRA_IIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
+ [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
+ [FRA_PRIORITY] = { .type = NLA_U32 },
+ [FRA_FWMARK] = { .type = NLA_U32 },
+ [FRA_FLOW] = { .type = NLA_U32 },
+ [FRA_TUN_ID] = { .type = NLA_U64 },
+ [FRA_FWMASK] = { .type = NLA_U32 },
+ [FRA_TABLE] = { .type = NLA_U32 },
+ [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 },
+ [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 },
+ [FRA_GOTO] = { .type = NLA_U32 },
+ [FRA_L3MDEV] = { .type = NLA_U8 },
+ [FRA_UID_RANGE] = { .len = sizeof(struct fib_rule_uid_range) },
+ [FRA_PROTOCOL] = { .type = NLA_U8 },
+ [FRA_IP_PROTO] = { .type = NLA_U8 },
+ [FRA_SPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) },
+ [FRA_DPORT_RANGE] = { .len = sizeof(struct fib_rule_port_range) }
+};
+
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -774,7 +795,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
}
err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
- ops->policy, extack);
+ fib_rule_policy, extack);
if (err < 0) {
NL_SET_ERR_MSG(extack, "Error parsing msg");
goto errout;
@@ -882,7 +903,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
}
err = nlmsg_parse_deprecated(nlh, sizeof(*frh), tb, FRA_MAX,
- ops->policy, extack);
+ fib_rule_policy, extack);
if (err < 0) {
NL_SET_ERR_MSG(extack, "Error parsing msg");
goto errout;
diff --git a/net/core/filter.c b/net/core/filter.c
index 6102f093d59a..4603b7cd3cd1 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -301,7 +301,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
break;
case SKF_AD_PKTTYPE:
- *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
+ *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET);
*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
@@ -323,7 +323,7 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
offsetof(struct sk_buff, vlan_tci));
break;
case SKF_AD_VLAN_TAG_PRESENT:
- *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
+ *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET);
if (PKT_VLAN_PRESENT_BIT)
*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
if (PKT_VLAN_PRESENT_BIT < 7)
@@ -1242,10 +1242,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
int err, new_len, old_len = fp->len;
bool seen_ld_abs = false;
- /* We are free to overwrite insns et al right here as it
- * won't be used at this point in time anymore internally
- * after the migration to the internal BPF instruction
- * representation.
+ /* We are free to overwrite insns et al right here as they won't be used
+ * internally anymore after the migration to the eBPF instruction
+ * representation.
*/
BUILD_BUG_ON(sizeof(struct sock_filter) !=
sizeof(struct bpf_insn));
@@ -1336,8 +1335,8 @@ static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
*/
bpf_jit_compile(fp);
- /* JIT compiler couldn't process this filter, so do the
- * internal BPF translation for the optimized interpreter.
+ /* JIT compiler couldn't process this filter, so do the eBPF translation
+ * for the optimized interpreter.
*/
if (!fp->jited)
fp = bpf_migrate_filter(fp);
@@ -1713,7 +1712,7 @@ static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE,
.arg5_type = ARG_ANYTHING,
};
@@ -2018,9 +2017,9 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
.gpl_only = false,
.pkt_access = true,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg1_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
- .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE_OR_ZERO,
.arg5_type = ARG_ANYTHING,
};
@@ -2541,7 +2540,7 @@ static const struct bpf_func_proto bpf_redirect_neigh_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
- .arg2_type = ARG_PTR_TO_MEM_OR_NULL,
+ .arg2_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
};
@@ -3958,10 +3957,35 @@ u32 xdp_master_redirect(struct xdp_buff *xdp)
}
EXPORT_SYMBOL_GPL(xdp_master_redirect);
-int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+static inline int __xdp_do_redirect_xsk(struct bpf_redirect_info *ri,
+ struct net_device *dev,
+ struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
+{
+ enum bpf_map_type map_type = ri->map_type;
+ void *fwd = ri->tgt_value;
+ u32 map_id = ri->map_id;
+ int err;
+
+ ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+ ri->map_type = BPF_MAP_TYPE_UNSPEC;
+
+ err = __xsk_map_redirect(fwd, xdp);
+ if (unlikely(err))
+ goto err;
+
+ _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
+ return 0;
+err:
+ _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
+ return err;
+}
+
+static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
+ struct net_device *dev,
+ struct xdp_frame *xdpf,
+ struct bpf_prog *xdp_prog)
{
- struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
enum bpf_map_type map_type = ri->map_type;
void *fwd = ri->tgt_value;
u32 map_id = ri->map_id;
@@ -3971,6 +3995,11 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
ri->map_type = BPF_MAP_TYPE_UNSPEC;
+ if (unlikely(!xdpf)) {
+ err = -EOVERFLOW;
+ goto err;
+ }
+
switch (map_type) {
case BPF_MAP_TYPE_DEVMAP:
fallthrough;
@@ -3978,17 +4007,14 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
map = READ_ONCE(ri->map);
if (unlikely(map)) {
WRITE_ONCE(ri->map, NULL);
- err = dev_map_enqueue_multi(xdp, dev, map,
+ err = dev_map_enqueue_multi(xdpf, dev, map,
ri->flags & BPF_F_EXCLUDE_INGRESS);
} else {
- err = dev_map_enqueue(fwd, xdp, dev);
+ err = dev_map_enqueue(fwd, xdpf, dev);
}
break;
case BPF_MAP_TYPE_CPUMAP:
- err = cpu_map_enqueue(fwd, xdp, dev);
- break;
- case BPF_MAP_TYPE_XSKMAP:
- err = __xsk_map_redirect(fwd, xdp);
+ err = cpu_map_enqueue(fwd, xdpf, dev);
break;
case BPF_MAP_TYPE_UNSPEC:
if (map_id == INT_MAX) {
@@ -3997,7 +4023,7 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
err = -EINVAL;
break;
}
- err = dev_xdp_enqueue(fwd, xdp, dev);
+ err = dev_xdp_enqueue(fwd, xdpf, dev);
break;
}
fallthrough;
@@ -4014,8 +4040,34 @@ err:
_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
return err;
}
+
+int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ enum bpf_map_type map_type = ri->map_type;
+
+ if (map_type == BPF_MAP_TYPE_XSKMAP)
+ return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
+
+ return __xdp_do_redirect_frame(ri, dev, xdp_convert_buff_to_frame(xdp),
+ xdp_prog);
+}
EXPORT_SYMBOL_GPL(xdp_do_redirect);
+int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp,
+ struct xdp_frame *xdpf, struct bpf_prog *xdp_prog)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ enum bpf_map_type map_type = ri->map_type;
+
+ if (map_type == BPF_MAP_TYPE_XSKMAP)
+ return __xdp_do_redirect_xsk(ri, dev, xdp, xdp_prog);
+
+ return __xdp_do_redirect_frame(ri, dev, xdpf, xdp_prog);
+}
+EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
+
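
xdp_do_redirect_frame() lets a driver that has already converted the buffer to an xdp_frame (for instance because it needs the frame for its own completion bookkeeping) avoid a second conversion inside xdp_do_redirect(). A hedged driver-side sketch; the function name is illustrative:

	static int foo_xdp_redirect(struct net_device *dev, struct xdp_buff *xdp,
				    struct bpf_prog *prog)
	{
		struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);

		if (unlikely(!xdpf))
			return -EOVERFLOW;

		/* xdpf can be reused afterwards; the redirect does not convert again */
		return xdp_do_redirect_frame(dev, xdp, xdpf, prog);
	}
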
static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb,
struct xdp_buff *xdp,
@@ -4174,7 +4226,7 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
@@ -4188,7 +4240,7 @@ const struct bpf_func_proto bpf_skb_output_proto = {
.arg1_btf_id = &bpf_skb_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
@@ -4371,7 +4423,7 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
};
@@ -4397,7 +4449,7 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
};
@@ -4567,7 +4619,7 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
@@ -4581,7 +4633,7 @@ const struct bpf_func_proto bpf_xdp_output_proto = {
.arg1_btf_id = &bpf_xdp_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
@@ -4742,12 +4794,14 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
switch (optname) {
case SO_RCVBUF:
val = min_t(u32, val, sysctl_rmem_max);
+ val = min_t(int, val, INT_MAX / 2);
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
WRITE_ONCE(sk->sk_rcvbuf,
max_t(int, val * 2, SOCK_MIN_RCVBUF));
break;
case SO_SNDBUF:
val = min_t(u32, val, sysctl_wmem_max);
+ val = min_t(int, val, INT_MAX / 2);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
WRITE_ONCE(sk->sk_sndbuf,
max_t(int, val * 2, SOCK_MIN_SNDBUF));
@@ -4968,6 +5022,12 @@ static int _bpf_getsockopt(struct sock *sk, int level, int optname,
goto err_clear;
switch (optname) {
+ case SO_RCVBUF:
+ *((int *)optval) = sk->sk_rcvbuf;
+ break;
+ case SO_SNDBUF:
+ *((int *)optval) = sk->sk_sndbuf;
+ break;
case SO_MARK:
*((int *)optval) = sk->sk_mark;
break;
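
SO_RCVBUF and SO_SNDBUF become readable through bpf_getsockopt() with this hunk. A small sockops-style sketch, assuming the usual libbpf build environment; the program itself is illustrative:

	SEC("sockops")
	int log_buf_sizes(struct bpf_sock_ops *skops)
	{
		int rcvbuf = 0;

		/* SO_RCVBUF can now be read back, not only written */
		if (!bpf_getsockopt(skops, SOL_SOCKET, SO_RCVBUF,
				    &rcvbuf, sizeof(rcvbuf)))
			bpf_printk("sk_rcvbuf=%d", rcvbuf);

		return 1;
	}
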
@@ -5067,7 +5127,7 @@ const struct bpf_func_proto bpf_sk_setsockopt_proto = {
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};
@@ -5101,7 +5161,7 @@ static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};
@@ -5135,7 +5195,7 @@ static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};
@@ -5310,7 +5370,7 @@ static const struct bpf_func_proto bpf_bind_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
};
@@ -5898,7 +5958,7 @@ static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE
};
@@ -5908,7 +5968,7 @@ static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE
};
@@ -5951,7 +6011,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE
};
@@ -6039,7 +6099,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg4_type = ARG_CONST_SIZE
};
@@ -6264,7 +6324,7 @@ static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
@@ -6283,7 +6343,7 @@ static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
@@ -6302,7 +6362,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
@@ -6339,7 +6399,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
@@ -6362,7 +6422,7 @@ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
@@ -6385,7 +6445,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
.pkt_access = true,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
@@ -6404,7 +6464,7 @@ static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
@@ -6423,7 +6483,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
@@ -6442,7 +6502,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_SOCKET_OR_NULL,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
.arg5_type = ARG_ANYTHING,
@@ -6755,9 +6815,9 @@ static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
.pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};
@@ -6824,9 +6884,9 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
.pkt_access = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
- .arg4_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg5_type = ARG_CONST_SIZE,
};
@@ -7055,7 +7115,7 @@ static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_ANYTHING,
};
@@ -8029,7 +8089,7 @@ static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
* (Fast-path, otherwise approximation that we might be
* a clone, do the rest in helper.)
*/
- *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
+ *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET);
*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
@@ -8181,13 +8241,13 @@ static bool xdp_is_valid_access(int off, int size,
return __is_valid_xdp_access(off, size);
}
-void bpf_warn_invalid_xdp_action(u32 act)
+void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act)
{
const u32 act_max = XDP_REDIRECT;
- WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
- act > act_max ? "Illegal" : "Driver unsupported",
- act);
+ pr_warn_once("%s XDP return value %u on prog %s (id %d) dev %s, expect packet loss!\n",
+ act > act_max ? "Illegal" : "Driver unsupported",
+ act, prog->aux->name, prog->aux->id, dev ? dev->name : "N/A");
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
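
Since the helper now wants the device and program for its message, every driver call site grows two arguments, mirroring the generic-XDP conversion earlier in this patch. A hedged sketch of a driver verdict handler after the update; the ring structure is illustrative:

	static void foo_handle_xdp_act(struct foo_rx_ring *ring,
				       struct bpf_prog *prog, u32 act)
	{
		switch (act) {
		case XDP_PASS:
		case XDP_TX:
		case XDP_REDIRECT:
			break;
		default:
			/* was: bpf_warn_invalid_xdp_action(act); */
			bpf_warn_invalid_xdp_action(ring->netdev, prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(ring->netdev, prog, act);
			break;
		}
	}
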
@@ -8617,7 +8677,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct __sk_buff, pkt_type):
*target_size = 1;
*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
- PKT_TYPE_OFFSET());
+ PKT_TYPE_OFFSET);
*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
@@ -8642,7 +8702,7 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct __sk_buff, vlan_present):
*target_size = 1;
*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
- PKT_VLAN_PRESENT_OFFSET());
+ PKT_VLAN_PRESENT_OFFSET);
if (PKT_VLAN_PRESENT_BIT)
*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
if (PKT_VLAN_PRESENT_BIT < 7)
@@ -10543,6 +10603,7 @@ static bool sk_lookup_is_valid_access(int off, int size,
case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]):
case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
case bpf_ctx_range(struct bpf_sk_lookup, local_port):
+ case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex):
bpf_ctx_record_field_size(info, sizeof(__u32));
return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32));
@@ -10632,6 +10693,12 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
bpf_target_off(struct bpf_sk_lookup_kern,
dport, 2, target_size));
break;
+
+ case offsetof(struct bpf_sk_lookup, ingress_ifindex):
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+ bpf_target_off(struct bpf_sk_lookup_kern,
+ ingress_ifindex, 4, target_size));
+ break;
}
return insn - insn_buf;
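On the program side the new field is just another read-only context member. A minimal sk_lookup sketch, purely illustrative (the ifindex value 3, the map and the program name are made up; bpf_map_lookup_elem(), bpf_sk_assign() and bpf_sk_release() are the existing helpers):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} target_sock SEC(".maps");

SEC("sk_lookup")
int steer_by_ifindex(struct bpf_sk_lookup *ctx)
{
	__u32 zero = 0;
	struct bpf_sock *sk;

	if (ctx->ingress_ifindex != 3)	/* only steer traffic arriving on ifindex 3 */
		return SK_PASS;

	sk = bpf_map_lookup_elem(&target_sock, &zero);
	if (!sk)
		return SK_PASS;

	bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return SK_PASS;
}

char _license[] SEC("license") = "GPL";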
@@ -10656,14 +10723,10 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
}
-#ifdef CONFIG_DEBUG_INFO_BTF
-BTF_ID_LIST_GLOBAL(btf_sock_ids)
+BTF_ID_LIST_GLOBAL(btf_sock_ids, MAX_BTF_SOCK_TYPE)
#define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type)
BTF_SOCK_TYPE_xxx
#undef BTF_SOCK_TYPE
-#else
-u32 btf_sock_ids[MAX_BTF_SOCK_TYPE];
-#endif
BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
{
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 1b094c481f1d..15833e1d6ea1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -5,6 +5,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
+#include <linux/filter.h>
#include <net/dsa.h>
#include <net/dst_metadata.h>
#include <net/ip.h>
@@ -1461,7 +1462,7 @@ out_bad:
}
EXPORT_SYMBOL(__skb_flow_dissect);
-static siphash_key_t hashrnd __read_mostly;
+static siphash_aligned_key_t hashrnd;
static __always_inline void __flow_hash_secret_init(void)
{
net_get_random_once(&hashrnd, sizeof(hashrnd));
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index 6beaea13564a..73f68d4625f3 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
@@ -27,6 +28,26 @@ struct flow_rule *flow_rule_alloc(unsigned int num_actions)
}
EXPORT_SYMBOL(flow_rule_alloc);
+struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
+{
+ struct flow_offload_action *fl_action;
+ int i;
+
+ fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
+ GFP_KERNEL);
+ if (!fl_action)
+ return NULL;
+
+ fl_action->action.num_entries = num_actions;
+ /* Pre-fill each action hw_stats with DONT_CARE.
+ * Caller can override this if it wants stats for a given action.
+ */
+ for (i = 0; i < num_actions; i++)
+ fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
+
+ return fl_action;
+}
+
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out) \
const struct flow_match *__m = &(__rule)->match; \
struct flow_dissector *__d = (__m)->dissector; \
@@ -397,6 +418,8 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
existing_qdiscs_register(cb, cb_priv);
mutex_unlock(&flow_indr_block_lock);
+ tcf_action_reoffload_cb(cb, cb_priv, true);
+
return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);
@@ -449,6 +472,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
mutex_unlock(&flow_indr_block_lock);
+ tcf_action_reoffload_cb(cb, cb_priv, false);
flow_block_indr_notify(&cleanup_list);
kfree(indr_dev);
}
@@ -549,19 +573,25 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
void (*cleanup)(struct flow_block_cb *block_cb))
{
struct flow_indr_dev *this;
+ u32 count = 0;
+ int err;
mutex_lock(&flow_indr_block_lock);
+ if (bo) {
+ if (bo->command == FLOW_BLOCK_BIND)
+ indir_dev_add(data, dev, sch, type, cleanup, bo);
+ else if (bo->command == FLOW_BLOCK_UNBIND)
+ indir_dev_remove(data);
+ }
- if (bo->command == FLOW_BLOCK_BIND)
- indir_dev_add(data, dev, sch, type, cleanup, bo);
- else if (bo->command == FLOW_BLOCK_UNBIND)
- indir_dev_remove(data);
-
- list_for_each_entry(this, &flow_block_indr_dev_list, list)
- this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
+ list_for_each_entry(this, &flow_block_indr_dev_list, list) {
+ err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
+ if (!err)
+ count++;
+ }
mutex_unlock(&flow_indr_block_lock);
- return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
+ return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);
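Tying this back to offload_action_alloc() above: it is the action-offload counterpart of flow_rule_alloc(), and callers in the tc action code are expected to fill in the pre-initialised entries before handing the structure to driver callbacks. A loose, hedged sketch (the surrounding function, the placeholder FLOW_ACTION_DROP id and the kfree() at the end are illustrative assumptions, not lifted from this patch):

static int my_offload_actions(unsigned int num_actions)
{
	struct flow_offload_action *fl_act;
	unsigned int i;

	fl_act = offload_action_alloc(num_actions);
	if (!fl_act)
		return -ENOMEM;

	/* every entry already carries FLOW_ACTION_HW_STATS_DONT_CARE;
	 * fill in the ids/parameters that are actually being offloaded
	 */
	for (i = 0; i < num_actions; i++)
		fl_act->action.entries[i].id = FLOW_ACTION_DROP;	/* placeholder */

	/* ... hand fl_act to the indirect block callbacks, then release it ... */
	kfree(fl_act);
	return 0;
}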
diff --git a/net/core/gro.c b/net/core/gro.c
new file mode 100644
index 000000000000..a11b286d1495
--- /dev/null
+++ b/net/core/gro.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <net/gro.h>
+#include <net/dst_metadata.h>
+#include <net/busy_poll.h>
+#include <trace/events/net.h>
+
+#define MAX_GRO_SKBS 8
+
+/* This should be increased if a protocol with a bigger head is added. */
+#define GRO_MAX_HEAD (MAX_HEADER + 128)
+
+static DEFINE_SPINLOCK(offload_lock);
+static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
+/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
+int gro_normal_batch __read_mostly = 8;
+
+/**
+ * dev_add_offload - register offload handlers
+ * @po: protocol offload declaration
+ *
+ * Add protocol offload handlers to the networking stack. The passed
+ * &proto_offload is linked into kernel lists and may not be freed until
+ * it has been removed from the kernel lists.
+ *
+ * This call does not sleep, therefore it cannot
+ * guarantee that all CPUs that are in the middle of receiving packets
+ * will see the new offload handlers (until the next received packet).
+ */
+void dev_add_offload(struct packet_offload *po)
+{
+ struct packet_offload *elem;
+
+ spin_lock(&offload_lock);
+ list_for_each_entry(elem, &offload_base, list) {
+ if (po->priority < elem->priority)
+ break;
+ }
+ list_add_rcu(&po->list, elem->list.prev);
+ spin_unlock(&offload_lock);
+}
+EXPORT_SYMBOL(dev_add_offload);
+
+/**
+ * __dev_remove_offload - remove offload handler
+ * @po: packet offload declaration
+ *
+ * Remove a protocol offload handler that was previously added to the
+ * kernel offload handlers by dev_add_offload(). The passed &offload_type
+ * is removed from the kernel lists and can be freed or reused once this
+ * function returns.
+ *
+ * The packet type might still be in use by receivers
+ * and must not be freed until after all the CPUs have gone
+ * through a quiescent state.
+ */
+static void __dev_remove_offload(struct packet_offload *po)
+{
+ struct list_head *head = &offload_base;
+ struct packet_offload *po1;
+
+ spin_lock(&offload_lock);
+
+ list_for_each_entry(po1, head, list) {
+ if (po == po1) {
+ list_del_rcu(&po->list);
+ goto out;
+ }
+ }
+
+ pr_warn("dev_remove_offload: %p not found\n", po);
+out:
+ spin_unlock(&offload_lock);
+}
+
+/**
+ * dev_remove_offload - remove packet offload handler
+ * @po: packet offload declaration
+ *
+ * Remove a packet offload handler that was previously added to the kernel
+ * offload handlers by dev_add_offload(). The passed &offload_type is
+ * removed from the kernel lists and can be freed or reused once this
+ * function returns.
+ *
+ * This call sleeps to guarantee that no CPU is looking at the packet
+ * type after return.
+ */
+void dev_remove_offload(struct packet_offload *po)
+{
+ __dev_remove_offload(po);
+
+ synchronize_net();
+}
+EXPORT_SYMBOL(dev_remove_offload);
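Both entry points keep their historical usage pattern: a protocol module registers one static struct packet_offload and removes it on exit. A hedged sketch (the my_* callbacks are assumed to be defined elsewhere, and reusing ETH_P_IP here is purely illustrative):

static struct packet_offload my_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),	/* illustrative only */
	.priority = 10,
	.callbacks = {
		.gso_segment  = my_gso_segment,
		.gro_receive  = my_gro_receive,
		.gro_complete = my_gro_complete,
	},
};

static int __init my_offload_init(void)
{
	dev_add_offload(&my_offload);
	return 0;
}

static void __exit my_offload_exit(void)
{
	dev_remove_offload(&my_offload);	/* may sleep, see above */
}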
+
+/**
+ * skb_mac_gso_segment - mac layer segmentation handler.
+ * @skb: buffer to segment
+ * @features: features for the output path (see dev->features)
+ */
+struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
+ netdev_features_t features)
+{
+ struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+ struct packet_offload *ptype;
+ int vlan_depth = skb->mac_len;
+ __be16 type = skb_network_protocol(skb, &vlan_depth);
+
+ if (unlikely(!type))
+ return ERR_PTR(-EINVAL);
+
+ __skb_pull(skb, vlan_depth);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, &offload_base, list) {
+ if (ptype->type == type && ptype->callbacks.gso_segment) {
+ segs = ptype->callbacks.gso_segment(skb, features);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ __skb_push(skb, skb->data - skb_mac_header(skb));
+
+ return segs;
+}
+EXPORT_SYMBOL(skb_mac_gso_segment);
+
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
+{
+ struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
+ unsigned int offset = skb_gro_offset(skb);
+ unsigned int headlen = skb_headlen(skb);
+ unsigned int len = skb_gro_len(skb);
+ unsigned int delta_truesize;
+ unsigned int gro_max_size;
+ unsigned int new_truesize;
+ struct sk_buff *lp;
+
+ /* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
+ gro_max_size = READ_ONCE(p->dev->gro_max_size);
+
+ if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
+ return -E2BIG;
+
+ lp = NAPI_GRO_CB(p)->last;
+ pinfo = skb_shinfo(lp);
+
+ if (headlen <= offset) {
+ skb_frag_t *frag;
+ skb_frag_t *frag2;
+ int i = skbinfo->nr_frags;
+ int nr_frags = pinfo->nr_frags + i;
+
+ if (nr_frags > MAX_SKB_FRAGS)
+ goto merge;
+
+ offset -= headlen;
+ pinfo->nr_frags = nr_frags;
+ skbinfo->nr_frags = 0;
+
+ frag = pinfo->frags + nr_frags;
+ frag2 = skbinfo->frags + i;
+ do {
+ *--frag = *--frag2;
+ } while (--i);
+
+ skb_frag_off_add(frag, offset);
+ skb_frag_size_sub(frag, offset);
+
+ /* all fragments truesize : remove (head size + sk_buff) */
+ new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
+ delta_truesize = skb->truesize - new_truesize;
+
+ skb->truesize = new_truesize;
+ skb->len -= skb->data_len;
+ skb->data_len = 0;
+
+ NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
+ goto done;
+ } else if (skb->head_frag) {
+ int nr_frags = pinfo->nr_frags;
+ skb_frag_t *frag = pinfo->frags + nr_frags;
+ struct page *page = virt_to_head_page(skb->head);
+ unsigned int first_size = headlen - offset;
+ unsigned int first_offset;
+
+ if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
+ goto merge;
+
+ first_offset = skb->data -
+ (unsigned char *)page_address(page) +
+ offset;
+
+ pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
+
+ __skb_frag_set_page(frag, page);
+ skb_frag_off_set(frag, first_offset);
+ skb_frag_size_set(frag, first_size);
+
+ memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
+ /* We don't need to clear skbinfo->nr_frags here */
+
+ new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
+ delta_truesize = skb->truesize - new_truesize;
+ skb->truesize = new_truesize;
+ NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
+ goto done;
+ }
+
+merge:
+ /* sk ownership - if any - is completely transferred to the aggregated packet */
+ skb->destructor = NULL;
+ delta_truesize = skb->truesize;
+ if (offset > headlen) {
+ unsigned int eat = offset - headlen;
+
+ skb_frag_off_add(&skbinfo->frags[0], eat);
+ skb_frag_size_sub(&skbinfo->frags[0], eat);
+ skb->data_len -= eat;
+ skb->len -= eat;
+ offset = headlen;
+ }
+
+ __skb_pull(skb, offset);
+
+ if (NAPI_GRO_CB(p)->last == p)
+ skb_shinfo(p)->frag_list = skb;
+ else
+ NAPI_GRO_CB(p)->last->next = skb;
+ NAPI_GRO_CB(p)->last = skb;
+ __skb_header_release(skb);
+ lp = p;
+
+done:
+ NAPI_GRO_CB(p)->count++;
+ p->data_len += len;
+ p->truesize += delta_truesize;
+ p->len += len;
+ if (lp != p) {
+ lp->data_len += len;
+ lp->truesize += delta_truesize;
+ lp->len += len;
+ }
+ NAPI_GRO_CB(skb)->same_flow = 1;
+ return 0;
+}
+
+
+static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
+{
+ struct packet_offload *ptype;
+ __be16 type = skb->protocol;
+ struct list_head *head = &offload_base;
+ int err = -ENOENT;
+
+ BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
+
+ if (NAPI_GRO_CB(skb)->count == 1) {
+ skb_shinfo(skb)->gso_size = 0;
+ goto out;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_complete)
+ continue;
+
+ err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
+ ipv6_gro_complete, inet_gro_complete,
+ skb, 0);
+ break;
+ }
+ rcu_read_unlock();
+
+ if (err) {
+ WARN_ON(&ptype->list == head);
+ kfree_skb(skb);
+ return;
+ }
+
+out:
+ gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
+}
+
+static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
+ bool flush_old)
+{
+ struct list_head *head = &napi->gro_hash[index].list;
+ struct sk_buff *skb, *p;
+
+ list_for_each_entry_safe_reverse(skb, p, head, list) {
+ if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
+ return;
+ skb_list_del_init(skb);
+ napi_gro_complete(napi, skb);
+ napi->gro_hash[index].count--;
+ }
+
+ if (!napi->gro_hash[index].count)
+ __clear_bit(index, &napi->gro_bitmask);
+}
+
+/* napi->gro_hash[].list contains packets ordered by age.
+ * The youngest packets are at the head of the list.
+ * Complete skbs in reverse order to reduce latencies.
+ */
+void napi_gro_flush(struct napi_struct *napi, bool flush_old)
+{
+ unsigned long bitmask = napi->gro_bitmask;
+ unsigned int i, base = ~0U;
+
+ while ((i = ffs(bitmask)) != 0) {
+ bitmask >>= i;
+ base += i;
+ __napi_gro_flush_chain(napi, base, flush_old);
+ }
+}
+EXPORT_SYMBOL(napi_gro_flush);
+
+static void gro_list_prepare(const struct list_head *head,
+ const struct sk_buff *skb)
+{
+ unsigned int maclen = skb->dev->hard_header_len;
+ u32 hash = skb_get_hash_raw(skb);
+ struct sk_buff *p;
+
+ list_for_each_entry(p, head, list) {
+ unsigned long diffs;
+
+ NAPI_GRO_CB(p)->flush = 0;
+
+ if (hash != skb_get_hash_raw(p)) {
+ NAPI_GRO_CB(p)->same_flow = 0;
+ continue;
+ }
+
+ diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+ diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
+ if (skb_vlan_tag_present(p))
+ diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
+ diffs |= skb_metadata_differs(p, skb);
+ if (maclen == ETH_HLEN)
+ diffs |= compare_ether_header(skb_mac_header(p),
+ skb_mac_header(skb));
+ else if (!diffs)
+ diffs = memcmp(skb_mac_header(p),
+ skb_mac_header(skb),
+ maclen);
+
+ /* In the most common scenarios 'slow_gro' is 0;
+ * otherwise we are already on some slower path, so
+ * either skip all the infrequent tests altogether or
+ * avoid trying too hard to skip each of them individually.
+ */
+ if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
+#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ struct tc_skb_ext *skb_ext;
+ struct tc_skb_ext *p_ext;
+#endif
+
+ diffs |= p->sk != skb->sk;
+ diffs |= skb_metadata_dst_cmp(p, skb);
+ diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
+
+#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ skb_ext = skb_ext_find(skb, TC_SKB_EXT);
+ p_ext = skb_ext_find(p, TC_SKB_EXT);
+
+ diffs |= (!!p_ext) ^ (!!skb_ext);
+ if (!diffs && unlikely(skb_ext))
+ diffs |= p_ext->chain ^ skb_ext->chain;
+#endif
+ }
+
+ NAPI_GRO_CB(p)->same_flow = !diffs;
+ }
+}
+
+static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
+{
+ const struct skb_shared_info *pinfo = skb_shinfo(skb);
+ const skb_frag_t *frag0 = &pinfo->frags[0];
+
+ NAPI_GRO_CB(skb)->data_offset = 0;
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+
+ if (!skb_headlen(skb) && pinfo->nr_frags &&
+ !PageHighMem(skb_frag_page(frag0)) &&
+ (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
+ NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
+ NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+ skb_frag_size(frag0),
+ skb->end - skb->tail);
+ }
+}
+
+static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
+{
+ struct skb_shared_info *pinfo = skb_shinfo(skb);
+
+ BUG_ON(skb->end - skb->tail < grow);
+
+ memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
+
+ skb->data_len -= grow;
+ skb->tail += grow;
+
+ skb_frag_off_add(&pinfo->frags[0], grow);
+ skb_frag_size_sub(&pinfo->frags[0], grow);
+
+ if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
+ skb_frag_unref(skb, 0);
+ memmove(pinfo->frags, pinfo->frags + 1,
+ --pinfo->nr_frags * sizeof(pinfo->frags[0]));
+ }
+}
+
+static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
+{
+ struct sk_buff *oldest;
+
+ oldest = list_last_entry(head, struct sk_buff, list);
+
+ /* We are called with a chain of at least MAX_GRO_SKBS entries,
+ * so the list cannot be empty.
+ */
+ if (WARN_ON_ONCE(!oldest))
+ return;
+
+ /* Do not adjust napi->gro_hash[].count, caller is adding a new
+ * SKB to the chain.
+ */
+ skb_list_del_init(oldest);
+ napi_gro_complete(napi, oldest);
+}
+
+static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
+ struct gro_list *gro_list = &napi->gro_hash[bucket];
+ struct list_head *head = &offload_base;
+ struct packet_offload *ptype;
+ __be16 type = skb->protocol;
+ struct sk_buff *pp = NULL;
+ enum gro_result ret;
+ int same_flow;
+ int grow;
+
+ if (netif_elide_gro(skb->dev))
+ goto normal;
+
+ gro_list_prepare(&gro_list->list, skb);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_receive)
+ continue;
+
+ skb_set_network_header(skb, skb_gro_offset(skb));
+ skb_reset_mac_len(skb);
+ NAPI_GRO_CB(skb)->same_flow = 0;
+ NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
+ NAPI_GRO_CB(skb)->free = 0;
+ NAPI_GRO_CB(skb)->encap_mark = 0;
+ NAPI_GRO_CB(skb)->recursion_counter = 0;
+ NAPI_GRO_CB(skb)->is_fou = 0;
+ NAPI_GRO_CB(skb)->is_atomic = 1;
+ NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
+
+ /* Setup for GRO checksum validation */
+ switch (skb->ip_summed) {
+ case CHECKSUM_COMPLETE:
+ NAPI_GRO_CB(skb)->csum = skb->csum;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+ NAPI_GRO_CB(skb)->csum_cnt = 0;
+ break;
+ case CHECKSUM_UNNECESSARY:
+ NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
+ NAPI_GRO_CB(skb)->csum_valid = 0;
+ break;
+ default:
+ NAPI_GRO_CB(skb)->csum_cnt = 0;
+ NAPI_GRO_CB(skb)->csum_valid = 0;
+ }
+
+ pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
+ ipv6_gro_receive, inet_gro_receive,
+ &gro_list->list, skb);
+ break;
+ }
+ rcu_read_unlock();
+
+ if (&ptype->list == head)
+ goto normal;
+
+ if (PTR_ERR(pp) == -EINPROGRESS) {
+ ret = GRO_CONSUMED;
+ goto ok;
+ }
+
+ same_flow = NAPI_GRO_CB(skb)->same_flow;
+ ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
+
+ if (pp) {
+ skb_list_del_init(pp);
+ napi_gro_complete(napi, pp);
+ gro_list->count--;
+ }
+
+ if (same_flow)
+ goto ok;
+
+ if (NAPI_GRO_CB(skb)->flush)
+ goto normal;
+
+ if (unlikely(gro_list->count >= MAX_GRO_SKBS))
+ gro_flush_oldest(napi, &gro_list->list);
+ else
+ gro_list->count++;
+
+ NAPI_GRO_CB(skb)->count = 1;
+ NAPI_GRO_CB(skb)->age = jiffies;
+ NAPI_GRO_CB(skb)->last = skb;
+ skb_shinfo(skb)->gso_size = skb_gro_len(skb);
+ list_add(&skb->list, &gro_list->list);
+ ret = GRO_HELD;
+
+pull:
+ grow = skb_gro_offset(skb) - skb_headlen(skb);
+ if (grow > 0)
+ gro_pull_from_frag0(skb, grow);
+ok:
+ if (gro_list->count) {
+ if (!test_bit(bucket, &napi->gro_bitmask))
+ __set_bit(bucket, &napi->gro_bitmask);
+ } else if (test_bit(bucket, &napi->gro_bitmask)) {
+ __clear_bit(bucket, &napi->gro_bitmask);
+ }
+
+ return ret;
+
+normal:
+ ret = GRO_NORMAL;
+ goto pull;
+}
+
+struct packet_offload *gro_find_receive_by_type(__be16 type)
+{
+ struct list_head *offload_head = &offload_base;
+ struct packet_offload *ptype;
+
+ list_for_each_entry_rcu(ptype, offload_head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_receive)
+ continue;
+ return ptype;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(gro_find_receive_by_type);
+
+struct packet_offload *gro_find_complete_by_type(__be16 type)
+{
+ struct list_head *offload_head = &offload_base;
+ struct packet_offload *ptype;
+
+ list_for_each_entry_rcu(ptype, offload_head, list) {
+ if (ptype->type != type || !ptype->callbacks.gro_complete)
+ continue;
+ return ptype;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(gro_find_complete_by_type);
+
+static gro_result_t napi_skb_finish(struct napi_struct *napi,
+ struct sk_buff *skb,
+ gro_result_t ret)
+{
+ switch (ret) {
+ case GRO_NORMAL:
+ gro_normal_one(napi, skb, 1);
+ break;
+
+ case GRO_MERGED_FREE:
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ __kfree_skb(skb);
+ else
+ __kfree_skb_defer(skb);
+ break;
+
+ case GRO_HELD:
+ case GRO_MERGED:
+ case GRO_CONSUMED:
+ break;
+ }
+
+ return ret;
+}
+
+gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ gro_result_t ret;
+
+ skb_mark_napi_id(skb, napi);
+ trace_napi_gro_receive_entry(skb);
+
+ skb_gro_reset_offset(skb, 0);
+
+ ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
+ trace_napi_gro_receive_exit(ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(napi_gro_receive);
+
+static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+{
+ if (unlikely(skb->pfmemalloc)) {
+ consume_skb(skb);
+ return;
+ }
+ __skb_pull(skb, skb_headlen(skb));
+ /* restore the reserve we had after netdev_alloc_skb_ip_align() */
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
+ __vlan_hwaccel_clear_tag(skb);
+ skb->dev = napi->dev;
+ skb->skb_iif = 0;
+
+ /* eth_type_trans() assumes pkt_type is PACKET_HOST */
+ skb->pkt_type = PACKET_HOST;
+
+ skb->encapsulation = 0;
+ skb_shinfo(skb)->gso_type = 0;
+ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+ if (unlikely(skb->slow_gro)) {
+ skb_orphan(skb);
+ skb_ext_reset(skb);
+ nf_reset_ct(skb);
+ skb->slow_gro = 0;
+ }
+
+ napi->skb = skb;
+}
+
+struct sk_buff *napi_get_frags(struct napi_struct *napi)
+{
+ struct sk_buff *skb = napi->skb;
+
+ if (!skb) {
+ skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
+ if (skb) {
+ napi->skb = skb;
+ skb_mark_napi_id(skb, napi);
+ }
+ }
+ return skb;
+}
+EXPORT_SYMBOL(napi_get_frags);
+
+static gro_result_t napi_frags_finish(struct napi_struct *napi,
+ struct sk_buff *skb,
+ gro_result_t ret)
+{
+ switch (ret) {
+ case GRO_NORMAL:
+ case GRO_HELD:
+ __skb_push(skb, ETH_HLEN);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (ret == GRO_NORMAL)
+ gro_normal_one(napi, skb, 1);
+ break;
+
+ case GRO_MERGED_FREE:
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else
+ napi_reuse_skb(napi, skb);
+ break;
+
+ case GRO_MERGED:
+ case GRO_CONSUMED:
+ break;
+ }
+
+ return ret;
+}
+
+/* The upper GRO stack assumes the network header starts at gro_offset=0.
+ * Drivers may call both napi_gro_frags() and napi_gro_receive(), so
+ * we copy the Ethernet header into skb->data to have a common layout.
+ */
+static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
+{
+ struct sk_buff *skb = napi->skb;
+ const struct ethhdr *eth;
+ unsigned int hlen = sizeof(*eth);
+
+ napi->skb = NULL;
+
+ skb_reset_mac_header(skb);
+ skb_gro_reset_offset(skb, hlen);
+
+ if (unlikely(skb_gro_header_hard(skb, hlen))) {
+ eth = skb_gro_header_slow(skb, hlen, 0);
+ if (unlikely(!eth)) {
+ net_warn_ratelimited("%s: dropping impossible skb from %s\n",
+ __func__, napi->dev->name);
+ napi_reuse_skb(napi, skb);
+ return NULL;
+ }
+ } else {
+ eth = (const struct ethhdr *)skb->data;
+ gro_pull_from_frag0(skb, hlen);
+ NAPI_GRO_CB(skb)->frag0 += hlen;
+ NAPI_GRO_CB(skb)->frag0_len -= hlen;
+ }
+ __skb_pull(skb, hlen);
+
+ /*
+ * This works because the only protocols we care about don't require
+ * special handling.
+ * We'll fix it up properly in napi_frags_finish()
+ */
+ skb->protocol = eth->h_proto;
+
+ return skb;
+}
+
+gro_result_t napi_gro_frags(struct napi_struct *napi)
+{
+ gro_result_t ret;
+ struct sk_buff *skb = napi_frags_skb(napi);
+
+ trace_napi_gro_frags_entry(skb);
+
+ ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
+ trace_napi_gro_frags_exit(ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(napi_gro_frags);
+
+/* Compute the checksum from gro_offset and return the folded value
+ * after adding in any pseudo checksum.
+ */
+__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
+{
+ __wsum wsum;
+ __sum16 sum;
+
+ wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
+
+ /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
+ sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
+ /* See comments in __skb_checksum_complete(). */
+ if (likely(!sum)) {
+ if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+ !skb->csum_complete_sw)
+ netdev_rx_csum_fault(skb->dev, skb);
+ }
+
+ NAPI_GRO_CB(skb)->csum = wsum;
+ NAPI_GRO_CB(skb)->csum_valid = 1;
+
+ return sum;
+}
+EXPORT_SYMBOL(__skb_gro_checksum_complete);
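From a driver's perspective the move into net/core/gro.c changes nothing: napi_gro_receive() remains the entry point for fully built skbs, and napi_get_frags()/napi_gro_frags() cover the frag-only path. A hedged sketch of both (the my_* structures and my_clean_rx_ring() are invented; the GRO and NAPI calls are the ones defined above):

static void my_rx_one_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	/* skb->protocol already set via eth_type_trans() */
	napi_gro_receive(napi, skb);
}

static void my_rx_one_frag(struct napi_struct *napi, struct page *page,
			   unsigned int offset, unsigned int len,
			   unsigned int truesize)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb))
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
			len, truesize);
	/* napi_gro_frags() pulls the Ethernet header itself */
	napi_gro_frags(napi);
}

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_clean_rx_ring(napi, budget);	/* calls the helpers above */

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}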
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 1a455847da54..b0f5344d1185 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -55,7 +55,7 @@ static void rfc2863_policy(struct net_device *dev)
if (operstate == dev->operstate)
return;
- write_lock_bh(&dev_base_lock);
+ write_lock(&dev_base_lock);
switch(dev->link_mode) {
case IF_LINK_MODE_TESTING:
@@ -74,7 +74,7 @@ static void rfc2863_policy(struct net_device *dev)
dev->operstate = operstate;
- write_unlock_bh(&dev_base_lock);
+ write_unlock(&dev_base_lock);
}
@@ -109,7 +109,7 @@ static void linkwatch_add_event(struct net_device *dev)
spin_lock_irqsave(&lweventlist_lock, flags);
if (list_empty(&dev->link_watch_list)) {
list_add_tail(&dev->link_watch_list, &lweventlist);
- dev_hold(dev);
+ dev_hold_track(dev, &dev->linkwatch_dev_tracker, GFP_ATOMIC);
}
spin_unlock_irqrestore(&lweventlist_lock, flags);
}
@@ -166,6 +166,9 @@ static void linkwatch_do_dev(struct net_device *dev)
netdev_state_change(dev);
}
+ /* Note: our callers are responsible for
+ * calling netdev_tracker_free().
+ */
dev_put(dev);
}
@@ -209,6 +212,10 @@ static void __linkwatch_run_queue(int urgent_only)
list_add_tail(&dev->link_watch_list, &lweventlist);
continue;
}
+ /* We must free netdev tracker under
+ * the spinlock protection.
+ */
+ netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
spin_unlock_irq(&lweventlist_lock);
linkwatch_do_dev(dev);
do_dev--;
@@ -232,6 +239,10 @@ void linkwatch_forget_dev(struct net_device *dev)
if (!list_empty(&dev->link_watch_list)) {
list_del_init(&dev->link_watch_list);
clean = 1;
+ /* We must release netdev tracker under
+ * the spinlock protection.
+ */
+ netdev_tracker_free(dev, &dev->linkwatch_dev_tracker);
}
spin_unlock_irqrestore(&lweventlist_lock, flags);
if (clean)
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 2f7940bcf715..349480ef68a5 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
*/
+#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index 2820aca2173a..9ccd64e8a666 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -197,6 +197,10 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining,
nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla_entype) {
+ if (nla_len(nla_entype) < sizeof(u16)) {
+ NL_SET_ERR_MSG(extack, "Invalid RTA_ENCAP_TYPE");
+ return -EINVAL;
+ }
encap_type = nla_get_u16(nla_entype);
if (lwtunnel_valid_encap_type(encap_type,
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index dda12fbd177b..213cb7b26b7a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -624,7 +624,7 @@ ___neigh_create(struct neigh_table *tbl, const void *pkey,
memcpy(n->primary_key, pkey, key_len);
n->dev = dev;
- dev_hold(dev);
+ dev_hold_track(dev, &n->dev_tracker, GFP_ATOMIC);
/* Protocol specific setup. */
if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
@@ -770,10 +770,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
write_pnet(&n->net, net);
memcpy(n->key, pkey, key_len);
n->dev = dev;
- dev_hold(dev);
+ dev_hold_track(dev, &n->dev_tracker, GFP_KERNEL);
if (tbl->pconstructor && tbl->pconstructor(n)) {
- dev_put(dev);
+ dev_put_track(dev, &n->dev_tracker);
kfree(n);
n = NULL;
goto out;
@@ -805,7 +805,7 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
write_unlock_bh(&tbl->lock);
if (tbl->pdestructor)
tbl->pdestructor(n);
- dev_put(n->dev);
+ dev_put_track(n->dev, &n->dev_tracker);
kfree(n);
return 0;
}
@@ -838,7 +838,7 @@ static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
n->next = NULL;
if (tbl->pdestructor)
tbl->pdestructor(n);
- dev_put(n->dev);
+ dev_put_track(n->dev, &n->dev_tracker);
kfree(n);
}
return -ENOENT;
@@ -879,7 +879,7 @@ void neigh_destroy(struct neighbour *neigh)
if (dev->netdev_ops->ndo_neigh_destroy)
dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
- dev_put(dev);
+ dev_put_track(dev, &neigh->dev_tracker);
neigh_parms_put(neigh->parms);
neigh_dbg(2, "neigh %p is destroyed\n", neigh);
@@ -1665,13 +1665,13 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
refcount_set(&p->refcnt, 1);
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
- dev_hold(dev);
+ dev_hold_track(dev, &p->dev_tracker, GFP_KERNEL);
p->dev = dev;
write_pnet(&p->net, net);
p->sysctl_table = NULL;
if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
- dev_put(dev);
+ dev_put_track(dev, &p->dev_tracker);
kfree(p);
return NULL;
}
@@ -1702,7 +1702,7 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
list_del(&parms->list);
parms->dead = 1;
write_unlock_bh(&tbl->lock);
- dev_put(parms->dev);
+ dev_put_track(parms->dev, &parms->dev_tracker);
call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
}
EXPORT_SYMBOL(neigh_parms_release);
@@ -3770,10 +3770,6 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
neigh_proc_base_reachable_time;
}
- /* Don't export sysctls to unprivileged users */
- if (neigh_parms_net(p)->user_ns != &init_user_ns)
- t->neigh_vars[0].procname = NULL;
-
switch (neigh_parms_family(p)) {
case AF_INET:
p_name = "ipv4";
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 9c01c642cf9e..53ea262ecafd 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -488,14 +488,6 @@ static ssize_t proto_down_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
- struct net_device *netdev = to_net_dev(dev);
-
- /* The check is also done in change_proto_down; this helps returning
- * early without hitting the trylock/restart in netdev_store.
- */
- if (!netdev->netdev_ops->ndo_change_proto_down)
- return -EOPNOTSUPP;
-
return netdev_store(dev, attr, buf, len, change_proto_down);
}
NETDEVICE_SHOW_RW(proto_down, fmt_dec);
@@ -1012,7 +1004,7 @@ static void rx_queue_release(struct kobject *kobj)
#endif
memset(kobj, 0, sizeof(*kobj));
- dev_put(queue->dev);
+ dev_put_track(queue->dev, &queue->dev_tracker);
}
static const void *rx_queue_namespace(struct kobject *kobj)
@@ -1052,7 +1044,7 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
/* Kobject_put later will trigger rx_queue_release call which
* decreases dev refcount: Take that reference here
*/
- dev_hold(queue->dev);
+ dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL);
kobj->kset = dev->queues_kset;
error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
@@ -1201,11 +1193,7 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = {
static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
{
- unsigned long trans_timeout;
-
- spin_lock_irq(&queue->_xmit_lock);
- trans_timeout = queue->trans_timeout;
- spin_unlock_irq(&queue->_xmit_lock);
+ unsigned long trans_timeout = atomic_long_read(&queue->trans_timeout);
return sprintf(buf, fmt_ulong, trans_timeout);
}
@@ -1452,7 +1440,7 @@ static ssize_t xps_queue_show(struct net_device *dev, unsigned int index,
for (i = map->len; i--;) {
if (map->queues[i] == index) {
- set_bit(j, mask);
+ __set_bit(j, mask);
break;
}
}
@@ -1619,7 +1607,7 @@ static void netdev_queue_release(struct kobject *kobj)
struct netdev_queue *queue = to_netdev_queue(kobj);
memset(kobj, 0, sizeof(*kobj));
- dev_put(queue->dev);
+ dev_put_track(queue->dev, &queue->dev_tracker);
}
static const void *netdev_queue_namespace(struct kobject *kobj)
@@ -1659,7 +1647,7 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
/* Kobject_put later will trigger netdev_queue_release call
* which decreases dev refcount: Take that reference here
*/
- dev_hold(queue->dev);
+ dev_hold_track(queue->dev, &queue->dev_tracker, GFP_KERNEL);
kobj->kset = dev->queues_kset;
error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
@@ -1706,6 +1694,13 @@ netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
int i;
int error = 0;
+ /* Tx queue kobjects are allowed to be updated when a device is being
+ * unregistered, but solely to remove queues from qdiscs. Any path
+ * adding queues should be fixed.
+ */
+ WARN(dev->reg_state == NETREG_UNREGISTERING && new_num > old_num,
+ "New queues can't be registered after device unregistration.");
+
for (i = old_num; i < new_num; i++) {
error = netdev_queue_add_kobject(dev, i);
if (error) {
@@ -1820,6 +1815,9 @@ static void remove_queue_kobjects(struct net_device *dev)
net_rx_queue_update_kobjects(dev, real_rx, 0);
netdev_queue_update_kobjects(dev, real_tx, 0);
+
+ dev->real_num_rx_queues = 0;
+ dev->real_num_tx_queues = 0;
#ifdef CONFIG_SYSFS
kset_unregister(dev->queues_kset);
#endif
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 202fa5eacd0f..9b7171c40434 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -311,6 +311,8 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
LIST_HEAD(net_exit_list);
refcount_set(&net->ns.count, 1);
+ ref_tracker_dir_init(&net->refcnt_tracker, 128);
+
refcount_set(&net->passive, 1);
get_random_bytes(&net->hash_mix, sizeof(u32));
preempt_disable();
@@ -635,6 +637,7 @@ static DECLARE_WORK(net_cleanup_work, cleanup_net);
void __put_net(struct net *net)
{
+ ref_tracker_dir_exit(&net->refcnt_tracker);
/* Cleanup the network namespace in process context */
if (llist_add(&net->cleanup_list, &cleanup_list))
queue_work(netns_wq, &net_cleanup_work);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index edfc0f8011f8..db724463e7cd 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -776,7 +776,7 @@ put_noaddr:
err = __netpoll_setup(np, ndev);
if (err)
goto put;
-
+ netdev_tracker_alloc(ndev, &np->dev_tracker, GFP_KERNEL);
rtnl_unlock();
return 0;
@@ -853,7 +853,7 @@ void netpoll_cleanup(struct netpoll *np)
if (!np->dev)
goto out;
__netpoll_cleanup(np);
- dev_put(np->dev);
+ dev_put_track(np->dev, &np->dev_tracker);
np->dev = NULL;
out:
rtnl_unlock();
diff --git a/net/core/of_net.c b/net/core/of_net.c
index f1a9bf7578e7..95a64c813ae5 100644
--- a/net/core/of_net.c
+++ b/net/core/of_net.c
@@ -61,7 +61,7 @@ static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr)
{
struct platform_device *pdev = of_find_device_by_node(np);
struct nvmem_cell *cell;
- const void *mac;
+ const void *buf;
size_t len;
int ret;
@@ -78,21 +78,32 @@ static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr)
if (IS_ERR(cell))
return PTR_ERR(cell);
- mac = nvmem_cell_read(cell, &len);
+ buf = nvmem_cell_read(cell, &len);
nvmem_cell_put(cell);
- if (IS_ERR(mac))
- return PTR_ERR(mac);
-
- if (len != ETH_ALEN || !is_valid_ether_addr(mac)) {
- kfree(mac);
- return -EINVAL;
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ ret = 0;
+ if (len == ETH_ALEN) {
+ if (is_valid_ether_addr(buf))
+ memcpy(addr, buf, ETH_ALEN);
+ else
+ ret = -EINVAL;
+ } else if (len == 3 * ETH_ALEN - 1) {
+ u8 mac[ETH_ALEN];
+
+ if (mac_pton(buf, mac))
+ memcpy(addr, mac, ETH_ALEN);
+ else
+ ret = -EINVAL;
+ } else {
+ ret = -EINVAL;
}
- memcpy(addr, mac, ETH_ALEN);
- kfree(mac);
+ kfree(buf);
- return 0;
+ return ret;
}
/**
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 1a6978427d6c..bd62c01a2ec3 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -130,9 +130,6 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif
- /* Slower-path: Get pages from locked ring queue */
- spin_lock(&r->consumer_lock);
-
/* Refill alloc array, but only if NUMA match */
do {
page = __ptr_ring_consume(r);
@@ -157,7 +154,6 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
if (likely(pool->alloc.count > 0))
page = pool->alloc.cache[--pool->alloc.count];
- spin_unlock(&r->consumer_lock);
return page;
}
@@ -217,6 +213,8 @@ static void page_pool_set_pp_info(struct page_pool *pool,
{
page->pp = pool;
page->pp_magic |= PP_SIGNATURE;
+ if (pool->p.init_callback)
+ pool->p.init_callback(page, pool->p.init_arg);
}
static void page_pool_clear_pp_info(struct page *page)
@@ -691,10 +689,12 @@ static void page_pool_release_retry(struct work_struct *wq)
schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
-void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
+ struct xdp_mem_info *mem)
{
refcount_inc(&pool->user_cnt);
pool->disconnect = disconnect;
+ pool->xdp_mem_id = mem->id;
}
void page_pool_destroy(struct page_pool *pool)
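The init_callback/init_arg hook added above lets a driver run per-page setup when the pool allocates pages. A minimal sketch, assuming a hypothetical driver structure (only page_pool_create() and the two new params fields are real; the bookkeeping is illustrative):

struct my_rx_ring {				/* hypothetical driver state */
	unsigned long pages_initialized;
	struct page_pool *pool;
};

static void my_page_init(struct page *page, void *arg)
{
	struct my_rx_ring *ring = arg;

	ring->pages_initialized++;		/* illustrative bookkeeping only */
}

static int my_ring_create(struct my_rx_ring *ring, struct device *dev)
{
	struct page_pool_params pp = {
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.init_callback	= my_page_init,	/* hook exercised in the hunk above */
		.init_arg	= ring,
	};

	ring->pool = page_pool_create(&pp);
	return PTR_ERR_OR_ZERO(ring->pool);
}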
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a3d74e2704c4..560a5e712dc3 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -410,6 +410,7 @@ struct pktgen_dev {
* device name (not when the inject is
* started as it used to do.)
*/
+ netdevice_tracker dev_tracker;
char odevname[32];
struct flow_state *flows;
unsigned int cflows; /* Concurrent flows (config) */
@@ -2099,7 +2100,7 @@ static int pktgen_setup_dev(const struct pktgen_net *pn,
/* Clean old setups */
if (pkt_dev->odev) {
- dev_put(pkt_dev->odev);
+ dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker);
pkt_dev->odev = NULL;
}
@@ -2117,6 +2118,7 @@ static int pktgen_setup_dev(const struct pktgen_net *pn,
err = -ENETDOWN;
} else {
pkt_dev->odev = odev;
+ netdev_tracker_alloc(odev, &pkt_dev->dev_tracker, GFP_KERNEL);
return 0;
}
@@ -3805,7 +3807,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
return add_dev_to_thread(t, pkt_dev);
out2:
- dev_put(pkt_dev->odev);
+ dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker);
out1:
#ifdef CONFIG_XFRM
free_SAs(pkt_dev);
@@ -3899,7 +3901,7 @@ static int pktgen_remove_device(struct pktgen_thread *t,
/* Dis-associate from the interface */
if (pkt_dev->odev) {
- dev_put(pkt_dev->odev);
+ dev_put_track(pkt_dev->odev, &pkt_dev->dev_tracker);
pkt_dev->odev = NULL;
}
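The dev_hold()/dev_put() conversions in pktgen, netpoll, neighbour and the sysfs queue code all follow the same shape: the object embedding the reference gains a netdevice_tracker, and the hold/put become the tracked variants. A generic sketch with an invented structure:

struct my_binding {
	struct net_device *dev;
	netdevice_tracker dev_tracker;		/* paired with the hold below */
};

static void my_bind(struct my_binding *b, struct net_device *dev)
{
	b->dev = dev;
	dev_hold_track(dev, &b->dev_tracker, GFP_KERNEL);
}

static void my_unbind(struct my_binding *b)
{
	dev_put_track(b->dev, &b->dev_tracker);
	b->dev = NULL;
}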
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 2af8aeeadadf..e476403231f0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -842,9 +842,9 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
}
if (dev->operstate != operstate) {
- write_lock_bh(&dev_base_lock);
+ write_lock(&dev_base_lock);
dev->operstate = operstate;
- write_unlock_bh(&dev_base_lock);
+ write_unlock(&dev_base_lock);
netdev_state_change(dev);
}
}
@@ -1026,6 +1026,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
+ nla_total_size(4) /* IFLA_GSO_MAX_SEGS */
+ nla_total_size(4) /* IFLA_GSO_MAX_SIZE */
+ + nla_total_size(4) /* IFLA_GRO_MAX_SIZE */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(4) /* IFLA_CARRIER_CHANGES */
@@ -1728,6 +1729,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
nla_put_u32(skb, IFLA_GSO_MAX_SEGS, dev->gso_max_segs) ||
nla_put_u32(skb, IFLA_GSO_MAX_SIZE, dev->gso_max_size) ||
+ nla_put_u32(skb, IFLA_GRO_MAX_SIZE, dev->gro_max_size) ||
#ifdef CONFIG_RPS
nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
@@ -1880,6 +1882,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_PROTO_DOWN_REASON] = { .type = NLA_NESTED },
[IFLA_NEW_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
[IFLA_PARENT_DEV_NAME] = { .type = NLA_NUL_STRING },
+ [IFLA_GRO_MAX_SIZE] = { .type = NLA_U32 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -2299,6 +2302,14 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
}
}
+ if (tb[IFLA_GRO_MAX_SIZE]) {
+ u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
+
+ if (gro_max_size > GRO_MAX_SIZE) {
+ NL_SET_ERR_MSG(extack, "too big gro_max_size");
+ return -EINVAL;
+ }
+ }
return 0;
}
@@ -2539,13 +2550,12 @@ static int do_set_proto_down(struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct nlattr *pdreason[IFLA_PROTO_DOWN_REASON_MAX + 1];
- const struct net_device_ops *ops = dev->netdev_ops;
unsigned long mask = 0;
u32 value;
bool proto_down;
int err;
- if (!ops->ndo_change_proto_down) {
+ if (!(dev->priv_flags & IFF_CHANGE_PROTO_DOWN)) {
NL_SET_ERR_MSG(extack, "Protodown not supported by device");
return -EOPNOTSUPP;
}
@@ -2768,7 +2778,16 @@ static int do_setlink(const struct sk_buff *skb,
}
if (dev->gso_max_segs ^ max_segs) {
- dev->gso_max_segs = max_segs;
+ netif_set_gso_max_segs(dev, max_segs);
+ status |= DO_SETLINK_MODIFIED;
+ }
+ }
+
+ if (tb[IFLA_GRO_MAX_SIZE]) {
+ u32 gro_max_size = nla_get_u32(tb[IFLA_GRO_MAX_SIZE]);
+
+ if (dev->gro_max_size ^ gro_max_size) {
+ netif_set_gro_max_size(dev, gro_max_size);
status |= DO_SETLINK_MODIFIED;
}
}
@@ -2779,11 +2798,11 @@ static int do_setlink(const struct sk_buff *skb,
if (tb[IFLA_LINKMODE]) {
unsigned char value = nla_get_u8(tb[IFLA_LINKMODE]);
- write_lock_bh(&dev_base_lock);
+ write_lock(&dev_base_lock);
if (dev->link_mode ^ value)
status |= DO_SETLINK_NOTIFY;
dev->link_mode = value;
- write_unlock_bh(&dev_base_lock);
+ write_unlock(&dev_base_lock);
}
if (tb[IFLA_VFINFO_LIST]) {
@@ -3222,7 +3241,9 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
if (tb[IFLA_GSO_MAX_SIZE])
netif_set_gso_max_size(dev, nla_get_u32(tb[IFLA_GSO_MAX_SIZE]));
if (tb[IFLA_GSO_MAX_SEGS])
- dev->gso_max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
+ netif_set_gso_max_segs(dev, nla_get_u32(tb[IFLA_GSO_MAX_SEGS]));
+ if (tb[IFLA_GRO_MAX_SIZE])
+ netif_set_gro_max_size(dev, nla_get_u32(tb[IFLA_GRO_MAX_SIZE]));
return dev;
}
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index b5bc680d4755..9b8443774449 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -19,8 +19,8 @@
#include <linux/in6.h>
#include <net/tcp.h>
-static siphash_key_t net_secret __read_mostly;
-static siphash_key_t ts_secret __read_mostly;
+static siphash_aligned_key_t net_secret;
+static siphash_aligned_key_t ts_secret;
static __always_inline void net_secret_init(void)
{
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 909db87d7383..0118f0afaa4f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -759,21 +759,23 @@ void __kfree_skb(struct sk_buff *skb)
EXPORT_SYMBOL(__kfree_skb);
/**
- * kfree_skb - free an sk_buff
+ * kfree_skb_reason - free an sk_buff with a specific drop reason
* @skb: buffer to free
+ * @reason: reason why this skb is dropped
*
* Drop a reference to the buffer and free it if the usage count has
- * hit zero.
+ * hit zero. Meanwhile, pass the drop reason to the 'kfree_skb'
+ * tracepoint.
*/
-void kfree_skb(struct sk_buff *skb)
+void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
if (!skb_unref(skb))
return;
- trace_kfree_skb(skb, __builtin_return_address(0));
+ trace_kfree_skb(skb, __builtin_return_address(0), reason);
__kfree_skb(skb);
}
-EXPORT_SYMBOL(kfree_skb);
+EXPORT_SYMBOL(kfree_skb_reason);
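Call sites that know why a packet is being dropped can switch to the new helper, while plain kfree_skb() presumably stays available as a wrapper (defined in the header, not shown in this hunk). A small hedged sketch (the surrounding function is invented; SKB_DROP_REASON_NO_SOCKET is one of the reason codes introduced alongside this helper):

static int my_deliver(struct sock *sk, struct sk_buff *skb)
{
	if (!sk) {
		kfree_skb_reason(skb, SKB_DROP_REASON_NO_SOCKET);
		return -ENOENT;
	}
	return sock_queue_rcv_skb(sk, skb);
}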
void kfree_skb_list(struct sk_buff *segs)
{
@@ -992,12 +994,10 @@ void napi_consume_skb(struct sk_buff *skb, int budget)
}
EXPORT_SYMBOL(napi_consume_skb);
-/* Make sure a field is enclosed inside headers_start/headers_end section */
+/* Make sure a field is contained by headers group */
#define CHECK_SKB_FIELD(field) \
- BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
- offsetof(struct sk_buff, headers_start)); \
- BUILD_BUG_ON(offsetof(struct sk_buff, field) > \
- offsetof(struct sk_buff, headers_end)); \
+ BUILD_BUG_ON(offsetof(struct sk_buff, field) != \
+ offsetof(struct sk_buff, headers.field)); \
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
@@ -1009,14 +1009,12 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
__skb_ext_copy(new, old);
__nf_copy(new, old, false);
- /* Note : this field could be in headers_start/headers_end section
+ /* Note : this field could be in the headers group.
* It is not yet because we do not want to have a 16 bit hole
*/
new->queue_mapping = old->queue_mapping;
- memcpy(&new->headers_start, &old->headers_start,
- offsetof(struct sk_buff, headers_end) -
- offsetof(struct sk_buff, headers_start));
+ memcpy(&new->headers, &old->headers, sizeof(new->headers));
CHECK_SKB_FIELD(protocol);
CHECK_SKB_FIELD(csum);
CHECK_SKB_FIELD(hash);
@@ -2028,6 +2026,30 @@ void *skb_pull(struct sk_buff *skb, unsigned int len)
EXPORT_SYMBOL(skb_pull);
/**
+ * skb_pull_data - remove data from the start of a buffer returning its
+ * original position.
+ * @skb: buffer to use
+ * @len: amount of data to remove
+ *
+ * This function removes data from the start of a buffer, returning
+ * the memory to the headroom. A pointer to the original data in the
+ * buffer is returned, or NULL if there is not enough data to pull.
+ * Once the data has been pulled, future pushes will overwrite the old data.
+ */
+void *skb_pull_data(struct sk_buff *skb, size_t len)
+{
+ void *data = skb->data;
+
+ if (skb->len < len)
+ return NULL;
+
+ skb_pull(skb, len);
+
+ return data;
+}
+EXPORT_SYMBOL(skb_pull_data);
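A typical consumer peels a fixed-size header off the front of the skb and bails out if the buffer is too short; that is the pattern this helper was added for. A minimal sketch with an invented header type:

struct my_hdr {
	__u8 opcode;
	__u8 len;
} __packed;

static int my_parse(struct sk_buff *skb)
{
	struct my_hdr *hdr = skb_pull_data(skb, sizeof(*hdr));

	if (!hdr)		/* not enough data in the skb */
		return -EMSGSIZE;

	return hdr->opcode;
}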
+
+/**
* skb_trim - remove end from a buffer
* @skb: buffer to alter
* @len: new length
@@ -3919,32 +3941,6 @@ err_linearize:
}
EXPORT_SYMBOL_GPL(skb_segment_list);
-int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
-{
- if (unlikely(p->len + skb->len >= 65536))
- return -E2BIG;
-
- if (NAPI_GRO_CB(p)->last == p)
- skb_shinfo(p)->frag_list = skb;
- else
- NAPI_GRO_CB(p)->last->next = skb;
-
- skb_pull(skb, skb_gro_offset(skb));
-
- NAPI_GRO_CB(p)->last = skb;
- NAPI_GRO_CB(p)->count++;
- p->data_len += skb->len;
-
- /* sk owenrship - if any - completely transferred to the aggregated packet */
- skb->destructor = NULL;
- p->truesize += skb->truesize;
- p->len += skb->len;
-
- NAPI_GRO_CB(skb)->same_flow = 1;
-
- return 0;
-}
-
/**
* skb_segment - Perform protocol segmentation on skb.
* @head_skb: buffer to segment
@@ -4297,122 +4293,6 @@ err:
}
EXPORT_SYMBOL_GPL(skb_segment);
-int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
-{
- struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
- unsigned int offset = skb_gro_offset(skb);
- unsigned int headlen = skb_headlen(skb);
- unsigned int len = skb_gro_len(skb);
- unsigned int delta_truesize;
- unsigned int new_truesize;
- struct sk_buff *lp;
-
- if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
- return -E2BIG;
-
- lp = NAPI_GRO_CB(p)->last;
- pinfo = skb_shinfo(lp);
-
- if (headlen <= offset) {
- skb_frag_t *frag;
- skb_frag_t *frag2;
- int i = skbinfo->nr_frags;
- int nr_frags = pinfo->nr_frags + i;
-
- if (nr_frags > MAX_SKB_FRAGS)
- goto merge;
-
- offset -= headlen;
- pinfo->nr_frags = nr_frags;
- skbinfo->nr_frags = 0;
-
- frag = pinfo->frags + nr_frags;
- frag2 = skbinfo->frags + i;
- do {
- *--frag = *--frag2;
- } while (--i);
-
- skb_frag_off_add(frag, offset);
- skb_frag_size_sub(frag, offset);
-
- /* all fragments truesize : remove (head size + sk_buff) */
- new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
- delta_truesize = skb->truesize - new_truesize;
-
- skb->truesize = new_truesize;
- skb->len -= skb->data_len;
- skb->data_len = 0;
-
- NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
- goto done;
- } else if (skb->head_frag) {
- int nr_frags = pinfo->nr_frags;
- skb_frag_t *frag = pinfo->frags + nr_frags;
- struct page *page = virt_to_head_page(skb->head);
- unsigned int first_size = headlen - offset;
- unsigned int first_offset;
-
- if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
- goto merge;
-
- first_offset = skb->data -
- (unsigned char *)page_address(page) +
- offset;
-
- pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
-
- __skb_frag_set_page(frag, page);
- skb_frag_off_set(frag, first_offset);
- skb_frag_size_set(frag, first_size);
-
- memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
- /* We dont need to clear skbinfo->nr_frags here */
-
- new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
- delta_truesize = skb->truesize - new_truesize;
- skb->truesize = new_truesize;
- NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
- goto done;
- }
-
-merge:
- /* sk owenrship - if any - completely transferred to the aggregated packet */
- skb->destructor = NULL;
- delta_truesize = skb->truesize;
- if (offset > headlen) {
- unsigned int eat = offset - headlen;
-
- skb_frag_off_add(&skbinfo->frags[0], eat);
- skb_frag_size_sub(&skbinfo->frags[0], eat);
- skb->data_len -= eat;
- skb->len -= eat;
- offset = headlen;
- }
-
- __skb_pull(skb, offset);
-
- if (NAPI_GRO_CB(p)->last == p)
- skb_shinfo(p)->frag_list = skb;
- else
- NAPI_GRO_CB(p)->last->next = skb;
- NAPI_GRO_CB(p)->last = skb;
- __skb_header_release(skb);
- lp = p;
-
-done:
- NAPI_GRO_CB(p)->count++;
- p->data_len += len;
- p->truesize += delta_truesize;
- p->len += len;
- if (lp != p) {
- lp->data_len += len;
- lp->truesize += delta_truesize;
- lp->len += len;
- }
- NAPI_GRO_CB(skb)->same_flow = 1;
- return 0;
-}
-
#ifdef CONFIG_SKB_EXTENSIONS
#define SKB_EXT_ALIGN_VALUE 8
#define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
@@ -4849,8 +4729,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
serr->ee.ee_data = skb_shinfo(skb)->tskey;
- if (sk->sk_protocol == IPPROTO_TCP &&
- sk->sk_type == SOCK_STREAM)
+ if (sk_is_tcp(sk))
serr->ee.ee_data -= sk->sk_tskey;
}
@@ -4919,8 +4798,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
if (tsonly) {
#ifdef CONFIG_INET
if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
- sk->sk_protocol == IPPROTO_TCP &&
- sk->sk_type == SOCK_STREAM) {
+ sk_is_tcp(sk)) {
skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
ack_skb);
opt_stats = true;
diff --git a/net/core/sock.c b/net/core/sock.c
index 41e91d0f7061..e21485ab285d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -144,8 +144,6 @@
static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);
-static void sock_inuse_add(struct net *net, int val);
-
/**
* sk_ns_capable - General socket capability test
* @sk: Socket to use a capability on or through
@@ -327,7 +325,10 @@ int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
noreclaim_flag = memalloc_noreclaim_save();
- ret = sk->sk_backlog_rcv(sk, skb);
+ ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv,
+ tcp_v6_do_rcv,
+ tcp_v4_do_rcv,
+ sk, skb);
memalloc_noreclaim_restore(noreclaim_flag);
return ret;
@@ -872,8 +873,7 @@ int sock_set_timestamping(struct sock *sk, int optname,
if (val & SOF_TIMESTAMPING_OPT_ID &&
!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
- if (sk->sk_protocol == IPPROTO_TCP &&
- sk->sk_type == SOCK_STREAM) {
+ if (sk_is_tcp(sk)) {
if ((1 << sk->sk_state) &
(TCPF_CLOSE | TCPF_LISTEN))
return -EINVAL;
@@ -1135,6 +1135,7 @@ set_sndbuf:
case SO_PRIORITY:
if ((val >= 0 && val <= 6) ||
+ ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
sk->sk_priority = val;
else
@@ -1280,7 +1281,8 @@ set_sndbuf:
clear_bit(SOCK_PASSSEC, &sock->flags);
break;
case SO_MARK:
- if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
+ !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
ret = -EPERM;
break;
}
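For SO_PRIORITY and SO_MARK the change is only a weaker privilege requirement; the userspace call is unchanged. A minimal sketch:

#include <sys/socket.h>

static int set_mark(int fd, unsigned int mark)
{
	/* now permitted with CAP_NET_RAW as well as CAP_NET_ADMIN */
	return setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
}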
@@ -1370,8 +1372,7 @@ set_sndbuf:
case SO_ZEROCOPY:
if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
- if (!((sk->sk_type == SOCK_STREAM &&
- sk->sk_protocol == IPPROTO_TCP) ||
+ if (!(sk_is_tcp(sk) ||
(sk->sk_type == SOCK_DGRAM &&
sk->sk_protocol == IPPROTO_UDP)))
ret = -ENOTSUPP;
@@ -1982,7 +1983,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
sock_lock_init(sk);
sk->sk_net_refcnt = kern ? 0 : 1;
if (likely(sk->sk_net_refcnt)) {
- get_net(net);
+ get_net_track(net, &sk->ns_tracker, priority);
sock_inuse_add(net, 1);
}
@@ -2038,7 +2039,7 @@ static void __sk_destruct(struct rcu_head *head)
put_pid(sk->sk_peer_pid);
if (likely(sk->sk_net_refcnt))
- put_net(sock_net(sk));
+ put_net_track(sock_net(sk), &sk->ns_tracker);
sk_prot_free(sk->sk_prot_creator, sk);
}
@@ -2125,7 +2126,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
/* SANITY */
if (likely(newsk->sk_net_refcnt)) {
- get_net(sock_net(newsk));
+ get_net_track(sock_net(newsk), &newsk->ns_tracker, priority);
sock_inuse_add(sock_net(newsk), 1);
}
sk_node_init(&newsk->sk_node);
@@ -2246,17 +2247,22 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
u32 max_segs = 1;
sk_dst_set(sk, dst);
- sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
+ sk->sk_route_caps = dst->dev->features;
+ if (sk_is_tcp(sk))
+ sk->sk_route_caps |= NETIF_F_GSO;
if (sk->sk_route_caps & NETIF_F_GSO)
sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
- sk->sk_route_caps &= ~sk->sk_route_nocaps;
+ if (unlikely(sk->sk_gso_disabled))
+ sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
if (sk_can_gso(sk)) {
if (dst->header_len && !xfrm_dst_offload_ok(dst)) {
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
} else {
sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
- sk->sk_gso_max_size = dst->dev->gso_max_size;
- max_segs = max_t(u32, dst->dev->gso_max_segs, 1);
+ /* pairs with the WRITE_ONCE() in netif_set_gso_max_size() */
+ sk->sk_gso_max_size = READ_ONCE(dst->dev->gso_max_size);
+ /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
+ max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
}
}
sk->sk_gso_max_segs = max_segs;
@@ -3286,7 +3292,7 @@ void lock_sock_nested(struct sock *sk, int subclass)
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
- if (sk->sk_lock.owned)
+ if (sock_owned_by_user_nocheck(sk))
__lock_sock(sk);
sk->sk_lock.owned = 1;
spin_unlock_bh(&sk->sk_lock.slock);
@@ -3317,7 +3323,7 @@ bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
- if (!sk->sk_lock.owned) {
+ if (!sock_owned_by_user_nocheck(sk)) {
/*
* Fast path return with bottom halves disabled and
* sock::sk_lock.slock held.
@@ -3532,19 +3538,8 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
}
#ifdef CONFIG_PROC_FS
-#define PROTO_INUSE_NR 64 /* should be enough for the first time */
-struct prot_inuse {
- int val[PROTO_INUSE_NR];
-};
-
static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
-void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
-{
- __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
-}
-EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
-
int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
int cpu, idx = prot->inuse_idx;
@@ -3557,17 +3552,12 @@ int sock_prot_inuse_get(struct net *net, struct proto *prot)
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
-static void sock_inuse_add(struct net *net, int val)
-{
- this_cpu_add(*net->core.sock_inuse, val);
-}
-
int sock_inuse_get(struct net *net)
{
int cpu, res = 0;
for_each_possible_cpu(cpu)
- res += *per_cpu_ptr(net->core.sock_inuse, cpu);
+ res += per_cpu_ptr(net->core.prot_inuse, cpu)->all;
return res;
}
@@ -3579,22 +3569,12 @@ static int __net_init sock_inuse_init_net(struct net *net)
net->core.prot_inuse = alloc_percpu(struct prot_inuse);
if (net->core.prot_inuse == NULL)
return -ENOMEM;
-
- net->core.sock_inuse = alloc_percpu(int);
- if (net->core.sock_inuse == NULL)
- goto out;
-
return 0;
-
-out:
- free_percpu(net->core.prot_inuse);
- return -ENOMEM;
}
static void __net_exit sock_inuse_exit_net(struct net *net)
{
free_percpu(net->core.prot_inuse);
- free_percpu(net->core.sock_inuse);
}
static struct pernet_operations net_inuse_ops = {
@@ -3640,9 +3620,6 @@ static inline void release_proto_idx(struct proto *prot)
{
}
-static void sock_inuse_add(struct net *net, int val)
-{
-}
#endif
static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
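The sock_inuse counter is folded into struct prot_inuse, whose per-cpu val[] array now sits next to an "all" field counting every socket in the netns. The accessors dropped from sock.c above presumably move to a header as static inlines; a rough sketch of the consolidated layout (header placement and const qualifiers are assumptions, the field names follow the hunks above):

/* Sketch only, not part of the patch. */
struct prot_inuse {
	int all;			/* every socket in this netns */
	int val[PROTO_INUSE_NR];	/* per-protocol breakdown */
};

static inline void sock_prot_inuse_add(const struct net *net,
				       const struct proto *prot, int val)
{
	this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val);
}

static inline void sock_inuse_add(const struct net *net, int val)
{
	this_cpu_add(net->core.prot_inuse->all, val);
}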
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index c9c45b935f99..f7cf74cdd3db 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -1,5 +1,6 @@
/* License: GPL */
+#include <linux/filter.h>
#include <linux/mutex.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 4ca4b11f4e5f..1827669eedd6 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -292,15 +292,23 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
if (skb_verdict)
psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
+ /* msg_* and stream_* program references are tracked in psock after this
+ * point. Reference decrement and cleanup will occur through the psock
+ * destructor.
+ */
ret = sock_map_init_proto(sk, psock);
- if (ret < 0)
- goto out_drop;
+ if (ret < 0) {
+ sk_psock_put(sk, psock);
+ goto out;
+ }
write_lock_bh(&sk->sk_callback_lock);
if (stream_parser && stream_verdict && !psock->saved_data_ready) {
ret = sk_psock_init_strp(sk, psock);
- if (ret)
- goto out_unlock_drop;
+ if (ret) {
+ write_unlock_bh(&sk->sk_callback_lock);
+ sk_psock_put(sk, psock);
+ goto out;
+ }
sk_psock_start_strp(sk, psock);
} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
sk_psock_start_verdict(sk,psock);
@@ -309,10 +317,6 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
}
write_unlock_bh(&sk->sk_callback_lock);
return 0;
-out_unlock_drop:
- write_unlock_bh(&sk->sk_callback_lock);
-out_drop:
- sk_psock_put(sk, psock);
out_progs:
if (skb_verdict)
bpf_prog_put(skb_verdict);
@@ -325,6 +329,7 @@ out_put_stream_parser:
out_put_stream_verdict:
if (stream_verdict)
bpf_prog_put(stream_verdict);
+out:
return ret;
}
@@ -1564,7 +1569,7 @@ static struct bpf_iter_reg sock_map_iter_reg = {
.ctx_arg_info_size = 2,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__sockmap, key),
- PTR_TO_RDONLY_BUF_OR_NULL },
+ PTR_TO_BUF | PTR_MAYBE_NULL | MEM_RDONLY },
{ offsetof(struct bpf_iter__sockmap, sk),
PTR_TO_BTF_ID_OR_NULL },
},
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 5f88526ad61c..7b4d485aac7a 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -6,6 +6,7 @@
* Added /proc/sys/net/core directory entry (empty =) ). [MS]
*/
+#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/module.h>
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 5ddc29f29bad..7aba35504986 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -110,20 +110,15 @@ static void mem_allocator_disconnect(void *allocator)
mutex_unlock(&mem_id_lock);
}
-void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
+void xdp_unreg_mem_model(struct xdp_mem_info *mem)
{
struct xdp_mem_allocator *xa;
- int type = xdp_rxq->mem.type;
- int id = xdp_rxq->mem.id;
+ int type = mem->type;
+ int id = mem->id;
/* Reset mem info to defaults */
- xdp_rxq->mem.id = 0;
- xdp_rxq->mem.type = 0;
-
- if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
- WARN(1, "Missing register, driver bug");
- return;
- }
+ mem->id = 0;
+ mem->type = 0;
if (id == 0)
return;
@@ -135,6 +130,17 @@ void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
rcu_read_unlock();
}
}
+EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);
+
+void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
+{
+ if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
+ WARN(1, "Missing register, driver bug");
+ return;
+ }
+
+ xdp_unreg_mem_model(&xdp_rxq->mem);
+}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);
void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
@@ -159,6 +165,11 @@ static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
struct net_device *dev, u32 queue_index, unsigned int napi_id)
{
+ if (!dev) {
+ WARN(1, "Missing net_device from driver");
+ return -ENODEV;
+ }
+
if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
WARN(1, "Driver promised not to register this");
return -EINVAL;
@@ -169,11 +180,6 @@ int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
xdp_rxq_info_unreg(xdp_rxq);
}
- if (!dev) {
- WARN(1, "Missing net_device from driver");
- return -ENODEV;
- }
-
/* State either UNREGISTERED or NEW */
xdp_rxq_info_init(xdp_rxq);
xdp_rxq->dev = dev;
@@ -259,28 +265,24 @@ static bool __is_supported_mem_type(enum xdp_mem_type type)
return true;
}
-int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
- enum xdp_mem_type type, void *allocator)
+static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem,
+ enum xdp_mem_type type,
+ void *allocator)
{
struct xdp_mem_allocator *xdp_alloc;
gfp_t gfp = GFP_KERNEL;
int id, errno, ret;
void *ptr;
- if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
- WARN(1, "Missing register, driver bug");
- return -EFAULT;
- }
-
if (!__is_supported_mem_type(type))
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
- xdp_rxq->mem.type = type;
+ mem->type = type;
if (!allocator) {
if (type == MEM_TYPE_PAGE_POOL)
- return -EINVAL; /* Setup time check page_pool req */
- return 0;
+ return ERR_PTR(-EINVAL); /* Setup time check page_pool req */
+ return NULL;
}
/* Delay init of rhashtable to save memory if feature isn't used */
@@ -290,13 +292,13 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
mutex_unlock(&mem_id_lock);
if (ret < 0) {
WARN_ON(1);
- return ret;
+ return ERR_PTR(ret);
}
}
xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
if (!xdp_alloc)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
mutex_lock(&mem_id_lock);
id = __mem_id_cyclic_get(gfp);
@@ -304,31 +306,61 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
errno = id;
goto err;
}
- xdp_rxq->mem.id = id;
- xdp_alloc->mem = xdp_rxq->mem;
+ mem->id = id;
+ xdp_alloc->mem = *mem;
xdp_alloc->allocator = allocator;
/* Insert allocator into ID lookup table */
ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
if (IS_ERR(ptr)) {
- ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
- xdp_rxq->mem.id = 0;
+ ida_simple_remove(&mem_id_pool, mem->id);
+ mem->id = 0;
errno = PTR_ERR(ptr);
goto err;
}
if (type == MEM_TYPE_PAGE_POOL)
- page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);
+ page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem);
mutex_unlock(&mem_id_lock);
- trace_mem_connect(xdp_alloc, xdp_rxq);
- return 0;
+ return xdp_alloc;
err:
mutex_unlock(&mem_id_lock);
kfree(xdp_alloc);
- return errno;
+ return ERR_PTR(errno);
}
+
+int xdp_reg_mem_model(struct xdp_mem_info *mem,
+ enum xdp_mem_type type, void *allocator)
+{
+ struct xdp_mem_allocator *xdp_alloc;
+
+ xdp_alloc = __xdp_reg_mem_model(mem, type, allocator);
+ if (IS_ERR(xdp_alloc))
+ return PTR_ERR(xdp_alloc);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(xdp_reg_mem_model);
+
+int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
+ enum xdp_mem_type type, void *allocator)
+{
+ struct xdp_mem_allocator *xdp_alloc;
+
+ if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
+ WARN(1, "Missing register, driver bug");
+ return -EFAULT;
+ }
+
+ xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator);
+ if (IS_ERR(xdp_alloc))
+ return PTR_ERR(xdp_alloc);
+
+ trace_mem_connect(xdp_alloc, xdp_rxq);
+ return 0;
+}
+
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
/* XDP RX runs under NAPI protection, and in different delivery error
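With registration split out of the rxq path, a memory model can now be set up on a bare struct xdp_mem_info through the newly exported xdp_reg_mem_model()/xdp_unreg_mem_model(). A minimal usage sketch for a page_pool-backed model; only the two exported helpers and MEM_TYPE_PAGE_POOL come from this file, the example_priv structure and pool parameters are assumptions:

/* Sketch only, not part of the patch. */
static int example_reg_mem(struct example_priv *priv)
{
	struct page_pool_params pp_params = {
		.order		= 0,
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= priv->dev,
	};
	int err;

	priv->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(priv->page_pool))
		return PTR_ERR(priv->page_pool);

	err = xdp_reg_mem_model(&priv->xdp_mem, MEM_TYPE_PAGE_POOL,
				priv->page_pool);
	if (err) {
		page_pool_destroy(priv->page_pool);
		return err;
	}

	return 0;
}

static void example_unreg_mem(struct example_priv *priv)
{
	/* Drops the mem model's reference, then our own creator reference */
	xdp_unreg_mem_model(&priv->xdp_mem);
	page_pool_destroy(priv->page_pool);
}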
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index fc44dadc778b..a976b4d29892 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -238,17 +238,6 @@ void dccp_destroy_sock(struct sock *sk)
EXPORT_SYMBOL_GPL(dccp_destroy_sock);
-static inline int dccp_listen_start(struct sock *sk, int backlog)
-{
- struct dccp_sock *dp = dccp_sk(sk);
-
- dp->dccps_role = DCCP_ROLE_LISTEN;
- /* do not start to listen if feature negotiation setup fails */
- if (dccp_feat_finalise_settings(dp))
- return -EPROTO;
- return inet_csk_listen_start(sk, backlog);
-}
-
static inline int dccp_need_reset(int state)
{
return state != DCCP_CLOSED && state != DCCP_LISTEN &&
@@ -931,11 +920,17 @@ int inet_dccp_listen(struct socket *sock, int backlog)
* we can only allow the backlog to be adjusted.
*/
if (old_state != DCCP_LISTEN) {
- /*
- * FIXME: here it probably should be sk->sk_prot->listen_start
- * see tcp_listen_start
- */
- err = dccp_listen_start(sk, backlog);
+ struct dccp_sock *dp = dccp_sk(sk);
+
+ dp->dccps_role = DCCP_ROLE_LISTEN;
+
+ /* do not start to listen if feature negotiation setup fails */
+ if (dccp_feat_finalise_settings(dp)) {
+ err = -EPROTO;
+ goto out;
+ }
+
+ err = inet_csk_listen_start(sk);
if (err)
goto out;
}
diff --git a/net/dccp/trace.h b/net/dccp/trace.h
index 5062421beee9..5a43b3508c7f 100644
--- a/net/dccp/trace.h
+++ b/net/dccp/trace.h
@@ -60,9 +60,7 @@ TRACE_EVENT(dccp_probe,
__entry->tx_t_ipi = hc->tx_t_ipi;
} else {
__entry->tx_s = 0;
- memset(&__entry->tx_rtt, 0, (void *)&__entry->tx_t_ipi -
- (void *)&__entry->tx_rtt +
- sizeof(__entry->tx_t_ipi));
+ memset_startat(__entry, 0, tx_rtt);
}
),
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index 7ab788f41a3f..c59be5b04479 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -38,6 +38,7 @@
*******************************************************************************/
#include <linux/errno.h>
+#include <linux/filter.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 4a4e3c17740c..ee73057529cf 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -101,10 +101,6 @@ errout:
return err;
}
-static const struct nla_policy dn_fib_rule_policy[FRA_MAX+1] = {
- FRA_GENERIC_POLICY,
-};
-
static int dn_fib_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
@@ -235,7 +231,6 @@ static const struct fib_rules_ops __net_initconst dn_fib_rules_ops_template = {
.fill = dn_fib_rule_fill,
.flush_cache = dn_fib_rule_flush_cache,
.nlgroup = RTNLGRP_DECnet_RULE,
- .policy = dn_fib_rule_policy,
.owner = THIS_MODULE,
.fro_net = &init_net,
};
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index ea5169e671ae..d9d0d227092c 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -406,7 +406,7 @@ EXPORT_SYMBOL_GPL(dsa_devlink_resource_register);
void dsa_devlink_resources_unregister(struct dsa_switch *ds)
{
- devlink_resources_unregister(ds->devlink, NULL);
+ devlink_resources_unregister(ds->devlink);
}
EXPORT_SYMBOL_GPL(dsa_devlink_resources_unregister);
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 826957b6442b..3d21521453fe 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -129,35 +129,52 @@ void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
}
}
+struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
+ const struct net_device *br)
+{
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dsa_port_bridge_dev_get(dp) == br)
+ return dp->bridge;
+
+ return NULL;
+}
+
static int dsa_bridge_num_find(const struct net_device *bridge_dev)
{
struct dsa_switch_tree *dst;
- struct dsa_port *dp;
- /* When preparing the offload for a port, it will have a valid
- * dp->bridge_dev pointer but a not yet valid dp->bridge_num.
- * However there might be other ports having the same dp->bridge_dev
- * and a valid dp->bridge_num, so just ignore this port.
- */
- list_for_each_entry(dst, &dsa_tree_list, list)
- list_for_each_entry(dp, &dst->ports, list)
- if (dp->bridge_dev == bridge_dev &&
- dp->bridge_num != -1)
- return dp->bridge_num;
+ list_for_each_entry(dst, &dsa_tree_list, list) {
+ struct dsa_bridge *bridge;
+
+ bridge = dsa_tree_bridge_find(dst, bridge_dev);
+ if (bridge)
+ return bridge->num;
+ }
- return -1;
+ return 0;
}
-int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
+unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
{
- int bridge_num = dsa_bridge_num_find(bridge_dev);
+ unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);
- if (bridge_num < 0) {
- /* First port that offloads TX forwarding for this bridge */
- bridge_num = find_first_zero_bit(&dsa_fwd_offloading_bridges,
- DSA_MAX_NUM_OFFLOADING_BRIDGES);
+ /* Switches without FDB isolation support don't get unique
+ * bridge numbering
+ */
+ if (!max)
+ return 0;
+
+ if (!bridge_num) {
+ /* First port that requests FDB isolation or TX forwarding
+ * offload for this bridge
+ */
+ bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
+ DSA_MAX_NUM_OFFLOADING_BRIDGES,
+ 1);
if (bridge_num >= max)
- return -1;
+ return 0;
set_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
@@ -165,13 +182,14 @@ int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
return bridge_num;
}
-void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num)
+void dsa_bridge_num_put(const struct net_device *bridge_dev,
+ unsigned int bridge_num)
{
- /* Check if the bridge is still in use, otherwise it is time
- * to clean it up so we can reuse this bridge_num later.
+ /* Since we refcount bridges, we know that when we call this function
+ * it is no longer in use, so we can just go ahead and remove it from
+ * the bit mask.
*/
- if (dsa_bridge_num_find(bridge_dev) < 0)
- clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
+ clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
}
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
@@ -543,6 +561,7 @@ static void dsa_port_teardown(struct dsa_port *dp)
struct devlink_port *dlp = &dp->devlink_port;
struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a, *tmp;
+ struct net_device *slave;
if (!dp->setup)
return;
@@ -564,9 +583,11 @@ static void dsa_port_teardown(struct dsa_port *dp)
dsa_port_link_unregister_of(dp);
break;
case DSA_PORT_TYPE_USER:
- if (dp->slave) {
- dsa_slave_destroy(dp->slave);
+ slave = dp->slave;
+
+ if (slave) {
dp->slave = NULL;
+ dsa_slave_destroy(slave);
}
break;
}
@@ -804,7 +825,7 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
int err;
if (tag_ops->proto == dst->default_proto)
- return 0;
+ goto connect;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
rtnl_lock();
@@ -818,7 +839,30 @@ static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
}
}
+connect:
+ if (tag_ops->connect) {
+ err = tag_ops->connect(ds);
+ if (err)
+ return err;
+ }
+
+ if (ds->ops->connect_tag_protocol) {
+ err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
+ if (err) {
+ dev_err(ds->dev,
+ "Unable to connect to tag protocol \"%s\": %pe\n",
+ tag_ops->name, ERR_PTR(err));
+ goto disconnect;
+ }
+ }
+
return 0;
+
+disconnect:
+ if (tag_ops->disconnect)
+ tag_ops->disconnect(ds);
+
+ return err;
}
static int dsa_switch_setup(struct dsa_switch *ds)
@@ -962,23 +1006,28 @@ static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
dsa_switch_teardown(dp->ds);
}
-static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
+/* Bring shared ports up first, then non-shared ports */
+static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
- int err;
+ int err = 0;
list_for_each_entry(dp, &dst->ports, list) {
- err = dsa_switch_setup(dp->ds);
- if (err)
- goto teardown;
+ if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
+ err = dsa_port_setup(dp);
+ if (err)
+ goto teardown;
+ }
}
list_for_each_entry(dp, &dst->ports, list) {
- err = dsa_port_setup(dp);
- if (err) {
- err = dsa_port_reinit_as_unused(dp);
- if (err)
- goto teardown;
+ if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
+ err = dsa_port_setup(dp);
+ if (err) {
+ err = dsa_port_reinit_as_unused(dp);
+ if (err)
+ goto teardown;
+ }
}
}
@@ -987,7 +1036,21 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
teardown:
dsa_tree_teardown_ports(dst);
- dsa_tree_teardown_switches(dst);
+ return err;
+}
+
+static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
+{
+ struct dsa_port *dp;
+ int err = 0;
+
+ list_for_each_entry(dp, &dst->ports, list) {
+ err = dsa_switch_setup(dp->ds);
+ if (err) {
+ dsa_tree_teardown_switches(dst);
+ break;
+ }
+ }
return err;
}
@@ -997,6 +1060,8 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
struct dsa_port *dp;
int err;
+ rtnl_lock();
+
list_for_each_entry(dp, &dst->ports, list) {
if (dsa_port_is_cpu(dp)) {
err = dsa_master_setup(dp->master, dp);
@@ -1005,6 +1070,8 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
}
}
+ rtnl_unlock();
+
return 0;
}
@@ -1012,9 +1079,13 @@ static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
struct dsa_port *dp;
+ rtnl_lock();
+
list_for_each_entry(dp, &dst->ports, list)
if (dsa_port_is_cpu(dp))
dsa_master_teardown(dp->master);
+
+ rtnl_unlock();
}
static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
@@ -1070,20 +1141,25 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
if (err)
goto teardown_switches;
- err = dsa_tree_setup_lags(dst);
+ err = dsa_tree_setup_ports(dst);
if (err)
goto teardown_master;
+ err = dsa_tree_setup_lags(dst);
+ if (err)
+ goto teardown_ports;
+
dst->setup = true;
pr_info("DSA: tree %d setup\n", dst->index);
return 0;
+teardown_ports:
+ dsa_tree_teardown_ports(dst);
teardown_master:
dsa_tree_teardown_master(dst);
teardown_switches:
- dsa_tree_teardown_ports(dst);
dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
dsa_tree_teardown_cpu_ports(dst);
@@ -1100,10 +1176,10 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dsa_tree_teardown_lags(dst);
- dsa_tree_teardown_master(dst);
-
dsa_tree_teardown_ports(dst);
+ dsa_tree_teardown_master(dst);
+
dsa_tree_teardown_switches(dst);
dsa_tree_teardown_cpu_ports(dst);
@@ -1118,6 +1194,37 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
dst->setup = false;
}
+static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
+ const struct dsa_device_ops *tag_ops)
+{
+ const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
+ struct dsa_notifier_tag_proto_info info;
+ int err;
+
+ dst->tag_ops = tag_ops;
+
+ /* Notify the switches from this tree about the connection
+ * to the new tagger
+ */
+ info.tag_ops = tag_ops;
+ err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
+ if (err && err != -EOPNOTSUPP)
+ goto out_disconnect;
+
+ /* Notify the old tagger about the disconnection from this tree */
+ info.tag_ops = old_tag_ops;
+ dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
+
+ return 0;
+
+out_disconnect:
+ info.tag_ops = tag_ops;
+ dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
+ dst->tag_ops = old_tag_ops;
+
+ return err;
+}
+
/* Since the dsa/tagging sysfs device attribute is per master, the assumption
* is that all DSA switches within a tree share the same tagger, otherwise
* they would have formed disjoint trees (different "dsa,member" values).
@@ -1150,12 +1257,15 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
goto out_unlock;
}
+ /* Notify the tag protocol change */
info.tag_ops = tag_ops;
err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
if (err)
- goto out_unwind_tagger;
+ return err;
- dst->tag_ops = tag_ops;
+ err = dsa_tree_bind_tag_proto(dst, tag_ops);
+ if (err)
+ goto out_unwind_tagger;
rtnl_unlock();
@@ -1184,7 +1294,6 @@ static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
dp->ds = ds;
dp->index = index;
- dp->bridge_num = -1;
INIT_LIST_HEAD(&dp->list);
list_add_tail(&dp->list, &dst->ports);
@@ -1366,7 +1475,7 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
}
if (reg >= ds->num_ports) {
- dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
+ dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
port, reg, ds->num_ports);
of_node_put(port);
err = -EINVAL;
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index a5c9bc7b66c6..760306f0012f 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -8,6 +8,7 @@
#define __DSA_PRIV_H
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
@@ -24,8 +25,6 @@ enum {
DSA_NOTIFIER_FDB_DEL,
DSA_NOTIFIER_HOST_FDB_ADD,
DSA_NOTIFIER_HOST_FDB_DEL,
- DSA_NOTIFIER_HSR_JOIN,
- DSA_NOTIFIER_HSR_LEAVE,
DSA_NOTIFIER_LAG_CHANGE,
DSA_NOTIFIER_LAG_JOIN,
DSA_NOTIFIER_LAG_LEAVE,
@@ -37,10 +36,8 @@ enum {
DSA_NOTIFIER_VLAN_DEL,
DSA_NOTIFIER_MTU,
DSA_NOTIFIER_TAG_PROTO,
- DSA_NOTIFIER_MRP_ADD,
- DSA_NOTIFIER_MRP_DEL,
- DSA_NOTIFIER_MRP_ADD_RING_ROLE,
- DSA_NOTIFIER_MRP_DEL_RING_ROLE,
+ DSA_NOTIFIER_TAG_PROTO_CONNECT,
+ DSA_NOTIFIER_TAG_PROTO_DISCONNECT,
DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
};
@@ -52,10 +49,11 @@ struct dsa_notifier_ageing_time_info {
/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
- struct net_device *br;
+ struct dsa_bridge bridge;
int tree_index;
int sw_index;
int port;
+ bool tx_fwd_offload;
};
/* DSA_NOTIFIER_FDB_* */
@@ -103,20 +101,6 @@ struct dsa_notifier_tag_proto_info {
const struct dsa_device_ops *tag_ops;
};
-/* DSA_NOTIFIER_MRP_* */
-struct dsa_notifier_mrp_info {
- const struct switchdev_obj_mrp *mrp;
- int sw_index;
- int port;
-};
-
-/* DSA_NOTIFIER_MRP_* */
-struct dsa_notifier_mrp_ring_role_info {
- const struct switchdev_obj_ring_role_mrp *mrp;
- int sw_index;
- int port;
-};
-
/* DSA_NOTIFIER_TAG_8021Q_VLAN_* */
struct dsa_notifier_tag_8021q_vlan_info {
int tree_index;
@@ -139,13 +123,6 @@ struct dsa_switchdev_event_work {
bool host_addr;
};
-/* DSA_NOTIFIER_HSR_* */
-struct dsa_notifier_hsr_info {
- struct net_device *hsr;
- int sw_index;
- int port;
-};
-
struct dsa_slave_priv {
/* Copy of CPU port xmit for faster access in slave transmit hot path */
struct sk_buff * (*xmit)(struct sk_buff *skb,
@@ -258,54 +235,13 @@ int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
const struct switchdev_obj_ring_role_mrp *mrp);
+int dsa_port_phylink_create(struct dsa_port *dp);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
-extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
-
-static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
- const struct net_device *dev)
-{
- return dsa_port_to_bridge_port(dp) == dev;
-}
-
-static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
- const struct net_device *bridge_dev)
-{
- /* DSA ports connected to a bridge, and event was emitted
- * for the bridge.
- */
- return dp->bridge_dev == bridge_dev;
-}
-
-/* Returns true if any port of this tree offloads the given net_device */
-static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
- const struct net_device *dev)
-{
- struct dsa_port *dp;
-
- list_for_each_entry(dp, &dst->ports, list)
- if (dsa_port_offloads_bridge_port(dp, dev))
- return true;
-
- return false;
-}
-
-/* Returns true if any port of this tree offloads the given bridge */
-static inline bool dsa_tree_offloads_bridge(struct dsa_switch_tree *dst,
- const struct net_device *bridge_dev)
-{
- struct dsa_port *dp;
-
- list_for_each_entry(dp, &dst->ports, list)
- if (dsa_port_offloads_bridge(dp, bridge_dev))
- return true;
-
- return false;
-}
/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
@@ -345,7 +281,7 @@ dsa_slave_to_master(const struct net_device *dev)
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
- struct net_device *br = dp->bridge_dev;
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct net_device *dev = skb->dev;
struct net_device *upper_dev;
u16 vid, pvid, proto;
@@ -415,7 +351,7 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
if (dp->type != DSA_PORT_TYPE_USER)
continue;
- if (!dp->bridge_dev)
+ if (!dp->bridge)
continue;
if (dp->stp_state != BR_STATE_LEARNING &&
@@ -444,7 +380,7 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
/* If the ingress port offloads the bridge, we mark the frame as autonomously
* forwarded by hardware, so the software bridge doesn't forward in twice, back
* to us, because we already did. However, if we're in fallback mode and we do
- * software bridging, we are not offloading it, therefore the dp->bridge_dev
+ * software bridging, we are not offloading it, therefore the dp->bridge
* pointer is not populated, and flooding needs to be done by software (we are
* effectively operating in standalone ports mode).
*/
@@ -452,7 +388,7 @@ static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
{
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
- skb->offload_fwd_mark = !!(dp->bridge_dev);
+ skb->offload_fwd_mark = !!(dp->bridge);
}
/* Helper for removing DSA header tags from packets in the RX path.
@@ -546,8 +482,11 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
struct net_device *master,
const struct dsa_device_ops *tag_ops,
const struct dsa_device_ops *old_tag_ops);
-int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
-void dsa_bridge_num_put(const struct net_device *bridge_dev, int bridge_num);
+unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
+void dsa_bridge_num_put(const struct net_device *bridge_dev,
+ unsigned int bridge_num);
+struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
+ const struct net_device *br);
/* tag_8021q.c */
int dsa_tag_8021q_bridge_join(struct dsa_switch *ds,
diff --git a/net/dsa/master.c b/net/dsa/master.c
index e8e19857621b..2199104ca7df 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -267,9 +267,9 @@ static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
if (!ops->promisc_on_master)
return;
- rtnl_lock();
+ ASSERT_RTNL();
+
dev_set_promiscuity(dev, inc);
- rtnl_unlock();
}
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
@@ -330,28 +330,13 @@ static const struct attribute_group dsa_group = {
.attrs = dsa_slave_attrs,
};
-static void dsa_master_reset_mtu(struct net_device *dev)
-{
- int err;
-
- rtnl_lock();
- err = dev_set_mtu(dev, ETH_DATA_LEN);
- if (err)
- netdev_dbg(dev,
- "Unable to reset MTU to exclude DSA overheads\n");
- rtnl_unlock();
-}
-
static struct lock_class_key dsa_master_addr_list_lock_key;
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
- const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
struct dsa_switch *ds = cpu_dp->ds;
struct device_link *consumer_link;
- int mtu, ret;
-
- mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
+ int ret;
/* The DSA master must use SET_NETDEV_DEV for this to work. */
consumer_link = device_link_add(ds->dev, dev->dev.parent,
@@ -361,13 +346,6 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
"Failed to create a device link to DSA switch %s\n",
dev_name(ds->dev));
- rtnl_lock();
- ret = dev_set_mtu(dev, mtu);
- rtnl_unlock();
- if (ret)
- netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
- ret, mtu);
-
/* If we use a tagging format that doesn't have an ethertype
* field, make sure that all packets from this point on get
* sent to the tag format's receive function.
@@ -405,7 +383,6 @@ void dsa_master_teardown(struct net_device *dev)
sysfs_remove_group(&dev->dev.kobj, &dsa_group);
dsa_netdev_ops_set(dev, NULL);
dsa_master_ethtool_teardown(dev);
- dsa_master_reset_mtu(dev);
dsa_master_set_promiscuity(dev, -1);
dev->dsa_ptr = NULL;
diff --git a/net/dsa/port.c b/net/dsa/port.c
index f6f12ad2b525..bd78192e0e47 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -130,7 +130,7 @@ int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
return err;
}
- if (!dp->bridge_dev)
+ if (!dp->bridge)
dsa_port_set_state_now(dp, BR_STATE_FORWARDING, false);
if (dp->pl)
@@ -158,7 +158,7 @@ void dsa_port_disable_rt(struct dsa_port *dp)
if (dp->pl)
phylink_stop(dp->pl);
- if (!dp->bridge_dev)
+ if (!dp->bridge)
dsa_port_set_state_now(dp, BR_STATE_DISABLED, false);
if (ds->ops->port_disable)
@@ -221,7 +221,7 @@ static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp,
struct netlink_ext_ack *extack)
{
struct net_device *brport_dev = dsa_port_to_bridge_port(dp);
- struct net_device *br = dp->bridge_dev;
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
int err;
err = dsa_port_inherit_brport_flags(dp, extack);
@@ -270,52 +270,55 @@ static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
*/
}
-static void dsa_port_bridge_tx_fwd_unoffload(struct dsa_port *dp,
- struct net_device *bridge_dev)
+static int dsa_port_bridge_create(struct dsa_port *dp,
+ struct net_device *br,
+ struct netlink_ext_ack *extack)
{
- int bridge_num = dp->bridge_num;
struct dsa_switch *ds = dp->ds;
+ struct dsa_bridge *bridge;
- /* No bridge TX forwarding offload => do nothing */
- if (!ds->ops->port_bridge_tx_fwd_unoffload || dp->bridge_num == -1)
- return;
+ bridge = dsa_tree_bridge_find(ds->dst, br);
+ if (bridge) {
+ refcount_inc(&bridge->refcount);
+ dp->bridge = bridge;
+ return 0;
+ }
- dp->bridge_num = -1;
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
- dsa_bridge_num_put(bridge_dev, bridge_num);
+ refcount_set(&bridge->refcount, 1);
- /* Notify the chips only once the offload has been deactivated, so
- * that they can update their configuration accordingly.
- */
- ds->ops->port_bridge_tx_fwd_unoffload(ds, dp->index, bridge_dev,
- bridge_num);
+ bridge->dev = br;
+
+ bridge->num = dsa_bridge_num_get(br, ds->max_num_bridges);
+ if (ds->max_num_bridges && !bridge->num) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Range of offloadable bridges exceeded");
+ kfree(bridge);
+ return -EOPNOTSUPP;
+ }
+
+ dp->bridge = bridge;
+
+ return 0;
}
-static bool dsa_port_bridge_tx_fwd_offload(struct dsa_port *dp,
- struct net_device *bridge_dev)
+static void dsa_port_bridge_destroy(struct dsa_port *dp,
+ const struct net_device *br)
{
- struct dsa_switch *ds = dp->ds;
- int bridge_num, err;
+ struct dsa_bridge *bridge = dp->bridge;
- if (!ds->ops->port_bridge_tx_fwd_offload)
- return false;
+ dp->bridge = NULL;
- bridge_num = dsa_bridge_num_get(bridge_dev,
- ds->num_fwd_offloading_bridges);
- if (bridge_num < 0)
- return false;
-
- dp->bridge_num = bridge_num;
+ if (!refcount_dec_and_test(&bridge->refcount))
+ return;
- /* Notify the driver */
- err = ds->ops->port_bridge_tx_fwd_offload(ds, dp->index, bridge_dev,
- bridge_num);
- if (err) {
- dsa_port_bridge_tx_fwd_unoffload(dp, bridge_dev);
- return false;
- }
+ if (bridge->num)
+ dsa_bridge_num_put(br, bridge->num);
- return true;
+ kfree(bridge);
}
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
@@ -325,30 +328,32 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
.tree_index = dp->ds->dst->index,
.sw_index = dp->ds->index,
.port = dp->index,
- .br = br,
};
struct net_device *dev = dp->slave;
struct net_device *brport_dev;
- bool tx_fwd_offload;
int err;
/* Here the interface is already bridged. Reflect the current
* configuration so that drivers can program their chips accordingly.
*/
- dp->bridge_dev = br;
+ err = dsa_port_bridge_create(dp, br, extack);
+ if (err)
+ return err;
brport_dev = dsa_port_to_bridge_port(dp);
+ info.bridge = *dp->bridge;
err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_JOIN, &info);
if (err)
goto out_rollback;
- tx_fwd_offload = dsa_port_bridge_tx_fwd_offload(dp, br);
+ /* Drivers which support bridge TX forwarding should set this */
+ dp->bridge->tx_fwd_offload = info.tx_fwd_offload;
err = switchdev_bridge_port_offload(brport_dev, dev, dp,
&dsa_slave_switchdev_notifier,
&dsa_slave_switchdev_blocking_notifier,
- tx_fwd_offload, extack);
+ dp->bridge->tx_fwd_offload, extack);
if (err)
goto out_rollback_unbridge;
@@ -365,7 +370,7 @@ out_rollback_unoffload:
out_rollback_unbridge:
dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
out_rollback:
- dp->bridge_dev = NULL;
+ dsa_port_bridge_destroy(dp, br);
return err;
}
@@ -390,16 +395,14 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
.tree_index = dp->ds->dst->index,
.sw_index = dp->ds->index,
.port = dp->index,
- .br = br,
+ .bridge = *dp->bridge,
};
int err;
/* Here the port is already unbridged. Reflect the current configuration
* so that drivers can program their chips accordingly.
*/
- dp->bridge_dev = NULL;
-
- dsa_port_bridge_tx_fwd_unoffload(dp, br);
+ dsa_port_bridge_destroy(dp, br);
err = dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
if (err)
@@ -477,12 +480,15 @@ err_lag_join:
void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
- if (dp->bridge_dev)
- dsa_port_pre_bridge_leave(dp, dp->bridge_dev);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
+
+ if (br)
+ dsa_port_pre_bridge_leave(dp, br);
}
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
{
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct dsa_notifier_lag_info info = {
.sw_index = dp->ds->index,
.port = dp->index,
@@ -496,8 +502,8 @@ void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag)
/* Port might have been part of a LAG that in turn was
* attached to a bridge.
*/
- if (dp->bridge_dev)
- dsa_port_bridge_leave(dp, dp->bridge_dev);
+ if (br)
+ dsa_port_bridge_leave(dp, br);
dp->lag_tx_enabled = false;
dp->lag_dev = NULL;
@@ -526,8 +532,8 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
* as long as we have 8021q uppers.
*/
if (vlan_filtering && dsa_port_is_user(dp)) {
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct net_device *upper_dev, *slave = dp->slave;
- struct net_device *br = dp->bridge_dev;
struct list_head *iter;
netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
@@ -561,17 +567,15 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
* different setting than what is being requested.
*/
dsa_switch_for_each_port(other_dp, ds) {
- struct net_device *other_bridge;
+ struct net_device *other_br = dsa_port_bridge_dev_get(other_dp);
- other_bridge = other_dp->bridge_dev;
- if (!other_bridge)
- continue;
/* If it's the same bridge, it also has same
* vlan_filtering setting => no need to check
*/
- if (other_bridge == dp->bridge_dev)
+ if (!other_br || other_br == dsa_port_bridge_dev_get(dp))
continue;
- if (br_vlan_enabled(other_bridge) != vlan_filtering) {
+
+ if (br_vlan_enabled(other_br) != vlan_filtering) {
NL_SET_ERR_MSG_MOD(extack,
"VLAN filtering is a global setting");
return false;
@@ -655,13 +659,13 @@ restore:
*/
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp)
{
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct dsa_switch *ds = dp->ds;
- if (!dp->bridge_dev)
+ if (!br)
return false;
- return (!ds->configure_vlan_while_not_filtering &&
- !br_vlan_enabled(dp->bridge_dev));
+ return !ds->configure_vlan_while_not_filtering && !br_vlan_enabled(br);
}
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock)
@@ -903,49 +907,45 @@ int dsa_port_vlan_del(struct dsa_port *dp,
int dsa_port_mrp_add(const struct dsa_port *dp,
const struct switchdev_obj_mrp *mrp)
{
- struct dsa_notifier_mrp_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .mrp = mrp,
- };
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_mrp_add)
+ return -EOPNOTSUPP;
- return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD, &info);
+ return ds->ops->port_mrp_add(ds, dp->index, mrp);
}
int dsa_port_mrp_del(const struct dsa_port *dp,
const struct switchdev_obj_mrp *mrp)
{
- struct dsa_notifier_mrp_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .mrp = mrp,
- };
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_mrp_del)
+ return -EOPNOTSUPP;
- return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL, &info);
+ return ds->ops->port_mrp_del(ds, dp->index, mrp);
}
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
const struct switchdev_obj_ring_role_mrp *mrp)
{
- struct dsa_notifier_mrp_ring_role_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .mrp = mrp,
- };
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_mrp_add_ring_role)
+ return -EOPNOTSUPP;
- return dsa_port_notify(dp, DSA_NOTIFIER_MRP_ADD_RING_ROLE, &info);
+ return ds->ops->port_mrp_add_ring_role(ds, dp->index, mrp);
}
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
const struct switchdev_obj_ring_role_mrp *mrp)
{
- struct dsa_notifier_mrp_ring_role_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .mrp = mrp,
- };
+ struct dsa_switch *ds = dp->ds;
+
+ if (!ds->ops->port_mrp_del_ring_role)
+ return -EOPNOTSUPP;
- return dsa_port_notify(dp, DSA_NOTIFIER_MRP_DEL_RING_ROLE, &info);
+ return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp);
}
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
@@ -981,8 +981,11 @@ static void dsa_port_phylink_validate(struct phylink_config *config,
struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
struct dsa_switch *ds = dp->ds;
- if (!ds->ops->phylink_validate)
+ if (!ds->ops->phylink_validate) {
+ if (config->mac_capabilities)
+ phylink_generic_validate(config, supported, state);
return;
+ }
ds->ops->phylink_validate(ds, dp->index, supported, state);
}
@@ -1072,7 +1075,7 @@ static void dsa_port_phylink_mac_link_up(struct phylink_config *config,
speed, duplex, tx_pause, rx_pause);
}
-const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
+static const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
.validate = dsa_port_phylink_validate,
.mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state,
.mac_config = dsa_port_phylink_mac_config,
@@ -1081,6 +1084,36 @@ const struct phylink_mac_ops dsa_port_phylink_mac_ops = {
.mac_link_up = dsa_port_phylink_mac_link_up,
};
+int dsa_port_phylink_create(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ phy_interface_t mode;
+ int err;
+
+ err = of_get_phy_mode(dp->dn, &mode);
+ if (err)
+ mode = PHY_INTERFACE_MODE_NA;
+
+ /* Presence of phylink_mac_link_state or phylink_mac_an_restart is
+ * an indicator of a legacy phylink driver.
+ */
+ if (ds->ops->phylink_mac_link_state ||
+ ds->ops->phylink_mac_an_restart)
+ dp->pl_config.legacy_pre_march2020 = true;
+
+ if (ds->ops->phylink_get_caps)
+ ds->ops->phylink_get_caps(ds, dp->index, &dp->pl_config);
+
+ dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(dp->dn),
+ mode, &dsa_port_phylink_mac_ops);
+ if (IS_ERR(dp->pl)) {
+ pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
+ return PTR_ERR(dp->pl);
+ }
+
+ return 0;
+}
+
static int dsa_port_setup_phy_of(struct dsa_port *dp, bool enable)
{
struct dsa_switch *ds = dp->ds;
@@ -1157,27 +1190,15 @@ static int dsa_port_phylink_register(struct dsa_port *dp)
{
struct dsa_switch *ds = dp->ds;
struct device_node *port_dn = dp->dn;
- phy_interface_t mode;
int err;
- err = of_get_phy_mode(port_dn, &mode);
- if (err)
- mode = PHY_INTERFACE_MODE_NA;
-
dp->pl_config.dev = ds->dev;
dp->pl_config.type = PHYLINK_DEV;
dp->pl_config.pcs_poll = ds->pcs_poll;
- if (ds->ops->phylink_get_interfaces)
- ds->ops->phylink_get_interfaces(ds, dp->index,
- dp->pl_config.supported_interfaces);
-
- dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn),
- mode, &dsa_port_phylink_mac_ops);
- if (IS_ERR(dp->pl)) {
- pr_err("error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
- return PTR_ERR(dp->pl);
- }
+ err = dsa_port_phylink_create(dp);
+ if (err)
+ return err;
err = phylink_of_phy_connect(dp->pl, port_dn, 0);
if (err && err != -ENODEV) {
@@ -1296,16 +1317,15 @@ EXPORT_SYMBOL_GPL(dsa_port_get_phy_sset_count);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
{
- struct dsa_notifier_hsr_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .hsr = hsr,
- };
+ struct dsa_switch *ds = dp->ds;
int err;
+ if (!ds->ops->port_hsr_join)
+ return -EOPNOTSUPP;
+
dp->hsr_dev = hsr;
- err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_JOIN, &info);
+ err = ds->ops->port_hsr_join(ds, dp->index, hsr);
if (err)
dp->hsr_dev = NULL;
@@ -1314,20 +1334,18 @@ int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr)
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr)
{
- struct dsa_notifier_hsr_info info = {
- .sw_index = dp->ds->index,
- .port = dp->index,
- .hsr = hsr,
- };
+ struct dsa_switch *ds = dp->ds;
int err;
dp->hsr_dev = NULL;
- err = dsa_port_notify(dp, DSA_NOTIFIER_HSR_LEAVE, &info);
- if (err)
- dev_err(dp->ds->dev,
- "port %d failed to notify DSA_NOTIFIER_HSR_LEAVE: %pe\n",
- dp->index, ERR_PTR(err));
+ if (ds->ops->port_hsr_leave) {
+ err = ds->ops->port_hsr_leave(ds, dp->index, hsr);
+ if (err)
+ dev_err(dp->ds->dev,
+ "port %d failed to leave HSR %s: %pe\n",
+ dp->index, hsr->name, ERR_PTR(err));
+ }
}
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast)
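Bridge join/leave notifications now carry a struct dsa_bridge by value plus a tx_fwd_offload request flag, so drivers see the bridge net_device and its number directly and can opt in to TX forwarding offload from ->port_bridge_join(). A hedged sketch of a driver callback under the new prototype; the example_* helpers are hypothetical, the arguments follow the dsa_switch_bridge_join() hunk in net/dsa/switch.c below:

/* Sketch only, not part of the patch. */
static int example_port_bridge_join(struct dsa_switch *ds, int port,
				    struct dsa_bridge bridge,
				    bool *tx_fwd_offload)
{
	struct example_switch *priv = ds->priv;
	int err;

	/* bridge.dev is the bridge net_device, bridge.num the non-zero
	 * number handed out by dsa_bridge_num_get() when the switch
	 * declares max_num_bridges.
	 */
	err = example_join_bridge(priv, port, bridge.dev);
	if (err)
		return err;

	/* Request bridge TX forwarding offload for this bridge */
	if (bridge.num) {
		example_assign_vbridge(priv, port, bridge.num);
		*tx_fwd_offload = true;
	}

	return 0;
}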
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index ad61f6bc8886..22241afcac81 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -289,14 +289,14 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
ret = dsa_port_set_state(dp, attr->u.stp_state, true);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering,
extack);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev))
return -EOPNOTSUPP;
ret = dsa_port_ageing_time(dp, attr->u.ageing_time);
@@ -363,7 +363,7 @@ static int dsa_slave_vlan_add(struct net_device *dev,
/* Deny adding a bridge VLAN when there is already an 802.1Q upper with
* the same VID.
*/
- if (br_vlan_enabled(dp->bridge_dev)) {
+ if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) {
rcu_read_lock();
err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
rcu_read_unlock();
@@ -409,7 +409,7 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_HOST_MDB:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
@@ -421,13 +421,13 @@ static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx,
err = dsa_slave_vlan_add(dev, obj, extack);
break;
case SWITCHDEV_OBJ_ID_MRP:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj));
break;
case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_add_ring_role(dp,
@@ -483,7 +483,7 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
case SWITCHDEV_OBJ_ID_HOST_MDB:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj));
@@ -495,13 +495,13 @@ static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx,
err = dsa_slave_vlan_del(dev, obj);
break;
case SWITCHDEV_OBJ_ID_MRP:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj));
break;
case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
- if (!dsa_port_offloads_bridge(dp, obj->orig_dev))
+ if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev))
return -EOPNOTSUPP;
err = dsa_port_mrp_del_ring_role(dp,
@@ -1564,7 +1564,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
if (!dp->ds->mtu_enforcement_ingress)
return;
- if (!dp->bridge_dev)
+ if (!dp->bridge)
return;
INIT_LIST_HEAD(&hw_port_list);
@@ -1580,7 +1580,7 @@ static void dsa_bridge_mtu_normalization(struct dsa_port *dp)
if (other_dp->type != DSA_PORT_TYPE_USER)
continue;
- if (other_dp->bridge_dev != dp->bridge_dev)
+ if (!dsa_port_bridge_same(dp, other_dp))
continue;
if (!other_dp->ds->mtu_enforcement_ingress)
@@ -1851,14 +1851,9 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
struct dsa_port *dp = dsa_slave_to_port(slave_dev);
struct device_node *port_dn = dp->dn;
struct dsa_switch *ds = dp->ds;
- phy_interface_t mode;
u32 phy_flags = 0;
int ret;
- ret = of_get_phy_mode(port_dn, &mode);
- if (ret)
- mode = PHY_INTERFACE_MODE_NA;
-
dp->pl_config.dev = &slave_dev->dev;
dp->pl_config.type = PHYLINK_NETDEV;
@@ -1871,17 +1866,9 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
dp->pl_config.poll_fixed_state = true;
}
- if (ds->ops->phylink_get_interfaces)
- ds->ops->phylink_get_interfaces(ds, dp->index,
- dp->pl_config.supported_interfaces);
-
- dp->pl = phylink_create(&dp->pl_config, of_fwnode_handle(port_dn), mode,
- &dsa_port_phylink_mac_ops);
- if (IS_ERR(dp->pl)) {
- netdev_err(slave_dev,
- "error creating PHYLINK: %ld\n", PTR_ERR(dp->pl));
- return PTR_ERR(dp->pl);
- }
+ ret = dsa_port_phylink_create(dp);
+ if (ret)
+ return ret;
if (ds->ops->get_phy_flags)
phy_flags = ds->ops->get_phy_flags(ds, dp->index);
@@ -2024,13 +2011,6 @@ int dsa_slave_create(struct dsa_port *port)
port->slave = slave_dev;
dsa_slave_setup_tagger(slave_dev);
- rtnl_lock();
- ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
- rtnl_unlock();
- if (ret && ret != -EOPNOTSUPP)
- dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
- ret, ETH_DATA_LEN, port->index);
-
netif_carrier_off(slave_dev);
ret = dsa_slave_phy_setup(slave_dev);
@@ -2043,6 +2023,11 @@ int dsa_slave_create(struct dsa_port *port)
rtnl_lock();
+ ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
+ if (ret && ret != -EOPNOTSUPP)
+ dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n",
+ ret, ETH_DATA_LEN, port->index);
+
ret = register_netdevice(slave_dev);
if (ret) {
netdev_err(master, "error %d registering interface %s\n",
@@ -2233,7 +2218,7 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *ext_ack;
- struct net_device *slave;
+ struct net_device *slave, *br;
struct dsa_port *dp;
ext_ack = netdev_notifier_info_to_extack(&info->info);
@@ -2246,11 +2231,12 @@ dsa_prevent_bridging_8021q_upper(struct net_device *dev,
return NOTIFY_DONE;
dp = dsa_slave_to_port(slave);
- if (!dp->bridge_dev)
+ br = dsa_port_bridge_dev_get(dp);
+ if (!br)
return NOTIFY_DONE;
/* Deny enslaving a VLAN device into a VLAN-aware bridge */
- if (br_vlan_enabled(dp->bridge_dev) &&
+ if (br_vlan_enabled(br) &&
netif_is_bridge_master(info->upper_dev) && info->linking) {
NL_SET_ERR_MSG_MOD(ext_ack,
"Cannot enslave VLAN device into VLAN aware bridge");
@@ -2265,7 +2251,7 @@ dsa_slave_check_8021q_upper(struct net_device *dev,
struct netdev_notifier_changeupper_info *info)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct net_device *br = dp->bridge_dev;
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct bridge_vlan_info br_info;
struct netlink_ext_ack *extack;
int err = NOTIFY_DONE;
@@ -2462,7 +2448,7 @@ static bool dsa_foreign_dev_check(const struct net_device *dev,
struct dsa_switch_tree *dst = dp->ds->dst;
if (netif_is_bridge_master(foreign_dev))
- return !dsa_tree_offloads_bridge(dst, foreign_dev);
+ return !dsa_tree_offloads_bridge_dev(dst, foreign_dev);
if (netif_is_bridge_port(foreign_dev))
return !dsa_tree_offloads_bridge_port(dst, foreign_dev);
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index bb155a16d454..e3c7d2627a61 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -95,7 +95,8 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds,
if (!ds->ops->port_bridge_join)
return -EOPNOTSUPP;
- err = ds->ops->port_bridge_join(ds, info->port, info->br);
+ err = ds->ops->port_bridge_join(ds, info->port, info->bridge,
+ &info->tx_fwd_offload);
if (err)
return err;
}
@@ -104,7 +105,7 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds,
ds->ops->crosschip_bridge_join) {
err = ds->ops->crosschip_bridge_join(ds, info->tree_index,
info->sw_index,
- info->port, info->br);
+ info->port, info->bridge);
if (err)
return err;
}
@@ -124,19 +125,20 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
if (dst->index == info->tree_index && ds->index == info->sw_index &&
ds->ops->port_bridge_leave)
- ds->ops->port_bridge_leave(ds, info->port, info->br);
+ ds->ops->port_bridge_leave(ds, info->port, info->bridge);
if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
ds->ops->crosschip_bridge_leave)
ds->ops->crosschip_bridge_leave(ds, info->tree_index,
info->sw_index, info->port,
- info->br);
+ info->bridge);
- if (ds->needs_standalone_vlan_filtering && !br_vlan_enabled(info->br)) {
+ if (ds->needs_standalone_vlan_filtering &&
+ !br_vlan_enabled(info->bridge.dev)) {
change_vlan_filtering = true;
vlan_filtering = true;
} else if (!ds->needs_standalone_vlan_filtering &&
- br_vlan_enabled(info->br)) {
+ br_vlan_enabled(info->bridge.dev)) {
change_vlan_filtering = true;
vlan_filtering = false;
}
@@ -151,11 +153,9 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
*/
if (change_vlan_filtering && ds->vlan_filtering_is_global) {
dsa_switch_for_each_port(dp, ds) {
- struct net_device *bridge_dev;
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
- bridge_dev = dp->bridge_dev;
-
- if (bridge_dev && br_vlan_enabled(bridge_dev)) {
+ if (br && br_vlan_enabled(br)) {
change_vlan_filtering = false;
break;
}
@@ -437,24 +437,6 @@ static int dsa_switch_fdb_del(struct dsa_switch *ds,
return dsa_port_do_fdb_del(dp, info->addr, info->vid);
}
-static int dsa_switch_hsr_join(struct dsa_switch *ds,
- struct dsa_notifier_hsr_info *info)
-{
- if (ds->index == info->sw_index && ds->ops->port_hsr_join)
- return ds->ops->port_hsr_join(ds, info->port, info->hsr);
-
- return -EOPNOTSUPP;
-}
-
-static int dsa_switch_hsr_leave(struct dsa_switch *ds,
- struct dsa_notifier_hsr_info *info)
-{
- if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
- return ds->ops->port_hsr_leave(ds, info->port, info->hsr);
-
- return -EOPNOTSUPP;
-}
-
static int dsa_switch_lag_change(struct dsa_switch *ds,
struct dsa_notifier_lag_info *info)
{
@@ -647,55 +629,57 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
return 0;
}
-static int dsa_switch_mrp_add(struct dsa_switch *ds,
- struct dsa_notifier_mrp_info *info)
-{
- if (!ds->ops->port_mrp_add)
- return -EOPNOTSUPP;
-
- if (ds->index == info->sw_index)
- return ds->ops->port_mrp_add(ds, info->port, info->mrp);
-
- return 0;
-}
-
-static int dsa_switch_mrp_del(struct dsa_switch *ds,
- struct dsa_notifier_mrp_info *info)
+/* We use the same cross-chip notifiers to inform both the tagger side, as well
+ * as the switch side, of connection and disconnection events.
+ * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
+ * switch side doesn't support connecting to this tagger, and therefore, the
+ * fact that we don't disconnect the tagger side doesn't constitute a memory
+ * leak: the tagger will still operate with persistent per-switch memory, just
+ * with the switch side unconnected to it. What does constitute a hard error is
+ * when the switch side supports connecting but fails.
+ */
+static int
+dsa_switch_connect_tag_proto(struct dsa_switch *ds,
+ struct dsa_notifier_tag_proto_info *info)
{
- if (!ds->ops->port_mrp_del)
- return -EOPNOTSUPP;
-
- if (ds->index == info->sw_index)
- return ds->ops->port_mrp_del(ds, info->port, info->mrp);
+ const struct dsa_device_ops *tag_ops = info->tag_ops;
+ int err;
- return 0;
-}
+ /* Notify the new tagger about the connection to this switch */
+ if (tag_ops->connect) {
+ err = tag_ops->connect(ds);
+ if (err)
+ return err;
+ }
-static int
-dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
- struct dsa_notifier_mrp_ring_role_info *info)
-{
- if (!ds->ops->port_mrp_add)
+ if (!ds->ops->connect_tag_protocol)
return -EOPNOTSUPP;
- if (ds->index == info->sw_index)
- return ds->ops->port_mrp_add_ring_role(ds, info->port,
- info->mrp);
+ /* Notify the switch about the connection to the new tagger */
+ err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
+ if (err) {
+ /* Revert the new tagger's connection to this tree */
+ if (tag_ops->disconnect)
+ tag_ops->disconnect(ds);
+ return err;
+ }
return 0;
}
static int
-dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
- struct dsa_notifier_mrp_ring_role_info *info)
+dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
+ struct dsa_notifier_tag_proto_info *info)
{
- if (!ds->ops->port_mrp_del)
- return -EOPNOTSUPP;
+ const struct dsa_device_ops *tag_ops = info->tag_ops;
- if (ds->index == info->sw_index)
- return ds->ops->port_mrp_del_ring_role(ds, info->port,
- info->mrp);
+ /* Notify the tagger about the disconnection from this switch */
+ if (tag_ops->disconnect && ds->tagger_data)
+ tag_ops->disconnect(ds);
+ /* No need to notify the switch, since it shouldn't have any
+ * resources to tear down
+ */
return 0;
}
@@ -727,12 +711,6 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_HOST_FDB_DEL:
err = dsa_switch_host_fdb_del(ds, info);
break;
- case DSA_NOTIFIER_HSR_JOIN:
- err = dsa_switch_hsr_join(ds, info);
- break;
- case DSA_NOTIFIER_HSR_LEAVE:
- err = dsa_switch_hsr_leave(ds, info);
- break;
case DSA_NOTIFIER_LAG_CHANGE:
err = dsa_switch_lag_change(ds, info);
break;
@@ -766,17 +744,11 @@ static int dsa_switch_event(struct notifier_block *nb,
case DSA_NOTIFIER_TAG_PROTO:
err = dsa_switch_change_tag_proto(ds, info);
break;
- case DSA_NOTIFIER_MRP_ADD:
- err = dsa_switch_mrp_add(ds, info);
- break;
- case DSA_NOTIFIER_MRP_DEL:
- err = dsa_switch_mrp_del(ds, info);
- break;
- case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
- err = dsa_switch_mrp_add_ring_role(ds, info);
+ case DSA_NOTIFIER_TAG_PROTO_CONNECT:
+ err = dsa_switch_connect_tag_proto(ds, info);
break;
- case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
- err = dsa_switch_mrp_del_ring_role(ds, info);
+ case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
+ err = dsa_switch_disconnect_tag_proto(ds, info);
break;
case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
err = dsa_switch_tag_8021q_vlan_add(ds, info);
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 72cac2c0af7b..27712a81c967 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -67,10 +67,12 @@
#define DSA_8021Q_PORT(x) (((x) << DSA_8021Q_PORT_SHIFT) & \
DSA_8021Q_PORT_MASK)
-u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num)
+u16 dsa_8021q_bridge_tx_fwd_offload_vid(unsigned int bridge_num)
{
- /* The VBID value of 0 is reserved for precise TX */
- return DSA_8021Q_DIR_TX | DSA_8021Q_VBID(bridge_num + 1);
+ /* The VBID value of 0 is reserved for precise TX, but it is also
+ * reserved/invalid for the bridge_num, so all is well.
+ */
+ return DSA_8021Q_DIR_TX | DSA_8021Q_VBID(bridge_num);
}
EXPORT_SYMBOL_GPL(dsa_8021q_bridge_tx_fwd_offload_vid);
@@ -335,7 +337,7 @@ dsa_port_tag_8021q_bridge_match(struct dsa_port *dp,
return false;
if (dsa_port_is_user(dp))
- return dp->bridge_dev == info->br;
+ return dsa_port_offloads_bridge(dp, &info->bridge);
return false;
}
@@ -408,10 +410,9 @@ int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds,
}
int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
- struct net_device *br,
- int bridge_num)
+ struct dsa_bridge bridge)
{
- u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num);
+ u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num);
return dsa_port_tag_8021q_vlan_add(dsa_to_port(ds, port), tx_vid,
true);
@@ -419,10 +420,9 @@ int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port,
EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_tx_fwd_offload);
void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port,
- struct net_device *br,
- int bridge_num)
+ struct dsa_bridge bridge)
{
- u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num);
+ u16 tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge.num);
dsa_port_tag_8021q_vlan_del(dsa_to_port(ds, port), tx_vid, true);
}
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index b3da4b2ea11c..8abf39dcac64 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -132,6 +132,7 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
u8 *dsa_header;
if (skb->offload_fwd_mark) {
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
struct dsa_switch_tree *dst = dp->ds->dst;
cmd = DSA_CMD_FORWARD;
@@ -140,7 +141,7 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
* packets on behalf of a virtual switch device with an index
* past the physical switches.
*/
- tag_dev = dst->last_switch + 1 + dp->bridge_num;
+ tag_dev = dst->last_switch + bridge_num;
tag_port = 0;
} else {
cmd = DSA_CMD_FROM_CPU;
@@ -165,7 +166,7 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
dsa_header[2] &= ~0x10;
}
} else {
- struct net_device *br = dp->bridge_dev;
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
u16 vid;
vid = br ? MV88E6XXX_VID_BRIDGED : MV88E6XXX_VID_STANDALONE;
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index 4ed74d509d6a..0d81f172b7a6 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -12,7 +12,7 @@
static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp,
u64 *vlan_tci, u64 *tag_type)
{
- struct net_device *br = READ_ONCE(dp->bridge_dev);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
struct vlan_ethhdr *hdr;
u16 proto, tci;
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index a1919ea5e828..68982b2789a5 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -12,25 +12,39 @@
#include <linux/dsa/ocelot.h>
#include "dsa_priv.h"
+struct ocelot_8021q_tagger_private {
+ struct ocelot_8021q_tagger_data data; /* Must be first */
+ struct kthread_worker *xmit_worker;
+};
+
static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp,
struct sk_buff *skb)
{
+ struct ocelot_8021q_tagger_private *priv = dp->ds->tagger_data;
+ struct ocelot_8021q_tagger_data *data = &priv->data;
+ void (*xmit_work_fn)(struct kthread_work *work);
struct felix_deferred_xmit_work *xmit_work;
- struct felix_port *felix_port = dp->priv;
+ struct kthread_worker *xmit_worker;
+
+ xmit_work_fn = data->xmit_work_fn;
+ xmit_worker = priv->xmit_worker;
+
+ if (!xmit_work_fn || !xmit_worker)
+ return NULL;
xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
if (!xmit_work)
return NULL;
/* Calls felix_port_deferred_xmit in felix.c */
- kthread_init_work(&xmit_work->work, felix_port->xmit_work_fn);
+ kthread_init_work(&xmit_work->work, xmit_work_fn);
/* Increase refcount so the kfree_skb in dsa_slave_xmit
* won't really free the packet.
*/
xmit_work->dp = dp;
xmit_work->skb = skb_get(skb);
- kthread_queue_work(felix_port->xmit_worker, &xmit_work->work);
+ kthread_queue_work(xmit_worker, &xmit_work->work);
return NULL;
}
@@ -67,11 +81,43 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
return skb;
}
+static void ocelot_disconnect(struct dsa_switch *ds)
+{
+ struct ocelot_8021q_tagger_private *priv = ds->tagger_data;
+
+ kthread_destroy_worker(priv->xmit_worker);
+ kfree(priv);
+ ds->tagger_data = NULL;
+}
+
+static int ocelot_connect(struct dsa_switch *ds)
+{
+ struct ocelot_8021q_tagger_private *priv;
+ int err;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->xmit_worker = kthread_create_worker(0, "felix_xmit");
+ if (IS_ERR(priv->xmit_worker)) {
+ err = PTR_ERR(priv->xmit_worker);
+ kfree(priv);
+ return err;
+ }
+
+ ds->tagger_data = priv;
+
+ return 0;
+}
+
static const struct dsa_device_ops ocelot_8021q_netdev_ops = {
.name = "ocelot-8021q",
.proto = DSA_TAG_PROTO_OCELOT_8021Q,
.xmit = ocelot_xmit,
.rcv = ocelot_rcv,
+ .connect = ocelot_connect,
+ .disconnect = ocelot_disconnect,
.needed_headroom = VLAN_HLEN,
.promisc_on_master = true,
};
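
The ocelot-8021q tagger above now allocates its own state in .connect() and releases it in .disconnect(), exposing only the leading ocelot_8021q_tagger_data member to the switch driver through ds->tagger_data. Below is a minimal standalone sketch of that "public struct embedded first in a private struct" layout; the names are illustrative, not the kernel's.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_tagger_data {			/* view shared with the switch driver */
	void (*xmit_work_fn)(void *work);
};

struct demo_tagger_private {			/* view owned by the tagger */
	struct demo_tagger_data data;		/* must be first */
	void *xmit_worker;
};

int main(void)
{
	struct demo_tagger_private *priv = calloc(1, sizeof(*priv));
	void *tagger_data;

	if (!priv)
		return 1;

	tagger_data = priv;			/* what ds->tagger_data would hold */

	/* Because 'data' is the first member, the same pointer serves as
	 * both the private view and the shared view. */
	assert((void *)&priv->data == tagger_data);
	printf("shared view at %p, private view at %p\n",
	       (void *)&priv->data, (void *)priv);

	free(priv);
	return 0;
}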
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 262c8833a910..72d5e0ef8dcf 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -4,7 +4,6 @@
#include <linux/if_vlan.h>
#include <linux/dsa/sja1105.h>
#include <linux/dsa/8021q.h>
-#include <linux/skbuff.h>
#include <linux/packing.h>
#include "dsa_priv.h"
@@ -54,11 +53,25 @@
#define SJA1110_TX_TRAILER_LEN 4
#define SJA1110_MAX_PADDING_LEN 15
-enum sja1110_meta_tstamp {
- SJA1110_META_TSTAMP_TX = 0,
- SJA1110_META_TSTAMP_RX = 1,
+#define SJA1105_HWTS_RX_EN 0
+
+struct sja1105_tagger_private {
+ struct sja1105_tagger_data data; /* Must be first */
+ unsigned long state;
+ /* Protects concurrent access to the meta state machine
+ * from taggers running on multiple ports on SMP systems
+ */
+ spinlock_t meta_lock;
+ struct sk_buff *stampable_skb;
+ struct kthread_worker *xmit_worker;
};
+static struct sja1105_tagger_private *
+sja1105_tagger_private(struct dsa_switch *ds)
+{
+ return ds->tagger_data;
+}
+
/* Similar to is_link_local_ether_addr(hdr->h_dest) but also covers PTP */
static inline bool sja1105_is_link_local(const struct sk_buff *skb)
{
@@ -125,16 +138,30 @@ static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp,
struct sk_buff *skb)
{
- struct sja1105_port *sp = dp->priv;
+ struct sja1105_tagger_data *tagger_data = sja1105_tagger_data(dp->ds);
+ struct sja1105_tagger_private *priv = sja1105_tagger_private(dp->ds);
+ void (*xmit_work_fn)(struct kthread_work *work);
+ struct sja1105_deferred_xmit_work *xmit_work;
+ struct kthread_worker *xmit_worker;
- if (!dsa_port_is_sja1105(dp))
- return skb;
+ xmit_work_fn = tagger_data->xmit_work_fn;
+ xmit_worker = priv->xmit_worker;
+
+ if (!xmit_work_fn || !xmit_worker)
+ return NULL;
+
+ xmit_work = kzalloc(sizeof(*xmit_work), GFP_ATOMIC);
+ if (!xmit_work)
+ return NULL;
+ kthread_init_work(&xmit_work->work, xmit_work_fn);
/* Increase refcount so the kfree_skb in dsa_slave_xmit
* won't really free the packet.
*/
- skb_queue_tail(&sp->xmit_queue, skb_get(skb));
- kthread_queue_work(sp->xmit_worker, &sp->xmit_work);
+ xmit_work->dp = dp;
+ xmit_work->skb = skb_get(skb);
+
+ kthread_queue_work(xmit_worker, &xmit_work->work);
return NULL;
}
@@ -159,14 +186,16 @@ static u16 sja1105_xmit_tpid(struct dsa_port *dp)
* need to find it.
*/
dsa_switch_for_each_port(other_dp, ds) {
- if (!other_dp->bridge_dev)
+ struct net_device *br = dsa_port_bridge_dev_get(other_dp);
+
+ if (!br)
continue;
/* Error is returned only if CONFIG_BRIDGE_VLAN_FILTERING,
* which seems pointless to handle, as our port cannot become
* VLAN-aware in that case.
*/
- br_vlan_get_proto(other_dp->bridge_dev, &proto);
+ br_vlan_get_proto(br, &proto);
return proto;
}
@@ -180,7 +209,8 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
- struct net_device *br = dp->bridge_dev;
+ unsigned int bridge_num = dsa_port_bridge_num_get(dp);
+ struct net_device *br = dsa_port_bridge_dev_get(dp);
u16 tx_vid;
/* If the port is under a VLAN-aware bridge, just slide the
@@ -196,7 +226,7 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb,
* TX VLAN that targets the bridge's entire broadcast domain,
* instead of just the specific port.
*/
- tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(dp->bridge_num);
+ tx_vid = dsa_8021q_bridge_tx_fwd_offload_vid(bridge_num);
return dsa_8021q_xmit(skb, netdev, sja1105_xmit_tpid(dp), tx_vid);
}
@@ -352,32 +382,32 @@ static struct sk_buff
*/
if (is_link_local) {
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
- struct sja1105_port *sp = dp->priv;
+ struct sja1105_tagger_private *priv;
+ struct dsa_switch *ds = dp->ds;
- if (unlikely(!dsa_port_is_sja1105(dp)))
- return skb;
+ priv = sja1105_tagger_private(ds);
- if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
+ if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
/* Do normal processing. */
return skb;
- spin_lock(&sp->data->meta_lock);
+ spin_lock(&priv->meta_lock);
/* Was this a link-local frame instead of the meta
* that we were expecting?
*/
- if (sp->data->stampable_skb) {
- dev_err_ratelimited(dp->ds->dev,
+ if (priv->stampable_skb) {
+ dev_err_ratelimited(ds->dev,
"Expected meta frame, is %12llx "
"in the DSA master multicast filter?\n",
SJA1105_META_DMAC);
- kfree_skb(sp->data->stampable_skb);
+ kfree_skb(priv->stampable_skb);
}
/* Hold a reference to avoid dsa_switch_rcv
* from freeing the skb.
*/
- sp->data->stampable_skb = skb_get(skb);
- spin_unlock(&sp->data->meta_lock);
+ priv->stampable_skb = skb_get(skb);
+ spin_unlock(&priv->meta_lock);
/* Tell DSA we got nothing */
return NULL;
@@ -390,37 +420,37 @@ static struct sk_buff
*/
} else if (is_meta) {
struct dsa_port *dp = dsa_slave_to_port(skb->dev);
- struct sja1105_port *sp = dp->priv;
+ struct sja1105_tagger_private *priv;
+ struct dsa_switch *ds = dp->ds;
struct sk_buff *stampable_skb;
- if (unlikely(!dsa_port_is_sja1105(dp)))
- return skb;
+ priv = sja1105_tagger_private(ds);
/* Drop the meta frame if we're not in the right state
* to process it.
*/
- if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
+ if (!test_bit(SJA1105_HWTS_RX_EN, &priv->state))
return NULL;
- spin_lock(&sp->data->meta_lock);
+ spin_lock(&priv->meta_lock);
- stampable_skb = sp->data->stampable_skb;
- sp->data->stampable_skb = NULL;
+ stampable_skb = priv->stampable_skb;
+ priv->stampable_skb = NULL;
/* Was this a meta frame instead of the link-local
* that we were expecting?
*/
if (!stampable_skb) {
- dev_err_ratelimited(dp->ds->dev,
+ dev_err_ratelimited(ds->dev,
"Unexpected meta frame\n");
- spin_unlock(&sp->data->meta_lock);
+ spin_unlock(&priv->meta_lock);
return NULL;
}
if (stampable_skb->dev != skb->dev) {
- dev_err_ratelimited(dp->ds->dev,
+ dev_err_ratelimited(ds->dev,
"Meta frame on wrong port\n");
- spin_unlock(&sp->data->meta_lock);
+ spin_unlock(&priv->meta_lock);
return NULL;
}
@@ -431,12 +461,36 @@ static struct sk_buff
skb = stampable_skb;
sja1105_transfer_meta(skb, meta);
- spin_unlock(&sp->data->meta_lock);
+ spin_unlock(&priv->meta_lock);
}
return skb;
}
+static bool sja1105_rxtstamp_get_state(struct dsa_switch *ds)
+{
+ struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);
+
+ return test_bit(SJA1105_HWTS_RX_EN, &priv->state);
+}
+
+static void sja1105_rxtstamp_set_state(struct dsa_switch *ds, bool on)
+{
+ struct sja1105_tagger_private *priv = sja1105_tagger_private(ds);
+
+ if (on)
+ set_bit(SJA1105_HWTS_RX_EN, &priv->state);
+ else
+ clear_bit(SJA1105_HWTS_RX_EN, &priv->state);
+
+ /* Initialize the meta state machine to a known state */
+ if (!priv->stampable_skb)
+ return;
+
+ kfree_skb(priv->stampable_skb);
+ priv->stampable_skb = NULL;
+}
+
static bool sja1105_skb_has_tag_8021q(const struct sk_buff *skb)
{
u16 tpid = ntohs(eth_hdr(skb)->h_proto);
@@ -523,48 +577,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
is_meta);
}
-static void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port,
- u8 ts_id, enum sja1110_meta_tstamp dir,
- u64 tstamp)
-{
- struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct skb_shared_hwtstamps shwt = {0};
- struct sja1105_port *sp = dp->priv;
-
- if (!dsa_port_is_sja1105(dp))
- return;
-
- /* We don't care about RX timestamps on the CPU port */
- if (dir == SJA1110_META_TSTAMP_RX)
- return;
-
- spin_lock(&sp->data->skb_txtstamp_queue.lock);
-
- skb_queue_walk_safe(&sp->data->skb_txtstamp_queue, skb, skb_tmp) {
- if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
- continue;
-
- __skb_unlink(skb, &sp->data->skb_txtstamp_queue);
- skb_match = skb;
-
- break;
- }
-
- spin_unlock(&sp->data->skb_txtstamp_queue.lock);
-
- if (WARN_ON(!skb_match))
- return;
-
- shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
- skb_complete_tx_timestamp(skb_match, &shwt);
-}
-
static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
{
u8 *buf = dsa_etype_header_pos_rx(skb) + SJA1110_HEADER_LEN;
int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header);
int n_ts = SJA1110_RX_HEADER_N_TS(rx_header);
+ struct sja1105_tagger_data *tagger_data;
struct net_device *master = skb->dev;
struct dsa_port *cpu_dp;
struct dsa_switch *ds;
@@ -578,6 +596,10 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
return NULL;
}
+ tagger_data = sja1105_tagger_data(ds);
+ if (!tagger_data->meta_tstamp_handler)
+ return NULL;
+
for (i = 0; i <= n_ts; i++) {
u8 ts_id, source_port, dir;
u64 tstamp;
@@ -587,8 +609,8 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
dir = (buf[1] & BIT(3)) >> 3;
tstamp = be64_to_cpu(*(__be64 *)(buf + 2));
- sja1110_process_meta_tstamp(ds, source_port, ts_id, dir,
- tstamp);
+ tagger_data->meta_tstamp_handler(ds, source_port, ts_id, dir,
+ tstamp);
buf += SJA1110_META_TSTAMP_SIZE;
}
@@ -719,11 +741,53 @@ static void sja1110_flow_dissect(const struct sk_buff *skb, __be16 *proto,
*proto = ((__be16 *)skb->data)[(VLAN_HLEN / 2) - 1];
}
+static void sja1105_disconnect(struct dsa_switch *ds)
+{
+ struct sja1105_tagger_private *priv = ds->tagger_data;
+
+ kthread_destroy_worker(priv->xmit_worker);
+ kfree(priv);
+ ds->tagger_data = NULL;
+}
+
+static int sja1105_connect(struct dsa_switch *ds)
+{
+ struct sja1105_tagger_data *tagger_data;
+ struct sja1105_tagger_private *priv;
+ struct kthread_worker *xmit_worker;
+ int err;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spin_lock_init(&priv->meta_lock);
+
+ xmit_worker = kthread_create_worker(0, "dsa%d:%d_xmit",
+ ds->dst->index, ds->index);
+ if (IS_ERR(xmit_worker)) {
+ err = PTR_ERR(xmit_worker);
+ kfree(priv);
+ return err;
+ }
+
+ priv->xmit_worker = xmit_worker;
+ /* Export functions for switch driver use */
+ tagger_data = &priv->data;
+ tagger_data->rxtstamp_get_state = sja1105_rxtstamp_get_state;
+ tagger_data->rxtstamp_set_state = sja1105_rxtstamp_set_state;
+ ds->tagger_data = priv;
+
+ return 0;
+}
+
static const struct dsa_device_ops sja1105_netdev_ops = {
.name = "sja1105",
.proto = DSA_TAG_PROTO_SJA1105,
.xmit = sja1105_xmit,
.rcv = sja1105_rcv,
+ .connect = sja1105_connect,
+ .disconnect = sja1105_disconnect,
.needed_headroom = VLAN_HLEN,
.flow_dissect = sja1105_flow_dissect,
.promisc_on_master = true,
@@ -737,6 +801,8 @@ static const struct dsa_device_ops sja1110_netdev_ops = {
.proto = DSA_TAG_PROTO_SJA1110,
.xmit = sja1110_xmit,
.rcv = sja1110_rcv,
+ .connect = sja1105_connect,
+ .disconnect = sja1105_disconnect,
.flow_dissect = sja1110_flow_dissect,
.needed_headroom = SJA1110_HEADER_LEN + VLAN_HLEN,
.needed_tailroom = SJA1110_RX_TRAILER_LEN + SJA1110_MAX_PADDING_LEN,
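
The sja1105 hunks above move the meta-frame state machine (stampable_skb, meta_lock, the HWTS_RX_EN bit) from per-port driver data into tagger-private data reached through ds->tagger_data. Here is a simplified userspace model of that two-step pairing, where the link-local frame is parked until the matching meta frame donates its timestamp; all names and locking primitives are stand-ins, not kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_frame {
	int port;
	long tstamp;				/* 0 = not stamped yet */
};

static pthread_mutex_t demo_meta_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_frame *demo_stampable;	/* parked link-local frame */

static void demo_rx_link_local(struct demo_frame *f)
{
	pthread_mutex_lock(&demo_meta_lock);
	if (demo_stampable)			/* unexpected: drop the stale one */
		free(demo_stampable);
	demo_stampable = f;
	pthread_mutex_unlock(&demo_meta_lock);
}

static struct demo_frame *demo_rx_meta(int port, long tstamp)
{
	struct demo_frame *f;

	pthread_mutex_lock(&demo_meta_lock);
	f = demo_stampable;
	demo_stampable = NULL;
	pthread_mutex_unlock(&demo_meta_lock);

	if (!f || f->port != port) {		/* meta frame without a partner */
		free(f);
		return NULL;
	}
	f->tstamp = tstamp;			/* transfer the timestamp */
	return f;
}

int main(void)
{
	struct demo_frame *ll = calloc(1, sizeof(*ll));
	struct demo_frame *stamped;

	if (!ll)
		return 1;
	ll->port = 2;

	demo_rx_link_local(ll);
	stamped = demo_rx_meta(2, 123456);
	if (stamped)
		printf("port %d frame stamped at %ld\n",
		       stamped->port, stamped->tstamp);
	free(stamped);
	return 0;
}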
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index c7d9e08107cb..ebcc812735a4 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -436,11 +436,10 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
type = eh->h_proto;
- rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (ptype == NULL) {
flush = 1;
- goto out_unlock;
+ goto out;
}
skb_gro_pull(skb, sizeof(*eh));
@@ -450,8 +449,6 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
ipv6_gro_receive, inet_gro_receive,
head, skb);
-out_unlock:
- rcu_read_unlock();
out:
skb_gro_flush_final(skb, pp, flush);
@@ -469,14 +466,12 @@ int eth_gro_complete(struct sk_buff *skb, int nhoff)
if (skb->encapsulation)
skb_set_inner_mac_header(skb, nhoff);
- rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype != NULL)
err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
ipv6_gro_complete, inet_gro_complete,
skb, nhoff + sizeof(*eh));
- rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(eth_gro_complete);
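
The eth_gro_receive()/eth_gro_complete() hunks drop their local rcu_read_lock()/unlock() pairs on the assumption that the whole GRO path already runs inside the caller's read-side section, so the per-protocol handlers only dereference. A loose userspace analogue of that shape, using a pthread rwlock purely as a stand-in for RCU:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;
static int *demo_offload;		/* stands in for the protected table entry */
static int demo_value = 42;

static int demo_gro_receive(void)
{
	/* No local read-lock/unlock here any more: the caller holds it
	 * for the whole receive path. */
	return demo_offload ? *demo_offload : -1;
}

int main(void)
{
	demo_offload = &demo_value;

	pthread_rwlock_rdlock(&demo_lock);	/* caller-side read section */
	printf("dispatched: %d\n", demo_gro_receive());
	pthread_rwlock_unlock(&demo_lock);
	return 0;
}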
diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c
index 63560bbb7d1f..920aac02fe39 100644
--- a/net/ethtool/cabletest.c
+++ b/net/ethtool/cabletest.c
@@ -96,7 +96,7 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
out_rtnl:
rtnl_unlock();
out_dev_put:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
@@ -353,7 +353,7 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
out_rtnl:
rtnl_unlock();
out_dev_put:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
index 6a070dc8e4b0..403158862011 100644
--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c
@@ -219,6 +219,6 @@ out_ops:
out_rtnl:
rtnl_unlock();
out_dev:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c
index 46776ea42a92..487bdf345541 100644
--- a/net/ethtool/coalesce.c
+++ b/net/ethtool/coalesce.c
@@ -336,6 +336,6 @@ out_ops:
out_rtnl:
rtnl_unlock();
out_dev:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index c63e0739dc6a..0c5210015911 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -89,6 +89,7 @@ tunable_strings[__ETHTOOL_TUNABLE_COUNT][ETH_GSTRING_LEN] = {
[ETHTOOL_RX_COPYBREAK] = "rx-copybreak",
[ETHTOOL_TX_COPYBREAK] = "tx-copybreak",
[ETHTOOL_PFC_PREVENTION_TOUT] = "pfc-prevention-tout",
+ [ETHTOOL_TX_COPYBREAK_BUF_SIZE] = "tx-copybreak-buf-size",
};
const char
diff --git a/net/ethtool/debug.c b/net/ethtool/debug.c
index f99912d7957e..d73888c7d19c 100644
--- a/net/ethtool/debug.c
+++ b/net/ethtool/debug.c
@@ -123,6 +123,6 @@ out_ops:
out_rtnl:
rtnl_unlock();
out_dev:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c
index e10bfcc07853..45c42b2d5f17 100644
--- a/net/ethtool/eee.c
+++ b/net/ethtool/eee.c
@@ -185,6 +185,6 @@ out_ops:
out_rtnl:
rtnl_unlock();
out_dev:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/features.c b/net/ethtool/features.c
index 1c9f4df273bd..55d449a2d3fc 100644
--- a/net/ethtool/features.c
+++ b/net/ethtool/features.c
@@ -136,7 +136,6 @@ static void ethnl_features_to_bitmap(unsigned long *dest, netdev_features_t val)
const unsigned int words = BITS_TO_LONGS(NETDEV_FEATURE_COUNT);
unsigned int i;
- bitmap_zero(dest, NETDEV_FEATURE_COUNT);
for (i = 0; i < words; i++)
dest[i] = (unsigned long)(val >> (i * BITS_PER_LONG));
}
@@ -284,6 +283,6 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
out_rtnl:
rtnl_unlock();
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/fec.c b/net/ethtool/fec.c
index 8738dafd5417..9f5a134e2e01 100644
--- a/net/ethtool/fec.c
+++ b/net/ethtool/fec.c
@@ -305,6 +305,6 @@ out_ops:
out_rtnl:
rtnl_unlock();
out_dev:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 20bcf86970ff..326e14ee05db 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -8,6 +8,7 @@
*/
#include <linux/compat.h>
+#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/capability.h>
@@ -734,6 +735,9 @@ ethtool_get_drvinfo(struct net_device *dev, struct ethtool_devlink_compat *rsp)
sizeof(rsp->info.bus_info));
strlcpy(rsp->info.driver, dev->dev.parent->driver->name,
sizeof(rsp->info.driver));
+ } else if (dev->rtnl_link_ops) {
+ strlcpy(rsp->info.driver, dev->rtnl_link_ops->kind,
+ sizeof(rsp->info.driver));
} else {
return -EOPNOTSUPP;
}
@@ -1743,11 +1747,13 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
{
struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
+ struct kernel_ethtool_ringparam kernel_ringparam = {};
if (!dev->ethtool_ops->get_ringparam)
return -EOPNOTSUPP;
- dev->ethtool_ops->get_ringparam(dev, &ringparam);
+ dev->ethtool_ops->get_ringparam(dev, &ringparam,
+ &kernel_ringparam, NULL);
if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
return -EFAULT;
@@ -1757,6 +1763,7 @@ static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
{
struct ethtool_ringparam ringparam, max = { .cmd = ETHTOOL_GRINGPARAM };
+ struct kernel_ethtool_ringparam kernel_ringparam;
int ret;
if (!dev->ethtool_ops->set_ringparam || !dev->ethtool_ops->get_ringparam)
@@ -1765,7 +1772,7 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
return -EFAULT;
- dev->ethtool_ops->get_ringparam(dev, &max);
+ dev->ethtool_ops->get_ringparam(dev, &max, &kernel_ringparam, NULL);
/* ensure new ring parameters are within the maximums */
if (ringparam.rx_pending > max.rx_max_pending ||
@@ -1774,7 +1781,8 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
ringparam.tx_pending > max.tx_max_pending)
return -EINVAL;
- ret = dev->ethtool_ops->set_ringparam(dev, &ringparam);
+ ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
+ &kernel_ringparam, NULL);
if (!ret)
ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
return ret;
@@ -1982,6 +1990,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
struct ethtool_value id;
static bool busy;
const struct ethtool_ops *ops = dev->ethtool_ops;
+ netdevice_tracker dev_tracker;
int rc;
if (!ops->set_phys_id)
@@ -2001,7 +2010,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
* removal of the device.
*/
busy = true;
- dev_hold(dev);
+ dev_hold_track(dev, &dev_tracker, GFP_KERNEL);
rtnl_unlock();
if (rc == 0) {
@@ -2025,7 +2034,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
}
rtnl_lock();
- dev_put(dev);
+ dev_put_track(dev, &dev_tracker);
busy = false;
(void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
@@ -2087,9 +2096,9 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
if (!phydev && (!ops->get_ethtool_phy_stats || !ops->get_sset_count))
return -EOPNOTSUPP;
- if (dev->phydev && !ops->get_ethtool_phy_stats &&
+ if (phydev && !ops->get_ethtool_phy_stats &&
phy_ops && phy_ops->get_sset_count)
- n_stats = phy_ops->get_sset_count(dev->phydev);
+ n_stats = phy_ops->get_sset_count(phydev);
else
n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
if (n_stats < 0)
@@ -2108,9 +2117,9 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
if (!data)
return -ENOMEM;
- if (dev->phydev && !ops->get_ethtool_phy_stats &&
+ if (phydev && !ops->get_ethtool_phy_stats &&
phy_ops && phy_ops->get_stats) {
- ret = phy_ops->get_stats(dev->phydev, &stats, data);
+ ret = phy_ops->get_stats(phydev, &stats, data);
if (ret < 0)
goto out;
} else {
@@ -2396,6 +2405,7 @@ static int ethtool_tunable_valid(const struct ethtool_tunable *tuna)
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
case ETHTOOL_TX_COPYBREAK:
+ case ETHTOOL_TX_COPYBREAK_BUF_SIZE:
if (tuna->len != sizeof(u32) ||
tuna->type_id != ETHTOOL_TUNABLE_U32)
return -EINVAL;
diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c
index b91839870efc..efa0f7f48836 100644
--- a/net/ethtool/linkinfo.c
+++ b/net/ethtool/linkinfo.c
@@ -149,6 +149,6 @@ out_ops:
out_rtnl:
rtnl_unlock();
out_dev:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c
index f9eda596f301..99b29b4fe947 100644
--- a/net/ethtool/linkmodes.c
+++ b/net/ethtool/linkmodes.c
@@ -358,6 +358,6 @@ out_ops:
out_rtnl:
rtnl_unlock();
out_dev:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/module.c b/net/ethtool/module.c
index bc2cef11bbda..898ed436b9e4 100644
--- a/net/ethtool/module.c
+++ b/net/ethtool/module.c
@@ -175,6 +175,6 @@ out_ops:
ethnl_ops_complete(dev);
out_rtnl:
rtnl_unlock();
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 96f4180aabd2..5fe8f4ae2ceb 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -142,6 +142,8 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
}
req_info->dev = dev;
+ if (dev)
+ netdev_tracker_alloc(dev, &req_info->dev_tracker, GFP_KERNEL);
req_info->flags = flags;
return 0;
}
@@ -400,7 +402,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
ops->cleanup_data(reply_data);
genlmsg_end(rskb, reply_payload);
- dev_put(req_info->dev);
+ dev_put_track(req_info->dev, &req_info->dev_tracker);
kfree(reply_data);
kfree(req_info);
return genlmsg_reply(rskb, info);
@@ -412,7 +414,7 @@ err_cleanup:
if (ops->cleanup_data)
ops->cleanup_data(reply_data);
err_dev:
- dev_put(req_info->dev);
+ dev_put_track(req_info->dev, &req_info->dev_tracker);
kfree(reply_data);
kfree(req_info);
return ret;
@@ -548,7 +550,7 @@ static int ethnl_default_start(struct netlink_callback *cb)
* same parser as for non-dump (doit) requests is used, it
* would take reference to the device if it finds one
*/
- dev_put(req_info->dev);
+ dev_put_track(req_info->dev, &req_info->dev_tracker);
req_info->dev = NULL;
}
if (ret < 0)
@@ -635,7 +637,6 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd,
if (ret < 0)
goto err_cleanup;
reply_len = ret + ethnl_reply_header_size();
- ret = -ENOMEM;
skb = genlmsg_new(reply_len, GFP_KERNEL);
if (!skb)
goto err_cleanup;
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index 836ee7157848..75856db299e9 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -222,6 +222,7 @@ static inline unsigned int ethnl_reply_header_size(void)
/**
* struct ethnl_req_info - base type of request information for GET requests
* @dev: network device the request is for (may be null)
+ * @dev_tracker: refcount tracker for @dev reference
* @flags: request flags common for all request types
*
* This is a common base for request specific structures holding data from
@@ -230,9 +231,15 @@ static inline unsigned int ethnl_reply_header_size(void)
*/
struct ethnl_req_info {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
u32 flags;
};
+static inline void ethnl_parse_header_dev_put(struct ethnl_req_info *req_info)
+{
+ dev_put_track(req_info->dev, &req_info->dev_tracker);
+}
+
/**
* struct ethnl_reply_data - base type of reply data for GET requests
* @dev: device for current reply message; in single shot requests it is
@@ -356,7 +363,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT
extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
-extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX + 1];
+extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_BUF_LEN + 1];
extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
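
ethnl_parse_header_dev_put() above pairs every reference taken by ethnl_parse_header_dev_get() with a dev_put_track() that consumes the tracker stored in the request info, so an unbalanced put can be attributed to whoever took the reference. A toy userspace sketch of that tracked-reference idea, with illustrative names only:

#include <stdio.h>

struct demo_tracker {
	const char *owner;	/* who took the reference */
};

struct demo_dev {
	int refcnt;
};

static void demo_hold_track(struct demo_dev *dev, struct demo_tracker *t,
			    const char *owner)
{
	dev->refcnt++;
	t->owner = owner;
}

static void demo_put_track(struct demo_dev *dev, struct demo_tracker *t)
{
	if (!t->owner)
		fprintf(stderr, "put without a matching hold\n");
	t->owner = NULL;
	dev->refcnt--;
}

int main(void)
{
	struct demo_dev dev = { .refcnt = 1 };
	struct demo_tracker tracker = { 0 };

	demo_hold_track(&dev, &tracker, "ethnl request");
	/* ... request processing ... */
	demo_put_track(&dev, &tracker);

	printf("refcnt back to %d\n", dev.refcnt);
	return 0;
}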
diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c
index ee1e5806bc93..a8c113d244db 100644
--- a/net/ethtool/pause.c
+++ b/net/ethtool/pause.c
@@ -181,6 +181,6 @@ out_ops:
out_rtnl:
rtnl_unlock();
out_dev:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/privflags.c b/net/ethtool/privflags.c
index fc9f3be23a19..4c7bfa81e4ab 100644
--- a/net/ethtool/privflags.c
+++ b/net/ethtool/privflags.c
@@ -196,6 +196,6 @@ out_ops:
out_rtnl:
rtnl_unlock();
out_dev:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
index 4e097812a967..c1d5f5e0fdc9 100644
--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c
@@ -10,6 +10,7 @@ struct rings_req_info {
struct rings_reply_data {
struct ethnl_reply_data base;
struct ethtool_ringparam ringparam;
+ struct kernel_ethtool_ringparam kernel_ringparam;
};
#define RINGS_REPDATA(__reply_base) \
@@ -25,6 +26,7 @@ static int rings_prepare_data(const struct ethnl_req_info *req_base,
struct genl_info *info)
{
struct rings_reply_data *data = RINGS_REPDATA(reply_base);
+ struct netlink_ext_ack *extack = info ? info->extack : NULL;
struct net_device *dev = reply_base->dev;
int ret;
@@ -33,7 +35,8 @@ static int rings_prepare_data(const struct ethnl_req_info *req_base,
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
- dev->ethtool_ops->get_ringparam(dev, &data->ringparam);
+ dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
+ &data->kernel_ringparam, extack);
ethnl_ops_complete(dev);
return 0;
@@ -49,7 +52,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
nla_total_size(sizeof(u32)) + /* _RINGS_RX */
nla_total_size(sizeof(u32)) + /* _RINGS_RX_MINI */
nla_total_size(sizeof(u32)) + /* _RINGS_RX_JUMBO */
- nla_total_size(sizeof(u32)); /* _RINGS_TX */
+ nla_total_size(sizeof(u32)) + /* _RINGS_TX */
+ nla_total_size(sizeof(u32)); /* _RINGS_RX_BUF_LEN */
}
static int rings_fill_reply(struct sk_buff *skb,
@@ -57,6 +61,7 @@ static int rings_fill_reply(struct sk_buff *skb,
const struct ethnl_reply_data *reply_base)
{
const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
+ const struct kernel_ethtool_ringparam *kernel_ringparam = &data->kernel_ringparam;
const struct ethtool_ringparam *ringparam = &data->ringparam;
if ((ringparam->rx_max_pending &&
@@ -78,7 +83,10 @@ static int rings_fill_reply(struct sk_buff *skb,
(nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
ringparam->tx_max_pending) ||
nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
- ringparam->tx_pending))))
+ ringparam->tx_pending))) ||
+ (kernel_ringparam->rx_buf_len &&
+ (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN,
+ kernel_ringparam->rx_buf_len))))
return -EMSGSIZE;
return 0;
@@ -105,10 +113,12 @@ const struct nla_policy ethnl_rings_set_policy[] = {
[ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_U32 },
[ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 },
[ETHTOOL_A_RINGS_TX] = { .type = NLA_U32 },
+ [ETHTOOL_A_RINGS_RX_BUF_LEN] = NLA_POLICY_MIN(NLA_U32, 1),
};
int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
{
+ struct kernel_ethtool_ringparam kernel_ringparam = {};
struct ethtool_ringparam ringparam = {};
struct ethnl_req_info req_info = {};
struct nlattr **tb = info->attrs;
@@ -134,7 +144,7 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
ret = ethnl_ops_begin(dev);
if (ret < 0)
goto out_rtnl;
- ops->get_ringparam(dev, &ringparam);
+ ops->get_ringparam(dev, &ringparam, &kernel_ringparam, info->extack);
ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
ethnl_update_u32(&ringparam.rx_mini_pending,
@@ -142,6 +152,8 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
ethnl_update_u32(&ringparam.rx_jumbo_pending,
tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
+ ethnl_update_u32(&kernel_ringparam.rx_buf_len,
+ tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
ret = 0;
if (!mod)
goto out_ops;
@@ -164,7 +176,17 @@ int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
goto out_ops;
}
- ret = dev->ethtool_ops->set_ringparam(dev, &ringparam);
+ if (kernel_ringparam.rx_buf_len != 0 &&
+ !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
+ ret = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_ATTR(info->extack,
+ tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
+ "setting rx buf len not supported");
+ goto out_ops;
+ }
+
+ ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
+ &kernel_ringparam, info->extack);
if (ret < 0)
goto out_ops;
ethtool_notify(dev, ETHTOOL_MSG_RINGS_NTF, NULL);
@@ -174,6 +196,6 @@ out_ops:
out_rtnl:
rtnl_unlock();
out_dev:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
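
ethnl_set_rings() above only forwards a requested RX buffer length to drivers that advertise ETHTOOL_RING_USE_RX_BUF_LEN in supported_ring_params; everyone else gets -EOPNOTSUPP with an extack message. The standalone sketch below shows just that capability-gate shape, with made-up constants and types:

#include <errno.h>
#include <stdio.h>

#define DEMO_RING_USE_RX_BUF_LEN	(1u << 0)

struct demo_ops {
	unsigned int supported_ring_params;
};

static int demo_set_rx_buf_len(const struct demo_ops *ops, unsigned int len)
{
	if (len != 0 && !(ops->supported_ring_params & DEMO_RING_USE_RX_BUF_LEN))
		return -EOPNOTSUPP;	/* driver never declared support */
	/* a real driver would program the hardware here */
	return 0;
}

int main(void)
{
	struct demo_ops no_support = { .supported_ring_params = 0 };
	struct demo_ops with_support = { .supported_ring_params = DEMO_RING_USE_RX_BUF_LEN };

	printf("unsupported driver: %d\n", demo_set_rx_buf_len(&no_support, 4096));
	printf("supporting driver:  %d\n", demo_set_rx_buf_len(&with_support, 4096));
	return 0;
}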
diff --git a/net/ethtool/stats.c b/net/ethtool/stats.c
index ec07f5765e03..a20e0a24ff61 100644
--- a/net/ethtool/stats.c
+++ b/net/ethtool/stats.c
@@ -14,10 +14,12 @@ struct stats_req_info {
struct stats_reply_data {
struct ethnl_reply_data base;
- struct ethtool_eth_phy_stats phy_stats;
- struct ethtool_eth_mac_stats mac_stats;
- struct ethtool_eth_ctrl_stats ctrl_stats;
- struct ethtool_rmon_stats rmon_stats;
+ struct_group(stats,
+ struct ethtool_eth_phy_stats phy_stats;
+ struct ethtool_eth_mac_stats mac_stats;
+ struct ethtool_eth_ctrl_stats ctrl_stats;
+ struct ethtool_rmon_stats rmon_stats;
+ );
const struct ethtool_rmon_hist_range *rmon_ranges;
};
@@ -117,10 +119,7 @@ static int stats_prepare_data(const struct ethnl_req_info *req_base,
/* Mark all stats as unset (see ETHTOOL_STAT_NOT_SET) to prevent them
* from being reported to user space in case driver did not set them.
*/
- memset(&data->phy_stats, 0xff, sizeof(data->phy_stats));
- memset(&data->mac_stats, 0xff, sizeof(data->mac_stats));
- memset(&data->ctrl_stats, 0xff, sizeof(data->ctrl_stats));
- memset(&data->rmon_stats, 0xff, sizeof(data->rmon_stats));
+ memset(&data->stats, 0xff, sizeof(data->stats));
if (test_bit(ETHTOOL_STATS_ETH_PHY, req_info->stat_mask) &&
dev->ethtool_ops->get_eth_phy_stats)
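
In stats.c the four stats structs are wrapped in struct_group() so that a single memset() can mark every counter as ETHTOOL_STAT_NOT_SET. The sketch below shows only that one-memset aspect using a plain nested struct; the kernel macro additionally keeps the members addressable under their original, unprefixed names.

#include <stdio.h>
#include <string.h>

struct demo_stats_reply {
	int base;
	struct {
		long phy[4];
		long mac[4];
		long ctrl[4];
		long rmon[4];
	} stats;		/* grouped: one memset covers all four blocks */
};

int main(void)
{
	struct demo_stats_reply r;

	/* Mark every counter in the group as "not set" in one go. */
	memset(&r.stats, 0xff, sizeof(r.stats));
	printf("first phy counter: %ld\n", r.stats.phy[0]);
	return 0;
}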
diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c
index e7f2ee0d2471..efde33536687 100644
--- a/net/ethtool/tunnels.c
+++ b/net/ethtool/tunnels.c
@@ -195,7 +195,7 @@ int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
if (ret)
goto err_free_msg;
rtnl_unlock();
- dev_put(req_info.dev);
+ ethnl_parse_header_dev_put(&req_info);
genlmsg_end(rskb, reply_payload);
return genlmsg_reply(rskb, info);
@@ -204,7 +204,7 @@ err_free_msg:
nlmsg_free(rskb);
err_unlock_rtnl:
rtnl_unlock();
- dev_put(req_info.dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
@@ -230,7 +230,7 @@ int ethnl_tunnel_info_start(struct netlink_callback *cb)
sock_net(cb->skb->sk), cb->extack,
false);
if (ctx->req_info.dev) {
- dev_put(ctx->req_info.dev);
+ ethnl_parse_header_dev_put(&ctx->req_info);
ctx->req_info.dev = NULL;
}
diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c
index ada7df2331d2..88f435e76481 100644
--- a/net/ethtool/wol.c
+++ b/net/ethtool/wol.c
@@ -165,6 +165,6 @@ out_ops:
out_rtnl:
rtnl_unlock();
out_dev:
- dev_put(dev);
+ ethnl_parse_header_dev_put(&req_info);
return ret;
}
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index 737e4f17e1c6..e57fdad9ef94 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -30,13 +30,13 @@ static bool is_slave_up(struct net_device *dev)
static void __hsr_set_operstate(struct net_device *dev, int transition)
{
- write_lock_bh(&dev_base_lock);
+ write_lock(&dev_base_lock);
if (dev->operstate != transition) {
dev->operstate = transition;
- write_unlock_bh(&dev_base_lock);
+ write_unlock(&dev_base_lock);
netdev_state_change(dev);
} else {
- write_unlock_bh(&dev_base_lock);
+ write_unlock(&dev_base_lock);
}
}
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index 7bb9ef35c570..3b2366a88c3c 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -174,8 +174,8 @@ static int raw_hash(struct sock *sk)
{
write_lock_bh(&raw_lock);
sk_add_node(sk, &raw_head);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock_bh(&raw_lock);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
}
@@ -453,8 +453,8 @@ static int dgram_hash(struct sock *sk)
{
write_lock_bh(&dgram_lock);
sk_add_node(sk, &dgram_head);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock_bh(&dgram_lock);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
}
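
Both hash functions above now bump sock_prot_inuse_add() after dropping the list write lock, since the per-CPU usage counter does not need that lock and the critical section gets shorter. A rough userspace analogue of the same reordering, with an atomic counter standing in for the per-CPU one:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t demo_list_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_list_len;			/* protected by demo_list_lock */
static atomic_int demo_inuse;			/* stands in for the per-CPU counter */

static void demo_hash(void)
{
	pthread_mutex_lock(&demo_list_lock);
	demo_list_len++;			/* must stay inside the lock */
	pthread_mutex_unlock(&demo_list_lock);

	atomic_fetch_add(&demo_inuse, 1);	/* no longer needs the lock */
}

int main(void)
{
	demo_hash();
	printf("list_len=%d inuse=%d\n", demo_list_len, atomic_load(&demo_inuse));
	return 0;
}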
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 5f70ffdae1b5..9c465bac1eb0 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -99,6 +99,7 @@
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/inet_connection_sock.h>
+#include <net/gro.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/udplite.h>
@@ -224,7 +225,7 @@ int inet_listen(struct socket *sock, int backlog)
tcp_fastopen_init_key_once(sock_net(sk));
}
- err = inet_csk_listen_start(sk, backlog);
+ err = inet_csk_listen_start(sk);
if (err)
goto out;
tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_LISTEN_CB, 0, NULL);
@@ -488,11 +489,8 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
* is temporarily down)
*/
err = -EADDRNOTAVAIL;
- if (!inet_can_nonlocal_bind(net, inet) &&
- addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
- chk_addr_ret != RTN_LOCAL &&
- chk_addr_ret != RTN_MULTICAST &&
- chk_addr_ret != RTN_BROADCAST)
+ if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
+ chk_addr_ret))
goto out;
snum = ntohs(addr->sin_port);
@@ -533,6 +531,8 @@ int __inet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
err = BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk);
if (err) {
inet->inet_saddr = inet->inet_rcv_saddr = 0;
+ if (sk->sk_prot->put_port)
+ sk->sk_prot->put_port(sk);
goto out_release_sock;
}
}
@@ -1454,19 +1454,18 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
proto = iph->protocol;
- rcu_read_lock();
ops = rcu_dereference(inet_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive)
- goto out_unlock;
+ goto out;
if (*(u8 *)iph != 0x45)
- goto out_unlock;
+ goto out;
if (ip_is_fragment(iph))
- goto out_unlock;
+ goto out;
if (unlikely(ip_fast_csum((u8 *)iph, 5)))
- goto out_unlock;
+ goto out;
id = ntohl(*(__be32 *)&iph->id);
flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
@@ -1543,9 +1542,6 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
ops->callbacks.gro_receive, head, skb);
-out_unlock:
- rcu_read_unlock();
-
out:
skb_gro_flush_final(skb, pp, flush);
@@ -1618,10 +1614,9 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;
- rcu_read_lock();
ops = rcu_dereference(inet_offloads[proto]);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
- goto out_unlock;
+ goto out;
/* Only need to add sizeof(*iph) to get to the next hdr below
* because any hdr with option will have been flushed in
@@ -1631,9 +1626,7 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
tcp4_gro_complete, udp4_gro_complete,
skb, nhoff + sizeof(*iph));
-out_unlock:
- rcu_read_unlock();
-
+out:
return err;
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 857a144b1ea9..4db0325f6e1a 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1299,21 +1299,6 @@ static struct packet_type arp_packet_type __read_mostly = {
.func = arp_rcv,
};
-static int arp_proc_init(void);
-
-void __init arp_init(void)
-{
- neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
-
- dev_add_pack(&arp_packet_type);
- arp_proc_init();
-#ifdef CONFIG_SYSCTL
- neigh_sysctl_register(NULL, &arp_tbl.parms, NULL);
-#endif
- register_netdevice_notifier(&arp_netdev_notifier);
-}
-
-#ifdef CONFIG_PROC_FS
#if IS_ENABLED(CONFIG_AX25)
/* ------------------------------------------------------------------------ */
@@ -1451,16 +1436,14 @@ static struct pernet_operations arp_net_ops = {
.exit = arp_net_exit,
};
-static int __init arp_proc_init(void)
+void __init arp_init(void)
{
- return register_pernet_subsys(&arp_net_ops);
-}
-
-#else /* CONFIG_PROC_FS */
+ neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
-static int __init arp_proc_init(void)
-{
- return 0;
+ dev_add_pack(&arp_packet_type);
+ register_pernet_subsys(&arp_net_ops);
+#ifdef CONFIG_SYSCTL
+ neigh_sysctl_register(NULL, &arp_tbl.parms, NULL);
+#endif
+ register_netdevice_notifier(&arp_netdev_notifier);
}
-
-#endif /* CONFIG_PROC_FS */
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index 4bb9401b0a3f..de610cb83694 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -169,7 +169,7 @@ static u32 prog_ops_moff(const struct bpf_prog *prog)
t = bpf_tcp_congestion_ops.type;
m = &btf_type_member(t)[midx];
- return btf_member_bit_offset(t, m) / 8;
+ return __btf_member_bit_offset(t, m) / 8;
}
static const struct bpf_func_proto *
@@ -246,7 +246,7 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
utcp_ca = (const struct tcp_congestion_ops *)udata;
tcp_ca = (struct tcp_congestion_ops *)kdata;
- moff = btf_member_bit_offset(t, member) / 8;
+ moff = __btf_member_bit_offset(t, member) / 8;
switch (moff) {
case offsetof(struct tcp_congestion_ops, flags):
if (utcp_ca->flags & ~TCP_CONG_MASK)
@@ -276,7 +276,7 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
static int bpf_tcp_ca_check_member(const struct btf_type *t,
const struct btf_member *member)
{
- if (is_unsupported(btf_member_bit_offset(t, member) / 8))
+ if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
return -ENOTSUPP;
return 0;
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 323e622ff9b7..fba2bffd65f7 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -243,7 +243,7 @@ void in_dev_finish_destroy(struct in_device *idev)
#ifdef NET_REFCNT_DEBUG
pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
- dev_put(dev);
+ dev_put_track(dev, &idev->dev_tracker);
if (!idev->dead)
pr_err("Freeing alive in_device %p\n", idev);
else
@@ -271,7 +271,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
dev_disable_lro(dev);
/* Reference in_dev->dev */
- dev_hold(dev);
+ dev_hold_track(dev, &in_dev->dev_tracker, GFP_KERNEL);
/* Account for reference dev->ip_ptr (below) */
refcount_set(&in_dev->refcnt, 1);
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 8e4e9aa12130..d87f02a6e934 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -16,6 +16,7 @@
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index d279cb8ac158..e0b6c8b6de57 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -216,11 +216,6 @@ static struct fib_table *fib_empty_table(struct net *net)
return NULL;
}
-static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = {
- FRA_GENERIC_POLICY,
- [FRA_FLOW] = { .type = NLA_U32 },
-};
-
static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
struct fib_rule_hdr *frh,
struct nlattr **tb,
@@ -386,7 +381,6 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = {
.nlmsg_payload = fib4_rule_nlmsg_payload,
.flush_cache = fib4_rule_flush_cache,
.nlgroup = RTNLGRP_IPV4_RULE,
- .policy = fib4_rule_policy,
.owner = THIS_MODULE,
};
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index fde7797b5806..828de171708f 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -208,7 +208,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
void fib_nh_common_release(struct fib_nh_common *nhc)
{
- dev_put(nhc->nhc_dev);
+ dev_put_track(nhc->nhc_dev, &nhc->nhc_dev_tracker);
lwtstate_put(nhc->nhc_lwtstate);
rt_fibinfo_free_cpus(nhc->nhc_pcpu_rth_output);
rt_fibinfo_free(&nhc->nhc_rth_input);
@@ -662,6 +662,19 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
return nhs;
}
+static int fib_gw_from_attr(__be32 *gw, struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ if (nla_len(nla) < sizeof(*gw)) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv4 address in RTA_GATEWAY");
+ return -EINVAL;
+ }
+
+ *gw = nla_get_in_addr(nla);
+
+ return 0;
+}
+
/* only called when fib_nh is integrated into fib_info */
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
int remaining, struct fib_config *cfg,
@@ -704,7 +717,11 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
return -EINVAL;
}
if (nla) {
- fib_cfg.fc_gw4 = nla_get_in_addr(nla);
+ ret = fib_gw_from_attr(&fib_cfg.fc_gw4, nla,
+ extack);
+ if (ret)
+ goto errout;
+
if (fib_cfg.fc_gw4)
fib_cfg.fc_gw_family = AF_INET;
} else if (nlav) {
@@ -714,10 +731,18 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
}
nla = nla_find(attrs, attrlen, RTA_FLOW);
- if (nla)
+ if (nla) {
+ if (nla_len(nla) < sizeof(u32)) {
+ NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
+ return -EINVAL;
+ }
fib_cfg.fc_flow = nla_get_u32(nla);
+ }
fib_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+ /* RTA_ENCAP_TYPE length checked in
+ * lwtunnel_valid_encap_type_attr
+ */
nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla)
fib_cfg.fc_encap_type = nla_get_u16(nla);
@@ -902,6 +927,7 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *nlav, *attrs = rtnh_attrs(rtnh);
+ int err;
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
nlav = nla_find(attrs, attrlen, RTA_VIA);
@@ -912,12 +938,17 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
}
if (nla) {
+ __be32 gw;
+
+ err = fib_gw_from_attr(&gw, nla, extack);
+ if (err)
+ return err;
+
if (nh->fib_nh_gw_family != AF_INET ||
- nla_get_in_addr(nla) != nh->fib_nh_gw4)
+ gw != nh->fib_nh_gw4)
return 1;
} else if (nlav) {
struct fib_config cfg2;
- int err;
err = fib_gw_from_via(&cfg2, nlav, extack);
if (err)
@@ -940,8 +971,14 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
- if (nla && nla_get_u32(nla) != nh->nh_tclassid)
- return 1;
+ if (nla) {
+ if (nla_len(nla) < sizeof(u32)) {
+ NL_SET_ERR_MSG(extack, "Invalid RTA_FLOW");
+ return -EINVAL;
+ }
+ if (nla_get_u32(nla) != nh->nh_tclassid)
+ return 1;
+ }
#endif
}
@@ -1006,7 +1043,7 @@ static int fib_check_nh_v6_gw(struct net *net, struct fib_nh *nh,
err = ipv6_stub->fib6_nh_init(net, &fib6_nh, &cfg, GFP_KERNEL, extack);
if (!err) {
nh->fib_nh_dev = fib6_nh.fib_nh_dev;
- dev_hold(nh->fib_nh_dev);
+ dev_hold_track(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_KERNEL);
nh->fib_nh_oif = nh->fib_nh_dev->ifindex;
nh->fib_nh_scope = RT_SCOPE_LINK;
@@ -1090,7 +1127,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
if (!netif_carrier_ok(dev))
nh->fib_nh_flags |= RTNH_F_LINKDOWN;
nh->fib_nh_dev = dev;
- dev_hold(dev);
+ dev_hold_track(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
nh->fib_nh_scope = RT_SCOPE_LINK;
return 0;
}
@@ -1144,7 +1181,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
"No egress device for nexthop gateway");
goto out;
}
- dev_hold(dev);
+ dev_hold_track(dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
if (!netif_carrier_ok(dev))
nh->fib_nh_flags |= RTNH_F_LINKDOWN;
err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
@@ -1178,7 +1215,7 @@ static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh,
}
nh->fib_nh_dev = in_dev->dev;
- dev_hold(nh->fib_nh_dev);
+ dev_hold_track(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC);
nh->fib_nh_scope = RT_SCOPE_HOST;
if (!netif_carrier_ok(nh->fib_nh_dev))
nh->fib_nh_flags |= RTNH_F_LINKDOWN;
@@ -1508,6 +1545,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg,
err = -ENODEV;
if (!nh->fib_nh_dev)
goto failure;
+ netdev_tracker_alloc(nh->fib_nh_dev, &nh->fib_nh_dev_tracker,
+ GFP_KERNEL);
} else {
int linkdown = 0;
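
fib_gw_from_attr() and the RTA_FLOW checks added above validate the attribute payload length before calling nla_get_*(), so undersized netlink attributes are rejected instead of being read past their end. The standalone sketch below shows the same check-length-then-copy pattern with simplified stand-in types:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_attr {
	uint16_t len;		/* payload length */
	const void *data;
};

static int demo_get_be32(const struct demo_attr *nla, uint32_t *out)
{
	if (nla->len < sizeof(*out))
		return -EINVAL;	/* malformed: too short for an IPv4 address */
	memcpy(out, nla->data, sizeof(*out));
	return 0;
}

int main(void)
{
	uint32_t gw;
	uint8_t short_payload[2] = { 0 };
	uint8_t ok_payload[4] = { 192, 0, 2, 1 };
	struct demo_attr bad = { .len = sizeof(short_payload), .data = short_payload };
	struct demo_attr good = { .len = sizeof(ok_payload), .data = ok_payload };

	printf("short attr: %d\n", demo_get_be32(&bad, &gw));
	printf("good attr:  %d\n", demo_get_be32(&good, &gw));
	return 0;
}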
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 8fcbc6258ec5..0d085cc8d96c 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <net/genetlink.h>
+#include <net/gro.h>
#include <net/gue.h>
#include <net/fou.h>
#include <net/ip.h>
@@ -246,17 +247,14 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
/* Flag this frame as already having an outer encap header */
NAPI_GRO_CB(skb)->is_fou = 1;
- rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
if (!ops || !ops->callbacks.gro_receive)
- goto out_unlock;
+ goto out;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
-out_unlock:
- rcu_read_unlock();
-
+out:
return pp;
}
@@ -268,19 +266,16 @@ static int fou_gro_complete(struct sock *sk, struct sk_buff *skb,
const struct net_offload *ops;
int err = -ENOSYS;
- rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
- goto out_unlock;
+ goto out;
err = ops->callbacks.gro_complete(skb, nhoff);
skb_set_inner_mac_header(skb, nhoff);
-out_unlock:
- rcu_read_unlock();
-
+out:
return err;
}
@@ -438,17 +433,14 @@ next_proto:
/* Flag this frame as already having an outer encap header */
NAPI_GRO_CB(skb)->is_fou = 1;
- rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
- goto out_unlock;
+ goto out;
pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
flush = 0;
-out_unlock:
- rcu_read_unlock();
out:
skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
@@ -485,18 +477,16 @@ static int gue_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return err;
}
- rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
- goto out_unlock;
+ goto out;
err = ops->callbacks.gro_complete(skb, nhoff + guehlen);
skb_set_inner_mac_header(skb, nhoff + guehlen);
-out_unlock:
- rcu_read_unlock();
+out:
return err;
}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 1121a9d5fed9..07073fa35205 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>
+#include <net/gro.h>
static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
netdev_features_t features)
@@ -162,10 +163,9 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
type = greh->protocol;
- rcu_read_lock();
ptype = gro_find_receive_by_type(type);
if (!ptype)
- goto out_unlock;
+ goto out;
grehlen = GRE_HEADER_SECTION;
@@ -179,13 +179,13 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
if (skb_gro_header_hard(skb, hlen)) {
greh = skb_gro_header_slow(skb, hlen, off);
if (unlikely(!greh))
- goto out_unlock;
+ goto out;
}
/* Don't bother verifying checksum if we're going to flush anyway. */
if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
if (skb_gro_checksum_simple_validate(skb))
- goto out_unlock;
+ goto out;
skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
null_compute_pseudo);
@@ -229,8 +229,6 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;
-out_unlock:
- rcu_read_unlock();
out:
skb_gro_flush_final(skb, pp, flush);
@@ -255,13 +253,10 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
if (greh->flags & GRE_CSUM)
grehlen += GRE_HEADER_SECTION;
- rcu_read_lock();
ptype = gro_find_complete_by_type(type);
if (ptype)
err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);
- rcu_read_unlock();
-
skb_set_inner_mac_header(skb, nhoff + grehlen);
return err;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index d2e2b3d18c66..2ad3c7b42d6d 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -2558,7 +2558,6 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
msf->imsf_fmode = pmc->sfmode;
psl = rtnl_dereference(pmc->sflist);
if (!psl) {
- len = 0;
count = 0;
} else {
count = psl->sl_count;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 62a67fdc344c..fc2a985f6064 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -1035,7 +1035,7 @@ void inet_csk_prepare_forced_close(struct sock *sk)
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
-int inet_csk_listen_start(struct sock *sk, int backlog)
+int inet_csk_listen_start(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet = inet_sk(sk);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 75737267746f..30ab717ff1b8 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -307,7 +307,7 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
- __be32 daddr, u16 hnum)
+ __be32 daddr, u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@@ -315,8 +315,8 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net,
if (hashinfo != &tcp_hashinfo)
return NULL; /* only TCP is supported */
- no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP,
- saddr, sport, daddr, hnum, &sk);
+ no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport,
+ daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@@ -340,7 +340,7 @@ struct sock *__inet_lookup_listener(struct net *net,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
result = inet_lookup_run_bpf(net, hashinfo, skb, doff,
- saddr, sport, daddr, hnum);
+ saddr, sport, daddr, hnum, dif);
if (result)
goto done;
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9bca57ef8b83..57c1d8431386 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -672,7 +672,6 @@ struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
struct sk_buff *skb2;
struct iphdr *iph;
- len = state->left;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > state->mtu)
len = state->mtu;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 38d29b175ca6..445a9ecaefa1 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -576,7 +576,7 @@ out:
return err;
}
-static void __ip_sock_set_tos(struct sock *sk, int val)
+void __ip_sock_set_tos(struct sock *sk, int val)
{
if (sk->sk_type == SOCK_STREAM) {
val &= ~INET_ECN_MASK;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 2dda856ca260..07274619b9ea 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -195,10 +195,6 @@ static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
return 1;
}
-static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
- FRA_GENERIC_POLICY,
-};
-
static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
struct fib_rule_hdr *frh, struct nlattr **tb,
struct netlink_ext_ack *extack)
@@ -231,7 +227,6 @@ static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
.compare = ipmr_rule_compare,
.fill = ipmr_rule_fill,
.nlgroup = RTNLGRP_IPV4_RULE,
- .policy = ipmr_rule_policy,
.owner = THIS_MODULE,
};
@@ -696,7 +691,7 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
- dev_put(dev);
+ dev_put_track(dev, &v->dev_tracker);
return 0;
}
@@ -896,6 +891,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
/* And finish update writing critical data */
write_lock_bh(&mrt_lock);
v->dev = dev;
+ netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
if (v->flags & VIFF_REGISTER)
mrt->mroute_reg_vif_num = vifi;
if (vifi+1 > mrt->maxvif)
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 63cb953bd019..67087f95579f 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -59,12 +59,8 @@ config NF_TABLES_ARP
endif # NF_TABLES
config NF_FLOW_TABLE_IPV4
- tristate "Netfilter flow table IPv4 module"
- depends on NF_FLOW_TABLE
- help
- This option adds the flow table IPv4 support.
-
- To compile it as a module, choose M here.
+ tristate
+ select NF_FLOW_TABLE_INET
config NF_DUP_IPV4
tristate "Netfilter IPv4 packet duplication to alternate destination"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index f38fb1368ddb..93bad1184251 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -24,9 +24,6 @@ obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
obj-$(CONFIG_NFT_FIB_IPV4) += nft_fib_ipv4.o
obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
-# flow table support
-obj-$(CONFIG_NF_FLOW_TABLE_IPV4) += nf_flow_table_ipv4.o
-
# generic IP tables
obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 8fd1aba8af31..b518f20c9a24 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -520,8 +520,11 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
if (IS_ERR(config))
return PTR_ERR(config);
}
- } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN))
+ } else if (memcmp(&config->clustermac, &cipinfo->clustermac, ETH_ALEN)) {
+ clusterip_config_entry_put(config);
+ clusterip_config_put(config);
return -EINVAL;
+ }
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0) {
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
index aba65fe90345..e69de29bb2d1 100644
--- a/net/ipv4/netfilter/nf_flow_table_ipv4.c
+++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
@@ -1,37 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <net/netfilter/nf_flow_table.h>
-#include <net/netfilter/nf_tables.h>
-
-static struct nf_flowtable_type flowtable_ipv4 = {
- .family = NFPROTO_IPV4,
- .init = nf_flow_table_init,
- .setup = nf_flow_table_offload_setup,
- .action = nf_flow_rule_route_ipv4,
- .free = nf_flow_table_free,
- .hook = nf_flow_offload_ip_hook,
- .owner = THIS_MODULE,
-};
-
-static int __init nf_flow_ipv4_module_init(void)
-{
- nft_register_flowtable_type(&flowtable_ipv4);
-
- return 0;
-}
-
-static void __exit nf_flow_ipv4_module_exit(void)
-{
- nft_unregister_flowtable_type(&flowtable_ipv4);
-}
-
-module_init(nf_flow_ipv4_module_init);
-module_exit(nf_flow_ipv4_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
-MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
-MODULE_DESCRIPTION("Netfilter flow table support");
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 5dbd4b5505eb..eeafeccebb8d 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -8,6 +8,7 @@
#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
@@ -1918,9 +1919,6 @@ static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
if (!replaced_nh->is_group)
return;
- /* new dsts must use only the new nexthop group */
- synchronize_net();
-
nhg = rtnl_dereference(replaced_nh->nh_grp);
for (i = 0; i < nhg->num_nh; i++) {
struct nh_grp_entry *nhge = &nhg->nh_entries[i];
@@ -2002,9 +2000,10 @@ static int replace_nexthop_grp(struct net *net, struct nexthop *old,
rcu_assign_pointer(old->nh_grp, newg);
+ /* Make sure concurrent readers are not using 'oldg' anymore. */
+ synchronize_net();
+
if (newg->resilient) {
- /* Make sure concurrent readers are not using 'oldg' anymore. */
- synchronize_net();
rcu_assign_pointer(oldg->res_table, tmp_table);
rcu_assign_pointer(oldg->spare->res_table, tmp_table);
}
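
With the hunks above, synchronize_net() now runs unconditionally right after the new group is published, so no RCU reader can still be walking the old group when its tables are reassigned. The ordering, sketched with hypothetical types (caller holds RTNL):

static void replace_published(struct holder *h, struct item *new_item)
{
        struct item *old_item = rtnl_dereference(h->cur);

        rcu_assign_pointer(h->cur, new_item);   /* publish the replacement */
        synchronize_net();                      /* wait out existing readers */
        /* old_item is now unreachable; safe to recycle or free it */
        kfree(old_item);
}
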
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 1e44a43acfe2..0e56df3a45e2 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -311,15 +311,11 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
- if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
- chk_addr_ret = RTN_LOCAL;
- else
- chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
-
- if ((!inet_can_nonlocal_bind(net, isk) &&
- chk_addr_ret != RTN_LOCAL) ||
- chk_addr_ret == RTN_MULTICAST ||
- chk_addr_ret == RTN_BROADCAST)
+ chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
+
+ if (!inet_addr_valid_or_nonlocal(net, inet_sk(sk),
+ addr->sin_addr.s_addr,
+ chk_addr_ret))
return -EADDRNOTAVAIL;
#if IS_ENABLED(CONFIG_IPV6)
@@ -998,6 +994,7 @@ struct proto ping_prot = {
.hash = ping_hash,
.unhash = ping_unhash,
.get_port = ping_get_port,
+ .put_port = ping_unhash,
.obj_size = sizeof(struct inet_sock),
};
EXPORT_SYMBOL(ping_prot);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bb446e60cf58..a53f256bf9d3 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -99,8 +99,8 @@ int raw_hash_sk(struct sock *sk)
write_lock_bh(&h->lock);
sk_add_node(sk, head);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock_bh(&h->lock);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
}
@@ -717,6 +717,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+ struct net *net = sock_net(sk);
u32 tb_id = RT_TABLE_LOCAL;
int ret = -EINVAL;
int chk_addr_ret;
@@ -725,16 +726,16 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
goto out;
if (sk->sk_bound_dev_if)
- tb_id = l3mdev_fib_table_by_index(sock_net(sk),
- sk->sk_bound_dev_if) ? : tb_id;
+ tb_id = l3mdev_fib_table_by_index(net,
+ sk->sk_bound_dev_if) ? : tb_id;
- chk_addr_ret = inet_addr_type_table(sock_net(sk), addr->sin_addr.s_addr,
- tb_id);
+ chk_addr_ret = inet_addr_type_table(net, addr->sin_addr.s_addr, tb_id);
ret = -EADDRNOTAVAIL;
- if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
- chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
+ if (!inet_addr_valid_or_nonlocal(net, inet, addr->sin_addr.s_addr,
+ chk_addr_ret))
goto out;
+
inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr;
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->inet_saddr = 0; /* Use device */
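
ping_check_bind_addr() and raw_bind() (and __inet6_bind() later in this series) now share one bind-address validity check. Judging from the open-coded tests being replaced, the helper behaves roughly like the sketch below; this is an approximation, not the in-tree definition:

static bool addr_valid_or_nonlocal(struct net *net, struct inet_sock *inet,
                                   __be32 addr, int addr_type)
{
        return inet_can_nonlocal_bind(net, inet) ||
               addr == htonl(INADDR_ANY) ||
               addr_type == RTN_LOCAL ||
               addr_type == RTN_MULTICAST ||
               addr_type == RTN_BROADCAST;
}
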
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0b4103b1e622..ff6f91cdb6c4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -110,14 +110,15 @@
#define RT_GC_TIMEOUT (300*HZ)
+#define DEFAULT_MIN_PMTU (512 + 20 + 20)
+#define DEFAULT_MTU_EXPIRES (10 * 60 * HZ)
+
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
-static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
-static u32 ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
@@ -602,7 +603,7 @@ static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
static u32 fnhe_hashfun(__be32 daddr)
{
- static siphash_key_t fnhe_hash_key __read_mostly;
+ static siphash_aligned_key_t fnhe_hash_key;
u64 hval;
net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
@@ -1018,13 +1019,13 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (old_mtu < mtu)
return;
- if (mtu < ip_rt_min_pmtu) {
+ if (mtu < net->ipv4.ip_rt_min_pmtu) {
lock = true;
- mtu = min(old_mtu, ip_rt_min_pmtu);
+ mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
}
if (rt->rt_pmtu == mtu && !lock &&
- time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
+ time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
return;
rcu_read_lock();
@@ -1034,7 +1035,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
fib_select_path(net, &res, fl4, NULL);
nhc = FIB_RES_NHC(res);
update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
- jiffies + ip_rt_mtu_expires);
+ jiffies + net->ipv4.ip_rt_mtu_expires);
}
rcu_read_unlock();
}
@@ -1531,8 +1532,9 @@ void rt_flush_dev(struct net_device *dev)
if (rt->dst.dev != dev)
continue;
rt->dst.dev = blackhole_netdev;
- dev_hold(rt->dst.dev);
- dev_put(dev);
+ dev_replace_track(dev, blackhole_netdev,
+ &rt->dst.dev_tracker,
+ GFP_ATOMIC);
}
spin_unlock_bh(&ul->lock);
}
@@ -2819,7 +2821,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
new->output = dst_discard_out;
new->dev = net->loopback_dev;
- dev_hold(new->dev);
+ dev_hold_track(new->dev, &new->dev_tracker, GFP_ATOMIC);
rt->rt_is_input = ort->rt_is_input;
rt->rt_iif = ort->rt_iif;
@@ -3534,21 +3536,6 @@ static struct ctl_table ipv4_route_table[] = {
.proc_handler = proc_dointvec,
},
{
- .procname = "mtu_expires",
- .data = &ip_rt_mtu_expires,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "min_pmtu",
- .data = &ip_rt_min_pmtu,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &ip_min_valid_pmtu,
- },
- {
.procname = "min_adv_mss",
.data = &ip_rt_min_advmss,
.maxlen = sizeof(int),
@@ -3560,13 +3547,28 @@ static struct ctl_table ipv4_route_table[] = {
static const char ipv4_route_flush_procname[] = "flush";
-static struct ctl_table ipv4_route_flush_table[] = {
+static struct ctl_table ipv4_route_netns_table[] = {
{
.procname = ipv4_route_flush_procname,
.maxlen = sizeof(int),
.mode = 0200,
.proc_handler = ipv4_sysctl_rtcache_flush,
},
+ {
+ .procname = "min_pmtu",
+ .data = &init_net.ipv4.ip_rt_min_pmtu,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &ip_min_valid_pmtu,
+ },
+ {
+ .procname = "mtu_expires",
+ .data = &init_net.ipv4.ip_rt_mtu_expires,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
{ },
};
@@ -3574,9 +3576,11 @@ static __net_init int sysctl_route_net_init(struct net *net)
{
struct ctl_table *tbl;
- tbl = ipv4_route_flush_table;
+ tbl = ipv4_route_netns_table;
if (!net_eq(net, &init_net)) {
- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
+ int i;
+
+ tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);
if (!tbl)
goto err_dup;
@@ -3585,6 +3589,12 @@ static __net_init int sysctl_route_net_init(struct net *net)
if (tbl[0].procname != ipv4_route_flush_procname)
tbl[0].procname = NULL;
}
+
+ /* Update the data pointers to point into the current struct net,
+ * except for the first element, flush, which has none.
+ */
+ for (i = 1; i < ARRAY_SIZE(ipv4_route_netns_table) - 1; i++)
+ tbl[i].data += (void *)net - (void *)&init_net;
}
tbl[0].extra1 = net;
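
Per-netns sysctl tables all rely on the same trick used above: the template's ->data fields point at init_net, and for every other namespace each pointer is shifted by the byte offset between the two struct net instances. A sketch with a hypothetical helper name:

static void rebase_ctl_data(struct ctl_table *tbl, size_t entries,
                            struct net *net)
{
        size_t i;

        for (i = 0; i < entries; i++)
                if (tbl[i].data)                /* skip entries without data */
                        tbl[i].data += (void *)net - (void *)&init_net;
}
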
@@ -3594,7 +3604,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
return 0;
err_reg:
- if (tbl != ipv4_route_flush_table)
+ if (tbl != ipv4_route_netns_table)
kfree(tbl);
err_dup:
return -ENOMEM;
@@ -3606,7 +3616,7 @@ static __net_exit void sysctl_route_net_exit(struct net *net)
tbl = net->ipv4.route_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv4.route_hdr);
- BUG_ON(tbl == ipv4_route_flush_table);
+ BUG_ON(tbl == ipv4_route_netns_table);
kfree(tbl);
}
@@ -3616,6 +3626,18 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
};
#endif
+static __net_init int netns_ip_rt_init(struct net *net)
+{
+ /* Set default value for namespaceified sysctls */
+ net->ipv4.ip_rt_min_pmtu = DEFAULT_MIN_PMTU;
+ net->ipv4.ip_rt_mtu_expires = DEFAULT_MTU_EXPIRES;
+ return 0;
+}
+
+static struct pernet_operations __net_initdata ip_rt_ops = {
+ .init = netns_ip_rt_init,
+};
+
static __net_init int rt_genid_init(struct net *net)
{
atomic_set(&net->ipv4.rt_genid, 0);
@@ -3721,6 +3743,7 @@ int __init ip_rt_init(void)
#ifdef CONFIG_SYSCTL
register_pernet_subsys(&sysctl_route_ops);
#endif
+ register_pernet_subsys(&ip_rt_ops);
register_pernet_subsys(&rt_genid_ops);
register_pernet_subsys(&ipv4_inetpeer_ops);
return 0;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 8696dc343ad2..2cb3b852d148 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -14,7 +14,7 @@
#include <net/tcp.h>
#include <net/route.h>
-static siphash_key_t syncookie_secret[2] __read_mostly;
+static siphash_aligned_key_t syncookie_secret[2];
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2bb28bfd83bf..3b75836db19b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -292,7 +292,7 @@ EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);
-atomic_long_t tcp_memory_allocated; /* Current allocated memory. */
+atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp; /* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
#if IS_ENABLED(CONFIG_SMC)
@@ -303,7 +303,7 @@ EXPORT_SYMBOL(tcp_have_smc);
/*
* Current number of TCP sockets.
*/
-struct percpu_counter tcp_sockets_allocated;
+struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);
/*
@@ -456,7 +456,6 @@ void tcp_init_sock(struct sock *sk)
WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
sk_sockets_allocated_inc(sk);
- sk->sk_route_forced_caps = NETIF_F_GSO;
}
EXPORT_SYMBOL(tcp_init_sock);
@@ -546,10 +545,11 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (state != TCP_SYN_SENT &&
(state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
int target = sock_rcvlowat(sk, 0, INT_MAX);
+ u16 urg_data = READ_ONCE(tp->urg_data);
- if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
- !sock_flag(sk, SOCK_URGINLINE) &&
- tp->urg_data)
+ if (unlikely(urg_data) &&
+ READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
+ !sock_flag(sk, SOCK_URGINLINE))
target++;
if (tcp_stream_is_readable(sk, target))
@@ -574,7 +574,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
} else
mask |= EPOLLOUT | EPOLLWRNORM;
- if (tp->urg_data & TCP_URG_VALID)
+ if (urg_data & TCP_URG_VALID)
mask |= EPOLLPRI;
} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
/* Active TCP fastopen socket with defer_connect
@@ -608,7 +608,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
unlock_sock_fast(sk, slow);
break;
case SIOCATMARK:
- answ = tp->urg_data &&
+ answ = READ_ONCE(tp->urg_data) &&
READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
break;
case SIOCOUTQ:
@@ -1466,7 +1466,7 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
char c = tp->urg_data;
if (!(flags & MSG_PEEK))
- tp->urg_data = TCP_URG_READ;
+ WRITE_ONCE(tp->urg_data, TCP_URG_READ);
/* Read urgent data. */
msg->msg_flags |= MSG_OOB;
@@ -1580,6 +1580,36 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
tcp_send_ack(sk);
}
+void __sk_defer_free_flush(struct sock *sk)
+{
+ struct llist_node *head;
+ struct sk_buff *skb, *n;
+
+ head = llist_del_all(&sk->defer_list);
+ llist_for_each_entry_safe(skb, n, head, ll_node) {
+ prefetch(n);
+ skb_mark_not_on_list(skb);
+ __kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL(__sk_defer_free_flush);
+
+static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ __skb_unlink(skb, &sk->sk_receive_queue);
+ if (likely(skb->destructor == sock_rfree)) {
+ sock_rfree(skb);
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ !llist_empty(&sk->defer_list)) {
+ llist_add(&skb->ll_node, &sk->defer_list);
+ return;
+ }
+ }
+ __kfree_skb(skb);
+}
+
static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
struct sk_buff *skb;
@@ -1599,7 +1629,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
* splitted a fat GRO packet, while we released socket lock
* in skb_splice_bits()
*/
- sk_eat_skb(sk, skb);
+ tcp_eat_recv_skb(sk, skb);
}
return NULL;
}
@@ -1633,7 +1663,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
len = skb->len - offset;
/* Stop reading if we hit a patch of urgent data */
- if (tp->urg_data) {
+ if (unlikely(tp->urg_data)) {
u32 urg_offset = tp->urg_seq - seq;
if (urg_offset < len)
len = urg_offset;
@@ -1665,11 +1695,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
continue;
}
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
- sk_eat_skb(sk, skb);
+ tcp_eat_recv_skb(sk, skb);
++seq;
break;
}
- sk_eat_skb(sk, skb);
+ tcp_eat_recv_skb(sk, skb);
if (!desc->count)
break;
WRITE_ONCE(tp->copied_seq, seq);
@@ -2329,7 +2359,7 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
u32 offset;
/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
- if (tp->urg_data && tp->urg_seq == *seq) {
+ if (unlikely(tp->urg_data) && tp->urg_seq == *seq) {
if (copied)
break;
if (signal_pending(current)) {
@@ -2372,10 +2402,10 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
break;
if (copied) {
- if (sk->sk_err ||
+ if (!timeo ||
+ sk->sk_err ||
sk->sk_state == TCP_CLOSE ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
- !timeo ||
signal_pending(current))
break;
} else {
@@ -2409,13 +2439,12 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
}
}
- tcp_cleanup_rbuf(sk, copied);
-
if (copied >= target) {
/* Do not sleep, just process backlog. */
- release_sock(sk);
- lock_sock(sk);
+ __sk_flush_backlog(sk);
} else {
+ tcp_cleanup_rbuf(sk, copied);
+ sk_defer_free_flush(sk);
sk_wait_data(sk, &timeo, last);
}
@@ -2435,7 +2464,7 @@ found_ok_skb:
used = len;
/* Do we have urgent data here? */
- if (tp->urg_data) {
+ if (unlikely(tp->urg_data)) {
u32 urg_offset = tp->urg_seq - *seq;
if (urg_offset < used) {
if (!urg_offset) {
@@ -2469,8 +2498,8 @@ found_ok_skb:
tcp_rcv_space_adjust(sk);
skip_copy:
- if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
- tp->urg_data = 0;
+ if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) {
+ WRITE_ONCE(tp->urg_data, 0);
tcp_fast_path_check(sk);
}
@@ -2485,14 +2514,14 @@ skip_copy:
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto found_fin_ok;
if (!(flags & MSG_PEEK))
- sk_eat_skb(sk, skb);
+ tcp_eat_recv_skb(sk, skb);
continue;
found_fin_ok:
/* Process the FIN. */
WRITE_ONCE(*seq, *seq + 1);
if (!(flags & MSG_PEEK))
- sk_eat_skb(sk, skb);
+ tcp_eat_recv_skb(sk, skb);
break;
} while (len > 0);
@@ -2534,6 +2563,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
ret = tcp_recvmsg_locked(sk, msg, len, nonblock, flags, &tss,
&cmsg_flags);
release_sock(sk);
+ sk_defer_free_flush(sk);
if (cmsg_flags && ret >= 0) {
if (cmsg_flags & TCP_CMSG_TS)
@@ -2964,7 +2994,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
- tp->urg_data = 0;
+ WRITE_ONCE(tp->urg_data, 0);
tcp_write_queue_purge(sk);
tcp_fastopen_active_disable_ofo_check(sk);
skb_rbtree_purge(&tp->out_of_order_queue);
@@ -3058,7 +3088,7 @@ int tcp_disconnect(struct sock *sk, int flags)
sk->sk_frag.page = NULL;
sk->sk_frag.offset = 0;
}
-
+ sk_defer_free_flush(sk);
sk_error_report(sk);
return 0;
}
@@ -3176,7 +3206,7 @@ static void tcp_enable_tx_delay(void)
* TCP_CORK can be set together with TCP_NODELAY and it is stronger than
* TCP_NODELAY.
*/
-static void __tcp_sock_set_cork(struct sock *sk, bool on)
+void __tcp_sock_set_cork(struct sock *sk, bool on)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -3204,7 +3234,7 @@ EXPORT_SYMBOL(tcp_sock_set_cork);
* However, when TCP_NODELAY is set we make an explicit push, which overrides
* even TCP_CORK for currently queued segments.
*/
-static void __tcp_sock_set_nodelay(struct sock *sk, bool on)
+void __tcp_sock_set_nodelay(struct sock *sk, bool on)
{
if (on) {
tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
@@ -3773,10 +3803,12 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
tcp_get_info_chrono_stats(tp, info);
info->tcpi_segs_out = tp->segs_out;
- info->tcpi_segs_in = tp->segs_in;
+
+ /* segs_in and data_segs_in can be updated from tcp_segs_in() in BH context */
+ info->tcpi_segs_in = READ_ONCE(tp->segs_in);
+ info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in);
info->tcpi_min_rtt = tcp_min_rtt(tp);
- info->tcpi_data_segs_in = tp->data_segs_in;
info->tcpi_data_segs_out = tp->data_segs_out;
info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
@@ -4185,6 +4217,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
&zc, &len, err);
release_sock(sk);
+ sk_defer_free_flush(sk);
if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags))
goto zerocopy_rcv_cmsg;
switch (len) {
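
tcp_eat_recv_skb() and __sk_defer_free_flush() above introduce a lock-free deferral: skbs whose destructor has already run are pushed onto sk->defer_list and freed later, outside the hot receive path. The underlying llist pattern, sketched with hypothetical names:

#include <linux/llist.h>
#include <linux/slab.h>

struct deferred {
        struct llist_node node;
        /* payload ... */
};

static LLIST_HEAD(defer_head);

static void defer_free(struct deferred *d)
{
        llist_add(&d->node, &defer_head);       /* lock-free push, any context */
}

static void flush_deferred(void)
{
        struct llist_node *first = llist_del_all(&defer_head);
        struct deferred *d, *tmp;

        llist_for_each_entry_safe(d, tmp, first, node)
                kfree(d);                       /* drained in one batch */
}
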
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index f70aa0932bd6..9b9b02052fd3 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -196,12 +196,39 @@ msg_bytes_ready:
long timeo;
int data;
+ if (sock_flag(sk, SOCK_DONE))
+ goto out;
+
+ if (sk->sk_err) {
+ copied = sock_error(sk);
+ goto out;
+ }
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ goto out;
+
+ if (sk->sk_state == TCP_CLOSE) {
+ copied = -ENOTCONN;
+ goto out;
+ }
+
timeo = sock_rcvtimeo(sk, nonblock);
+ if (!timeo) {
+ copied = -EAGAIN;
+ goto out;
+ }
+
+ if (signal_pending(current)) {
+ copied = sock_intr_errno(timeo);
+ goto out;
+ }
+
data = tcp_msg_wait_data(sk, psock, timeo);
if (data && !sk_psock_queue_empty(psock))
goto msg_bytes_ready;
copied = -EAGAIN;
}
+out:
release_sock(sk);
sk_psock_put(sk, psock);
return copied;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0ce46849ec3d..dc49a3d551eb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3601,7 +3601,7 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
}
/* RFC 5961 7 [ACK Throttling] */
-static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
+static void tcp_send_challenge_ack(struct sock *sk)
{
/* unprotected vars, we dont care of overwrites */
static u32 challenge_timestamp;
@@ -3763,7 +3763,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
if (before(ack, prior_snd_una - tp->max_window)) {
if (!(flag & FLAG_NO_CHALLENGE_ACK))
- tcp_send_challenge_ack(sk, skb);
+ tcp_send_challenge_ack(sk);
return -1;
}
goto old_ack;
@@ -5591,7 +5591,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
}
}
- tp->urg_data = TCP_URG_NOTYET;
+ WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET);
WRITE_ONCE(tp->urg_seq, ptr);
/* Disable header prediction. */
@@ -5604,11 +5604,11 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
struct tcp_sock *tp = tcp_sk(sk);
/* Check if we get a new urgent pointer - normally not. */
- if (th->urg)
+ if (unlikely(th->urg))
tcp_check_urg(sk, th);
/* Do we wait for any urgent data? - normally not... */
- if (tp->urg_data == TCP_URG_NOTYET) {
+ if (unlikely(tp->urg_data == TCP_URG_NOTYET)) {
u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
th->syn;
@@ -5617,7 +5617,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
u8 tmp;
if (skb_copy_bits(skb, ptr, &tmp, 1))
BUG();
- tp->urg_data = TCP_URG_VALID | tmp;
+ WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp);
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk);
}
@@ -5726,7 +5726,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
if (tp->syn_fastopen && !tp->data_segs_in &&
sk->sk_state == TCP_ESTABLISHED)
tcp_fastopen_active_disable(sk);
- tcp_send_challenge_ack(sk, skb);
+ tcp_send_challenge_ack(sk);
}
goto discard;
}
@@ -5741,7 +5741,7 @@ syn_challenge:
if (syn_inerr)
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
- tcp_send_challenge_ack(sk, skb);
+ tcp_send_challenge_ack(sk);
goto discard;
}
@@ -6456,7 +6456,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (!acceptable) {
if (sk->sk_state == TCP_SYN_RECV)
return 1; /* send one RST */
- tcp_send_challenge_ack(sk, skb);
+ tcp_send_challenge_ack(sk);
goto discard;
}
switch (sk->sk_state) {
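
tp->urg_data is now read locklessly from tcp_poll() and tcp_ioctl(), so every store and every unlocked load of it is annotated. The general pattern, with a hypothetical structure (assumes the usual kernel headers):

#include <linux/types.h>

struct state {
        u16 flags;                              /* written under the lock, read locklessly */
};

static void update_flags(struct state *s, u16 val)
{
        WRITE_ONCE(s->flags, val);              /* annotated store */
}

static u16 peek_flags(const struct state *s)
{
        return READ_ONCE(s->flags);             /* annotated lockless load */
}
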
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 084df223b5df..b3f34e366b27 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1182,7 +1182,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
if (!md5sig)
return -ENOMEM;
- sk_nocaps_add(sk, NETIF_F_GSO_MASK);
+ sk_gso_disable(sk);
INIT_HLIST_HEAD(&md5sig->head);
rcu_assign_pointer(tp->md5sig_info, md5sig);
}
@@ -1620,7 +1620,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
*/
tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags,
key->key, key->keylen, GFP_ATOMIC);
- sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
+ sk_gso_disable(newsk);
}
#endif
@@ -1803,8 +1803,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
- u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
- u32 tail_gso_size, tail_gso_segs;
+ u32 limit, tail_gso_size, tail_gso_segs;
struct skb_shared_info *shinfo;
const struct tcphdr *th;
struct tcphdr *thtail;
@@ -1912,7 +1911,7 @@ no_coalesce:
* to reduce memory overhead, so add a little headroom here.
* Few sockets backlog are possibly concurrently non empty.
*/
- limit += 64*1024;
+ limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf) + 64*1024;
if (unlikely(sk_add_backlog(sk, skb, limit))) {
bh_unlock_sock(sk);
@@ -1972,8 +1971,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
const struct tcphdr *th;
bool refcounted;
struct sock *sk;
+ int drop_reason;
int ret;
+ drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
if (skb->pkt_type != PACKET_HOST)
goto discard_it;
@@ -1985,8 +1986,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
th = (const struct tcphdr *)skb->data;
- if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
+ if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) {
+ drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
goto bad_packet;
+ }
if (!pskb_may_pull(skb, th->doff * 4))
goto discard_it;
@@ -2091,8 +2094,10 @@ process:
nf_reset_ct(skb);
- if (tcp_filter(sk, skb))
+ if (tcp_filter(sk, skb)) {
+ drop_reason = SKB_DROP_REASON_TCP_FILTER;
goto discard_and_relse;
+ }
th = (const struct tcphdr *)skb->data;
iph = ip_hdr(skb);
tcp_v4_fill_cb(skb, iph, th);
@@ -2106,6 +2111,7 @@ process:
sk_incoming_cpu_update(sk);
+ sk_defer_free_flush(sk);
bh_lock_sock_nested(sk);
tcp_segs_in(tcp_sk(sk), skb);
ret = 0;
@@ -2124,6 +2130,7 @@ put_and_return:
return ret;
no_tcp_socket:
+ drop_reason = SKB_DROP_REASON_NO_SOCKET;
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
@@ -2131,6 +2138,7 @@ no_tcp_socket:
if (tcp_checksum_complete(skb)) {
csum_error:
+ drop_reason = SKB_DROP_REASON_TCP_CSUM;
trace_tcp_bad_csum(skb);
__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
@@ -2141,7 +2149,7 @@ bad_packet:
discard_it:
/* Discard frame. */
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
return 0;
discard_and_relse:
@@ -3076,6 +3084,7 @@ struct proto tcp_prot = {
.hash = inet_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
+ .put_port = inet_put_port,
#ifdef CONFIG_BPF_SYSCALL
.psock_update_sk_prot = tcp_bpf_update_proto,
#endif
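
The new drop_reason plumbing above follows one pattern: initialise a reason, refine it at each failure site, and hand it to kfree_skb_reason() at the single drop label so the kfree_skb tracepoint can report why the packet died. A compressed sketch; the function and the checks it calls are hypothetical:

static int rx_one(struct sk_buff *skb)
{
        enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;

        if (skb->len < sizeof(struct tcphdr)) {
                reason = SKB_DROP_REASON_PKT_TOO_SMALL;
                goto drop;
        }
        if (!checksum_ok(skb)) {                /* hypothetical check */
                reason = SKB_DROP_REASON_TCP_CSUM;
                goto drop;
        }
        return deliver(skb);                    /* hypothetical */
drop:
        kfree_skb_reason(skb, reason);
        return 0;
}
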
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index fc61cd3fea65..30abde86db45 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -8,6 +8,7 @@
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
+#include <net/gro.h>
#include <net/tcp.h>
#include <net/protocol.h>
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 2e6e5a70168e..5079832af5c1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1359,7 +1359,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
#ifdef CONFIG_TCP_MD5SIG
/* Calculate the MD5 hash, as we have all we need now */
if (md5) {
- sk_nocaps_add(sk, NETIF_F_GSO_MASK);
+ sk_gso_disable(sk);
tp->af_specific->calc_md5_hash(opts.hash_location,
md5, sk, skb);
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0cd6b857e7ec..464590ea922e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -74,6 +74,7 @@
#define pr_fmt(fmt) "UDP: " fmt
+#include <linux/bpf-cgroup.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <linux/memblock.h>
@@ -122,7 +123,7 @@ EXPORT_SYMBOL(udp_table);
long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);
-atomic_long_t udp_memory_allocated;
+atomic_long_t udp_memory_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(udp_memory_allocated);
#define MAX_UDP_PORTS 65536
@@ -459,7 +460,7 @@ static struct sock *udp4_lookup_run_bpf(struct net *net,
struct udp_table *udptable,
struct sk_buff *skb,
__be32 saddr, __be16 sport,
- __be32 daddr, u16 hnum)
+ __be32 daddr, u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@@ -467,8 +468,8 @@ static struct sock *udp4_lookup_run_bpf(struct net *net,
if (udptable != &udp_table)
return NULL; /* only UDP is supported */
- no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP,
- saddr, sport, daddr, hnum, &sk);
+ no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, saddr, sport,
+ daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@@ -504,7 +505,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
sk = udp4_lookup_run_bpf(net, udptable, skb,
- saddr, sport, daddr, hnum);
+ saddr, sport, daddr, hnum, dif);
if (sk) {
result = sk;
goto done;
@@ -2410,6 +2411,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
__be32 saddr, daddr;
struct net *net = dev_net(skb->dev);
bool refcounted;
+ int drop_reason;
+
+ drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
/*
* Validate the packet.
@@ -2465,6 +2469,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (udp_lib_checksum_complete(skb))
goto csum_error;
+ drop_reason = SKB_DROP_REASON_NO_SOCKET;
__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -2472,10 +2477,11 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
* Hmm. We got an UDP packet to a port to which we
* don't wanna listen. Ignore it.
*/
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
return 0;
short_packet:
+ drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
proto == IPPROTO_UDPLITE ? "Lite" : "",
&saddr, ntohs(uh->source),
@@ -2488,6 +2494,7 @@ csum_error:
* RFC1122: OK. Discards the bad packet silently (as far as
* the network is concerned, anyway) as per 4.1.3.4 (MUST).
*/
+ drop_reason = SKB_DROP_REASON_UDP_CSUM;
net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
proto == IPPROTO_UDPLITE ? "Lite" : "",
&saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
@@ -2495,7 +2502,7 @@ csum_error:
__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
- kfree_skb(skb);
+ kfree_skb_reason(skb, drop_reason);
return 0;
}
@@ -2926,6 +2933,7 @@ struct proto udp_prot = {
.unhash = udp_lib_unhash,
.rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
+ .put_port = udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
.psock_update_sk_prot = udp_bpf_update_proto,
#endif
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 86d32a1e62ac..6d1a4bec2614 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -7,6 +7,7 @@
*/
#include <linux/skbuff.h>
+#include <net/gro.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>
@@ -424,6 +425,33 @@ out:
return segs;
}
+static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
+{
+ if (unlikely(p->len + skb->len >= 65536))
+ return -E2BIG;
+
+ if (NAPI_GRO_CB(p)->last == p)
+ skb_shinfo(p)->frag_list = skb;
+ else
+ NAPI_GRO_CB(p)->last->next = skb;
+
+ skb_pull(skb, skb_gro_offset(skb));
+
+ NAPI_GRO_CB(p)->last = skb;
+ NAPI_GRO_CB(p)->count++;
+ p->data_len += skb->len;
+
+ /* sk ownership - if any - completely transferred to the aggregated packet */
+ skb->destructor = NULL;
+ p->truesize += skb->truesize;
+ p->len += skb->len;
+
+ NAPI_GRO_CB(skb)->same_flow = 1;
+
+ return 0;
+}
+
+
#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
struct sk_buff *skb)
@@ -600,13 +628,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
inet_gro_compute_pseudo);
skip:
NAPI_GRO_CB(skb)->is_ipv6 = 0;
- rcu_read_lock();
if (static_branch_unlikely(&udp_encap_needed_key))
sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);
pp = udp_gro_receive(head, skb, uh, sk);
- rcu_read_unlock();
return pp;
flush:
@@ -641,7 +667,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
uh->len = newlen;
- rcu_read_lock();
sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
udp4_lib_lookup_skb, skb, uh->source, uh->dest);
if (sk && udp_sk(sk)->gro_complete) {
@@ -662,7 +687,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
} else {
err = udp_gro_complete_segment(skb);
}
- rcu_read_unlock();
if (skb->remcsum_offload)
skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 9ebd54752e03..9e83bcb6bc99 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -77,7 +77,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
xdst->u.rt.rt_iif = fl4->flowi4_iif;
xdst->u.dst.dev = dev;
- dev_hold(dev);
+ dev_hold_track(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC);
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3445f8017430..3eee17790a82 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -405,13 +405,13 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
if (ndev->cnf.forwarding)
dev_disable_lro(dev);
/* We refer to the device */
- dev_hold(dev);
+ dev_hold_track(dev, &ndev->dev_tracker, GFP_KERNEL);
if (snmp6_alloc_dev(ndev) < 0) {
netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
__func__);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
- dev_put(dev);
+ dev_put_track(dev, &ndev->dev_tracker);
kfree(ndev);
return ERR_PTR(err);
}
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 1d4054bb345b..881d1477d24a 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -263,7 +263,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
#ifdef NET_REFCNT_DEBUG
pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
#endif
- dev_put(dev);
+ dev_put_track(dev, &idev->dev_tracker);
if (!idev->dead) {
pr_warn("Freeing alive inet6 device %p\n", idev);
return;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index dab4a047590b..8fe7900f1949 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -337,11 +337,8 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
rcu_read_unlock();
- if (!inet_can_nonlocal_bind(net, inet) &&
- v4addr != htonl(INADDR_ANY) &&
- chk_addr_ret != RTN_LOCAL &&
- chk_addr_ret != RTN_MULTICAST &&
- chk_addr_ret != RTN_BROADCAST) {
+ if (!inet_addr_valid_or_nonlocal(net, inet, v4addr,
+ chk_addr_ret)) {
err = -EADDRNOTAVAIL;
goto out;
}
@@ -416,6 +413,8 @@ static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len,
if (err) {
sk->sk_ipv6only = saved_ipv6only;
inet_reset_saddr(sk);
+ if (sk->sk_prot->put_port)
+ sk->sk_prot->put_port(sk);
goto out;
}
}
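
The new ->put_port callback (wired up for TCP, UDP and ping elsewhere in this series) lets __inet6_bind() undo a successful ->get_port() when a later bind step fails, instead of leaving the port hashed until close. Roughly, with finish_bind() as a hypothetical stand-in for that later step:

static int bind_tail(struct sock *sk, unsigned short snum)
{
        int err;

        if (sk->sk_prot->get_port(sk, snum))
                return -EADDRINUSE;

        err = finish_bind(sk);                  /* hypothetical later step */
        if (err && sk->sk_prot->put_port)
                sk->sk_prot->put_port(sk);      /* undo the successful get_port */

        return err;
}
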
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 828e62514260..b5995c1f4d7a 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -175,7 +175,6 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
* See 11.3.2 of RFC 3775 for details.
*/
if (opt[off] == IPV6_TLV_HAO) {
- struct in6_addr final_addr;
struct ipv6_destopt_hao *hao;
hao = (struct ipv6_destopt_hao *)&opt[off];
@@ -184,9 +183,7 @@ static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *des
hao->length);
goto bad;
}
- final_addr = hao->addr;
- hao->addr = iph->saddr;
- iph->saddr = final_addr;
+ swap(hao->addr, iph->saddr);
}
break;
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index f0bac6f7ab6b..8bb2c407b46b 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -114,7 +114,6 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
- struct esp_output_extra *extra = esp_tmp_extra(tmp);
struct crypto_aead *aead = x->data;
int extralen = 0;
u8 *iv;
@@ -122,7 +121,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
struct scatterlist *sg;
if (x->props.flags & XFRM_STATE_ESN)
- extralen += sizeof(*extra);
+ extralen += sizeof(struct esp_output_extra);
iv = esp_tmp_iv(aead, tmp, extralen);
req = esp_tmp_req(aead, iv);
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index a349d4798077..ba5e81cd569c 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -16,6 +16,7 @@
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <net/gro.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 38ece3b7b839..77e34aec7e82 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -686,7 +686,6 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
struct net *net = dev_net(skb->dev);
int accept_source_route = net->ipv6.devconf_all->accept_source_route;
- idev = __in6_dev_get(skb->dev);
if (idev && accept_source_route > idev->cnf.accept_source_route)
accept_source_route = idev->cnf.accept_source_route;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index dcedfe29d9d9..ec029c86ae06 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -340,10 +340,6 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_match(struct fib_rule *rule,
return 1;
}
-static const struct nla_policy fib6_rule_policy[FRA_MAX+1] = {
- FRA_GENERIC_POLICY,
-};
-
static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
struct fib_rule_hdr *frh,
struct nlattr **tb,
@@ -459,7 +455,6 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.fill = fib6_rule_fill,
.nlmsg_payload = fib6_rule_nlmsg_payload,
.nlgroup = RTNLGRP_IPV6_RULE,
- .policy = fib6_rule_policy,
.owner = THIS_MODULE,
.fro_net = &init_net,
};
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index a7c31ab67c5d..96c5cc0f30ce 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -57,6 +57,7 @@
#include <net/protocol.h>
#include <net/raw.h>
#include <net/rawv6.h>
+#include <net/seg6.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
@@ -820,6 +821,7 @@ out_bh_enable:
void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
+ struct inet6_skb_parm *opt = IP6CB(skb);
const struct inet6_protocol *ipprot;
int inner_offset;
__be16 frag_off;
@@ -829,6 +831,8 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto out;
+ seg6_icmp_srh(skb, opt);
+
nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
if (ipv6_ext_hdr(nexthdr)) {
/* now skip over extension headers */
@@ -853,7 +857,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
ipprot = rcu_dereference(inet6_protos[nexthdr]);
if (ipprot && ipprot->err_handler)
- ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
+ ipprot->err_handler(skb, opt, type, code, inner_offset, info);
raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
return;
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 67c9114835c8..4514444e96c8 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -165,7 +165,7 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
- const u16 hnum)
+ const u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@@ -173,8 +173,8 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net,
if (hashinfo != &tcp_hashinfo)
return NULL; /* only TCP is supported */
- no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP,
- saddr, sport, daddr, hnum, &sk);
+ no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport,
+ daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@@ -198,7 +198,7 @@ struct sock *inet6_lookup_listener(struct net *net,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
result = inet6_lookup_run_bpf(net, hashinfo, skb, doff,
- saddr, sport, daddr, hnum);
+ saddr, sport, daddr, hnum, dif);
if (result)
goto done;
}
diff --git a/net/ipv6/ioam6.c b/net/ipv6/ioam6.c
index 122a3d47424c..e159eb4328a8 100644
--- a/net/ipv6/ioam6.c
+++ b/net/ipv6/ioam6.c
@@ -13,10 +13,12 @@
#include <linux/ioam6.h>
#include <linux/ioam6_genl.h>
#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
#include <net/addrconf.h>
#include <net/genetlink.h>
#include <net/ioam6.h>
+#include <net/sch_generic.h>
static void ioam6_ns_release(struct ioam6_namespace *ns)
{
@@ -717,7 +719,19 @@ static void __ioam6_fill_trace_data(struct sk_buff *skb,
/* queue depth */
if (trace->type.bit6) {
- *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ struct netdev_queue *queue;
+ struct Qdisc *qdisc;
+ __u32 qlen, backlog;
+
+ if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) {
+ *(__be32 *)data = cpu_to_be32(IOAM6_U32_UNAVAILABLE);
+ } else {
+ queue = skb_get_tx_queue(skb_dst(skb)->dev, skb);
+ qdisc = rcu_dereference(queue->qdisc);
+ qdisc_qstats_qlen_backlog(qdisc, &qlen, &backlog);
+
+ *(__be32 *)data = cpu_to_be32(backlog);
+ }
data += sizeof(__be32);
}
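
The IOAM queue-depth field is now filled from the egress qdisc's backlog instead of being reported as unavailable. Reading it boils down to the following sketch (hypothetical helper; the caller runs on the transmit path under RCU, and backlog is a byte count):

static __be32 egress_backlog(struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;
        struct netdev_queue *txq = skb_get_tx_queue(dev, skb);
        struct Qdisc *qdisc = rcu_dereference(txq->qdisc);
        __u32 qlen, backlog;

        qdisc_qstats_qlen_backlog(qdisc, &qlen, &backlog);
        return cpu_to_be32(backlog);            /* bytes currently queued */
}
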
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 0371d2c14145..463c37dea449 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -15,6 +15,7 @@
#define pr_fmt(fmt) "IPv6: " fmt
+#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index d831d2439693..8753e9cec326 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -403,7 +403,7 @@ static void ip6erspan_tunnel_uninit(struct net_device *dev)
ip6erspan_tunnel_unlink_md(ign, t);
ip6gre_tunnel_unlink(ign, t);
dst_cache_reset(&t->dst_cache);
- dev_put(dev);
+ dev_put_track(dev, &t->dev_tracker);
}
static void ip6gre_tunnel_uninit(struct net_device *dev)
@@ -416,7 +416,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
if (ign->fb_tunnel_dev == dev)
WRITE_ONCE(ign->fb_tunnel_dev, NULL);
dst_cache_reset(&t->dst_cache);
- dev_put(dev);
+ dev_put_track(dev, &t->dev_tracker);
}
@@ -755,6 +755,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
fl6->daddr = key->u.ipv6.dst;
fl6->flowlabel = key->label;
fl6->flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+ fl6->fl6_gre_key = tunnel_id_to_key32(key->tun_id);
dsfield = key->tos;
flags = key->tun_flags &
@@ -990,6 +991,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
fl6.daddr = key->u.ipv6.dst;
fl6.flowlabel = key->label;
fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+ fl6.fl6_gre_key = tunnel_id_to_key32(key->tun_id);
dsfield = key->tos;
if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
@@ -1098,6 +1100,7 @@ static void ip6gre_tnl_link_config_common(struct ip6_tnl *t)
fl6->flowi6_oif = p->link;
fl6->flowlabel = 0;
fl6->flowi6_proto = IPPROTO_GRE;
+ fl6->fl6_gre_key = t->parms.o_key;
if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
@@ -1496,7 +1499,7 @@ static int ip6gre_tunnel_init_common(struct net_device *dev)
}
ip6gre_tnl_init_features(dev);
- dev_hold(dev);
+ dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL);
return 0;
cleanup_dst_cache_init:
@@ -1544,7 +1547,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
static struct inet6_protocol ip6gre_protocol __read_mostly = {
.handler = gre_rcv,
.err_handler = ip6gre_err,
- .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
+ .flags = INET6_PROTO_FINAL,
};
static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
@@ -1888,7 +1891,7 @@ static int ip6erspan_tap_init(struct net_device *dev)
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip6erspan_tnl_link_config(tunnel, 1);
- dev_hold(dev);
+ dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL);
return 0;
cleanup_dst_cache_init:
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 1cbd49d5788d..b29e9ba5e113 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -208,7 +208,6 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
flush += ntohs(iph->payload_len) != skb_gro_len(skb);
- rcu_read_lock();
proto = iph->nexthdr;
ops = rcu_dereference(inet6_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive) {
@@ -221,7 +220,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
ops = rcu_dereference(inet6_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive)
- goto out_unlock;
+ goto out;
iph = ipv6_hdr(skb);
}
@@ -279,9 +278,6 @@ not_same_flow:
pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
ops->callbacks.gro_receive, head, skb);
-out_unlock:
- rcu_read_unlock();
-
out:
skb_gro_flush_final(skb, pp, flush);
@@ -331,18 +327,14 @@ INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));
- rcu_read_lock();
-
nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
if (WARN_ON(!ops || !ops->callbacks.gro_complete))
- goto out_unlock;
+ goto out;
err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
udp6_gro_complete, skb, nhoff);
-out_unlock:
- rcu_read_unlock();
-
+out:
return err;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ff4e83e2a506..2995f8d89e7e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -977,7 +977,7 @@ slow_path:
fail_toobig:
if (skb->sk && dst_allfrag(skb_dst(skb)))
- sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
+ sk_gso_disable(skb->sk);
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
err = -EMSGSIZE;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 484aca492cc0..fe786df4f849 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -383,7 +383,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
else
ip6_tnl_unlink(ip6n, t);
dst_cache_reset(&t->dst_cache);
- dev_put(dev);
+ dev_put_track(dev, &t->dev_tracker);
}
/**
@@ -1883,7 +1883,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
- dev_hold(dev);
+ dev_hold_track(dev, &t->dev_tracker, GFP_KERNEL);
return 0;
destroy_dst:
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 5e9474bc54fc..3a434d75925c 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -293,7 +293,7 @@ static void vti6_dev_uninit(struct net_device *dev)
RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
else
vti6_tnl_unlink(ip6n, t);
- dev_put(dev);
+ dev_put_track(dev, &t->dev_tracker);
}
static int vti6_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -936,7 +936,7 @@ static inline int vti6_dev_init_gen(struct net_device *dev)
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- dev_hold(dev);
+ dev_hold_track(dev, &t->dev_tracker, GFP_KERNEL);
return 0;
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 36ed9efb8825..7cf73e60e619 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -182,10 +182,6 @@ static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
return 1;
}
-static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
- FRA_GENERIC_POLICY,
-};
-
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
struct fib_rule_hdr *frh, struct nlattr **tb,
struct netlink_ext_ack *extack)
@@ -218,7 +214,6 @@ static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
.compare = ip6mr_rule_compare,
.fill = ip6mr_rule_fill,
.nlgroup = RTNLGRP_IPV6_RULE,
- .policy = ip6mr_rule_policy,
.owner = THIS_MODULE,
};
@@ -746,7 +741,7 @@ static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
if ((v->flags & MIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
- dev_put(dev);
+ dev_put_track(dev, &v->dev_tracker);
return 0;
}
@@ -919,6 +914,7 @@ static int mif6_add(struct net *net, struct mr_table *mrt,
/* And finish update writing critical data */
write_lock_bh(&mrt_lock);
v->dev = dev;
+ netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
#ifdef CONFIG_IPV6_PIMSM_V2
if (v->flags & MIFF_REGISTER)
mrt->mroute_reg_vif_num = vifi;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 41efca817db4..a733803a710c 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -471,10 +471,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (sk->sk_protocol == IPPROTO_TCP) {
struct inet_connection_sock *icsk = inet_csk(sk);
- local_bh_disable();
+
sock_prot_inuse_add(net, sk->sk_prot, -1);
sock_prot_inuse_add(net, &tcp_prot, 1);
- local_bh_enable();
+
sk->sk_prot = &tcp_prot;
icsk->icsk_af_ops = &ipv4_specific;
sk->sk_socket->ops = &inet_stream_ops;
@@ -485,10 +485,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
if (sk->sk_protocol == IPPROTO_UDPLITE)
prot = &udplite_prot;
- local_bh_disable();
+
sock_prot_inuse_add(net, sk->sk_prot, -1);
sock_prot_inuse_add(net, prot, 1);
- local_bh_enable();
+
sk->sk_prot = prot;
sk->sk_socket->ops = &inet_dgram_ops;
sk->sk_family = PF_INET;
@@ -599,7 +599,14 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
/* RFC 3542, 6.5: default traffic class of 0x0 */
if (val == -1)
val = 0;
- np->tclass = val;
+ if (sk->sk_type == SOCK_STREAM) {
+ val &= ~INET_ECN_MASK;
+ val |= np->tclass & INET_ECN_MASK;
+ }
+ if (np->tclass != val) {
+ np->tclass = val;
+ sk_dst_reset(sk);
+ }
retv = 0;
break;
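
The IPV6_TCLASS path now mirrors __ip_sock_set_tos(): on SOCK_STREAM sockets the two ECN bits belong to the congestion-control state and must survive a user-supplied value, and the dst is only reset when the field actually changes. The merge step in isolation (helper name is illustrative):

#include <net/inet_ecn.h>

static u8 merge_tclass_keep_ecn(u8 cur, u8 requested)
{
        requested &= ~INET_ECN_MASK;            /* drop the caller's ECN bits */
        requested |= cur & INET_ECN_MASK;       /* keep the socket's ECN marks */
        return requested;
}
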
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index f22233e44ee9..97d3d1b36dbc 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -48,12 +48,8 @@ endif # NF_TABLES_IPV6
endif # NF_TABLES
config NF_FLOW_TABLE_IPV6
- tristate "Netfilter flow table IPv6 module"
- depends on NF_FLOW_TABLE
- help
- This option adds the flow table IPv6 support.
-
- To compile it as a module, choose M here.
+ tristate
+ select NF_FLOW_TABLE_INET
config NF_DUP_IPV6
tristate "Netfilter IPv6 packet duplication to alternate destination"
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
index 667b8af2546a..e69de29bb2d1 100644
--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -1,38 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netfilter.h>
-#include <linux/rhashtable.h>
-#include <net/netfilter/nf_flow_table.h>
-#include <net/netfilter/nf_tables.h>
-
-static struct nf_flowtable_type flowtable_ipv6 = {
- .family = NFPROTO_IPV6,
- .init = nf_flow_table_init,
- .setup = nf_flow_table_offload_setup,
- .action = nf_flow_rule_route_ipv6,
- .free = nf_flow_table_free,
- .hook = nf_flow_offload_ipv6_hook,
- .owner = THIS_MODULE,
-};
-
-static int __init nf_flow_ipv6_module_init(void)
-{
- nft_register_flowtable_type(&flowtable_ipv6);
-
- return 0;
-}
-
-static void __exit nf_flow_ipv6_module_exit(void)
-{
- nft_unregister_flowtable_type(&flowtable_ipv6);
-}
-
-module_init(nf_flow_ipv6_module_init);
-module_exit(nf_flow_ipv6_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
-MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
-MODULE_DESCRIPTION("Netfilter flow table IPv6 module");
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 6ac88fe24a8e..9256f6ba87ef 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -177,6 +177,7 @@ struct proto pingv6_prot = {
.hash = ping_hash,
.unhash = ping_unhash,
.get_port = ping_get_port,
+ .put_port = ping_unhash,
.obj_size = sizeof(struct raw6_sock),
};
EXPORT_SYMBOL_GPL(pingv6_prot);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 42d60c76d30a..e6de94203c13 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -182,8 +182,9 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
if (rt_dev == dev) {
rt->dst.dev = blackhole_netdev;
- dev_hold(rt->dst.dev);
- dev_put(rt_dev);
+ dev_replace_track(rt_dev, blackhole_netdev,
+ &rt->dst.dev_tracker,
+ GFP_ATOMIC);
}
}
spin_unlock_bh(&ul->lock);
@@ -328,9 +329,7 @@ static const struct rt6_info ip6_blk_hole_entry_template = {
static void rt6_info_init(struct rt6_info *rt)
{
- struct dst_entry *dst = &rt->dst;
-
- memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
+ memset_after(rt, 0, dst);
INIT_LIST_HEAD(&rt->rt6i_uncached);
}
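
memset_after() (from <linux/string.h>) zeroes everything in an object that follows the named member, replacing the error-prone "dst + 1" pointer arithmetic removed above. A small sketch with a hypothetical struct:

#include <linux/string.h>
#include <net/dst.h>

struct demo {
        struct dst_entry dst;   /* left untouched */
        int a;                  /* cleared */
        unsigned long b;        /* cleared */
};

static void demo_init(struct demo *d)
{
        memset_after(d, 0, dst);        /* zero from just past ->dst to the end */
}
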
@@ -594,6 +593,7 @@ struct __rt6_probe_work {
struct work_struct work;
struct in6_addr target;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
};
static void rt6_probe_deferred(struct work_struct *w)
@@ -604,7 +604,7 @@ static void rt6_probe_deferred(struct work_struct *w)
addrconf_addr_solict_mult(&work->target, &mcaddr);
ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
- dev_put(work->dev);
+ dev_put_track(work->dev, &work->dev_tracker);
kfree(work);
}
@@ -658,7 +658,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
} else {
INIT_WORK(&work->work, rt6_probe_deferred);
work->target = *nh_gw;
- dev_hold(dev);
+ dev_hold_track(dev, &work->dev_tracker, GFP_ATOMIC);
work->dev = dev;
schedule_work(&work->work);
}
@@ -1485,7 +1485,7 @@ static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
static u32 rt6_exception_hash(const struct in6_addr *dst,
const struct in6_addr *src)
{
- static siphash_key_t rt6_exception_key __read_mostly;
+ static siphash_aligned_key_t rt6_exception_key;
struct {
struct in6_addr dst;
struct in6_addr src;
@@ -3628,6 +3628,8 @@ pcpu_alloc:
}
fib6_nh->fib_nh_dev = dev;
+ netdev_tracker_alloc(dev, &fib6_nh->fib_nh_dev_tracker, gfp_flags);
+
fib6_nh->fib_nh_oif = dev->ifindex;
err = 0;
out:
@@ -3658,24 +3660,8 @@ void fib6_nh_release(struct fib6_nh *fib6_nh)
rcu_read_unlock();
- if (fib6_nh->rt6i_pcpu) {
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct rt6_info **ppcpu_rt;
- struct rt6_info *pcpu_rt;
-
- ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
- pcpu_rt = *ppcpu_rt;
- if (pcpu_rt) {
- dst_dev_put(&pcpu_rt->dst);
- dst_release(&pcpu_rt->dst);
- *ppcpu_rt = NULL;
- }
- }
-
- free_percpu(fib6_nh->rt6i_pcpu);
- }
+ fib6_nh_release_dsts(fib6_nh);
+ free_percpu(fib6_nh->rt6i_pcpu);
fib_nh_common_release(&fib6_nh->nh_common);
}
@@ -5224,6 +5210,19 @@ out:
return should_notify;
}
+static int fib6_gw_from_attr(struct in6_addr *gw, struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ if (nla_len(nla) < sizeof(*gw)) {
+ NL_SET_ERR_MSG(extack, "Invalid IPv6 address in RTA_GATEWAY");
+ return -EINVAL;
+ }
+
+ *gw = nla_get_in6_addr(nla);
+
+ return 0;
+}
+
static int ip6_route_multipath_add(struct fib6_config *cfg,
struct netlink_ext_ack *extack)
{
@@ -5264,10 +5263,18 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla) {
- r_cfg.fc_gateway = nla_get_in6_addr(nla);
+ err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
+ extack);
+ if (err)
+ goto cleanup;
+
r_cfg.fc_flags |= RTF_GATEWAY;
}
r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
+
+ /* RTA_ENCAP_TYPE length checked in
+ * lwtunnel_valid_encap_type_attr
+ */
nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
if (nla)
r_cfg.fc_encap_type = nla_get_u16(nla);
@@ -5434,7 +5441,13 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla) {
- nla_memcpy(&r_cfg.fc_gateway, nla, 16);
+ err = fib6_gw_from_attr(&r_cfg.fc_gateway, nla,
+ extack);
+ if (err) {
+ last_err = err;
+ goto next_rtnh;
+ }
+
r_cfg.fc_flags |= RTF_GATEWAY;
}
}
@@ -5442,6 +5455,7 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
if (err)
last_err = err;
+next_rtnh:
rtnh = rtnh_next(rtnh, &remaining);
}
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index a8b5784afb1a..73aaabf0e966 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -75,6 +75,65 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced)
return true;
}
+struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags)
+{
+ struct ipv6_sr_hdr *srh;
+ int len, srhoff = 0;
+
+ if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, &flags) < 0)
+ return NULL;
+
+ if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
+ return NULL;
+
+ srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+
+ len = (srh->hdrlen + 1) << 3;
+
+ if (!pskb_may_pull(skb, srhoff + len))
+ return NULL;
+
+ /* note that pskb_may_pull may change pointers in header;
+ * for this reason it is necessary to reload them when needed.
+ */
+ srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
+
+ if (!seg6_validate_srh(srh, len, true))
+ return NULL;
+
+ return srh;
+}
+
+/* Determine if an ICMP invoking packet contains a segment routing
+ * header. If it does, extract the offset to the true destination
+ * address, which is in the first segment address.
+ */
+void seg6_icmp_srh(struct sk_buff *skb, struct inet6_skb_parm *opt)
+{
+ __u16 network_header = skb->network_header;
+ struct ipv6_sr_hdr *srh;
+
+ /* Update network header to point to the invoking packet
+ * inside the ICMP packet, so we can use the seg6_get_srh()
+ * helper.
+ */
+ skb_reset_network_header(skb);
+
+ srh = seg6_get_srh(skb, 0);
+ if (!srh)
+ goto out;
+
+ if (srh->type != IPV6_SRCRT_TYPE_4)
+ goto out;
+
+ opt->flags |= IP6SKB_SEG6;
+ opt->srhoff = (unsigned char *)srh - skb->data;
+
+out:
+ /* Restore the network header back to the ICMP packet */
+ skb->network_header = network_header;
+}
+
static struct genl_family seg6_genl_family;
static const struct nla_policy seg6_genl_policy[SEG6_ATTR_MAX + 1] = {
diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c
index 2dc40b3f373e..9fbe243a0e81 100644
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
@@ -7,6 +7,7 @@
* eBPF support: Mathieu Xhonneux <m.xhonneux@gmail.com>
*/
+#include <linux/filter.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/net.h>
@@ -150,40 +151,11 @@ static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt)
return (struct seg6_local_lwt *)lwt->data;
}
-static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb, int flags)
-{
- struct ipv6_sr_hdr *srh;
- int len, srhoff = 0;
-
- if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, &flags) < 0)
- return NULL;
-
- if (!pskb_may_pull(skb, srhoff + sizeof(*srh)))
- return NULL;
-
- srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
-
- len = (srh->hdrlen + 1) << 3;
-
- if (!pskb_may_pull(skb, srhoff + len))
- return NULL;
-
- /* note that pskb_may_pull may change pointers in header;
- * for this reason it is necessary to reload them when needed.
- */
- srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
-
- if (!seg6_validate_srh(srh, len, true))
- return NULL;
-
- return srh;
-}
-
static struct ipv6_sr_hdr *get_and_validate_srh(struct sk_buff *skb)
{
struct ipv6_sr_hdr *srh;
- srh = get_srh(skb, IP6_FH_F_SKIP_RH);
+ srh = seg6_get_srh(skb, IP6_FH_F_SKIP_RH);
if (!srh)
return NULL;
@@ -200,7 +172,7 @@ static bool decap_and_validate(struct sk_buff *skb, int proto)
struct ipv6_sr_hdr *srh;
unsigned int off = 0;
- srh = get_srh(skb, 0);
+ srh = seg6_get_srh(skb, 0);
if (srh && srh->segments_left > 0)
return false;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8a3618a30632..a618dce7e0bc 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -521,7 +521,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
ipip6_tunnel_del_prl(tunnel, NULL);
}
dst_cache_reset(&tunnel->dst_cache);
- dev_put(dev);
+ dev_put_track(dev, &tunnel->dev_tracker);
}
static int ipip6_err(struct sk_buff *skb, u32 info)
@@ -1463,7 +1463,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
dev->tstats = NULL;
return err;
}
- dev_hold(dev);
+ dev_hold_track(dev, &tunnel->dev_tracker, GFP_KERNEL);
return 0;
}
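
The sit tunnel now takes its device reference with dev_hold_track()/dev_put_track(), which tie each get/put to a caller-owned tracker so an unbalanced reference can be attributed to its owner when reference tracking is enabled. A toy sketch of the idea in plain C, with hypothetical names and no relation to the in-kernel ref_tracker implementation beyond the general shape:

#include <stdio.h>

/* Toy object with a reference count. */
struct object {
	int refcnt;
};

/* Caller-owned tracker: records that this caller holds one reference. */
struct tracker {
	const char *owner;
	int active;
};

static void obj_hold_track(struct object *o, struct tracker *t, const char *owner)
{
	o->refcnt++;
	t->owner = owner;
	t->active = 1;
}

static void obj_put_track(struct object *o, struct tracker *t)
{
	if (!t->active)
		fprintf(stderr, "double put by %s\n", t->owner);
	t->active = 0;
	o->refcnt--;
}

/* On teardown, any still-active tracker names the leaking caller. */
static void obj_check_leaks(const struct object *o,
			    const struct tracker *trackers, int n)
{
	for (int i = 0; i < n; i++)
		if (trackers[i].active)
			printf("leaked reference held by %s\n", trackers[i].owner);
	printf("final refcnt: %d\n", o->refcnt);
}

int main(void)
{
	struct object dev = { .refcnt = 1 };
	struct tracker t_tunnel = { 0 }, t_route = { 0 };

	obj_hold_track(&dev, &t_tunnel, "tunnel");
	obj_hold_track(&dev, &t_route, "route");
	obj_put_track(&dev, &t_tunnel);          /* balanced */
	/* t_route intentionally never released */

	obj_check_leaks(&dev, (struct tracker[]){ t_tunnel, t_route }, 2);
	return 0;
}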
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index e8cfb9e997bf..d1b61d00368e 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -20,7 +20,7 @@
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
-static siphash_key_t syncookie6_secret[2] __read_mostly;
+static siphash_aligned_key_t syncookie6_secret[2];
/* RFC 2460, Section 8.3:
* [ipv6 tcp] MSS must be computed as the maximum packet size minus 60 [..]
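
syncookie6_secret moves to siphash_aligned_key_t, which guarantees the alignment SipHash wants for 64-bit loads regardless of where the key lands in the object file. A small C11 sketch of expressing such a requirement with alignas, using a hypothetical stand-in for siphash_key_t:

#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for siphash_key_t: two 64-bit words. */
struct hash_key {
	uint64_t key[2];
};

/* Stand-in for an "aligned key" type: same data, but every object of
 * this type is guaranteed to sit on an 8-byte boundary. */
typedef struct {
	alignas(8) struct hash_key k;
} aligned_hash_key;

static aligned_hash_key secret[2];

int main(void)
{
	printf("required alignment: %zu\n", alignof(aligned_hash_key));
	printf("secret[0] at %p, secret[1] at %p\n",
	       (void *)&secret[0], (void *)&secret[1]);
	return 0;
}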
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 680e6481b967..075ee8a2df3b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -72,7 +72,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req);
-static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
+INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static const struct inet_connection_sock_af_ops ipv6_mapped;
const struct inet_connection_sock_af_ops ipv6_specific;
@@ -1466,7 +1466,8 @@ INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
* This is because we cannot sleep with the original spinlock
* held.
*/
-static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
+INDIRECT_CALLABLE_SCOPE
+int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct ipv6_pinfo *np = tcp_inet6_sk(sk);
struct sk_buff *opt_skb = NULL;
@@ -1760,6 +1761,7 @@ process:
sk_incoming_cpu_update(sk);
+ sk_defer_free_flush(sk);
bh_lock_sock_nested(sk);
tcp_segs_in(tcp_sk(sk), skb);
ret = 0;
@@ -1896,9 +1898,7 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
- struct ipv6_pinfo *np = inet6_sk(sk);
-
- __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
+ __tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
}
const struct inet_connection_sock_af_ops ipv6_specific = {
@@ -2181,6 +2181,7 @@ struct proto tcpv6_prot = {
.hash = inet6_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
+ .put_port = inet_put_port,
#ifdef CONFIG_BPF_SYSCALL
.psock_update_sk_prot = tcp_bpf_update_proto,
#endif
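
tcp_v6_do_rcv() is widened from static to INDIRECT_CALLABLE_SCOPE so callers that hold a protocol function pointer can compare it against the known IPv4/IPv6 handlers and call the match directly, avoiding a retpoline-penalized indirect branch. A standalone sketch of that compare-and-direct-call pattern with hypothetical handler names (the kernel wraps it in the INDIRECT_CALL_2() macro):

#include <stdio.h>

static int handle_v4(int x) { return x + 4; }
static int handle_v6(int x) { return x + 6; }

/* Compare the pointer against the expected targets and call them
 * directly; fall back to the indirect call only for unknown targets. */
static int dispatch(int (*fn)(int), int x)
{
	if (fn == handle_v6)
		return handle_v6(x);
	if (fn == handle_v4)
		return handle_v4(x);
	return fn(x);
}

int main(void)
{
	int (*proto_rcv)(int) = handle_v6;

	printf("result: %d\n", dispatch(proto_rcv, 10));
	return 0;
}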
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 1796856bc24f..39db5a226855 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -7,6 +7,7 @@
*/
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
+#include <net/gro.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 8cde9efd7919..528b81ef19c9 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -17,6 +17,7 @@
* YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
*/
+#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -40,6 +41,7 @@
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
+#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
@@ -195,7 +197,7 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net,
const struct in6_addr *saddr,
__be16 sport,
const struct in6_addr *daddr,
- u16 hnum)
+ u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@@ -203,8 +205,8 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net,
if (udptable != &udp_table)
return NULL; /* only UDP is supported */
- no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
- saddr, sport, daddr, hnum, &sk);
+ no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
+ daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@@ -240,7 +242,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
sk = udp6_lookup_run_bpf(net, udptable, skb,
- saddr, sport, daddr, hnum);
+ saddr, sport, daddr, hnum, dif);
if (sk) {
result = sk;
goto done;
@@ -561,7 +563,7 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct ipv6_pinfo *np;
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct in6_addr *saddr = &hdr->saddr;
- const struct in6_addr *daddr = &hdr->daddr;
+ const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
struct udphdr *uh = (struct udphdr *)(skb->data+offset);
bool tunnel = false;
struct sock *sk;
@@ -1731,6 +1733,7 @@ struct proto udpv6_prot = {
.unhash = udp_lib_unhash,
.rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
+ .put_port = udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
.psock_update_sk_prot = udp_bpf_update_proto,
#endif
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index b3d9ed96e5ea..7720d04ed396 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -13,6 +13,7 @@
#include <net/udp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
+#include <net/gro.h>
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
netdev_features_t features)
@@ -144,13 +145,11 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
skip:
NAPI_GRO_CB(skb)->is_ipv6 = 1;
- rcu_read_lock();
if (static_branch_unlikely(&udpv6_encap_needed_key))
sk = udp6_gro_lookup_skb(skb, uh->source, uh->dest);
pp = udp_gro_receive(head, skb, uh, sk);
- rcu_read_unlock();
return pp;
flush:
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index af7a4b8b1e9c..fad687ee6dd8 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -74,11 +74,11 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
struct rt6_info *rt = (struct rt6_info *)xdst->route;
xdst->u.dst.dev = dev;
- dev_hold(dev);
+ dev_hold_track(dev, &xdst->u.dst.dev_tracker, GFP_ATOMIC);
xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
if (!xdst->u.rt6.rt6i_idev) {
- dev_put(dev);
+ dev_put_track(dev, &xdst->u.dst.dev_tracker);
return -ENODEV;
}
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 18316ee3c692..a1760add5bf1 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -13,6 +13,7 @@
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/filter.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/types.h>
@@ -142,7 +143,7 @@ static inline size_t iucv_msg_length(struct iucv_message *msg)
* iucv_sock_in_state() - check for specific states
* @sk: sock structure
* @state: first iucv sk state
- * @state: second iucv sk state
+ * @state2: second iucv sk state
*
 * Returns true if the socket is either in the first or second state.
*/
@@ -172,7 +173,7 @@ static inline int iucv_below_msglim(struct sock *sk)
(atomic_read(&iucv->pendings) <= 0));
}
-/**
+/*
* iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
*/
static void iucv_sock_wake_msglim(struct sock *sk)
@@ -187,7 +188,7 @@ static void iucv_sock_wake_msglim(struct sock *sk)
rcu_read_unlock();
}
-/**
+/*
* afiucv_hs_send() - send a message through HiperSockets transport
*/
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
@@ -473,7 +474,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio,
atomic_set(&iucv->msg_recv, 0);
iucv->path = NULL;
iucv->sk_txnotify = afiucv_hs_callback_txnotify;
- memset(&iucv->src_user_id , 0, 32);
+ memset(&iucv->init, 0, sizeof(iucv->init));
if (pr_iucv)
iucv->transport = AF_IUCV_TRANS_IUCV;
else
@@ -1831,9 +1832,9 @@ static void afiucv_swap_src_dest(struct sk_buff *skb)
memset(skb->data, 0, ETH_HLEN);
}
-/**
+/*
* afiucv_hs_callback_syn - react on received SYN
- **/
+ */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
@@ -1896,9 +1897,9 @@ out:
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_synack() - react on received SYN-ACK
- **/
+ */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1917,9 +1918,9 @@ static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_synfin() - react on received SYN_FIN
- **/
+ */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1937,9 +1938,9 @@ static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_fin() - react on received FIN
- **/
+ */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1960,9 +1961,9 @@ static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_win() - react on received WIN
- **/
+ */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1978,9 +1979,9 @@ static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_rx() - react on received data
- **/
+ */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -2022,11 +2023,11 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_rcv() - base function for arriving data through HiperSockets
* transport
* called from netif RX softirq
- **/
+ */
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
@@ -2128,10 +2129,10 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
return err;
}
-/**
+/*
* afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
* transport
- **/
+ */
static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
{
struct iucv_sock *iucv = iucv_sk(sk);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index f3343a8541a5..8f4d49a7d3e8 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -276,8 +276,8 @@ static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];
/**
- * iucv_call_b2f0
- * @code: identifier of IUCV call to CP.
+ * __iucv_call_b2f0
+ * @command: identifier of IUCV call to CP.
* @parm: pointer to a struct iucv_parm block
*
* Calls CP to execute IUCV commands.
@@ -309,7 +309,7 @@ static inline int iucv_call_b2f0(int command, union iucv_param *parm)
return ccode == 1 ? parm->ctrl.iprcode : ccode;
}
-/**
+/*
* iucv_query_maxconn
*
* Determines the maximum number of connections that may be established.
@@ -493,8 +493,8 @@ static void iucv_retrieve_cpu(void *data)
cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}
-/**
- * iucv_setmask_smp
+/*
+ * iucv_setmask_mp
*
* Allow iucv interrupts on all cpus.
*/
@@ -512,7 +512,7 @@ static void iucv_setmask_mp(void)
cpus_read_unlock();
}
-/**
+/*
* iucv_setmask_up
*
* Allow iucv interrupts on a single cpu.
@@ -529,7 +529,7 @@ static void iucv_setmask_up(void)
smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}
-/**
+/*
* iucv_enable
*
* This function makes iucv ready for use. It allocates the pathid
@@ -564,7 +564,7 @@ out:
return rc;
}
-/**
+/*
* iucv_disable
*
* This function shuts down iucv. It disables iucv interrupts, retrieves
@@ -1347,8 +1347,9 @@ EXPORT_SYMBOL(iucv_message_send);
* @srccls: source class of message
* @buffer: address of send buffer or address of struct iucv_array
* @size: length of send buffer
- * @ansbuf: address of answer buffer or address of struct iucv_array
+ * @answer: address of answer buffer or address of struct iucv_array
* @asize: size of reply buffer
+ * @residual: ignored
*
* This function transmits data to another application. Data to be
* transmitted is in a buffer. The receiver of the send is expected to
@@ -1400,13 +1401,6 @@ out:
}
EXPORT_SYMBOL(iucv_message_send2way);
-/**
- * iucv_path_pending
- * @data: Pointer to external interrupt buffer
- *
- * Process connection pending work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_pending {
u16 ippathid;
u8 ipflags1;
@@ -1420,6 +1414,13 @@ struct iucv_path_pending {
u8 res4[3];
} __packed;
+/**
+ * iucv_path_pending
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection pending work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_pending(struct iucv_irq_data *data)
{
struct iucv_path_pending *ipp = (void *) data;
@@ -1461,13 +1462,6 @@ out_sever:
iucv_sever_pathid(ipp->ippathid, error);
}
-/**
- * iucv_path_complete
- * @data: Pointer to external interrupt buffer
- *
- * Process connection complete work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_complete {
u16 ippathid;
u8 ipflags1;
@@ -1481,6 +1475,13 @@ struct iucv_path_complete {
u8 res4[3];
} __packed;
+/**
+ * iucv_path_complete
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection complete work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_complete(struct iucv_irq_data *data)
{
struct iucv_path_complete *ipc = (void *) data;
@@ -1492,13 +1493,6 @@ static void iucv_path_complete(struct iucv_irq_data *data)
path->handler->path_complete(path, ipc->ipuser);
}
-/**
- * iucv_path_severed
- * @data: Pointer to external interrupt buffer
- *
- * Process connection severed work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_severed {
u16 ippathid;
u8 res1;
@@ -1511,6 +1505,13 @@ struct iucv_path_severed {
u8 res5[3];
} __packed;
+/**
+ * iucv_path_severed
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection severed work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_severed(struct iucv_irq_data *data)
{
struct iucv_path_severed *ips = (void *) data;
@@ -1528,13 +1529,6 @@ static void iucv_path_severed(struct iucv_irq_data *data)
}
}
-/**
- * iucv_path_quiesced
- * @data: Pointer to external interrupt buffer
- *
- * Process connection quiesced work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_quiesced {
u16 ippathid;
u8 res1;
@@ -1547,6 +1541,13 @@ struct iucv_path_quiesced {
u8 res5[3];
} __packed;
+/**
+ * iucv_path_quiesced
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection quiesced work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_quiesced(struct iucv_irq_data *data)
{
struct iucv_path_quiesced *ipq = (void *) data;
@@ -1556,13 +1557,6 @@ static void iucv_path_quiesced(struct iucv_irq_data *data)
path->handler->path_quiesced(path, ipq->ipuser);
}
-/**
- * iucv_path_resumed
- * @data: Pointer to external interrupt buffer
- *
- * Process connection resumed work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_resumed {
u16 ippathid;
u8 res1;
@@ -1575,6 +1569,13 @@ struct iucv_path_resumed {
u8 res5[3];
} __packed;
+/**
+ * iucv_path_resumed
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection resumed work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_resumed(struct iucv_irq_data *data)
{
struct iucv_path_resumed *ipr = (void *) data;
@@ -1584,13 +1585,6 @@ static void iucv_path_resumed(struct iucv_irq_data *data)
path->handler->path_resumed(path, ipr->ipuser);
}
-/**
- * iucv_message_complete
- * @data: Pointer to external interrupt buffer
- *
- * Process message complete work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_message_complete {
u16 ippathid;
u8 ipflags1;
@@ -1606,6 +1600,13 @@ struct iucv_message_complete {
u8 res2[3];
} __packed;
+/**
+ * iucv_message_complete
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process message complete work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_message_complete(struct iucv_irq_data *data)
{
struct iucv_message_complete *imc = (void *) data;
@@ -1624,13 +1625,6 @@ static void iucv_message_complete(struct iucv_irq_data *data)
}
}
-/**
- * iucv_message_pending
- * @data: Pointer to external interrupt buffer
- *
- * Process message pending work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_message_pending {
u16 ippathid;
u8 ipflags1;
@@ -1653,6 +1647,13 @@ struct iucv_message_pending {
u8 res2[3];
} __packed;
+/**
+ * iucv_message_pending
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process message pending work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_message_pending(struct iucv_irq_data *data)
{
struct iucv_message_pending *imp = (void *) data;
@@ -1673,7 +1674,7 @@ static void iucv_message_pending(struct iucv_irq_data *data)
}
}
-/**
+/*
* iucv_tasklet_fn:
*
* This tasklet loops over the queue of irq buffers created by
@@ -1717,7 +1718,7 @@ static void iucv_tasklet_fn(unsigned long ignored)
spin_unlock(&iucv_table_lock);
}
-/**
+/*
* iucv_work_fn:
*
* This work function loops over the queue of path pending irq blocks
@@ -1748,9 +1749,8 @@ static void iucv_work_fn(struct work_struct *work)
spin_unlock_bh(&iucv_table_lock);
}
-/**
+/*
* iucv_external_interrupt
- * @code: irq code
*
* Handles external interrupts coming in from CP.
* Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 11a715d76a4f..71899e5a5a11 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
+#include <linux/filter.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 93271a2632b8..7499c51b1850 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -250,15 +250,15 @@ struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel,
session_list = l2tp_session_id_hash(tunnel, session_id);
- read_lock_bh(&tunnel->hlist_lock);
- hlist_for_each_entry(session, session_list, hlist)
+ rcu_read_lock_bh();
+ hlist_for_each_entry_rcu(session, session_list, hlist)
if (session->session_id == session_id) {
l2tp_session_inc_refcount(session);
- read_unlock_bh(&tunnel->hlist_lock);
+ rcu_read_unlock_bh();
return session;
}
- read_unlock_bh(&tunnel->hlist_lock);
+ rcu_read_unlock_bh();
return NULL;
}
@@ -291,18 +291,18 @@ struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth)
struct l2tp_session *session;
int count = 0;
- read_lock_bh(&tunnel->hlist_lock);
+ rcu_read_lock_bh();
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
- hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
+ hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
if (++count > nth) {
l2tp_session_inc_refcount(session);
- read_unlock_bh(&tunnel->hlist_lock);
+ rcu_read_unlock_bh();
return session;
}
}
}
- read_unlock_bh(&tunnel->hlist_lock);
+ rcu_read_unlock_bh();
return NULL;
}
@@ -347,7 +347,7 @@ int l2tp_session_register(struct l2tp_session *session,
head = l2tp_session_id_hash(tunnel, session->session_id);
- write_lock_bh(&tunnel->hlist_lock);
+ spin_lock_bh(&tunnel->hlist_lock);
if (!tunnel->acpt_newsess) {
err = -ENODEV;
goto err_tlock;
@@ -384,8 +384,8 @@ int l2tp_session_register(struct l2tp_session *session,
l2tp_tunnel_inc_refcount(tunnel);
}
- hlist_add_head(&session->hlist, head);
- write_unlock_bh(&tunnel->hlist_lock);
+ hlist_add_head_rcu(&session->hlist, head);
+ spin_unlock_bh(&tunnel->hlist_lock);
trace_register_session(session);
@@ -394,7 +394,7 @@ int l2tp_session_register(struct l2tp_session *session,
err_tlock_pnlock:
spin_unlock_bh(&pn->l2tp_session_hlist_lock);
err_tlock:
- write_unlock_bh(&tunnel->hlist_lock);
+ spin_unlock_bh(&tunnel->hlist_lock);
return err;
}
@@ -1170,9 +1170,9 @@ static void l2tp_session_unhash(struct l2tp_session *session)
/* Remove the session from core hashes */
if (tunnel) {
/* Remove from the per-tunnel hash */
- write_lock_bh(&tunnel->hlist_lock);
- hlist_del_init(&session->hlist);
- write_unlock_bh(&tunnel->hlist_lock);
+ spin_lock_bh(&tunnel->hlist_lock);
+ hlist_del_init_rcu(&session->hlist);
+ spin_unlock_bh(&tunnel->hlist_lock);
/* For L2TPv3 we have a per-net hash: remove from there, too */
if (tunnel->version != L2TP_HDR_VER_2) {
@@ -1181,8 +1181,9 @@ static void l2tp_session_unhash(struct l2tp_session *session)
spin_lock_bh(&pn->l2tp_session_hlist_lock);
hlist_del_init_rcu(&session->global_hlist);
spin_unlock_bh(&pn->l2tp_session_hlist_lock);
- synchronize_rcu();
}
+
+ synchronize_rcu();
}
}
@@ -1190,22 +1191,19 @@ static void l2tp_session_unhash(struct l2tp_session *session)
*/
static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
{
- int hash;
- struct hlist_node *walk;
- struct hlist_node *tmp;
struct l2tp_session *session;
+ int hash;
- write_lock_bh(&tunnel->hlist_lock);
+ spin_lock_bh(&tunnel->hlist_lock);
tunnel->acpt_newsess = false;
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
- hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
- session = hlist_entry(walk, struct l2tp_session, hlist);
- hlist_del_init(&session->hlist);
+ hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
+ hlist_del_init_rcu(&session->hlist);
- write_unlock_bh(&tunnel->hlist_lock);
+ spin_unlock_bh(&tunnel->hlist_lock);
l2tp_session_delete(session);
- write_lock_bh(&tunnel->hlist_lock);
+ spin_lock_bh(&tunnel->hlist_lock);
/* Now restart from the beginning of this hash
* chain. We always remove a session from the
@@ -1215,7 +1213,7 @@ again:
goto again;
}
}
- write_unlock_bh(&tunnel->hlist_lock);
+ spin_unlock_bh(&tunnel->hlist_lock);
}
/* Tunnel socket destroy hook for UDP encapsulation */
@@ -1408,7 +1406,7 @@ int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
tunnel->magic = L2TP_TUNNEL_MAGIC;
sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
- rwlock_init(&tunnel->hlist_lock);
+ spin_lock_init(&tunnel->hlist_lock);
tunnel->acpt_newsess = true;
tunnel->encap = encap;
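
The per-tunnel session hash moves from a rwlock to a spinlock for writers plus RCU for readers: hlist_add_head_rcu() publishes a fully initialized entry and hlist_for_each_entry_rcu() traverses it locklessly, so l2tp_tunnel_get_session() no longer takes the lock at all. A minimal C11 sketch of the publish/consume contract behind that pairing, single-threaded and with hypothetical types, only to show the ordering primitives; real lockless readers also need RCU grace periods before entries may be freed:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
	unsigned int id;
	struct session *next;
};

/* List head shared between one writer and many lockless readers. */
static _Atomic(struct session *) head;

/* Writer side: initialize the node completely, then publish it with
 * release ordering (the moral equivalent of hlist_add_head_rcu()). */
static void publish_session(struct session *s)
{
	s->next = atomic_load_explicit(&head, memory_order_relaxed);
	atomic_store_explicit(&head, s, memory_order_release);
}

/* Reader side: acquire load of the head, then plain traversal
 * (the moral equivalent of hlist_for_each_entry_rcu()). */
static struct session *lookup_session(unsigned int id)
{
	struct session *s = atomic_load_explicit(&head, memory_order_acquire);

	for (; s; s = s->next)
		if (s->id == id)
			return s;
	return NULL;
}

int main(void)
{
	struct session *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	if (!a || !b)
		return 1;
	a->id = 1;
	b->id = 2;
	publish_session(a);
	publish_session(b);

	printf("found session %u\n", lookup_session(2)->id);
	free(a);
	free(b);
	return 0;
}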
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 98ea98eb9567..a88e070b431d 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -160,7 +160,7 @@ struct l2tp_tunnel {
unsigned long dead;
struct rcu_head rcu;
- rwlock_t hlist_lock; /* protect session_hlist */
+ spinlock_t hlist_lock; /* write-protection for session_hlist */
bool acpt_newsess; /* indicates whether this tunnel accepts
* new sessions. Protected by hlist_lock.
*/
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index bca75bef8282..9d1aafe75f92 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -32,7 +32,8 @@
static struct dentry *rootdir;
struct l2tp_dfs_seq_data {
- struct net *net;
+ struct net *net;
+ netns_tracker ns_tracker;
int tunnel_idx; /* current tunnel */
int session_idx; /* index of session within current tunnel */
struct l2tp_tunnel *tunnel;
@@ -120,24 +121,21 @@ static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
{
struct l2tp_tunnel *tunnel = v;
+ struct l2tp_session *session;
int session_count = 0;
int hash;
- struct hlist_node *walk;
- struct hlist_node *tmp;
- read_lock_bh(&tunnel->hlist_lock);
+ rcu_read_lock_bh();
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
- hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
- struct l2tp_session *session;
-
- session = hlist_entry(walk, struct l2tp_session, hlist);
+ hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) {
+ /* Session ID of zero is a dummy/reserved value used by pppol2tp */
if (session->session_id == 0)
continue;
session_count++;
}
}
- read_unlock_bh(&tunnel->hlist_lock);
+ rcu_read_unlock_bh();
seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id);
if (tunnel->sock) {
@@ -284,7 +282,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
rc = PTR_ERR(pd->net);
goto err_free_pd;
}
-
+ netns_tracker_alloc(pd->net, &pd->ns_tracker, GFP_KERNEL);
rc = seq_open(file, &l2tp_dfs_seq_ops);
if (rc)
goto err_free_net;
@@ -296,7 +294,7 @@ out:
return rc;
err_free_net:
- put_net(pd->net);
+ put_net_track(pd->net, &pd->ns_tracker);
err_free_pd:
kfree(pd);
goto out;
@@ -310,7 +308,7 @@ static int l2tp_dfs_seq_release(struct inode *inode, struct file *file)
seq = file->private_data;
pd = seq->private;
if (pd->net)
- put_net(pd->net);
+ put_net_track(pd->net, &pd->ns_tracker);
kfree(pd);
seq_release(inode, file);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 3086f4a6ae68..26c00ebf4fba 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -224,7 +224,7 @@ static int llc_ui_release(struct socket *sock)
} else {
release_sock(sk);
}
- dev_put(llc->dev);
+ dev_put_track(llc->dev, &llc->dev_tracker);
sock_put(sk);
llc_sk_free(sk);
out:
@@ -295,6 +295,7 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
if (!llc->dev)
goto out;
+ netdev_tracker_alloc(llc->dev, &llc->dev_tracker, GFP_KERNEL);
rc = -EUSERS;
llc->laddr.lsap = llc_ui_autoport();
if (!llc->laddr.lsap)
@@ -362,7 +363,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
} else
llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd,
addr->sllc_mac);
- dev_hold(llc->dev);
+ dev_hold_track(llc->dev, &llc->dev_tracker, GFP_ATOMIC);
rcu_read_unlock();
if (!llc->dev)
goto out;
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 0ff490a73fae..07e9abb5978a 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -195,7 +195,7 @@ static int llc_seq_core_show(struct seq_file *seq, void *v)
timer_pending(&llc->pf_cycle_timer.timer),
timer_pending(&llc->rej_sent_timer.timer),
timer_pending(&llc->busy_state_timer.timer),
- !!sk->sk_backlog.tail, !!sk->sk_lock.owned);
+ !!sk->sk_backlog.tail, sock_owned_by_user_nocheck(sk));
out:
return 0;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 2d0dd69f9753..87a208089caf 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -5,7 +5,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -3201,6 +3201,18 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif)
}
EXPORT_SYMBOL(ieee80211_csa_finish);
+void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_tx)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_local *local = sdata->local;
+
+ sdata->csa_block_tx = block_tx;
+ sdata_info(sdata, "channel switch failed, disconnecting\n");
+ ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
+}
+EXPORT_SYMBOL(ieee80211_channel_switch_disconnect);
+
static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
u32 *changed)
{
@@ -4271,6 +4283,21 @@ ieee80211_color_change_bss_config_notify(struct ieee80211_sub_if_data *sdata,
changed |= BSS_CHANGED_HE_BSS_COLOR;
ieee80211_bss_info_change_notify(sdata, changed);
+
+ if (!sdata->vif.bss_conf.nontransmitted && sdata->vif.mbssid_tx_vif) {
+ struct ieee80211_sub_if_data *child;
+
+ mutex_lock(&sdata->local->iflist_mtx);
+ list_for_each_entry(child, &sdata->local->interfaces, list) {
+ if (child != sdata && child->vif.mbssid_tx_vif == &sdata->vif) {
+ child->vif.bss_conf.he_bss_color.color = color;
+ child->vif.bss_conf.he_bss_color.enabled = enable;
+ ieee80211_bss_info_change_notify(child,
+ BSS_CHANGED_HE_BSS_COLOR);
+ }
+ }
+ mutex_unlock(&sdata->local->iflist_mtx);
+ }
}
static int ieee80211_color_change_finalize(struct ieee80211_sub_if_data *sdata)
@@ -4355,6 +4382,9 @@ ieee80211_color_change(struct wiphy *wiphy, struct net_device *dev,
sdata_assert_lock(sdata);
+ if (sdata->vif.bss_conf.nontransmitted)
+ return -EINVAL;
+
mutex_lock(&local->mtx);
/* don't allow another color change if one is already active or if csa
@@ -4386,6 +4416,18 @@ out:
return err;
}
+static int
+ieee80211_set_radar_background(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ if (!local->ops->set_radar_background)
+ return -EOPNOTSUPP;
+
+ return local->ops->set_radar_background(&local->hw, chandef);
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -4490,4 +4532,5 @@ const struct cfg80211_ops mac80211_config_ops = {
.reset_tid_config = ieee80211_reset_tid_config,
.set_sar_specs = ieee80211_set_sar_specs,
.color_change = ieee80211_color_change,
+ .set_radar_background = ieee80211_set_radar_background,
};
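
ieee80211_set_radar_background() follows the usual optional-op pattern: probe the driver's callback and return -EOPNOTSUPP when the hardware does not implement it, so cfg80211 can report the capability gap cleanly. A small standalone sketch of that dispatch with hypothetical op and driver names:

#include <errno.h>
#include <stdio.h>

struct chan_def { int center_freq; };

/* Hypothetical driver ops table with an optional callback. */
struct drv_ops {
	int (*set_radar_background)(const struct chan_def *chandef);
};

static int core_set_radar_background(const struct drv_ops *ops,
				     const struct chan_def *chandef)
{
	/* Optional op: an absent callback means "not supported",
	 * not an error in the core itself. */
	if (!ops->set_radar_background)
		return -EOPNOTSUPP;
	return ops->set_radar_background(chandef);
}

static int fake_drv_set_radar_background(const struct chan_def *chandef)
{
	printf("background CAC on %d MHz\n", chandef->center_freq);
	return 0;
}

int main(void)
{
	struct chan_def def = { .center_freq = 5260 };
	struct drv_ops without = { 0 };
	struct drv_ops with = { .set_radar_background = fake_drv_set_radar_background };

	printf("no op:   %d\n", core_set_radar_background(&without, &def));
	printf("with op: %d\n", core_set_radar_background(&with, &def));
	return 0;
}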
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 481f01b0f65c..9479f2787ea7 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -936,14 +936,15 @@ static ssize_t sta_he_capa_read(struct file *file, char __user *userbuf,
PFLAG(PHY, 9, RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB,
"RX-FULL-BW-SU-USING-MU-WITH-NON-COMP-SIGB");
- switch (cap[9] & IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK) {
- case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US:
+ switch (u8_get_bits(cap[9],
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK)) {
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US:
PRINT("NOMINAL-PACKET-PADDING-0US");
break;
- case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US:
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US:
PRINT("NOMINAL-PACKET-PADDING-8US");
break;
- case IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US:
+ case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US:
PRINT("NOMINAL-PACKET-PADDING-16US");
break;
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index c336267f4599..4e2fc1a08681 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1486,4 +1486,26 @@ static inline void drv_twt_teardown_request(struct ieee80211_local *local,
trace_drv_return_void(local);
}
+static inline int drv_net_fill_forward_path(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta,
+ struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
+{
+ int ret = -EOPNOTSUPP;
+
+ sdata = get_bss_sdata(sdata);
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+ trace_drv_net_fill_forward_path(local, sdata, sta);
+ if (local->ops->net_fill_forward_path)
+ ret = local->ops->net_fill_forward_path(&local->hw,
+ &sdata->vif, sta,
+ ctx, path);
+ trace_drv_return_int(local, ret);
+
+ return ret;
+}
+
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/ethtool.c b/net/mac80211/ethtool.c
index 99a2e30b3833..b2253df54413 100644
--- a/net/mac80211/ethtool.c
+++ b/net/mac80211/ethtool.c
@@ -14,7 +14,9 @@
#include "driver-ops.h"
static int ieee80211_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy);
@@ -25,7 +27,9 @@ static int ieee80211_set_ringparam(struct net_device *dev,
}
static void ieee80211_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *rp)
+ struct ethtool_ringparam *rp,
+ struct kernel_ethtool_ringparam *kernel_rp,
+ struct netlink_ext_ack *extack)
{
struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy);
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 5666bbb8860b..330ea62231fa 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -647,6 +647,26 @@ struct mesh_csa_settings {
struct cfg80211_csa_settings settings;
};
+/**
+ * struct mesh_table
+ *
+ * @known_gates: list of known mesh gates and their mpaths by the station. The
+ * gate's mpath may or may not be resolved and active.
+ * @gates_lock: protects updates to known_gates
+ * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
+ * @entries: number of entries in the table
+ */
+struct mesh_table {
+ struct hlist_head known_gates;
+ spinlock_t gates_lock;
+ struct rhashtable rhead;
+ struct hlist_head walk_head;
+ spinlock_t walk_lock;
+ atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
+};
+
struct ieee80211_if_mesh {
struct timer_list housekeeping_timer;
struct timer_list mesh_path_timer;
@@ -721,8 +741,8 @@ struct ieee80211_if_mesh {
/* offset from skb->data while building IE */
int meshconf_offset;
- struct mesh_table *mesh_paths;
- struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+ struct mesh_table mesh_paths;
+ struct mesh_table mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
int mpp_paths_generation;
};
@@ -1463,7 +1483,7 @@ struct ieee80211_local {
};
static inline struct ieee80211_sub_if_data *
-IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
+IEEE80211_DEV_TO_SUB_IF(const struct net_device *dev)
{
return netdev_priv(dev);
}
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 20aa5cc31f77..41531478437c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -789,6 +789,64 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_get_stats64 = ieee80211_get_stats64,
};
+static int ieee80211_netdev_fill_forward_path(struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_local *local;
+ struct sta_info *sta;
+ int ret = -ENOENT;
+
+ sdata = IEEE80211_DEV_TO_SUB_IF(ctx->dev);
+ local = sdata->local;
+
+ if (!local->ops->net_fill_forward_path)
+ return -EOPNOTSUPP;
+
+ rcu_read_lock();
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ sta = rcu_dereference(sdata->u.vlan.sta);
+ if (sta)
+ break;
+ if (sdata->wdev.use_4addr)
+ goto out;
+ if (is_multicast_ether_addr(ctx->daddr))
+ goto out;
+ sta = sta_info_get_bss(sdata, ctx->daddr);
+ break;
+ case NL80211_IFTYPE_AP:
+ if (is_multicast_ether_addr(ctx->daddr))
+ goto out;
+ sta = sta_info_get(sdata, ctx->daddr);
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (sdata->wdev.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) {
+ sta = sta_info_get(sdata, ctx->daddr);
+ if (sta && test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
+ if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
+ goto out;
+
+ break;
+ }
+ }
+
+ sta = sta_info_get(sdata, sdata->u.mgd.bssid);
+ break;
+ default:
+ goto out;
+ }
+
+ if (!sta)
+ goto out;
+
+ ret = drv_net_fill_forward_path(local, sdata, &sta->sta, ctx, path);
+out:
+ rcu_read_unlock();
+
+ return ret;
+}
+
static const struct net_device_ops ieee80211_dataif_8023_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
@@ -798,6 +856,7 @@ static const struct net_device_ops ieee80211_dataif_8023_ops = {
.ndo_set_mac_address = ieee80211_change_mac,
.ndo_select_queue = ieee80211_netdev_select_queue,
.ndo_get_stats64 = ieee80211_get_stats64,
+ .ndo_fill_forward_path = ieee80211_netdev_fill_forward_path,
};
static bool ieee80211_iftype_supports_hdr_offload(enum nl80211_iftype iftype)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 45fb517591ee..5311c3cd3050 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1131,17 +1131,14 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->scan_ies_len +=
2 + sizeof(struct ieee80211_vht_cap);
- /* HE cap element is variable in size - set len to allow max size */
/*
- * TODO: 1 is added at the end of the calculation to accommodate for
- * the temporary placing of the HE capabilities IE under EXT.
- * Remove it once it is placed in the final place.
- */
- if (supp_he)
+ * HE cap element is variable in size - set len to allow max size */
+ if (supp_he) {
local->scan_ies_len +=
- 2 + sizeof(struct ieee80211_he_cap_elem) +
+ 3 + sizeof(struct ieee80211_he_cap_elem) +
sizeof(struct ieee80211_he_mcs_nss_supp) +
- IEEE80211_HE_PPE_THRES_MAX_LEN + 1;
+ IEEE80211_HE_PPE_THRES_MAX_LEN;
+ }
if (!local->ops->hw_scan) {
/* For hw_scan, driver needs to set these up. */
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 77080b4f87b8..b2b717a78114 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -127,26 +127,6 @@ struct mesh_path {
u32 path_change_count;
};
-/**
- * struct mesh_table
- *
- * @known_gates: list of known mesh gates and their mpaths by the station. The
- * gate's mpath may or may not be resolved and active.
- * @gates_lock: protects updates to known_gates
- * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
- * @walk_head: linked list containing all mesh_path objects
- * @walk_lock: lock protecting walk_head
- * @entries: number of entries in the table
- */
-struct mesh_table {
- struct hlist_head known_gates;
- spinlock_t gates_lock;
- struct rhashtable rhead;
- struct hlist_head walk_head;
- spinlock_t walk_lock;
- atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
-};
-
/* Recent multicast cache */
/* RMC_BUCKETS must be a power of 2, maximum 256 */
#define RMC_BUCKETS 256
@@ -308,7 +288,7 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta);
void mesh_path_flush_pending(struct mesh_path *mpath);
void mesh_path_tx_pending(struct mesh_path *mpath);
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata);
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata);
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr);
void mesh_path_timer(struct timer_list *t);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 7cab1cf09bf1..acc1c299f1ae 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -47,32 +47,24 @@ static void mesh_path_rht_free(void *ptr, void *tblptr)
mesh_path_free_rcu(tbl, mpath);
}
-static struct mesh_table *mesh_table_alloc(void)
+static void mesh_table_init(struct mesh_table *tbl)
{
- struct mesh_table *newtbl;
+ INIT_HLIST_HEAD(&tbl->known_gates);
+ INIT_HLIST_HEAD(&tbl->walk_head);
+ atomic_set(&tbl->entries, 0);
+ spin_lock_init(&tbl->gates_lock);
+ spin_lock_init(&tbl->walk_lock);
- newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
- if (!newtbl)
- return NULL;
-
- INIT_HLIST_HEAD(&newtbl->known_gates);
- INIT_HLIST_HEAD(&newtbl->walk_head);
- atomic_set(&newtbl->entries, 0);
- spin_lock_init(&newtbl->gates_lock);
- spin_lock_init(&newtbl->walk_lock);
- if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
- kfree(newtbl);
- return NULL;
- }
-
- return newtbl;
+ /* rhashtable_init() may fail only in case of wrong
+ * mesh_rht_params
+ */
+ WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}
static void mesh_table_free(struct mesh_table *tbl)
{
rhashtable_free_and_destroy(&tbl->rhead,
mesh_path_rht_free, tbl);
- kfree(tbl);
}
/**
@@ -238,13 +230,13 @@ static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
- return mpath_lookup(sdata->u.mesh.mesh_paths, dst, sdata);
+ return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
- return mpath_lookup(sdata->u.mesh.mpp_paths, dst, sdata);
+ return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}
static struct mesh_path *
@@ -281,7 +273,7 @@ __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
- return __mesh_path_lookup_by_idx(sdata->u.mesh.mesh_paths, idx);
+ return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}
/**
@@ -296,7 +288,7 @@ mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
- return __mesh_path_lookup_by_idx(sdata->u.mesh.mpp_paths, idx);
+ return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}
/**
@@ -309,7 +301,7 @@ int mesh_path_add_gate(struct mesh_path *mpath)
int err;
rcu_read_lock();
- tbl = mpath->sdata->u.mesh.mesh_paths;
+ tbl = &mpath->sdata->u.mesh.mesh_paths;
spin_lock_bh(&mpath->state_lock);
if (mpath->is_gate) {
@@ -418,7 +410,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
if (!new_mpath)
return ERR_PTR(-ENOMEM);
- tbl = sdata->u.mesh.mesh_paths;
+ tbl = &sdata->u.mesh.mesh_paths;
spin_lock_bh(&tbl->walk_lock);
mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
&new_mpath->rhash,
@@ -460,7 +452,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
return -ENOMEM;
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
- tbl = sdata->u.mesh.mpp_paths;
+ tbl = &sdata->u.mesh.mpp_paths;
spin_lock_bh(&tbl->walk_lock);
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
@@ -489,7 +481,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
void mesh_plink_broken(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+ struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
@@ -548,7 +540,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
+ struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
struct mesh_path *mpath;
struct hlist_node *n;
@@ -563,7 +555,7 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
const u8 *proxy)
{
- struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
+ struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
struct mesh_path *mpath;
struct hlist_node *n;
@@ -597,8 +589,8 @@ static void table_flush_by_iface(struct mesh_table *tbl)
*/
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
- table_flush_by_iface(sdata->u.mesh.mesh_paths);
- table_flush_by_iface(sdata->u.mesh.mpp_paths);
+ table_flush_by_iface(&sdata->u.mesh.mesh_paths);
+ table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}
/**
@@ -644,7 +636,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
/* flush relevant mpp entries first */
mpp_flush_by_proxy(sdata, addr);
- err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
+ err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
sdata->u.mesh.mesh_paths_generation++;
return err;
}
@@ -682,7 +674,7 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
struct mesh_path *gate;
bool copy = false;
- tbl = sdata->u.mesh.mesh_paths;
+ tbl = &sdata->u.mesh.mesh_paths;
rcu_read_lock();
hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
@@ -762,29 +754,10 @@ void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
mesh_path_tx_pending(mpath);
}
-int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
+void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
- struct mesh_table *tbl_path, *tbl_mpp;
- int ret;
-
- tbl_path = mesh_table_alloc();
- if (!tbl_path)
- return -ENOMEM;
-
- tbl_mpp = mesh_table_alloc();
- if (!tbl_mpp) {
- ret = -ENOMEM;
- goto free_path;
- }
-
- sdata->u.mesh.mesh_paths = tbl_path;
- sdata->u.mesh.mpp_paths = tbl_mpp;
-
- return 0;
-
-free_path:
- mesh_table_free(tbl_path);
- return ret;
+ mesh_table_init(&sdata->u.mesh.mesh_paths);
+ mesh_table_init(&sdata->u.mesh.mpp_paths);
}
static
@@ -806,12 +779,12 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
- mesh_path_tbl_expire(sdata, sdata->u.mesh.mesh_paths);
- mesh_path_tbl_expire(sdata, sdata->u.mesh.mpp_paths);
+ mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
+ mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}
void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
- mesh_table_free(sdata->u.mesh.mesh_paths);
- mesh_table_free(sdata->u.mesh.mpp_paths);
+ mesh_table_free(&sdata->u.mesh.mesh_paths);
+ mesh_table_free(&sdata->u.mesh.mpp_paths);
}
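
With struct mesh_table moved into ieee80211_i.h and embedded in struct ieee80211_if_mesh, mesh_pathtbl_init() no longer allocates anything and becomes void; the only remaining failure source, rhashtable_init() with fixed parameters, is reduced to a WARN_ON. A minimal sketch of the embedded form with hypothetical structs: init cannot fail, so the caller has no partially-built state to unwind.

#include <stdio.h>
#include <string.h>

/* Hypothetical table, loosely modelling struct mesh_table. */
struct table {
	unsigned int entries;
	int initialized;
};

/* Tables embedded in their owner rather than heap-allocated. */
struct iface {
	struct table paths;
	struct table proxy_paths;
};

static void table_init(struct table *t)
{
	memset(t, 0, sizeof(*t));
	t->initialized = 1;
}

/* Counterpart of mesh_pathtbl_init(): no allocation, no error path,
 * no partially-initialized owner to unwind. */
static void iface_pathtbl_init(struct iface *ifc)
{
	table_init(&ifc->paths);
	table_init(&ifc->proxy_paths);
}

int main(void)
{
	struct iface ifc;

	iface_pathtbl_init(&ifc);
	printf("paths ready: %d, proxy ready: %d\n",
	       ifc.paths.initialized, ifc.proxy_paths.initialized);
	return 0;
}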
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 37f7d975f3da..1eeabdf10052 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -164,12 +164,15 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
chandef->freq1_offset = channel->freq_offset;
if (channel->band == NL80211_BAND_6GHZ) {
- if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, chandef))
+ if (!ieee80211_chandef_he_6ghz_oper(sdata, he_oper, chandef)) {
+ mlme_dbg(sdata,
+ "bad 6 GHz operation, disabling HT/VHT/HE\n");
ret = IEEE80211_STA_DISABLE_HT |
IEEE80211_STA_DISABLE_VHT |
IEEE80211_STA_DISABLE_HE;
- else
+ } else {
ret = 0;
+ }
vht_chandef = *chandef;
goto out;
} else if (sband->band == NL80211_BAND_S1GHZ) {
@@ -190,6 +193,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
if (!ht_oper || !sta_ht_cap.ht_supported) {
+ mlme_dbg(sdata, "HT operation missing / HT not supported\n");
ret = IEEE80211_STA_DISABLE_HT |
IEEE80211_STA_DISABLE_VHT |
IEEE80211_STA_DISABLE_HE;
@@ -223,6 +227,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
ieee80211_chandef_ht_oper(ht_oper, chandef);
} else {
+ mlme_dbg(sdata, "40 MHz not supported\n");
/* 40 MHz (and 80 MHz) must be supported for VHT */
ret = IEEE80211_STA_DISABLE_VHT;
/* also mark 40 MHz disabled */
@@ -231,6 +236,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
}
if (!vht_oper || !sband->vht_cap.vht_supported) {
+ mlme_dbg(sdata, "VHT operation missing / VHT not supported\n");
ret = IEEE80211_STA_DISABLE_VHT;
goto out;
}
@@ -253,7 +259,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
&vht_chandef)) {
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HE))
sdata_info(sdata,
- "HE AP VHT information is invalid, disable HE\n");
+ "HE AP VHT information is invalid, disabling HE\n");
ret = IEEE80211_STA_DISABLE_HE;
goto out;
}
@@ -263,7 +269,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
&vht_chandef)) {
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
- "AP VHT information is invalid, disable VHT\n");
+ "AP VHT information is invalid, disabling VHT\n");
ret = IEEE80211_STA_DISABLE_VHT;
goto out;
}
@@ -271,7 +277,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
if (!cfg80211_chandef_valid(&vht_chandef)) {
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
- "AP VHT information is invalid, disable VHT\n");
+ "AP VHT information is invalid, disabling VHT\n");
ret = IEEE80211_STA_DISABLE_VHT;
goto out;
}
@@ -284,7 +290,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
- "AP VHT information doesn't match HT, disable VHT\n");
+ "AP VHT information doesn't match HT, disabling VHT\n");
ret = IEEE80211_STA_DISABLE_VHT;
goto out;
}
@@ -649,10 +655,6 @@ static void ieee80211_add_he_ie(struct ieee80211_sub_if_data *sdata,
if (!he_cap || !reg_cap)
return;
- /*
- * TODO: the 1 added is because this temporarily is under the EXTENSION
- * IE. Get rid of it when it moves.
- */
he_cap_size =
2 + 1 + sizeof(he_cap->he_cap_elem) +
ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem) +
@@ -3741,6 +3743,10 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
elems->timeout_int &&
elems->timeout_int->type == WLAN_TIMEOUT_ASSOC_COMEBACK) {
u32 tu, ms;
+
+ cfg80211_assoc_comeback(sdata->dev, assoc_data->bss,
+ le32_to_cpu(elems->timeout_int->value));
+
tu = le32_to_cpu(elems->timeout_int->value);
ms = tu * 1024 / 1000;
sdata_info(sdata,
@@ -4900,7 +4906,7 @@ static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- const u8 *ht_cap_ie, *vht_cap_ie;
+ const struct element *ht_cap_elem, *vht_cap_elem;
const struct ieee80211_ht_cap *ht_cap;
const struct ieee80211_vht_cap *vht_cap;
u8 chains = 1;
@@ -4908,9 +4914,9 @@ static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_DISABLE_HT)
return chains;
- ht_cap_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_HT_CAPABILITY);
- if (ht_cap_ie && ht_cap_ie[1] >= sizeof(*ht_cap)) {
- ht_cap = (void *)(ht_cap_ie + 2);
+ ht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_HT_CAPABILITY);
+ if (ht_cap_elem && ht_cap_elem->datalen >= sizeof(*ht_cap)) {
+ ht_cap = (void *)ht_cap_elem->data;
chains = ieee80211_mcs_to_chains(&ht_cap->mcs);
/*
* TODO: use "Tx Maximum Number Spatial Streams Supported" and
@@ -4921,12 +4927,12 @@ static u8 ieee80211_ht_vht_rx_chains(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
return chains;
- vht_cap_ie = ieee80211_bss_get_ie(cbss, WLAN_EID_VHT_CAPABILITY);
- if (vht_cap_ie && vht_cap_ie[1] >= sizeof(*vht_cap)) {
+ vht_cap_elem = ieee80211_bss_get_elem(cbss, WLAN_EID_VHT_CAPABILITY);
+ if (vht_cap_elem && vht_cap_elem->datalen >= sizeof(*vht_cap)) {
u8 nss;
u16 tx_mcs_map;
- vht_cap = (void *)(vht_cap_ie + 2);
+ vht_cap = (void *)vht_cap_elem->data;
tx_mcs_map = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map);
for (nss = 8; nss > 0; nss--) {
if (((tx_mcs_map >> (2 * (nss - 1))) & 3) !=
@@ -5043,19 +5049,23 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
/* disable HT/VHT/HE if we don't support them */
if (!sband->ht_cap.ht_supported && !is_6ghz) {
+ mlme_dbg(sdata, "HT not supported, disabling HT/VHT/HE\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
}
if (!sband->vht_cap.vht_supported && is_5ghz) {
+ mlme_dbg(sdata, "VHT not supported, disabling VHT/HE\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
}
if (!ieee80211_get_he_iftype_cap(sband,
- ieee80211_vif_type_p2p(&sdata->vif)))
+ ieee80211_vif_type_p2p(&sdata->vif))) {
+ mlme_dbg(sdata, "HE not supported, disabling it\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+ }
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) && !is_6ghz) {
ht_oper = elems->ht_operation;
@@ -5079,6 +5089,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
}
if (!elems->vht_cap_elem) {
+ sdata_info(sdata,
+ "bad VHT capabilities, disabling VHT\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
vht_oper = NULL;
}
@@ -5126,8 +5138,10 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
break;
}
- if (!have_80mhz)
+ if (!have_80mhz) {
+ sdata_info(sdata, "80 MHz not supported, disabling VHT\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+ }
if (sband->band == NL80211_BAND_S1GHZ) {
s1g_oper = elems->s1g_oper;
@@ -5265,7 +5279,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
*/
if (new_sta) {
u32 rates = 0, basic_rates = 0;
- bool have_higher_than_11mbit;
+ bool have_higher_than_11mbit = false;
int min_rate = INT_MAX, min_rate_index = -1;
const struct cfg80211_bss_ies *ies;
int shift = ieee80211_vif_get_shift(&sdata->vif);
@@ -5691,12 +5705,14 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
else if (!is_6ghz)
ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
vht_elem = ieee80211_bss_get_elem(req->bss, WLAN_EID_VHT_CAPABILITY);
- if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap))
+ if (vht_elem && vht_elem->datalen >= sizeof(struct ieee80211_vht_cap)) {
memcpy(&assoc_data->ap_vht_cap, vht_elem->data,
sizeof(struct ieee80211_vht_cap));
- else if (is_5ghz)
+ } else if (is_5ghz) {
+ sdata_info(sdata, "VHT capa missing/short, disabling VHT/HE\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT |
IEEE80211_STA_DISABLE_HE;
+ }
rcu_read_unlock();
if (WARN((sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_UAPSD) &&
@@ -5770,16 +5786,21 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
}
if (req->flags & ASSOC_REQ_DISABLE_HT) {
+ mlme_dbg(sdata, "HT disabled by flag, disabling HT/VHT/HE\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_HT;
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
}
- if (req->flags & ASSOC_REQ_DISABLE_VHT)
+ if (req->flags & ASSOC_REQ_DISABLE_VHT) {
+ mlme_dbg(sdata, "VHT disabled by flag, disabling VHT\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+ }
- if (req->flags & ASSOC_REQ_DISABLE_HE)
+ if (req->flags & ASSOC_REQ_DISABLE_HE) {
+ mlme_dbg(sdata, "HE disabled by flag, disabling HE\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_HE;
+ }
err = ieee80211_prep_connection(sdata, req->bss, true, override);
if (err)
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 72b44d4c42d0..9c3b7fc377c1 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -18,8 +18,6 @@
#define AVG_AMPDU_SIZE 16
#define AVG_PKT_SIZE 1200
-#define SAMPLE_SWITCH_THR 100
-
/* Number of bits for an average sized packet */
#define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0544563ede52..93680af62c47 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -465,7 +465,12 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
unsigned int stbc;
rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS));
- *pos++ = local->hw.radiotap_mcs_details;
+ *pos = local->hw.radiotap_mcs_details;
+ if (status->enc_flags & RX_ENC_FLAG_HT_GF)
+ *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
+ if (status->enc_flags & RX_ENC_FLAG_LDPC)
+ *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FEC;
+ pos++;
*pos = 0;
if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
*pos |= IEEE80211_RADIOTAP_MCS_SGI;
@@ -4924,7 +4929,7 @@ void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
goto drop;
break;
case RX_ENC_VHT:
- if (WARN_ONCE(status->rate_idx > 9 ||
+ if (WARN_ONCE(status->rate_idx > 11 ||
!status->nss ||
status->nss > 8,
"Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 9e8381bef7ed..d91498f77796 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2892,6 +2892,13 @@ TRACE_EVENT(drv_twt_teardown_request,
)
);
+DEFINE_EVENT(sta_event, drv_net_fill_forward_path,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta),
+ TP_ARGS(local, sdata, sta)
+);
+
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 86a54df3aabd..6d054fed062f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3821,7 +3821,7 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
{
struct ieee80211_local *local = hw_to_local(hw);
struct airtime_sched_info *air_sched;
- u64 now = ktime_get_boottime_ns();
+ u64 now = ktime_get_coarse_boottime_ns();
struct ieee80211_txq *ret = NULL;
struct airtime_info *air_info;
struct txq_info *txqi = NULL;
@@ -3948,7 +3948,7 @@ void ieee80211_update_airtime_weight(struct ieee80211_local *local,
u64 weight_sum = 0;
if (unlikely(!now))
- now = ktime_get_boottime_ns();
+ now = ktime_get_coarse_boottime_ns();
lockdep_assert_held(&air_sched->lock);
@@ -3974,7 +3974,7 @@ void ieee80211_schedule_txq(struct ieee80211_hw *hw,
struct ieee80211_local *local = hw_to_local(hw);
struct txq_info *txqi = to_txq_info(txq);
struct airtime_sched_info *air_sched;
- u64 now = ktime_get_boottime_ns();
+ u64 now = ktime_get_coarse_boottime_ns();
struct airtime_info *air_info;
u8 ac = txq->ac;
bool was_active;
@@ -4032,7 +4032,7 @@ static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw,
if (!purge)
airtime_set_active(air_sched, air_info,
- ktime_get_boottime_ns());
+ ktime_get_coarse_boottime_ns());
rb_erase_cached(&txqi->schedule_order,
&air_sched->active_txqs);
@@ -4120,7 +4120,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
if (RB_EMPTY_NODE(&txqi->schedule_order))
goto out;
- now = ktime_get_boottime_ns();
+ now = ktime_get_coarse_boottime_ns();
/* Like in ieee80211_next_txq(), make sure the first station in the
* scheduling order is eligible for transmission to avoid starvation.
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0e4e1956bcea..f71b042a5c8b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -862,6 +862,19 @@ static void __iterate_stations(struct ieee80211_local *local,
}
}
+void ieee80211_iterate_stations(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ mutex_lock(&local->sta_mtx);
+ __iterate_stations(local, iterator, data);
+ mutex_unlock(&local->sta_mtx);
+}
+EXPORT_SYMBOL_GPL(ieee80211_iterate_stations);
+
void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
void (*iterator)(void *data,
struct ieee80211_sta *sta),
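As a usage note for the newly exported helper: a minimal driver-side sketch, assuming a hypothetical counting callback; only ieee80211_iterate_stations() and struct ieee80211_sta come from this patch. Unlike the _atomic variant, the new iterator holds local->sta_mtx rather than the RCU read lock, so the callback is allowed to sleep.

#include <net/mac80211.h>

/* hypothetical callback: just counts stations; it may sleep, since the
 * non-atomic iterator runs under local->sta_mtx, not RCU */
static void drv_count_sta(void *data, struct ieee80211_sta *sta)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int drv_count_all_stas(struct ieee80211_hw *hw)
{
	unsigned int count = 0;

	ieee80211_iterate_stations(hw, drv_count_sta, &count);
	return count;
}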
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 4eed23e27610..7ed0d268aff2 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -449,7 +449,6 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb,
(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
return 0;
- hdr = (struct ieee80211_hdr *) pos;
pos += hdrlen;
pn64 = atomic64_inc_return(&key->conf.tx_pn);
@@ -686,7 +685,6 @@ static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
return 0;
- hdr = (struct ieee80211_hdr *)pos;
pos += hdrlen;
pn64 = atomic64_inc_return(&key->conf.tx_pn);
@@ -881,8 +879,6 @@ ieee80211_crypto_cs_decrypt(struct ieee80211_rx_data *rx)
if (skb_linearize(rx->skb))
return RX_DROP_UNUSABLE;
- hdr = (struct ieee80211_hdr *)rx->skb->data;
-
rx_pn = key->u.gen.rx_pn[qos_tid];
skb_pn = rx->skb->data + hdrlen + cs->pn_off;
diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c
index 871cf6266125..c921de63b494 100644
--- a/net/mctp/af_mctp.c
+++ b/net/mctp/af_mctp.c
@@ -405,8 +405,7 @@ static void mctp_sk_unhash(struct sock *sk)
trace_mctp_key_release(key, MCTP_TRACE_KEY_CLOSED);
spin_lock(&key->lock);
- if (key->reasm_head)
- kfree_skb(key->reasm_head);
+ kfree_skb(key->reasm_head);
key->reasm_head = NULL;
key->reasm_dead = true;
key->valid = false;
diff --git a/net/mctp/device.c b/net/mctp/device.c
index 8799ee77e7b7..ef2755f82f87 100644
--- a/net/mctp/device.c
+++ b/net/mctp/device.c
@@ -35,14 +35,24 @@ struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->mctp_ptr);
}
-static int mctp_fill_addrinfo(struct sk_buff *skb, struct netlink_callback *cb,
- struct mctp_dev *mdev, mctp_eid_t eid)
+static int mctp_addrinfo_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
+ + nla_total_size(1) // IFA_LOCAL
+ + nla_total_size(1) // IFA_ADDRESS
+ ;
+}
+
+/* flag should be NLM_F_MULTI for dump calls */
+static int mctp_fill_addrinfo(struct sk_buff *skb,
+ struct mctp_dev *mdev, mctp_eid_t eid,
+ int msg_type, u32 portid, u32 seq, int flag)
{
struct ifaddrmsg *hdr;
struct nlmsghdr *nlh;
- nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
- RTM_NEWADDR, sizeof(*hdr), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, portid, seq,
+ msg_type, sizeof(*hdr), flag);
if (!nlh)
return -EMSGSIZE;
@@ -72,10 +82,14 @@ static int mctp_dump_dev_addrinfo(struct mctp_dev *mdev, struct sk_buff *skb,
struct netlink_callback *cb)
{
struct mctp_dump_cb *mcb = (void *)cb->ctx;
+ u32 portid, seq;
int rc = 0;
+ portid = NETLINK_CB(cb->skb).portid;
+ seq = cb->nlh->nlmsg_seq;
for (; mcb->a_idx < mdev->num_addrs; mcb->a_idx++) {
- rc = mctp_fill_addrinfo(skb, cb, mdev, mdev->addrs[mcb->a_idx]);
+ rc = mctp_fill_addrinfo(skb, mdev, mdev->addrs[mcb->a_idx],
+ RTM_NEWADDR, portid, seq, NLM_F_MULTI);
if (rc < 0)
break;
}
@@ -127,6 +141,32 @@ out:
return skb->len;
}
+static void mctp_addr_notify(struct mctp_dev *mdev, mctp_eid_t eid, int msg_type,
+ struct sk_buff *req_skb, struct nlmsghdr *req_nlh)
+{
+ u32 portid = NETLINK_CB(req_skb).portid;
+ struct net *net = dev_net(mdev->dev);
+ struct sk_buff *skb;
+ int rc = -ENOBUFS;
+
+ skb = nlmsg_new(mctp_addrinfo_size(), GFP_KERNEL);
+ if (!skb)
+ goto out;
+
+ rc = mctp_fill_addrinfo(skb, mdev, eid, msg_type,
+ portid, req_nlh->nlmsg_seq, 0);
+ if (rc < 0) {
+ WARN_ON_ONCE(rc == -EMSGSIZE);
+ goto out;
+ }
+
+ rtnl_notify(skb, net, portid, RTNLGRP_MCTP_IFADDR, req_nlh, GFP_KERNEL);
+ return;
+out:
+ kfree_skb(skb);
+ rtnl_set_sk_err(net, RTNLGRP_MCTP_IFADDR, rc);
+}
+
static const struct nla_policy ifa_mctp_policy[IFA_MAX + 1] = {
[IFA_ADDRESS] = { .type = NLA_U8 },
[IFA_LOCAL] = { .type = NLA_U8 },
@@ -189,6 +229,7 @@ static int mctp_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
kfree(tmp_addrs);
+ mctp_addr_notify(mdev, addr->s_addr, RTM_NEWADDR, skb, nlh);
mctp_route_add_local(mdev, addr->s_addr);
return 0;
@@ -244,6 +285,8 @@ static int mctp_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
mdev->num_addrs--;
spin_unlock_irqrestore(&mdev->addrs_lock, flags);
+ mctp_addr_notify(mdev, addr->s_addr, RTM_DELADDR, skb, nlh);
+
return 0;
}
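For completeness, a hedged userspace sketch of consuming the new address notifications: it joins the RTNLGRP_MCTP_IFADDR group (added in the same series) on an rtnetlink socket and reports RTM_NEWADDR/RTM_DELADDR events. The SOL_NETLINK fallback define and the stripped-down error handling are assumptions made for brevity, not part of the patch.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270		/* fallback for older libc headers */
#endif

int main(void)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	unsigned int grp = RTNLGRP_MCTP_IFADDR;	/* group added by this series */
	char buf[4096];
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh;

		for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
		     nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == RTM_NEWADDR)
				printf("MCTP address added\n");
			else if (nlh->nlmsg_type == RTM_DELADDR)
				printf("MCTP address removed\n");
		}
	}
	return 0;
}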
diff --git a/net/mctp/neigh.c b/net/mctp/neigh.c
index 5cc042121493..6ad3e33bd4d4 100644
--- a/net/mctp/neigh.c
+++ b/net/mctp/neigh.c
@@ -85,8 +85,8 @@ void mctp_neigh_remove_dev(struct mctp_dev *mdev)
mutex_unlock(&net->mctp.neigh_lock);
}
-// TODO: add a "source" flag so netlink can only delete static neighbours?
-static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid)
+static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid,
+ enum mctp_neigh_source source)
{
struct net *net = dev_net(mdev->dev);
struct mctp_neigh *neigh, *tmp;
@@ -94,7 +94,8 @@ static int mctp_neigh_remove(struct mctp_dev *mdev, mctp_eid_t eid)
mutex_lock(&net->mctp.neigh_lock);
list_for_each_entry_safe(neigh, tmp, &net->mctp.neighbours, list) {
- if (neigh->dev == mdev && neigh->eid == eid) {
+ if (neigh->dev == mdev && neigh->eid == eid &&
+ neigh->source == source) {
list_del_rcu(&neigh->list);
/* TODO: immediate RTM_DELNEIGH */
call_rcu(&neigh->rcu, __mctp_neigh_free);
@@ -202,7 +203,7 @@ static int mctp_rtm_delneigh(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!mdev)
return -ENODEV;
- return mctp_neigh_remove(mdev, eid);
+ return mctp_neigh_remove(mdev, eid, MCTP_NEIGH_STATIC);
}
static int mctp_fill_neigh(struct sk_buff *skb, u32 portid, u32 seq, int event,
diff --git a/net/mctp/route.c b/net/mctp/route.c
index cdf09c2a7007..8d9f4ff3e285 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -231,9 +231,7 @@ static void __mctp_key_unlock_drop(struct mctp_sk_key *key, struct net *net,
/* and one for the local reference */
mctp_key_unref(key);
- if (skb)
- kfree_skb(skb);
-
+ kfree_skb(skb);
}
#ifdef CONFIG_MCTP_FLOWS
@@ -892,8 +890,7 @@ out_release:
if (!ext_rt)
mctp_route_release(rt);
- if (dev)
- dev_put(dev);
+ dev_put(dev);
return rc;
diff --git a/net/mctp/test/route-test.c b/net/mctp/test/route-test.c
index 36fac3daf86a..86ad15abf897 100644
--- a/net/mctp/test/route-test.c
+++ b/net/mctp/test/route-test.c
@@ -150,11 +150,6 @@ static void mctp_test_fragment(struct kunit *test)
rt = mctp_test_create_route(&init_net, NULL, 10, mtu);
KUNIT_ASSERT_TRUE(test, rt);
- /* The refcount would usually be incremented as part of a route lookup,
- * but we're setting the route directly here.
- */
- refcount_inc(&rt->rt.refs);
-
rc = mctp_do_fragment_route(&rt->rt, skb, mtu, MCTP_TAG_OWNER);
KUNIT_EXPECT_FALSE(test, rc);
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 0c7bde1c14a6..48f75a56f4ae 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -230,8 +230,8 @@ static struct mpls_nh *mpls_get_nexthop(struct mpls_route *rt, u8 index)
* Since those fields can change at any moment, use READ_ONCE to
* access both.
*/
-static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
- struct sk_buff *skb)
+static const struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
+ struct sk_buff *skb)
{
u32 hash = 0;
int nh_index = 0;
@@ -343,8 +343,8 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
{
struct net *net = dev_net(dev);
struct mpls_shim_hdr *hdr;
+ const struct mpls_nh *nh;
struct mpls_route *rt;
- struct mpls_nh *nh;
struct mpls_entry_decoded dec;
struct net_device *out_dev;
struct mpls_dev *out_mdev;
@@ -2360,12 +2360,12 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
u32 labels[MAX_NEW_LABELS];
struct mpls_shim_hdr *hdr;
unsigned int hdr_size = 0;
+ const struct mpls_nh *nh;
struct net_device *dev;
struct mpls_route *rt;
struct rtmsg *rtm, *r;
struct nlmsghdr *nlh;
struct sk_buff *skb;
- struct mpls_nh *nh;
u8 n_labels;
int err;
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 893df00b77b6..b9f492ddf93b 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -158,17 +158,16 @@ struct mpls_route { /* next hop label forwarding entry */
};
#define for_nexthops(rt) { \
- int nhsel; struct mpls_nh *nh; u8 *__nh; \
- for (nhsel = 0, nh = (rt)->rt_nh, __nh = (u8 *)((rt)->rt_nh); \
+ int nhsel; const struct mpls_nh *nh; \
+ for (nhsel = 0, nh = (rt)->rt_nh; \
nhsel < (rt)->rt_nhn; \
- __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)
+ nh = (void *)nh + (rt)->rt_nh_size, nhsel++)
#define change_nexthops(rt) { \
- int nhsel; struct mpls_nh *nh; u8 *__nh; \
- for (nhsel = 0, nh = (struct mpls_nh *)((rt)->rt_nh), \
- __nh = (u8 *)((rt)->rt_nh); \
+ int nhsel; struct mpls_nh *nh; \
+ for (nhsel = 0, nh = (rt)->rt_nh; \
nhsel < (rt)->rt_nhn; \
- __nh += rt->rt_nh_size, nh = (struct mpls_nh *)__nh, nhsel++)
+ nh = (void *)nh + (rt)->rt_nh_size, nhsel++)
#define endfor_nexthops(rt) }
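The rewritten iterators above advance by a byte stride because each mpls_nh carries a route-specific number of labels, so the entries are packed back to back at rt_nh_size intervals rather than sizeof(*nh). A standalone sketch of the same pattern, using a simplified stand-in struct (not the kernel's mpls_nh) and GNU-style void-pointer arithmetic as the macros do:

#include <stdio.h>
#include <stdlib.h>

struct demo_nh {
	int via;		/* stand-in for the real nexthop fields */
	unsigned char labels[];	/* variable-length tail, size fixed per route */
};

int main(void)
{
	size_t nh_size = sizeof(struct demo_nh) + 4;	/* 4 labels per entry */
	int nhn = 3, nhsel;
	unsigned char *buf = calloc(nhn, nh_size);
	const struct demo_nh *nh;

	for (nhsel = 0; nhsel < nhn; nhsel++)
		((struct demo_nh *)(buf + nhsel * nh_size))->via = nhsel + 10;

	/* same walk as the reworked for_nexthops(): advance by nh_size bytes */
	for (nhsel = 0, nh = (const struct demo_nh *)buf;
	     nhsel < nhn;
	     nh = (const void *)nh + nh_size, nhsel++)
		printf("nexthop %d via %d\n", nhsel, nh->via);

	free(buf);
	return 0;
}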
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index fe98e4f475ba..645dd984fef0 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -768,6 +768,28 @@ static noinline bool mptcp_established_options_rst(struct sock *sk, struct sk_bu
return true;
}
+static bool mptcp_established_options_fastclose(struct sock *sk,
+ unsigned int *size,
+ unsigned int remaining,
+ struct mptcp_out_options *opts)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+
+ if (likely(!subflow->send_fastclose))
+ return false;
+
+ if (remaining < TCPOLEN_MPTCP_FASTCLOSE)
+ return false;
+
+ *size = TCPOLEN_MPTCP_FASTCLOSE;
+ opts->suboptions |= OPTION_MPTCP_FASTCLOSE;
+ opts->rcvr_key = msk->remote_key;
+
+ pr_debug("FASTCLOSE key=%llu", opts->rcvr_key);
+ return true;
+}
+
static bool mptcp_established_options_mp_fail(struct sock *sk,
unsigned int *size,
unsigned int remaining,
@@ -806,10 +828,12 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
return false;
if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST)) {
- if (mptcp_established_options_mp_fail(sk, &opt_size, remaining, opts)) {
+ if (mptcp_established_options_fastclose(sk, &opt_size, remaining, opts) ||
+ mptcp_established_options_mp_fail(sk, &opt_size, remaining, opts)) {
*size += opt_size;
remaining -= opt_size;
}
+ /* MP_RST can be used with MP_FASTCLOSE and MP_FAIL if there is room */
if (mptcp_established_options_rst(sk, skb, &opt_size, remaining, opts)) {
*size += opt_size;
remaining -= opt_size;
@@ -821,10 +845,13 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
if (mptcp_established_options_mp(sk, skb, snd_data_fin, &opt_size, remaining, opts))
ret = true;
else if (mptcp_established_options_dss(sk, skb, snd_data_fin, &opt_size, remaining, opts)) {
+ unsigned int mp_fail_size;
+
ret = true;
- if (mptcp_established_options_mp_fail(sk, &opt_size, remaining, opts)) {
- *size += opt_size;
- remaining -= opt_size;
+ if (mptcp_established_options_mp_fail(sk, &mp_fail_size,
+ remaining - opt_size, opts)) {
+ *size += opt_size + mp_fail_size;
+ remaining -= opt_size - mp_fail_size;
return true;
}
}
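A rough arithmetic sketch of the option-space budgeting done above for a RST skb. The 12/4 byte lengths and the 40-byte TCP option space reflect my reading of the usual constants (TCPOLEN_MPTCP_FASTCLOSE, TCPOLEN_MPTCP_RST, MAX_TCP_OPTION_SPACE); they are assumptions, not values stated in this hunk.

#include <stdio.h>

int main(void)
{
	unsigned int remaining = 40;	/* MAX_TCP_OPTION_SPACE */
	unsigned int size = 0;

	if (remaining >= 12) {		/* FASTCLOSE: kind/len/subtype + 8-byte key */
		size += 12;
		remaining -= 12;
	}
	if (remaining >= 4) {		/* MP_RST: kind/len/subtype/reason */
		size += 4;
		remaining -= 4;
	}
	/* both options fit in the same segment, with room to spare */
	printf("used %u bytes of option space, %u left\n", size, remaining);
	return 0;
}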
@@ -1209,7 +1236,7 @@ static void mptcp_set_rwin(const struct tcp_sock *tp)
WRITE_ONCE(msk->rcv_wnd_sent, ack_seq);
}
-static u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __sum16 sum)
+u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum)
{
struct csum_pseudo_header header;
__wsum csum;
@@ -1224,14 +1251,14 @@ static u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __sum1
header.data_len = htons(data_len);
header.csum = 0;
- csum = csum_partial(&header, sizeof(header), ~csum_unfold(sum));
+ csum = csum_partial(&header, sizeof(header), sum);
return (__force u16)csum_fold(csum);
}
static u16 mptcp_make_csum(const struct mptcp_ext *mpext)
{
return __mptcp_make_csum(mpext->data_seq, mpext->subflow_seq, mpext->data_len,
- mpext->csum);
+ ~csum_unfold(mpext->csum));
}
void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
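Interface note on the hunk above: __mptcp_make_csum() now takes the raw 32-bit partial sum (__wsum), so callers that already hold an unfolded sum can pass it directly, while callers holding a folded __sum16 (like mptcp_make_csum()) unfold it with ~csum_unfold() themselves. A tiny userspace sketch of the final fold step, with an illustrative value and a hand-rolled stand-in for csum_fold():

#include <stdio.h>
#include <stdint.h>

/* stand-in for csum_fold(): reduce a 32-bit one's-complement sum to 16 bits */
static uint16_t fold(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold carries into low 16 bits */
	sum = (sum & 0xffff) + (sum >> 16);	/* absorb at most one more carry */
	return (uint16_t)~sum;			/* one's complement of the result */
}

int main(void)
{
	uint32_t partial = 0x1f0368;	/* pretend csum_partial() output */

	printf("folded checksum: 0x%04x\n", fold(partial));
	return 0;
}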
@@ -1251,17 +1278,8 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
ptr += 2;
}
- /* RST is mutually exclusive with everything else */
- if (unlikely(OPTION_MPTCP_RST & opts->suboptions)) {
- *ptr++ = mptcp_option(MPTCPOPT_RST,
- TCPOLEN_MPTCP_RST,
- opts->reset_transient,
- opts->reset_reason);
- return;
- }
-
- /* DSS, MPC, MPJ and ADD_ADDR are mutually exclusive, see
- * mptcp_established_options*()
+ /* DSS, MPC, MPJ, ADD_ADDR, FASTCLOSE and RST are mutually exclusive,
+ * see mptcp_established_options*()
*/
if (likely(OPTION_MPTCP_DSS & opts->suboptions)) {
struct mptcp_ext *mpext = &opts->ext_copy;
@@ -1316,6 +1334,7 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
put_unaligned_be32(mpext->data_len << 16 |
TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
}
+ ptr += 1;
}
} else if (OPTIONS_MPTCP_MPC & opts->suboptions) {
u8 len, flag = MPTCP_CAP_HMAC_SHA256;
@@ -1361,7 +1380,7 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
__mptcp_make_csum(opts->data_seq,
opts->subflow_seq,
opts->data_len,
- opts->csum), ptr);
+ ~csum_unfold(opts->csum)), ptr);
} else {
put_unaligned_be32(opts->data_len << 16 |
TCPOPT_NOP << 8 | TCPOPT_NOP, ptr);
@@ -1370,27 +1389,29 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
/* MPC is additionally mutually exclusive with MP_PRIO */
goto mp_capable_done;
- } else if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) {
- *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
- TCPOLEN_MPTCP_MPJ_SYN,
- opts->backup, opts->join_id);
- put_unaligned_be32(opts->token, ptr);
- ptr += 1;
- put_unaligned_be32(opts->nonce, ptr);
- ptr += 1;
- } else if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) {
- *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
- TCPOLEN_MPTCP_MPJ_SYNACK,
- opts->backup, opts->join_id);
- put_unaligned_be64(opts->thmac, ptr);
- ptr += 2;
- put_unaligned_be32(opts->nonce, ptr);
- ptr += 1;
- } else if (OPTION_MPTCP_MPJ_ACK & opts->suboptions) {
- *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
- TCPOLEN_MPTCP_MPJ_ACK, 0, 0);
- memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN);
- ptr += 5;
+ } else if (OPTIONS_MPTCP_MPJ & opts->suboptions) {
+ if (OPTION_MPTCP_MPJ_SYN & opts->suboptions) {
+ *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
+ TCPOLEN_MPTCP_MPJ_SYN,
+ opts->backup, opts->join_id);
+ put_unaligned_be32(opts->token, ptr);
+ ptr += 1;
+ put_unaligned_be32(opts->nonce, ptr);
+ ptr += 1;
+ } else if (OPTION_MPTCP_MPJ_SYNACK & opts->suboptions) {
+ *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
+ TCPOLEN_MPTCP_MPJ_SYNACK,
+ opts->backup, opts->join_id);
+ put_unaligned_be64(opts->thmac, ptr);
+ ptr += 2;
+ put_unaligned_be32(opts->nonce, ptr);
+ ptr += 1;
+ } else {
+ *ptr++ = mptcp_option(MPTCPOPT_MP_JOIN,
+ TCPOLEN_MPTCP_MPJ_ACK, 0, 0);
+ memcpy(ptr, opts->hmac, MPTCPOPT_HMAC_LEN);
+ ptr += 5;
+ }
} else if (OPTION_MPTCP_ADD_ADDR & opts->suboptions) {
u8 len = TCPOLEN_MPTCP_ADD_ADDR_BASE;
u8 echo = MPTCP_ADDR_ECHO;
@@ -1447,6 +1468,24 @@ void mptcp_write_options(__be32 *ptr, const struct tcp_sock *tp,
ptr += 1;
}
}
+ } else if (unlikely(OPTION_MPTCP_FASTCLOSE & opts->suboptions)) {
+ /* FASTCLOSE is mutually exclusive with others except RST */
+ *ptr++ = mptcp_option(MPTCPOPT_MP_FASTCLOSE,
+ TCPOLEN_MPTCP_FASTCLOSE,
+ 0, 0);
+ put_unaligned_be64(opts->rcvr_key, ptr);
+ ptr += 2;
+
+ if (OPTION_MPTCP_RST & opts->suboptions)
+ goto mp_rst;
+ return;
+ } else if (unlikely(OPTION_MPTCP_RST & opts->suboptions)) {
+mp_rst:
+ *ptr++ = mptcp_option(MPTCPOPT_RST,
+ TCPOLEN_MPTCP_RST,
+ opts->reset_transient,
+ opts->reset_reason);
+ return;
}
if (OPTION_MPTCP_PRIO & opts->suboptions) {
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 6ab386ff3294..696b2c4613a7 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -172,9 +172,28 @@ void mptcp_pm_subflow_established(struct mptcp_sock *msk)
spin_unlock_bh(&pm->lock);
}
-void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
+void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
+ const struct mptcp_subflow_context *subflow)
{
- pr_debug("msk=%p", msk);
+ struct mptcp_pm_data *pm = &msk->pm;
+ bool update_subflows;
+
+ update_subflows = (ssk->sk_state == TCP_CLOSE) &&
+ (subflow->request_join || subflow->mp_join);
+ if (!READ_ONCE(pm->work_pending) && !update_subflows)
+ return;
+
+ spin_lock_bh(&pm->lock);
+ if (update_subflows)
+ pm->subflows--;
+
+ /* Even if this subflow is not really established, tell the PM to try
+ * to pick the next ones, if possible.
+ */
+ if (mptcp_pm_nl_check_work_pending(msk))
+ mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);
+
+ spin_unlock_bh(&pm->lock);
}
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
@@ -356,7 +375,7 @@ void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
}
}
-void mptcp_pm_data_init(struct mptcp_sock *msk)
+void mptcp_pm_data_reset(struct mptcp_sock *msk)
{
msk->pm.add_addr_signaled = 0;
msk->pm.add_addr_accepted = 0;
@@ -370,11 +389,16 @@ void mptcp_pm_data_init(struct mptcp_sock *msk)
WRITE_ONCE(msk->pm.accept_subflow, false);
WRITE_ONCE(msk->pm.remote_deny_join_id0, false);
msk->pm.status = 0;
+ bitmap_fill(msk->pm.id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
+
+ mptcp_pm_nl_data_init(msk);
+}
+void mptcp_pm_data_init(struct mptcp_sock *msk)
+{
spin_lock_init(&msk->pm.lock);
INIT_LIST_HEAD(&msk->pm.anno_list);
-
- mptcp_pm_nl_data_init(msk);
+ mptcp_pm_data_reset(msk);
}
void __init mptcp_pm_init(void)
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index f523051f5aef..75af1f701e1d 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -38,9 +38,6 @@ struct mptcp_pm_add_entry {
u8 retrans_times;
};
-#define MAX_ADDR_ID 255
-#define BITMAP_SZ DIV_ROUND_UP(MAX_ADDR_ID + 1, BITS_PER_LONG)
-
struct pm_nl_pernet {
/* protects pernet updates */
spinlock_t lock;
@@ -52,14 +49,14 @@ struct pm_nl_pernet {
unsigned int local_addr_max;
unsigned int subflows_max;
unsigned int next_id;
- unsigned long id_bitmap[BITMAP_SZ];
+ DECLARE_BITMAP(id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
};
#define MPTCP_PM_ADDR_MAX 8
#define ADD_ADDR_RETRANS_MAX 3
static bool addresses_equal(const struct mptcp_addr_info *a,
- struct mptcp_addr_info *b, bool use_port)
+ const struct mptcp_addr_info *b, bool use_port)
{
bool addr_equals = false;
@@ -168,11 +165,13 @@ select_local_address(const struct pm_nl_pernet *pernet,
msk_owned_by_me(msk);
rcu_read_lock();
- __mptcp_flush_join_list(msk);
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
continue;
+ if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
+ continue;
+
if (entry->addr.family != sk->sk_family) {
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
if ((entry->addr.family == AF_INET &&
@@ -183,23 +182,17 @@ select_local_address(const struct pm_nl_pernet *pernet,
continue;
}
- /* avoid any address already in use by subflows and
- * pending join
- */
- if (!lookup_subflow_by_saddr(&msk->conn_list, &entry->addr)) {
- ret = entry;
- break;
- }
+ ret = entry;
+ break;
}
rcu_read_unlock();
return ret;
}
static struct mptcp_pm_addr_entry *
-select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos)
+select_signal_address(struct pm_nl_pernet *pernet, struct mptcp_sock *msk)
{
struct mptcp_pm_addr_entry *entry, *ret = NULL;
- int i = 0;
rcu_read_lock();
/* do not keep any additional per socket state, just signal
@@ -208,12 +201,14 @@ select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos)
* can lead to additional addresses not being announced.
*/
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+ if (!test_bit(entry->addr.id, msk->pm.id_avail_bitmap))
+ continue;
+
if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
continue;
- if (i++ == pos) {
- ret = entry;
- break;
- }
+
+ ret = entry;
+ break;
}
rcu_read_unlock();
return ret;
@@ -255,12 +250,17 @@ unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk)
}
EXPORT_SYMBOL_GPL(mptcp_pm_get_local_addr_max);
-static void check_work_pending(struct mptcp_sock *msk)
+bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk)
{
- if (msk->pm.add_addr_signaled == mptcp_pm_get_add_addr_signal_max(msk) &&
- (msk->pm.local_addr_used == mptcp_pm_get_local_addr_max(msk) ||
- msk->pm.subflows == mptcp_pm_get_subflows_max(msk)))
+ struct pm_nl_pernet *pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id);
+
+ if (msk->pm.subflows == mptcp_pm_get_subflows_max(msk) ||
+ (find_next_and_bit(pernet->id_bitmap, msk->pm.id_avail_bitmap,
+ MPTCP_PM_MAX_ADDR_ID + 1, 0) == MPTCP_PM_MAX_ADDR_ID + 1)) {
WRITE_ONCE(msk->pm.work_pending, false);
+ return false;
+ }
+ return true;
}
struct mptcp_pm_add_entry *
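A userspace sketch of the check mptcp_pm_nl_check_work_pending() performs above: path-manager work remains only while some address id is set in both the pernet id_bitmap (endpoint configured) and the per-msk id_avail_bitmap (id not yet consumed), which the kernel computes with find_next_and_bit(). The 255 id limit mirrors the old MAX_ADDR_ID define removed earlier in this patch; the concrete values below are illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define MAX_ADDR_ID 255
#define WORDS ((MAX_ADDR_ID + 1 + 63) / 64)

/* mirrors the find_next_and_bit() test: any id set in both bitmaps? */
static bool any_usable_endpoint(const uint64_t *pernet, const uint64_t *avail)
{
	for (int i = 0; i < WORDS; i++)
		if (pernet[i] & avail[i])
			return true;
	return false;
}

int main(void)
{
	uint64_t pernet[WORDS] = { 0 }, avail[WORDS];

	for (int i = 0; i < WORDS; i++)
		avail[i] = ~0ULL;		/* bitmap_fill() at msk init */
	pernet[0] = 1ULL << 1;			/* endpoint with id 1 configured */

	printf("work pending: %d\n", any_usable_endpoint(pernet, avail));
	avail[0] &= ~(1ULL << 1);		/* id 1 consumed by a subflow */
	printf("work pending: %d\n", any_usable_endpoint(pernet, avail));
	return 0;
}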
@@ -429,6 +429,7 @@ static bool lookup_address_in_vec(struct mptcp_addr_info *addrs, unsigned int nr
static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullmesh,
struct mptcp_addr_info *addrs)
{
+ bool deny_id0 = READ_ONCE(msk->pm.remote_deny_join_id0);
struct sock *sk = (struct sock *)msk, *ssk;
struct mptcp_subflow_context *subflow;
struct mptcp_addr_info remote = { 0 };
@@ -436,22 +437,28 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullm
int i = 0;
subflows_max = mptcp_pm_get_subflows_max(msk);
+ remote_address((struct sock_common *)sk, &remote);
/* Non-fullmesh endpoint, fill in the single entry
* corresponding to the primary MPC subflow remote address
*/
if (!fullmesh) {
- remote_address((struct sock_common *)sk, &remote);
+ if (deny_id0)
+ return 0;
+
msk->pm.subflows++;
addrs[i++] = remote;
} else {
mptcp_for_each_subflow(msk, subflow) {
ssk = mptcp_subflow_tcp_sock(subflow);
- remote_address((struct sock_common *)ssk, &remote);
- if (!lookup_address_in_vec(addrs, i, &remote) &&
+ remote_address((struct sock_common *)ssk, &addrs[i]);
+ if (deny_id0 && addresses_equal(&addrs[i], &remote, false))
+ continue;
+
+ if (!lookup_address_in_vec(addrs, i, &addrs[i]) &&
msk->pm.subflows < subflows_max) {
msk->pm.subflows++;
- addrs[i++] = remote;
+ i++;
}
}
}
@@ -459,6 +466,35 @@ static unsigned int fill_remote_addresses_vec(struct mptcp_sock *msk, bool fullm
return i;
}
+static struct mptcp_pm_addr_entry *
+__lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
+{
+ struct mptcp_pm_addr_entry *entry;
+
+ list_for_each_entry(entry, &pernet->local_addr_list, list) {
+ if (entry->addr.id == id)
+ return entry;
+ }
+ return NULL;
+}
+
+static int
+lookup_id_by_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *addr)
+{
+ struct mptcp_pm_addr_entry *entry;
+ int ret = -1;
+
+ rcu_read_lock();
+ list_for_each_entry(entry, &pernet->local_addr_list, list) {
+ if (addresses_equal(&entry->addr, addr, entry->addr.port)) {
+ ret = entry->addr.id;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
@@ -474,6 +510,19 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
local_addr_max = mptcp_pm_get_local_addr_max(msk);
subflows_max = mptcp_pm_get_subflows_max(msk);
+ /* do lazy endpoint usage accounting for the MPC subflows */
+ if (unlikely(!(msk->pm.status & BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED))) && msk->first) {
+ struct mptcp_addr_info mpc_addr;
+ int mpc_id;
+
+ local_address((struct sock_common *)msk->first, &mpc_addr);
+ mpc_id = lookup_id_by_addr(pernet, &mpc_addr);
+ if (mpc_id >= 0)
+ __clear_bit(mpc_id, msk->pm.id_avail_bitmap);
+
+ msk->pm.status |= BIT(MPTCP_PM_MPC_ENDPOINT_ACCOUNTED);
+ }
+
pr_debug("local %d:%d signal %d:%d subflows %d:%d\n",
msk->pm.local_addr_used, local_addr_max,
msk->pm.add_addr_signaled, add_addr_signal_max,
@@ -481,47 +530,41 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
/* check first for announce */
if (msk->pm.add_addr_signaled < add_addr_signal_max) {
- local = select_signal_address(pernet,
- msk->pm.add_addr_signaled);
+ local = select_signal_address(pernet, msk);
if (local) {
if (mptcp_pm_alloc_anno_list(msk, local)) {
+ __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
msk->pm.add_addr_signaled++;
mptcp_pm_announce_addr(msk, &local->addr, false);
mptcp_pm_nl_addr_send_ack(msk);
}
- } else {
- /* pick failed, avoid fourther attempts later */
- msk->pm.local_addr_used = add_addr_signal_max;
}
-
- check_work_pending(msk);
}
/* check if should create a new subflow */
- if (msk->pm.local_addr_used < local_addr_max &&
- msk->pm.subflows < subflows_max &&
- !READ_ONCE(msk->pm.remote_deny_join_id0)) {
+ while (msk->pm.local_addr_used < local_addr_max &&
+ msk->pm.subflows < subflows_max) {
+ struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
+ bool fullmesh;
+ int i, nr;
+
local = select_local_address(pernet, msk);
- if (local) {
- bool fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
- struct mptcp_addr_info addrs[MPTCP_PM_ADDR_MAX];
- int i, nr;
+ if (!local)
+ break;
- msk->pm.local_addr_used++;
- check_work_pending(msk);
- nr = fill_remote_addresses_vec(msk, fullmesh, addrs);
- spin_unlock_bh(&msk->pm.lock);
- for (i = 0; i < nr; i++)
- __mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
- spin_lock_bh(&msk->pm.lock);
- return;
- }
+ fullmesh = !!(local->flags & MPTCP_PM_ADDR_FLAG_FULLMESH);
- /* lookup failed, avoid fourther attempts later */
- msk->pm.local_addr_used = local_addr_max;
- check_work_pending(msk);
+ msk->pm.local_addr_used++;
+ nr = fill_remote_addresses_vec(msk, fullmesh, addrs);
+ if (nr)
+ __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
+ spin_unlock_bh(&msk->pm.lock);
+ for (i = 0; i < nr; i++)
+ __mptcp_subflow_connect(sk, &local->addr, &addrs[i]);
+ spin_lock_bh(&msk->pm.lock);
}
+ mptcp_pm_nl_check_work_pending(msk);
}
static void mptcp_pm_nl_fully_established(struct mptcp_sock *msk)
@@ -551,7 +594,6 @@ static unsigned int fill_local_addresses_vec(struct mptcp_sock *msk,
subflows_max = mptcp_pm_get_subflows_max(msk);
rcu_read_lock();
- __mptcp_flush_join_list(msk);
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
if (!(entry->flags & MPTCP_PM_ADDR_FLAG_FULLMESH))
continue;
@@ -640,7 +682,6 @@ void mptcp_pm_nl_addr_send_ack(struct mptcp_sock *msk)
!mptcp_pm_should_rm_signal(msk))
return;
- __mptcp_flush_join_list(msk);
subflow = list_first_entry_or_null(&msk->conn_list, typeof(*subflow), node);
if (subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
@@ -710,6 +751,8 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
return;
for (i = 0; i < rm_list->nr; i++) {
+ bool removed = false;
+
list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
@@ -726,18 +769,24 @@ static void mptcp_pm_nl_rm_addr_or_subflow(struct mptcp_sock *msk,
i, rm_list->ids[i], subflow->local_id, subflow->remote_id);
spin_unlock_bh(&msk->pm.lock);
mptcp_subflow_shutdown(sk, ssk, how);
+
+ /* the following takes care of updating the subflows counter */
mptcp_close_ssk(sk, ssk, subflow);
spin_lock_bh(&msk->pm.lock);
- if (rm_type == MPTCP_MIB_RMADDR) {
- msk->pm.add_addr_accepted--;
- WRITE_ONCE(msk->pm.accept_addr, true);
- } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
- msk->pm.local_addr_used--;
- }
- msk->pm.subflows--;
+ removed = true;
__MPTCP_INC_STATS(sock_net(sk), rm_type);
}
+ __set_bit(rm_list->ids[i], msk->pm.id_avail_bitmap);
+ if (!removed)
+ continue;
+
+ if (rm_type == MPTCP_MIB_RMADDR) {
+ msk->pm.add_addr_accepted--;
+ WRITE_ONCE(msk->pm.accept_addr, true);
+ } else if (rm_type == MPTCP_MIB_RMSUBFLOW) {
+ msk->pm.local_addr_used--;
+ }
}
}
@@ -758,6 +807,9 @@ void mptcp_pm_nl_work(struct mptcp_sock *msk)
msk_owned_by_me(msk);
+ if (!(pm->status & MPTCP_PM_WORK_MASK))
+ return;
+
spin_lock_bh(&msk->pm.lock);
pr_debug("msk=%p status=%x", msk, pm->status);
@@ -803,7 +855,7 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
/* to keep the code simple, don't do IDR-like allocation for address ID,
* just bail when we exceed limits
*/
- if (pernet->next_id == MAX_ADDR_ID)
+ if (pernet->next_id == MPTCP_PM_MAX_ADDR_ID)
pernet->next_id = 1;
if (pernet->addrs >= MPTCP_PM_ADDR_MAX)
goto out;
@@ -823,16 +875,15 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
if (!entry->addr.id) {
find_next:
entry->addr.id = find_next_zero_bit(pernet->id_bitmap,
- MAX_ADDR_ID + 1,
+ MPTCP_PM_MAX_ADDR_ID + 1,
pernet->next_id);
- if ((!entry->addr.id || entry->addr.id > MAX_ADDR_ID) &&
- pernet->next_id != 1) {
+ if (!entry->addr.id && pernet->next_id != 1) {
pernet->next_id = 1;
goto find_next;
}
}
- if (!entry->addr.id || entry->addr.id > MAX_ADDR_ID)
+ if (!entry->addr.id)
goto out;
__set_bit(entry->addr.id, pernet->id_bitmap);
@@ -1191,18 +1242,6 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static struct mptcp_pm_addr_entry *
-__lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
-{
- struct mptcp_pm_addr_entry *entry;
-
- list_for_each_entry(entry, &pernet->local_addr_list, list) {
- if (entry->addr.id == id)
- return entry;
- }
- return NULL;
-}
-
int mptcp_pm_get_flags_and_ifindex_by_id(struct net *net, unsigned int id,
u8 *flags, int *ifindex)
{
@@ -1461,7 +1500,7 @@ static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
list_splice_init(&pernet->local_addr_list, &free_list);
__reset_counters(pernet);
pernet->next_id = 1;
- bitmap_zero(pernet->id_bitmap, MAX_ADDR_ID + 1);
+ bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
spin_unlock_bh(&pernet->lock);
mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
synchronize_rcu();
@@ -1571,7 +1610,7 @@ static int mptcp_nl_cmd_dump_addrs(struct sk_buff *msg,
pernet = net_generic(net, pm_nl_pernet_id);
spin_lock_bh(&pernet->lock);
- for (i = id; i < MAX_ADDR_ID + 1; i++) {
+ for (i = id; i < MPTCP_PM_MAX_ADDR_ID + 1; i++) {
if (test_bit(i, pernet->id_bitmap)) {
entry = __lookup_addr_by_id(pernet, i);
if (!entry)
@@ -1705,22 +1744,28 @@ next:
static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
{
+ struct mptcp_pm_addr_entry addr = { .addr = { .family = AF_UNSPEC }, }, *entry;
struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
struct pm_nl_pernet *pernet = genl_info_pm_nl(info);
- struct mptcp_pm_addr_entry addr, *entry;
struct net *net = sock_net(skb->sk);
- u8 bkup = 0;
+ u8 bkup = 0, lookup_by_id = 0;
int ret;
- ret = mptcp_pm_parse_addr(attr, info, true, &addr);
+ ret = mptcp_pm_parse_addr(attr, info, false, &addr);
if (ret < 0)
return ret;
if (addr.flags & MPTCP_PM_ADDR_FLAG_BACKUP)
bkup = 1;
+ if (addr.addr.family == AF_UNSPEC) {
+ lookup_by_id = 1;
+ if (!addr.addr.id)
+ return -EOPNOTSUPP;
+ }
list_for_each_entry(entry, &pernet->local_addr_list, list) {
- if (addresses_equal(&entry->addr, &addr.addr, true)) {
+ if ((!lookup_by_id && addresses_equal(&entry->addr, &addr.addr, true)) ||
+ (lookup_by_id && entry->addr.id == addr.addr.id)) {
mptcp_nl_addr_backup(net, &entry->addr, bkup);
if (bkup)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 54613f5b7521..f60f01b14fac 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -22,6 +22,7 @@
#endif
#include <net/mptcp.h>
#include <net/xfrm.h>
+#include <asm/ioctls.h>
#include "protocol.h"
#include "mib.h"
@@ -46,9 +47,10 @@ struct mptcp_skb_cb {
enum {
MPTCP_CMSG_TS = BIT(0),
+ MPTCP_CMSG_INQ = BIT(1),
};
-static struct percpu_counter mptcp_sockets_allocated;
+static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_smp;
static void __mptcp_destroy_sock(struct sock *sk);
static void __mptcp_check_send_data_fin(struct sock *sk);
@@ -738,6 +740,7 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
delta);
MPTCP_SKB_CB(skb)->offset += delta;
+ MPTCP_SKB_CB(skb)->map_seq += delta;
__skb_queue_tail(&sk->sk_receive_queue, skb);
}
msk->ack_seq = end_seq;
@@ -760,7 +763,7 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
if (!sock_owned_by_user(sk))
__mptcp_error_report(sk);
else
- set_bit(MPTCP_ERROR_REPORT, &msk->flags);
+ __set_bit(MPTCP_ERROR_REPORT, &msk->cb_flags);
}
/* If the moves have caught up with the DATA_FIN sequence number
@@ -805,47 +808,38 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
mptcp_data_unlock(sk);
}
-static bool mptcp_do_flush_join_list(struct mptcp_sock *msk)
+static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
{
- struct mptcp_subflow_context *subflow;
- bool ret = false;
+ struct sock *sk = (struct sock *)msk;
- if (likely(list_empty(&msk->join_list)))
+ if (sk->sk_state != TCP_ESTABLISHED)
return false;
- spin_lock_bh(&msk->join_list_lock);
- list_for_each_entry(subflow, &msk->join_list, node) {
- u32 sseq = READ_ONCE(subflow->setsockopt_seq);
-
- mptcp_propagate_sndbuf((struct sock *)msk, mptcp_subflow_tcp_sock(subflow));
- if (READ_ONCE(msk->setsockopt_seq) != sseq)
- ret = true;
- }
- list_splice_tail_init(&msk->join_list, &msk->conn_list);
- spin_unlock_bh(&msk->join_list_lock);
-
- return ret;
-}
-
-void __mptcp_flush_join_list(struct mptcp_sock *msk)
-{
- if (likely(!mptcp_do_flush_join_list(msk)))
- return;
+ /* attach to msk socket only after we are sure we will deal with it
+ * at close time
+ */
+ if (sk->sk_socket && !ssk->sk_socket)
+ mptcp_sock_graft(ssk, sk->sk_socket);
- if (!test_and_set_bit(MPTCP_WORK_SYNC_SETSOCKOPT, &msk->flags))
- mptcp_schedule_work((struct sock *)msk);
+ mptcp_propagate_sndbuf((struct sock *)msk, ssk);
+ mptcp_sockopt_sync_locked(msk, ssk);
+ return true;
}
-static void mptcp_flush_join_list(struct mptcp_sock *msk)
+static void __mptcp_flush_join_list(struct sock *sk)
{
- bool sync_needed = test_and_clear_bit(MPTCP_WORK_SYNC_SETSOCKOPT, &msk->flags);
-
- might_sleep();
+ struct mptcp_subflow_context *tmp, *subflow;
+ struct mptcp_sock *msk = mptcp_sk(sk);
- if (!mptcp_do_flush_join_list(msk) && !sync_needed)
- return;
+ list_for_each_entry_safe(subflow, tmp, &msk->join_list, node) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ bool slow = lock_sock_fast(ssk);
- mptcp_sockopt_sync_all(msk);
+ list_move_tail(&subflow->node, &msk->conn_list);
+ if (!__mptcp_finish_join(msk, ssk))
+ mptcp_subflow_reset(ssk);
+ unlock_sock_fast(ssk, slow);
+ }
}
static bool mptcp_timer_pending(struct sock *sk)
@@ -972,7 +966,9 @@ static void __mptcp_mem_reclaim_partial(struct sock *sk)
lockdep_assert_held_once(&sk->sk_lock.slock);
- __mptcp_rmem_reclaim(sk, reclaimable - 1);
+ if (reclaimable > SK_MEM_QUANTUM)
+ __mptcp_rmem_reclaim(sk, reclaimable - 1);
+
sk_mem_reclaim_partial(sk);
}
@@ -1369,7 +1365,7 @@ out:
struct subflow_send_info {
struct sock *ssk;
- u64 ratio;
+ u64 linger_time;
};
void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow)
@@ -1394,20 +1390,24 @@ bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
return __mptcp_subflow_active(subflow);
}
+#define SSK_MODE_ACTIVE 0
+#define SSK_MODE_BACKUP 1
+#define SSK_MODE_MAX 2
+
/* implement the mptcp packet scheduler;
* returns the subflow that will transmit the next DSS
* additionally updates the rtx timeout
*/
static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
- struct subflow_send_info send_info[2];
+ struct subflow_send_info send_info[SSK_MODE_MAX];
struct mptcp_subflow_context *subflow;
struct sock *sk = (struct sock *)msk;
+ u32 pace, burst, wmem;
int i, nr_active = 0;
struct sock *ssk;
+ u64 linger_time;
long tout = 0;
- u64 ratio;
- u32 pace;
sock_owned_by_me(sk);
@@ -1426,10 +1426,11 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
}
/* pick the subflow with the lower wmem/wspace ratio */
- for (i = 0; i < 2; ++i) {
+ for (i = 0; i < SSK_MODE_MAX; ++i) {
send_info[i].ssk = NULL;
- send_info[i].ratio = -1;
+ send_info[i].linger_time = -1;
}
+
mptcp_for_each_subflow(msk, subflow) {
trace_mptcp_subflow_get_send(subflow);
ssk = mptcp_subflow_tcp_sock(subflow);
@@ -1438,34 +1439,51 @@ static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
tout = max(tout, mptcp_timeout_from_subflow(subflow));
nr_active += !subflow->backup;
- if (!sk_stream_memory_free(subflow->tcp_sock) || !tcp_sk(ssk)->snd_wnd)
- continue;
-
- pace = READ_ONCE(ssk->sk_pacing_rate);
- if (!pace)
- continue;
+ pace = subflow->avg_pacing_rate;
+ if (unlikely(!pace)) {
+ /* init pacing rate from socket */
+ subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);
+ pace = subflow->avg_pacing_rate;
+ if (!pace)
+ continue;
+ }
- ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32,
- pace);
- if (ratio < send_info[subflow->backup].ratio) {
+ linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
+ if (linger_time < send_info[subflow->backup].linger_time) {
send_info[subflow->backup].ssk = ssk;
- send_info[subflow->backup].ratio = ratio;
+ send_info[subflow->backup].linger_time = linger_time;
}
}
__mptcp_set_timeout(sk, tout);
/* pick the best backup if no other subflow is active */
if (!nr_active)
- send_info[0].ssk = send_info[1].ssk;
-
- if (send_info[0].ssk) {
- msk->last_snd = send_info[0].ssk;
- msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
- tcp_sk(msk->last_snd)->snd_wnd);
- return msk->last_snd;
- }
+ send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;
+
+ /* According to the blest algorithm, to avoid HoL blocking for the
+ * faster flow, we need to:
+ * - estimate the faster flow linger time
+ * - use the above to estimate the amount of bytes transferred
+ * by the faster flow
+ * - check that the amount of queued data is greater than the above,
+ * otherwise do not use the picked, slower, subflow
+ * We select the subflow with the shorter estimated time to flush
+ * the queued mem, which basically ensures the above. We just need
+ * to check that the subflow has a non-empty cwin.
+ */
+ ssk = send_info[SSK_MODE_ACTIVE].ssk;
+ if (!ssk || !sk_stream_memory_free(ssk) || !tcp_sk(ssk)->snd_wnd)
+ return NULL;
- return NULL;
+ burst = min_t(int, MPTCP_SEND_BURST_SIZE, tcp_sk(ssk)->snd_wnd);
+ wmem = READ_ONCE(ssk->sk_wmem_queued);
+ subflow = mptcp_subflow_ctx(ssk);
+ subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
+ READ_ONCE(ssk->sk_pacing_rate) * burst,
+ burst + wmem);
+ msk->last_snd = ssk;
+ msk->snd_burst = burst;
+ return ssk;
}
static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
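The averaging done just before returning the picked subflow weights the stored estimate by the bytes already queued (wmem) and the instantaneous TCP pacing rate by the new burst, approximating the rate at which the queued data will actually drain. A userspace sketch of that update with made-up numbers (the formula is the one from the hunk above):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t avg_pacing_rate = 1000000;	/* previous estimate, bytes/s */
	uint64_t sk_pacing_rate = 3000000;	/* current TCP estimate */
	uint64_t wmem = 64 * 1024;		/* bytes already queued on the subflow */
	uint64_t burst = 16 * 1024;		/* new burst about to be queued */

	/* weighted mean: old average over wmem, fresh rate over burst */
	avg_pacing_rate = (avg_pacing_rate * wmem + sk_pacing_rate * burst) /
			  (burst + wmem);
	printf("new avg pacing rate: %llu bytes/s\n",
	       (unsigned long long)avg_pacing_rate);
	return 0;
}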
@@ -1499,11 +1517,10 @@ static void mptcp_update_post_push(struct mptcp_sock *msk,
msk->snd_nxt = snd_nxt_new;
}
-static void mptcp_check_and_set_pending(struct sock *sk)
+void mptcp_check_and_set_pending(struct sock *sk)
{
- if (mptcp_send_head(sk) &&
- !test_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
- set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+ if (mptcp_send_head(sk))
+ mptcp_sk(sk)->push_pending |= BIT(MPTCP_PUSH_PENDING);
}
void __mptcp_push_pending(struct sock *sk, unsigned int flags)
@@ -1524,7 +1541,6 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
int ret = 0;
prev_ssk = ssk;
- __mptcp_flush_join_list(msk);
ssk = mptcp_subflow_get_send(msk);
/* First check. If the ssk has changed since
@@ -1784,8 +1800,10 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
copied += count;
if (count < data_len) {
- if (!(flags & MSG_PEEK))
+ if (!(flags & MSG_PEEK)) {
MPTCP_SKB_CB(skb)->offset += count;
+ MPTCP_SKB_CB(skb)->map_seq += count;
+ }
break;
}
@@ -1927,7 +1945,6 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
unsigned int moved = 0;
bool ret, done;
- mptcp_flush_join_list(msk);
do {
struct sock *ssk = mptcp_subflow_recv_lookup(msk);
bool slowpath;
@@ -1965,6 +1982,27 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
return !skb_queue_empty(&msk->receive_queue);
}
+static unsigned int mptcp_inq_hint(const struct sock *sk)
+{
+ const struct mptcp_sock *msk = mptcp_sk(sk);
+ const struct sk_buff *skb;
+
+ skb = skb_peek(&msk->receive_queue);
+ if (skb) {
+ u64 hint_val = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+
+ if (hint_val >= INT_MAX)
+ return INT_MAX;
+
+ return (unsigned int)hint_val;
+ }
+
+ if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
+ return 1;
+
+ return 0;
+}
+
static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len)
{
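On the receive side this hint reaches userspace as a TCP_CM_INQ control message (see the recvmsg changes below). A hedged usage sketch, assuming fd is a connected IPPROTO_MPTCP socket and that the TCP_INQ sockopt plumbing from this same series is in place; the fallback defines cover older uapi headers.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_INQ
#define TCP_INQ 36		/* fallback for older headers */
#define TCP_CM_INQ TCP_INQ
#endif

static ssize_t read_with_inq(int fd, void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	int one = 1, inq = -1;
	ssize_t ret;

	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
	ret = recvmsg(fd, &msg, 0);

	/* the kernel attaches the inq hint as a SOL_TCP/TCP_CM_INQ cmsg */
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_TCP && cm->cmsg_type == TCP_CM_INQ)
			memcpy(&inq, CMSG_DATA(cm), sizeof(inq));

	printf("read %zd bytes, %d more queued\n", ret, inq);
	return ret;
}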
@@ -1989,6 +2027,9 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
len = min_t(size_t, len, INT_MAX);
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+ if (unlikely(msk->recvmsg_inq))
+ cmsg_flags = MPTCP_CMSG_INQ;
+
while (copied < len) {
int bytes_read;
@@ -2062,6 +2103,12 @@ out_err:
if (cmsg_flags && copied >= 0) {
if (cmsg_flags & MPTCP_CMSG_TS)
tcp_recv_timestamp(msg, sk, &tss);
+
+ if (cmsg_flags & MPTCP_CMSG_INQ) {
+ unsigned int inq = mptcp_inq_hint(sk);
+
+ put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
+ }
}
pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
@@ -2088,7 +2135,7 @@ static void mptcp_retransmit_timer(struct timer_list *t)
mptcp_schedule_work(sk);
} else {
/* delegate our work to tcp_release_cb() */
- set_bit(MPTCP_RETRANSMIT, &msk->flags);
+ __set_bit(MPTCP_RETRANSMIT, &msk->cb_flags);
}
bh_unlock_sock(sk);
sock_put(sk);
@@ -2196,6 +2243,10 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
return true;
}
+/* flags for __mptcp_close_ssk() */
+#define MPTCP_CF_PUSH BIT(1)
+#define MPTCP_CF_FASTCLOSE BIT(2)
+
/* subflow sockets can be either outgoing (connect) or incoming
* (accept).
*
@@ -2205,22 +2256,37 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
* parent socket.
*/
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
- struct mptcp_subflow_context *subflow)
+ struct mptcp_subflow_context *subflow,
+ unsigned int flags)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- bool need_push;
+ bool need_push, dispose_it;
- list_del(&subflow->node);
+ dispose_it = !msk->subflow || ssk != msk->subflow->sk;
+ if (dispose_it)
+ list_del(&subflow->node);
lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
+ if (flags & MPTCP_CF_FASTCLOSE)
+ subflow->send_fastclose = 1;
+
+ need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
+ if (!dispose_it) {
+ tcp_disconnect(ssk, 0);
+ msk->subflow->state = SS_UNCONNECTED;
+ mptcp_subflow_ctx_reset(subflow);
+ release_sock(ssk);
+
+ goto out;
+ }
+
/* if we are invoked by the msk cleanup code, the subflow is
* already orphaned
*/
if (ssk->sk_socket)
sock_orphan(ssk);
- need_push = __mptcp_retransmit_pending_data(sk);
subflow->disposable = 1;
/* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
@@ -2240,14 +2306,12 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
sock_put(ssk);
- if (ssk == msk->last_snd)
- msk->last_snd = NULL;
-
if (ssk == msk->first)
msk->first = NULL;
- if (msk->subflow && ssk == msk->subflow->sk)
- mptcp_dispose_initial_subflow(msk);
+out:
+ if (ssk == msk->last_snd)
+ msk->last_snd = NULL;
if (need_push)
__mptcp_push_pending(sk, 0);
@@ -2258,7 +2322,13 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
{
if (sk->sk_state == TCP_ESTABLISHED)
mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
- __mptcp_close_ssk(sk, ssk, subflow);
+
+ /* subflow aborted before reaching the fully_established status
+ * attempt the creation of the next subflow
+ */
+ mptcp_pm_subflow_check_next(mptcp_sk(sk), ssk, subflow);
+
+ __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
}
static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
@@ -2410,12 +2480,10 @@ static void mptcp_worker(struct work_struct *work)
goto unlock;
mptcp_check_data_fin_ack(sk);
- mptcp_flush_join_list(msk);
mptcp_check_fastclose(msk);
- if (msk->pm.status)
- mptcp_pm_nl_work(msk);
+ mptcp_pm_nl_work(msk);
if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
mptcp_check_for_eof(msk);
@@ -2449,8 +2517,6 @@ static int __mptcp_init_sock(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- spin_lock_init(&msk->join_list_lock);
-
INIT_LIST_HEAD(&msk->conn_list);
INIT_LIST_HEAD(&msk->join_list);
INIT_LIST_HEAD(&msk->rtx_queue);
@@ -2476,9 +2542,20 @@ static int __mptcp_init_sock(struct sock *sk)
return 0;
}
-static int mptcp_init_sock(struct sock *sk)
+static void mptcp_ca_reset(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
+
+ tcp_assign_congestion_control(sk);
+ strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
+
+ /* no need to keep a reference to the ops, the name will suffice */
+ tcp_cleanup_congestion_control(sk);
+ icsk->icsk_ca_ops = NULL;
+}
+
+static int mptcp_init_sock(struct sock *sk)
+{
struct net *net = sock_net(sk);
int ret;
@@ -2499,12 +2576,7 @@ static int mptcp_init_sock(struct sock *sk)
/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
* propagate the correct value
*/
- tcp_assign_congestion_control(sk);
- strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name);
-
- /* no need to keep a reference to the ops, the name will suffice */
- tcp_cleanup_congestion_control(sk);
- icsk->icsk_ca_ops = NULL;
+ mptcp_ca_reset(sk);
sk_sockets_allocated_inc(sk);
sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
@@ -2609,6 +2681,7 @@ static void __mptcp_check_send_data_fin(struct sock *sk)
* state now
*/
if (__mptcp_check_fallback(msk)) {
+ WRITE_ONCE(msk->snd_una, msk->write_seq);
if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
inet_sk_state_store(sk, TCP_CLOSE);
mptcp_close_wake_up(sk);
@@ -2617,7 +2690,6 @@ static void __mptcp_check_send_data_fin(struct sock *sk)
}
}
- mptcp_flush_join_list(msk);
mptcp_for_each_subflow(msk, subflow) {
struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
@@ -2650,21 +2722,20 @@ static void __mptcp_destroy_sock(struct sock *sk)
might_sleep();
- /* be sure to always acquire the join list lock, to sync vs
- * mptcp_finish_join().
- */
- spin_lock_bh(&msk->join_list_lock);
- list_splice_tail_init(&msk->join_list, &msk->conn_list);
- spin_unlock_bh(&msk->join_list_lock);
+ /* join list will be eventually flushed (with rst) at sock lock release time*/
list_splice_init(&msk->conn_list, &conn_list);
sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
sk_stop_timer(sk, &sk->sk_timer);
msk->pm.status = 0;
+ /* clears msk->subflow, allowing the following loop to close
+ * even the initial subflow
+ */
+ mptcp_dispose_initial_subflow(msk);
list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- __mptcp_close_ssk(sk, ssk, subflow);
+ __mptcp_close_ssk(sk, ssk, subflow, 0);
}
sk->sk_prot->destroy(sk);
@@ -2675,7 +2746,6 @@ static void __mptcp_destroy_sock(struct sock *sk)
xfrm_sk_free_policy(sk);
sk_refcnt_debug_release(sk);
- mptcp_dispose_initial_subflow(msk);
sock_put(sk);
}
@@ -2711,6 +2781,9 @@ cleanup:
sock_hold(sk);
pr_debug("msk=%p state=%d", sk, sk->sk_state);
+ if (mptcp_sk(sk)->token)
+ mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
+
if (sk->sk_state == TCP_CLOSE) {
__mptcp_destroy_sock(sk);
do_cancel_work = true;
@@ -2721,9 +2794,6 @@ cleanup:
if (do_cancel_work)
mptcp_cancel_work(sk);
- if (mptcp_sk(sk)->token)
- mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
-
sock_put(sk);
}
@@ -2755,15 +2825,38 @@ static int mptcp_disconnect(struct sock *sk, int flags)
struct mptcp_subflow_context *subflow;
struct mptcp_sock *msk = mptcp_sk(sk);
- mptcp_do_flush_join_list(msk);
+ inet_sk_state_store(sk, TCP_CLOSE);
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- lock_sock(ssk);
- tcp_disconnect(ssk, flags);
- release_sock(ssk);
+ __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_FASTCLOSE);
}
+
+ sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
+ sk_stop_timer(sk, &sk->sk_timer);
+
+ if (mptcp_sk(sk)->token)
+ mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
+
+ mptcp_destroy_common(msk);
+ msk->last_snd = NULL;
+ WRITE_ONCE(msk->flags, 0);
+ msk->cb_flags = 0;
+ msk->push_pending = 0;
+ msk->recovery = false;
+ msk->can_ack = false;
+ msk->fully_established = false;
+ msk->rcv_data_fin = false;
+ msk->snd_data_fin_enable = false;
+ msk->rcv_fastclose = false;
+ msk->use_64bit_ack = false;
+ WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
+ mptcp_pm_data_reset(msk);
+ mptcp_ca_reset(sk);
+
+ sk->sk_shutdown = 0;
+ sk_error_report(sk);
return 0;
}
@@ -2903,9 +2996,11 @@ void mptcp_destroy_common(struct mptcp_sock *msk)
__mptcp_clear_xmit(sk);
/* move to sk_receive_queue, sk_stream_kill_queues will purge it */
+ mptcp_data_lock(sk);
skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
__skb_queue_purge(&sk->sk_receive_queue);
skb_rbtree_purge(&msk->out_of_order_queue);
+ mptcp_data_unlock(sk);
/* move all the rx fwd alloc into the sk_mem_reclaim_final in
* inet_sock_destruct() will dispose it
@@ -2929,7 +3024,7 @@ void __mptcp_data_acked(struct sock *sk)
if (!sock_owned_by_user(sk))
__mptcp_clean_una(sk);
else
- set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags);
+ __set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->cb_flags);
if (mptcp_pending_data_fin_ack(sk))
mptcp_schedule_work(sk);
@@ -2948,20 +3043,23 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
else if (xmit_ssk)
mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
} else {
- set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+ __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
}
}
+#define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
+ BIT(MPTCP_RETRANSMIT) | \
+ BIT(MPTCP_FLUSH_JOIN_LIST))
+
/* processes deferred events and flush wmem */
static void mptcp_release_cb(struct sock *sk)
+ __must_hold(&sk->sk_lock.slock)
{
- for (;;) {
- unsigned long flags = 0;
+ struct mptcp_sock *msk = mptcp_sk(sk);
- if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
- flags |= BIT(MPTCP_PUSH_PENDING);
- if (test_and_clear_bit(MPTCP_RETRANSMIT, &mptcp_sk(sk)->flags))
- flags |= BIT(MPTCP_RETRANSMIT);
+ for (;;) {
+ unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED) |
+ msk->push_pending;
if (!flags)
break;
@@ -2972,8 +3070,11 @@ static void mptcp_release_cb(struct sock *sk)
* datapath acquires the msk socket spinlock while helding
* the subflow socket lock
*/
-
+ msk->push_pending = 0;
+ msk->cb_flags &= ~flags;
spin_unlock_bh(&sk->sk_lock.slock);
+ if (flags & BIT(MPTCP_FLUSH_JOIN_LIST))
+ __mptcp_flush_join_list(sk);
if (flags & BIT(MPTCP_PUSH_PENDING))
__mptcp_push_pending(sk, 0);
if (flags & BIT(MPTCP_RETRANSMIT))
@@ -2986,11 +3087,11 @@ static void mptcp_release_cb(struct sock *sk)
/* be sure to set the current sk state before tacking actions
* depending on sk_state
*/
- if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags))
+ if (__test_and_clear_bit(MPTCP_CONNECTED, &msk->cb_flags))
__mptcp_set_connected(sk);
- if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
+ if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
__mptcp_clean_una_wakeup(sk);
- if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
+ if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
__mptcp_error_report(sk);
__mptcp_update_rmem(sk);
@@ -3032,7 +3133,7 @@ void mptcp_subflow_process_delegated(struct sock *ssk)
if (!sock_owned_by_user(sk))
__mptcp_subflow_push_pending(sk, ssk);
else
- set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+ __set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
mptcp_data_unlock(sk);
mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
}
@@ -3118,8 +3219,7 @@ bool mptcp_finish_join(struct sock *ssk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
struct sock *parent = (void *)msk;
- struct socket *parent_sock;
- bool ret;
+ bool ret = true;
pr_debug("msk=%p, subflow=%p", msk, subflow);
@@ -3132,35 +3232,38 @@ bool mptcp_finish_join(struct sock *ssk)
if (!msk->pm.server_side)
goto out;
- if (!mptcp_pm_allow_new_subflow(msk)) {
- subflow->reset_reason = MPTCP_RST_EPROHIBIT;
- return false;
- }
+ if (!mptcp_pm_allow_new_subflow(msk))
+ goto err_prohibited;
+
+ if (WARN_ON_ONCE(!list_empty(&subflow->node)))
+ goto err_prohibited;
- /* active connections are already on conn_list, and we can't acquire
- * msk lock here.
- * use the join list lock as synchronization point and double-check
- * msk status to avoid racing with __mptcp_destroy_sock()
+ /* active connections are already on conn_list.
+ * If we can't acquire msk socket lock here, let the release callback
+ * handle it
*/
- spin_lock_bh(&msk->join_list_lock);
- ret = inet_sk_state_load(parent) == TCP_ESTABLISHED;
- if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) {
- list_add_tail(&subflow->node, &msk->join_list);
+ mptcp_data_lock(parent);
+ if (!sock_owned_by_user(parent)) {
+ ret = __mptcp_finish_join(msk, ssk);
+ if (ret) {
+ sock_hold(ssk);
+ list_add_tail(&subflow->node, &msk->conn_list);
+ }
+ } else {
sock_hold(ssk);
+ list_add_tail(&subflow->node, &msk->join_list);
+ __set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
}
- spin_unlock_bh(&msk->join_list_lock);
+ mptcp_data_unlock(parent);
+
if (!ret) {
+err_prohibited:
subflow->reset_reason = MPTCP_RST_EPROHIBIT;
return false;
}
- /* attach to msk socket only after we are sure he will deal with us
- * at close time
- */
- parent_sock = READ_ONCE(parent->sk_socket);
- if (parent_sock && !ssk->sk_socket)
- mptcp_sock_graft(ssk, parent_sock);
subflow->map_seq = READ_ONCE(msk->ack_seq);
+
out:
mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
return true;
@@ -3179,6 +3282,57 @@ static int mptcp_forward_alloc_get(const struct sock *sk)
return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc;
}
+static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
+{
+ const struct sock *sk = (void *)msk;
+ u64 delta;
+
+ if (sk->sk_state == TCP_LISTEN)
+ return -EINVAL;
+
+ if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+ return 0;
+
+ delta = msk->write_seq - v;
+ if (delta > INT_MAX)
+ delta = INT_MAX;
+
+ return (int)delta;
+}
+
+static int mptcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
+{
+ struct mptcp_sock *msk = mptcp_sk(sk);
+ bool slow;
+ int answ;
+
+ switch (cmd) {
+ case SIOCINQ:
+ if (sk->sk_state == TCP_LISTEN)
+ return -EINVAL;
+
+ lock_sock(sk);
+ __mptcp_move_skbs(msk);
+ answ = mptcp_inq_hint(sk);
+ release_sock(sk);
+ break;
+ case SIOCOUTQ:
+ slow = lock_sock_fast(sk);
+ answ = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una));
+ unlock_sock_fast(sk, slow);
+ break;
+ case SIOCOUTQNSD:
+ slow = lock_sock_fast(sk);
+ answ = mptcp_ioctl_outq(msk, msk->snd_nxt);
+ unlock_sock_fast(sk, slow);
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return put_user(answ, (int __user *)arg);
+}
+
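For readers unfamiliar with these ioctls, here is a minimal userspace sketch (not part of the patch) of how the new handler is exercised; IPPROTO_MPTCP and the connect()/data-exchange steps are assumptions and elided:

    /* Query MPTCP receive/send queue sizes via the new .ioctl handler.
     * Illustrative only: error handling and connection setup are omitted.
     */
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/sockios.h>

    #ifndef IPPROTO_MPTCP
    #define IPPROTO_MPTCP 262
    #endif

    int main(void)
    {
    	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
    	int inq = 0, outq = 0;

    	if (fd < 0)
    		return 1;
    	/* ... connect() and exchange some data ... */
    	ioctl(fd, SIOCINQ, &inq);   /* bytes ready for recvmsg() at the MPTCP level */
    	ioctl(fd, SIOCOUTQ, &outq); /* bytes sent but not yet acked (snd_una based) */
    	printf("inq=%d outq=%d\n", inq, outq);
    	return 0;
    }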
static struct proto mptcp_prot = {
.name = "MPTCP",
.owner = THIS_MODULE,
@@ -3191,6 +3345,7 @@ static struct proto mptcp_prot = {
.shutdown = mptcp_shutdown,
.destroy = mptcp_destroy,
.sendmsg = mptcp_sendmsg,
+ .ioctl = mptcp_ioctl,
.recvmsg = mptcp_recvmsg,
.release_cb = mptcp_release_cb,
.hash = mptcp_hash,
@@ -3243,9 +3398,20 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
struct mptcp_sock *msk = mptcp_sk(sock->sk);
struct mptcp_subflow_context *subflow;
struct socket *ssock;
- int err;
+ int err = -EINVAL;
lock_sock(sock->sk);
+ if (uaddr) {
+ if (addr_len < sizeof(uaddr->sa_family))
+ goto unlock;
+
+ if (uaddr->sa_family == AF_UNSPEC) {
+ err = mptcp_disconnect(sock->sk, flags);
+ sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
+ goto unlock;
+ }
+ }
+
if (sock->state != SS_UNCONNECTED && msk->subflow) {
/* pending connection or invalid state, let existing subflow
* cope with that
@@ -3255,10 +3421,8 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
}
ssock = __mptcp_nmpc_socket(msk);
- if (!ssock) {
- err = -EINVAL;
+ if (!ssock)
goto unlock;
- }
mptcp_token_destroy(msk);
inet_sk_state_store(sock->sk, TCP_SYN_SENT);
@@ -3332,17 +3496,9 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
pr_debug("msk=%p", msk);
- lock_sock(sock->sk);
- if (sock->sk->sk_state != TCP_LISTEN)
- goto unlock_fail;
-
ssock = __mptcp_nmpc_socket(msk);
if (!ssock)
- goto unlock_fail;
-
- clear_bit(MPTCP_DATA_READY, &msk->flags);
- sock_hold(ssock->sk);
- release_sock(sock->sk);
+ return -EINVAL;
err = ssock->ops->accept(sock, newsock, flags, kern);
if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
@@ -3372,7 +3528,6 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
/* set ssk->sk_socket of accept()ed flows to mptcp socket.
* This is needed so NOSPACE flag can be set from tcp stack.
*/
- mptcp_flush_join_list(msk);
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
@@ -3382,14 +3537,7 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
release_sock(newsk);
}
- if (inet_csk_listen_poll(ssock->sk))
- set_bit(MPTCP_DATA_READY, &msk->flags);
- sock_put(ssock->sk);
return err;
-
-unlock_fail:
- release_sock(sock->sk);
- return -EINVAL;
}
static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
@@ -3435,8 +3583,12 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
state = inet_sk_state_load(sk);
pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
- if (state == TCP_LISTEN)
- return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : 0;
+ if (state == TCP_LISTEN) {
+ if (WARN_ON_ONCE(!msk->subflow || !msk->subflow->sk))
+ return 0;
+
+ return inet_csk_listen_poll(msk->subflow->sk);
+ }
if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
mask |= mptcp_check_readable(msk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index d87cc040352e..0e6b42c76ea0 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -110,19 +110,20 @@
/* MPTCP TCPRST flags */
#define MPTCP_RST_TRANSIENT BIT(0)
-/* MPTCP socket flags */
-#define MPTCP_DATA_READY 0
+/* MPTCP socket atomic flags */
#define MPTCP_NOSPACE 1
#define MPTCP_WORK_RTX 2
#define MPTCP_WORK_EOF 3
#define MPTCP_FALLBACK_DONE 4
#define MPTCP_WORK_CLOSE_SUBFLOW 5
-#define MPTCP_PUSH_PENDING 6
-#define MPTCP_CLEAN_UNA 7
-#define MPTCP_ERROR_REPORT 8
-#define MPTCP_RETRANSMIT 9
-#define MPTCP_WORK_SYNC_SETSOCKOPT 10
-#define MPTCP_CONNECTED 11
+
+/* MPTCP socket release cb flags */
+#define MPTCP_PUSH_PENDING 1
+#define MPTCP_CLEAN_UNA 2
+#define MPTCP_ERROR_REPORT 3
+#define MPTCP_RETRANSMIT 4
+#define MPTCP_FLUSH_JOIN_LIST 5
+#define MPTCP_CONNECTED 6
static inline bool before64(__u64 seq1, __u64 seq2)
{
@@ -174,16 +175,25 @@ enum mptcp_pm_status {
MPTCP_PM_ADD_ADDR_SEND_ACK,
MPTCP_PM_RM_ADDR_RECEIVED,
MPTCP_PM_ESTABLISHED,
- MPTCP_PM_ALREADY_ESTABLISHED, /* persistent status, set after ESTABLISHED event */
MPTCP_PM_SUBFLOW_ESTABLISHED,
+ MPTCP_PM_ALREADY_ESTABLISHED, /* persistent status, set after ESTABLISHED event */
+ MPTCP_PM_MPC_ENDPOINT_ACCOUNTED /* persistent status, set after MPC local address is
+ * accounted in the id_avail_bitmap
+ */
};
+/* Status bits below MPTCP_PM_ALREADY_ESTABLISHED need pm worker actions */
+#define MPTCP_PM_WORK_MASK ((1 << MPTCP_PM_ALREADY_ESTABLISHED) - 1)
+
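A quick worked example of the mask above (enum value assumed for illustration only):

    /* If MPTCP_PM_ALREADY_ESTABLISHED happened to evaluate to 5, then
     * MPTCP_PM_WORK_MASK == (1 << 5) - 1 == 0x1f, i.e. only status bits
     * 0-4 (the non-persistent ones) cause the PM worker to be scheduled.
     */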
enum mptcp_addr_signal_status {
MPTCP_ADD_ADDR_SIGNAL,
MPTCP_ADD_ADDR_ECHO,
MPTCP_RM_ADDR_SIGNAL,
};
+/* max value of mptcp_addr_info.id */
+#define MPTCP_PM_MAX_ADDR_ID U8_MAX
+
struct mptcp_pm_data {
struct mptcp_addr_info local;
struct mptcp_addr_info remote;
@@ -202,6 +212,7 @@ struct mptcp_pm_data {
u8 local_addr_used;
u8 subflows;
u8 status;
+ DECLARE_BITMAP(id_avail_bitmap, MPTCP_PM_MAX_ADDR_ID + 1);
struct mptcp_rm_list rm_list_tx;
struct mptcp_rm_list rm_list_rx;
};
@@ -241,6 +252,8 @@ struct mptcp_sock {
u32 token;
int rmem_released;
unsigned long flags;
+ unsigned long cb_flags;
+ unsigned long push_pending;
bool recovery; /* closing subflow write queue reinjected */
bool can_ack;
bool fully_established;
@@ -249,7 +262,9 @@ struct mptcp_sock {
bool rcv_fastclose;
bool use_64bit_ack; /* Set when we received a 64-bit DSN */
bool csum_enabled;
- spinlock_t join_list_lock;
+ u8 recvmsg_inq:1,
+ cork:1,
+ nodelay:1;
struct work_struct work;
struct sk_buff *ooo_last_skb;
struct rb_root out_of_order_queue;
@@ -392,6 +407,10 @@ DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
/* MPTCP subflow context */
struct mptcp_subflow_context {
struct list_head node;/* conn_list of subflows */
+
+ char reset_start[0];
+
+ unsigned long avg_pacing_rate; /* protected by msk socket lock */
u64 local_key;
u64 remote_key;
u64 idsn;
@@ -419,6 +438,7 @@ struct mptcp_subflow_context {
backup : 1,
send_mp_prio : 1,
send_mp_fail : 1,
+ send_fastclose : 1,
rx_eof : 1,
can_ack : 1, /* only after processing the remote key */
disposable : 1, /* ctx can be free at ulp release time */
@@ -437,6 +457,9 @@ struct mptcp_subflow_context {
u8 stale_count;
long delegated_status;
+
+ char reset_end[0];
+
struct list_head delegated_node; /* link into delegated_action, protected by local BH */
u32 setsockopt_seq;
@@ -468,6 +491,13 @@ mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow)
return subflow->tcp_sock;
}
+static inline void
+mptcp_subflow_ctx_reset(struct mptcp_subflow_context *subflow)
+{
+ memset(subflow->reset_start, 0, subflow->reset_end - subflow->reset_start);
+ subflow->request_mptcp = 1;
+}
+
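The reset_start[0]/reset_end[0] markers added above delimit the slice of the context that mptcp_subflow_ctx_reset() wipes. A standalone sketch of the same idiom (struct and field names assumed, GNU zero-length arrays):

    #include <string.h>

    struct demo_ctx {
    	long keep_before;	/* placed before reset_start: survives a reset */
    	char reset_start[0];
    	int scratch_a;		/* zeroed by demo_ctx_reset() */
    	int scratch_b;		/* zeroed by demo_ctx_reset() */
    	char reset_end[0];
    	long keep_after;	/* placed after reset_end: survives a reset */
    };

    static void demo_ctx_reset(struct demo_ctx *ctx)
    {
    	/* wipe only the region between the two zero-sized markers */
    	memset(ctx->reset_start, 0, ctx->reset_end - ctx->reset_start);
    }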
static inline u64
mptcp_subflow_get_map_offset(const struct mptcp_subflow_context *subflow)
{
@@ -482,15 +512,6 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
}
-static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
- struct mptcp_subflow_context *subflow)
-{
- sock_hold(mptcp_subflow_tcp_sock(subflow));
- spin_lock_bh(&msk->join_list_lock);
- list_add_tail(&subflow->node, &msk->join_list);
- spin_unlock_bh(&msk->join_list_lock);
-}
-
void mptcp_subflow_process_delegated(struct sock *ssk);
static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action)
@@ -554,6 +575,7 @@ unsigned int mptcp_stale_loss_cnt(const struct net *net);
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
struct mptcp_options_received *mp_opt);
bool __mptcp_retransmit_pending_data(struct sock *sk);
+void mptcp_check_and_set_pending(struct sock *sk);
void __mptcp_push_pending(struct sock *sk, unsigned int flags);
bool mptcp_subflow_data_available(struct sock *sk);
void __init mptcp_subflow_init(void);
@@ -654,7 +676,6 @@ void __mptcp_data_acked(struct sock *sk);
void __mptcp_error_report(struct sock *sk);
void mptcp_subflow_eof(struct sock *sk);
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit);
-void __mptcp_flush_join_list(struct mptcp_sock *msk);
static inline bool mptcp_data_fin_enabled(const struct mptcp_sock *msk)
{
return READ_ONCE(msk->snd_data_fin_enable) &&
@@ -704,9 +725,11 @@ void mptcp_token_destroy(struct mptcp_sock *msk);
void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn);
void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);
+u16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum);
void __init mptcp_pm_init(void);
void mptcp_pm_data_init(struct mptcp_sock *msk);
+void mptcp_pm_data_reset(struct mptcp_sock *msk);
void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
@@ -714,7 +737,9 @@ void mptcp_pm_fully_established(struct mptcp_sock *msk, const struct sock *ssk,
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk);
void mptcp_pm_connection_closed(struct mptcp_sock *msk);
void mptcp_pm_subflow_established(struct mptcp_sock *msk);
-void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id);
+bool mptcp_pm_nl_check_work_pending(struct mptcp_sock *msk);
+void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
+ const struct mptcp_subflow_context *subflow);
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr);
void mptcp_pm_add_addr_echoed(struct mptcp_sock *msk,
@@ -811,7 +836,7 @@ unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk);
unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk);
void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk);
-void mptcp_sockopt_sync_all(struct mptcp_sock *msk);
+void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
static inline struct mptcp_ext *mptcp_get_ext(const struct sk_buff *skb)
{
diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c
index f8efd478ac97..dacf3cee0027 100644
--- a/net/mptcp/sockopt.c
+++ b/net/mptcp/sockopt.c
@@ -390,6 +390,8 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
switch (optname) {
case IPV6_V6ONLY:
+ case IPV6_TRANSPARENT:
+ case IPV6_FREEBIND:
lock_sock(sk);
ssock = __mptcp_nmpc_socket(msk);
if (!ssock) {
@@ -398,8 +400,24 @@ static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
}
ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
- if (ret == 0)
+ if (ret != 0) {
+ release_sock(sk);
+ return ret;
+ }
+
+ sockopt_seq_inc(msk);
+
+ switch (optname) {
+ case IPV6_V6ONLY:
sk->sk_ipv6only = ssock->sk->sk_ipv6only;
+ break;
+ case IPV6_TRANSPARENT:
+ inet_sk(sk)->transparent = inet_sk(ssock->sk)->transparent;
+ break;
+ case IPV6_FREEBIND:
+ inet_sk(sk)->freebind = inet_sk(ssock->sk)->freebind;
+ break;
+ }
release_sock(sk);
break;
@@ -538,6 +556,7 @@ static bool mptcp_supported_sockopt(int level, int optname)
case TCP_TIMESTAMP:
case TCP_NOTSENT_LOWAT:
case TCP_TX_DELAY:
+ case TCP_INQ:
return true;
}
@@ -549,7 +568,6 @@ static bool mptcp_supported_sockopt(int level, int optname)
/* TCP_FASTOPEN_KEY, TCP_FASTOPEN, TCP_FASTOPEN_CONNECT and TCP_FASTOPEN_NO_COOKIE
* are not supported; fastopen is currently unsupported
*/
- /* TCP_INQ is currently unsupported, needs some recvmsg work */
}
return false;
}
@@ -597,14 +615,171 @@ static int mptcp_setsockopt_sol_tcp_congestion(struct mptcp_sock *msk, sockptr_t
return ret;
}
+static int mptcp_setsockopt_sol_tcp_cork(struct mptcp_sock *msk, sockptr_t optval,
+ unsigned int optlen)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+ int val;
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&val, optval, sizeof(val)))
+ return -EFAULT;
+
+ lock_sock(sk);
+ sockopt_seq_inc(msk);
+ msk->cork = !!val;
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ lock_sock(ssk);
+ __tcp_sock_set_cork(ssk, !!val);
+ release_sock(ssk);
+ }
+ if (!val)
+ mptcp_check_and_set_pending(sk);
+ release_sock(sk);
+
+ return 0;
+}
+
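A hedged userspace view of the handler above (illustrative only, not part of the patch): TCP_CORK set on the MPTCP socket is mirrored onto every subflow, so the usual cork/write/uncork batching pattern carries over:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>
    #include <unistd.h>

    static void send_corked(int fd, const char *hdr, size_t hlen,
    			const char *body, size_t blen)
    {
    	int on = 1, off = 0;

    	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
    	write(fd, hdr, hlen);
    	write(fd, body, blen);
    	/* uncork: queued data gets pushed out on the subflows */
    	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
    }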
+static int mptcp_setsockopt_sol_tcp_nodelay(struct mptcp_sock *msk, sockptr_t optval,
+ unsigned int optlen)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+ int val;
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&val, optval, sizeof(val)))
+ return -EFAULT;
+
+ lock_sock(sk);
+ sockopt_seq_inc(msk);
+ msk->nodelay = !!val;
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ lock_sock(ssk);
+ __tcp_sock_set_nodelay(ssk, !!val);
+ release_sock(ssk);
+ }
+ if (val)
+ mptcp_check_and_set_pending(sk);
+ release_sock(sk);
+
+ return 0;
+}
+
+static int mptcp_setsockopt_sol_ip_set_transparent(struct mptcp_sock *msk, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct sock *sk = (struct sock *)msk;
+ struct inet_sock *issk;
+ struct socket *ssock;
+ int err;
+
+ err = ip_setsockopt(sk, SOL_IP, optname, optval, optlen);
+ if (err != 0)
+ return err;
+
+ lock_sock(sk);
+
+ ssock = __mptcp_nmpc_socket(msk);
+ if (!ssock) {
+ release_sock(sk);
+ return -EINVAL;
+ }
+
+ issk = inet_sk(ssock->sk);
+
+ switch (optname) {
+ case IP_FREEBIND:
+ issk->freebind = inet_sk(sk)->freebind;
+ break;
+ case IP_TRANSPARENT:
+ issk->transparent = inet_sk(sk)->transparent;
+ break;
+ default:
+ release_sock(sk);
+ WARN_ON_ONCE(1);
+ return -EOPNOTSUPP;
+ }
+
+ sockopt_seq_inc(msk);
+ release_sock(sk);
+ return 0;
+}
+
+static int mptcp_setsockopt_v4_set_tos(struct mptcp_sock *msk, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+ int err, val;
+
+ err = ip_setsockopt(sk, SOL_IP, optname, optval, optlen);
+
+ if (err != 0)
+ return err;
+
+ lock_sock(sk);
+ sockopt_seq_inc(msk);
+ val = inet_sk(sk)->tos;
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ __ip_sock_set_tos(ssk, val);
+ }
+ release_sock(sk);
+
+ return err;
+}
+
+static int mptcp_setsockopt_v4(struct mptcp_sock *msk, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ switch (optname) {
+ case IP_FREEBIND:
+ case IP_TRANSPARENT:
+ return mptcp_setsockopt_sol_ip_set_transparent(msk, optname, optval, optlen);
+ case IP_TOS:
+ return mptcp_setsockopt_v4_set_tos(msk, optname, optval, optlen);
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int mptcp_setsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
sockptr_t optval, unsigned int optlen)
{
+ struct sock *sk = (void *)msk;
+ int ret, val;
+
switch (optname) {
+ case TCP_INQ:
+ ret = mptcp_get_int_option(msk, optval, optlen, &val);
+ if (ret)
+ return ret;
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ lock_sock(sk);
+ msk->recvmsg_inq = !!val;
+ release_sock(sk);
+ return 0;
case TCP_ULP:
return -EOPNOTSUPP;
case TCP_CONGESTION:
return mptcp_setsockopt_sol_tcp_congestion(msk, optval, optlen);
+ case TCP_CORK:
+ return mptcp_setsockopt_sol_tcp_cork(msk, optval, optlen);
+ case TCP_NODELAY:
+ return mptcp_setsockopt_sol_tcp_nodelay(msk, optval, optlen);
}
return -EOPNOTSUPP;
@@ -636,6 +811,9 @@ int mptcp_setsockopt(struct sock *sk, int level, int optname,
if (ssk)
return tcp_setsockopt(ssk, level, optname, optval, optlen);
+ if (level == SOL_IP)
+ return mptcp_setsockopt_v4(msk, optname, optval, optlen);
+
if (level == SOL_IPV6)
return mptcp_setsockopt_v6(msk, optname, optval, optlen);
@@ -931,6 +1109,35 @@ static int mptcp_getsockopt_subflow_addrs(struct mptcp_sock *msk, char __user *o
return 0;
}
+static int mptcp_put_int_option(struct mptcp_sock *msk, char __user *optval,
+ int __user *optlen, int val)
+{
+ int len;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+ if (len < 0)
+ return -EINVAL;
+
+ if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
+ unsigned char ucval = (unsigned char)val;
+
+ len = 1;
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &ucval, 1))
+ return -EFAULT;
+ } else {
+ len = min_t(unsigned int, len, sizeof(int));
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
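A short note on what the helper above buys userspace, with a hedged sketch (illustrative only): as with plain TCP, a caller may pass a 1-byte buffer for small boolean-ish options and still get a sensible answer:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static int get_nodelay_byte(int fd)
    {
    	unsigned char val = 0;
    	socklen_t len = sizeof(val);	/* 1 byte: the kernel truncates the int */

    	if (getsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &val, &len) < 0)
    		return -1;
    	return val;	/* 0 or 1 */
    }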
static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
char __user *optval, int __user *optlen)
{
@@ -941,7 +1148,26 @@ static int mptcp_getsockopt_sol_tcp(struct mptcp_sock *msk, int optname,
case TCP_CC_INFO:
return mptcp_getsockopt_first_sf_only(msk, SOL_TCP, optname,
optval, optlen);
+ case TCP_INQ:
+ return mptcp_put_int_option(msk, optval, optlen, msk->recvmsg_inq);
+ case TCP_CORK:
+ return mptcp_put_int_option(msk, optval, optlen, msk->cork);
+ case TCP_NODELAY:
+ return mptcp_put_int_option(msk, optval, optlen, msk->nodelay);
+ }
+ return -EOPNOTSUPP;
+}
+
+static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = (void *)msk;
+
+ switch (optname) {
+ case IP_TOS:
+ return mptcp_put_int_option(msk, optval, optlen, inet_sk(sk)->tos);
}
+
return -EOPNOTSUPP;
}
@@ -980,6 +1206,8 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname,
if (ssk)
return tcp_getsockopt(ssk, level, optname, optval, option);
+ if (level == SOL_IP)
+ return mptcp_getsockopt_v4(msk, optname, optval, option);
if (level == SOL_TCP)
return mptcp_getsockopt_sol_tcp(msk, optname, optval, option);
if (level == SOL_MPTCP)
@@ -1002,6 +1230,7 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
ssk->sk_priority = sk->sk_priority;
ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
+ __ip_sock_set_tos(ssk, inet_sk(sk)->tos);
if (sk->sk_userlocks & tx_rx_locks) {
ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks;
@@ -1027,6 +1256,11 @@ static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops)
tcp_set_congestion_control(ssk, msk->ca_name, false, true);
+ __tcp_sock_set_cork(ssk, !!msk->cork);
+ __tcp_sock_set_nodelay(ssk, !!msk->nodelay);
+
+ inet_sk(ssk)->transparent = inet_sk(sk)->transparent;
+ inet_sk(ssk)->freebind = inet_sk(sk)->freebind;
}
static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
@@ -1051,27 +1285,15 @@ void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
}
}
-void mptcp_sockopt_sync_all(struct mptcp_sock *msk)
+void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk)
{
- struct mptcp_subflow_context *subflow;
- struct sock *sk = (struct sock *)msk;
- u32 seq;
-
- seq = sockopt_seq_reset(sk);
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
- mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- u32 sseq = READ_ONCE(subflow->setsockopt_seq);
+ msk_owned_by_me(msk);
- if (sseq != msk->setsockopt_seq) {
- __mptcp_sockopt_sync(msk, ssk);
- WRITE_ONCE(subflow->setsockopt_seq, seq);
- } else if (sseq != seq) {
- WRITE_ONCE(subflow->setsockopt_seq, seq);
- }
+ if (READ_ONCE(subflow->setsockopt_seq) != msk->setsockopt_seq) {
+ sync_socket_options(msk, ssk);
- cond_resched();
+ subflow->setsockopt_seq = msk->setsockopt_seq;
}
-
- msk->setsockopt_seq = seq;
}
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 6172f380dfb7..bea47a1180dc 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -388,7 +388,7 @@ static void mptcp_set_connected(struct sock *sk)
if (!sock_owned_by_user(sk))
__mptcp_set_connected(sk);
else
- set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags);
+ __set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->cb_flags);
mptcp_data_unlock(sk);
}
@@ -845,9 +845,8 @@ static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *
bool csum_reqd)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
- struct csum_pseudo_header header;
u32 offset, seq, delta;
- __wsum csum;
+ u16 csum;
int len;
if (!csum_reqd)
@@ -908,13 +907,11 @@ static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *
* while the pseudo header requires the original DSS data len,
* including that
*/
- header.data_seq = cpu_to_be64(subflow->map_seq);
- header.subflow_seq = htonl(subflow->map_subflow_seq);
- header.data_len = htons(subflow->map_data_len + subflow->map_data_fin);
- header.csum = 0;
-
- csum = csum_partial(&header, sizeof(header), subflow->map_data_csum);
- if (unlikely(csum_fold(csum))) {
+ csum = __mptcp_make_csum(subflow->map_seq,
+ subflow->map_subflow_seq,
+ subflow->map_data_len + subflow->map_data_fin,
+ subflow->map_data_csum);
+ if (unlikely(csum)) {
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
subflow->send_mp_fail = 1;
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPFAILTX);
@@ -1274,7 +1271,7 @@ static void subflow_error_report(struct sock *ssk)
if (!sock_owned_by_user(sk))
__mptcp_error_report(sk);
else
- set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags);
+ __set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags);
mptcp_data_unlock(sk);
}
@@ -1293,7 +1290,6 @@ static void subflow_data_ready(struct sock *sk)
if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
return;
- set_bit(MPTCP_DATA_READY, &msk->flags);
parent->sk_data_ready(parent);
return;
}
@@ -1425,6 +1421,8 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
if (addr.ss_family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
#endif
+ mptcp_sockopt_sync(msk, ssk);
+
ssk->sk_bound_dev_if = ifindex;
err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
if (err)
@@ -1440,8 +1438,8 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
- mptcp_add_pending_subflow(msk, subflow);
- mptcp_sockopt_sync(msk, ssk);
+ sock_hold(ssk);
+ list_add_tail(&subflow->node, &msk->conn_list);
err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
if (err && err != -EINPROGRESS)
goto failed_unlink;
@@ -1452,9 +1450,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
return err;
failed_unlink:
- spin_lock_bh(&msk->join_list_lock);
list_del(&subflow->node);
- spin_unlock_bh(&msk->join_list_lock);
sock_put(mptcp_subflow_tcp_sock(subflow));
failed:
@@ -1533,10 +1529,8 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
* needs it.
*/
sf->sk->sk_net_refcnt = 1;
- get_net(net);
-#ifdef CONFIG_PROC_FS
- this_cpu_add(*net->core.sock_inuse, 1);
-#endif
+ get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
+ sock_inuse_add(net, 1);
err = tcp_set_ulp(sf->sk, "mptcp");
release_sock(sf->sk);
diff --git a/net/mptcp/token.c b/net/mptcp/token.c
index e581b341c5be..f52ee7b26aed 100644
--- a/net/mptcp/token.c
+++ b/net/mptcp/token.c
@@ -384,6 +384,7 @@ void mptcp_token_destroy(struct mptcp_sock *msk)
bucket->chain_len--;
}
spin_unlock_bh(&bucket->lock);
+ WRITE_ONCE(msk->token, 0);
}
void __init mptcp_token_init(void)
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 3646fc195e7d..ddc54b6d18ee 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -515,12 +515,6 @@ config NFT_FLOW_OFFLOAD
This option adds the "flow_offload" expression that you can use to
choose what flows are placed into the hardware.
-config NFT_COUNTER
- tristate "Netfilter nf_tables counter module"
- help
- This option adds the "counter" expression that you can use to
- include packet and byte counters in a rule.
-
config NFT_CONNLIMIT
tristate "Netfilter nf_tables connlimit module"
depends on NF_CONNTRACK
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index aab20e575ecd..a135b1a46014 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -75,7 +75,7 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
nf_tables_trace.o nft_immediate.o nft_cmp.o nft_range.o \
nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o nft_last.o \
- nft_chain_route.o nf_tables_offload.o \
+ nft_counter.o nft_chain_route.o nf_tables_offload.o \
nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o \
nft_set_pipapo.o
@@ -100,7 +100,6 @@ obj-$(CONFIG_NFT_REJECT) += nft_reject.o
obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
obj-$(CONFIG_NFT_REJECT_NETDEV) += nft_reject_netdev.o
obj-$(CONFIG_NFT_TUNNEL) += nft_tunnel.o
-obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
obj-$(CONFIG_NFT_LOG) += nft_log.o
obj-$(CONFIG_NFT_MASQ) += nft_masq.o
obj-$(CONFIG_NFT_REDIR) += nft_redir.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 6dec9cd395f1..354cb472f386 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -666,32 +666,29 @@ EXPORT_SYMBOL(nf_hook_slow_list);
/* This needs to be compiled in any case to avoid dependencies between the
* nfnetlink_queue code and nf_conntrack.
*/
-struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
+const struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);
-struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
+const struct nf_ct_hook __rcu *nf_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_hook);
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-/* This does not belong here, but locally generated errors need it if connection
- tracking in use: without this, connection may not be in hash table, and hence
- manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
- __rcu __read_mostly;
-EXPORT_SYMBOL(ip_ct_attach);
-
-struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
+const struct nf_nat_hook __rcu *nf_nat_hook __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_hook);
+/* This does not belong here, but locally generated errors need it if connection
+ * tracking is in use: without this, the connection may not be in the hash table,
+ * and hence manufactured ICMP or RST packets will not be associated with it.
+ */
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
- void (*attach)(struct sk_buff *, const struct sk_buff *);
+ const struct nf_ct_hook *ct_hook;
if (skb->_nfct) {
rcu_read_lock();
- attach = rcu_dereference(ip_ct_attach);
- if (attach)
- attach(new, skb);
+ ct_hook = rcu_dereference(nf_ct_hook);
+ if (ct_hook)
+ ct_hook->attach(new, skb);
rcu_read_unlock();
}
}
@@ -699,7 +696,7 @@ EXPORT_SYMBOL(nf_ct_attach);
void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
- struct nf_ct_hook *ct_hook;
+ const struct nf_ct_hook *ct_hook;
rcu_read_lock();
ct_hook = rcu_dereference(nf_ct_hook);
@@ -712,7 +709,7 @@ EXPORT_SYMBOL(nf_conntrack_destroy);
bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
const struct sk_buff *skb)
{
- struct nf_ct_hook *ct_hook;
+ const struct nf_ct_hook *ct_hook;
bool ret = false;
rcu_read_lock();
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 39c523bd775c..7f645328b47f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -960,8 +960,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
* Create a destination for the given service
*/
static int
-ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
- struct ip_vs_dest **dest_p)
+ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
{
struct ip_vs_dest *dest;
unsigned int atype, i;
@@ -1021,8 +1020,6 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
spin_lock_init(&dest->stats.lock);
__ip_vs_update_dest(svc, dest, udest, 1);
- *dest_p = dest;
-
LeaveFunction(2);
return 0;
@@ -1096,7 +1093,7 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
/*
* Allocate and initialize the dest structure
*/
- ret = ip_vs_new_dest(svc, udest, &dest);
+ ret = ip_vs_new_dest(svc, udest);
}
LeaveFunction(2);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 4712a90a1820..894a325d39f2 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -47,6 +47,7 @@
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
+#include <net/netfilter/nf_conntrack_act_ct.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>
@@ -189,7 +190,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);
seqcount_spinlock_t nf_conntrack_generation __read_mostly;
-static siphash_key_t nf_conntrack_hash_rnd __read_mostly;
+static siphash_aligned_key_t nf_conntrack_hash_rnd;
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
unsigned int zoneid,
@@ -482,7 +483,7 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
*/
u32 nf_ct_get_id(const struct nf_conn *ct)
{
- static __read_mostly siphash_key_t ct_id_seed;
+ static siphash_aligned_key_t ct_id_seed;
unsigned long a, b, c, d;
net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
@@ -558,7 +559,7 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
-/* Released via destroy_conntrack() */
+/* Released via nf_ct_destroy() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
const struct nf_conntrack_zone *zone,
gfp_t flags)
@@ -585,7 +586,7 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
tmpl->status = IPS_TEMPLATE;
write_pnet(&tmpl->ct_net, net);
nf_ct_zone_add(tmpl, zone);
- atomic_set(&tmpl->ct_general.use, 0);
+ refcount_set(&tmpl->ct_general.use, 1);
return tmpl;
}
@@ -612,13 +613,12 @@ static void destroy_gre_conntrack(struct nf_conn *ct)
#endif
}
-static void
-destroy_conntrack(struct nf_conntrack *nfct)
+void nf_ct_destroy(struct nf_conntrack *nfct)
{
struct nf_conn *ct = (struct nf_conn *)nfct;
- pr_debug("destroy_conntrack(%p)\n", ct);
- WARN_ON(atomic_read(&nfct->use) != 0);
+ pr_debug("%s(%p)\n", __func__, ct);
+ WARN_ON(refcount_read(&nfct->use) != 0);
if (unlikely(nf_ct_is_template(ct))) {
nf_ct_tmpl_free(ct);
@@ -643,9 +643,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
if (ct->master)
nf_ct_put(ct->master);
- pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
+ pr_debug("%s: returning ct=%p to slab\n", __func__, ct);
nf_conntrack_free(ct);
}
+EXPORT_SYMBOL(nf_ct_destroy);
static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
@@ -742,7 +743,7 @@ nf_ct_match(const struct nf_conn *ct1, const struct nf_conn *ct2)
/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
- if (!atomic_inc_not_zero(&ct->ct_general.use))
+ if (!refcount_inc_not_zero(&ct->ct_general.use))
return;
if (nf_ct_should_gc(ct))
@@ -810,7 +811,7 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
* in, try to obtain a reference and re-check tuple
*/
ct = nf_ct_tuplehash_to_ctrack(h);
- if (likely(atomic_inc_not_zero(&ct->ct_general.use))) {
+ if (likely(refcount_inc_not_zero(&ct->ct_general.use))) {
if (likely(nf_ct_key_equal(h, tuple, zone, net)))
goto found;
@@ -907,7 +908,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
smp_wmb();
/* The caller holds a reference to this object */
- atomic_set(&ct->ct_general.use, 2);
+ refcount_set(&ct->ct_general.use, 2);
__nf_conntrack_hash_insert(ct, hash, reply_hash);
nf_conntrack_double_unlock(hash, reply_hash);
NF_CT_STAT_INC(net, insert);
@@ -958,7 +959,7 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
{
struct nf_conn_tstamp *tstamp;
- atomic_inc(&ct->ct_general.use);
+ refcount_inc(&ct->ct_general.use);
ct->status |= IPS_CONFIRMED;
/* set conntrack timestamp, if enabled. */
@@ -989,7 +990,7 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
nf_ct_acct_merge(ct, ctinfo, loser_ct);
nf_ct_add_to_dying_list(loser_ct);
- nf_conntrack_put(&loser_ct->ct_general);
+ nf_ct_put(loser_ct);
nf_ct_set(skb, ct, ctinfo);
NF_CT_STAT_INC(net, clash_resolve);
@@ -1351,7 +1352,7 @@ static unsigned int early_drop_list(struct net *net,
nf_ct_is_dying(tmp))
continue;
- if (!atomic_inc_not_zero(&tmp->ct_general.use))
+ if (!refcount_inc_not_zero(&tmp->ct_general.use))
continue;
/* kill only if still in same netns -- might have moved due to
@@ -1469,7 +1470,7 @@ static void gc_worker(struct work_struct *work)
continue;
/* need to take reference to avoid possible races */
- if (!atomic_inc_not_zero(&tmp->ct_general.use))
+ if (!refcount_inc_not_zero(&tmp->ct_general.use))
continue;
if (gc_worker_skip_ct(tmp)) {
@@ -1562,16 +1563,14 @@ __nf_conntrack_alloc(struct net *net,
ct->status = 0;
WRITE_ONCE(ct->timeout, 0);
write_pnet(&ct->ct_net, net);
- memset(&ct->__nfct_init_offset, 0,
- offsetof(struct nf_conn, proto) -
- offsetof(struct nf_conn, __nfct_init_offset));
+ memset_after(ct, 0, __nfct_init_offset);
nf_ct_zone_add(ct, zone);
/* Because we use RCU lookups, we set ct_general.use to zero before
* this is inserted in any list.
*/
- atomic_set(&ct->ct_general.use, 0);
+ refcount_set(&ct->ct_general.use, 0);
return ct;
out:
atomic_dec(&cnet->count);
@@ -1596,7 +1595,7 @@ void nf_conntrack_free(struct nf_conn *ct)
/* A freed object has refcnt == 0, that's
* the golden rule for SLAB_TYPESAFE_BY_RCU
*/
- WARN_ON(atomic_read(&ct->ct_general.use) != 0);
+ WARN_ON(refcount_read(&ct->ct_general.use) != 0);
nf_ct_ext_destroy(ct);
kmem_cache_free(nf_conntrack_cachep, ct);
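The comment above restates the SLAB_TYPESAFE_BY_RCU rule this refcount_t conversion preserves. A standalone sketch of the reader-side pattern (plain C11 atomics standing in for refcount_t, names assumed):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj {
    	atomic_uint use;	/* 0 means "freed", mirroring ct_general.use */
    	unsigned int key;
    };

    static bool obj_get_unless_zero(struct obj *o)
    {
    	unsigned int old = atomic_load(&o->use);

    	while (old) {
    		if (atomic_compare_exchange_weak(&o->use, &old, old + 1))
    			return true;	/* got a reference */
    	}
    	return false;	/* object is being freed, leave it alone */
    }

    static struct obj *lookup_validate(struct obj *candidate, unsigned int key)
    {
    	if (!obj_get_unless_zero(candidate))
    		return NULL;
    	if (candidate->key != key) {	/* slot may have been reused: re-check */
    		atomic_fetch_sub(&candidate->use, 1);
    		return NULL;
    	}
    	return candidate;
    }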
@@ -1688,8 +1687,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
if (!exp)
__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
- /* Now it is inserted into the unconfirmed list, bump refcount */
- nf_conntrack_get(&ct->ct_general);
+ /* Now it is inserted into the unconfirmed list, set refcount to 1. */
+ refcount_set(&ct->ct_general.use, 1);
nf_ct_add_to_unconfirmed_list(ct);
local_bh_enable();
@@ -1749,6 +1748,9 @@ resolve_normal_ct(struct nf_conn *tmpl,
return 0;
if (IS_ERR(h))
return PTR_ERR(h);
+
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ ct->local_origin = state->hook == NF_INET_LOCAL_OUT;
}
ct = nf_ct_tuplehash_to_ctrack(h);
@@ -1920,7 +1922,7 @@ repeat:
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
pr_debug("nf_conntrack_in: Can't track with proto module\n");
- nf_conntrack_put(&ct->ct_general);
+ nf_ct_put(ct);
skb->_nfct = 0;
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
if (ret == -NF_DROP)
@@ -2084,9 +2086,9 @@ static int __nf_conntrack_update(struct net *net, struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
+ const struct nf_nat_hook *nat_hook;
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
- struct nf_nat_hook *nat_hook;
unsigned int status;
int dataoff;
u16 l3num;
@@ -2299,7 +2301,7 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
return NULL;
found:
- atomic_inc(&ct->ct_general.use);
+ refcount_inc(&ct->ct_general.use);
spin_unlock(lockp);
local_bh_enable();
return ct;
@@ -2454,7 +2456,6 @@ static int kill_all(struct nf_conn *i, void *data)
void nf_conntrack_cleanup_start(void)
{
conntrack_gc_work.exiting = true;
- RCU_INIT_POINTER(ip_ct_attach, NULL);
}
void nf_conntrack_cleanup_end(void)
@@ -2590,7 +2591,6 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
- old_size = nf_conntrack_htable_size;
old_hash = nf_conntrack_hash;
nf_conntrack_hash = hash;
@@ -2629,7 +2629,7 @@ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
static __always_inline unsigned int total_extension_size(void)
{
/* remember to add new extensions below */
- BUILD_BUG_ON(NF_CT_EXT_NUM > 9);
+ BUILD_BUG_ON(NF_CT_EXT_NUM > 10);
return sizeof(struct nf_ct_ext) +
sizeof(struct nf_conn_help)
@@ -2653,6 +2653,9 @@ static __always_inline unsigned int total_extension_size(void)
#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ sizeof(struct nf_conn_synproxy)
#endif
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ + sizeof(struct nf_conn_act_ct_ext)
+#endif
;
};
@@ -2770,16 +2773,15 @@ err_cachep:
return ret;
}
-static struct nf_ct_hook nf_conntrack_hook = {
+static const struct nf_ct_hook nf_conntrack_hook = {
.update = nf_conntrack_update,
- .destroy = destroy_conntrack,
+ .destroy = nf_ct_destroy,
.get_tuple_skb = nf_conntrack_get_tuple_skb,
+ .attach = nf_conntrack_attach,
};
void nf_conntrack_init_end(void)
{
- /* For use by REJECT target */
- RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
RCU_INIT_POINTER(nf_ct_hook, &nf_conntrack_hook);
}
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index f562eeef4234..96948e98ec53 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_hash);
unsigned int nf_ct_expect_max __read_mostly;
static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
-static siphash_key_t nf_ct_expect_hashrnd __read_mostly;
+static siphash_aligned_key_t nf_ct_expect_hashrnd;
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
@@ -203,12 +203,12 @@ nf_ct_find_expectation(struct net *net,
* about to invoke ->destroy(), or nf_ct_delete() via timeout
* or early_drop().
*
- * The atomic_inc_not_zero() check tells: If that fails, we
+ * The refcount_inc_not_zero() check tells: If that fails, we
* know that the ct is being destroyed. If it succeeds, we
* can be sure the ct cannot disappear underneath.
*/
if (unlikely(nf_ct_is_dying(exp->master) ||
- !atomic_inc_not_zero(&exp->master->ct_general.use)))
+ !refcount_inc_not_zero(&exp->master->ct_general.use)))
return NULL;
if (exp->flags & NF_CT_EXPECT_PERMANENT) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ec4164c32d27..ac438370f94a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -508,7 +508,7 @@ nla_put_failure:
static int ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
- if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
+ if (nla_put_be32(skb, CTA_USE, htonl(refcount_read(&ct->ct_general.use))))
goto nla_put_failure;
return 0;
@@ -1198,7 +1198,7 @@ restart:
ct = nf_ct_tuplehash_to_ctrack(h);
if (nf_ct_is_expired(ct)) {
if (i < ARRAY_SIZE(nf_ct_evict) &&
- atomic_inc_not_zero(&ct->ct_general.use))
+ refcount_inc_not_zero(&ct->ct_general.use))
nf_ct_evict[i++] = ct;
continue;
}
@@ -1747,9 +1747,9 @@ restart:
res = ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
- ct, dying ? true : false, 0);
+ ct, dying, 0);
if (res < 0) {
- if (!atomic_inc_not_zero(&ct->ct_general.use))
+ if (!refcount_inc_not_zero(&ct->ct_general.use))
continue;
cb->args[0] = cpu;
cb->args[1] = (unsigned long)ct;
@@ -1820,7 +1820,7 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
const struct nlattr *attr)
__must_hold(RCU)
{
- struct nf_nat_hook *nat_hook;
+ const struct nf_nat_hook *nat_hook;
int err;
nat_hook = rcu_dereference(nf_nat_hook);
@@ -2922,7 +2922,7 @@ static void ctnetlink_glue_seqadj(struct sk_buff *skb, struct nf_conn *ct,
nf_ct_tcp_seqadj_set(skb, ct, ctinfo, diff);
}
-static struct nfnl_ct_hook ctnetlink_glue_hook = {
+static const struct nfnl_ct_hook ctnetlink_glue_hook = {
.build_size = ctnetlink_glue_build_size,
.build = ctnetlink_glue_build,
.parse = ctnetlink_glue_parse,
@@ -2996,7 +2996,7 @@ static const union nf_inet_addr any_addr;
static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
{
- static __read_mostly siphash_key_t exp_id_seed;
+ static siphash_aligned_key_t exp_id_seed;
unsigned long a, b, c, d;
net_get_random_once(&exp_id_seed, sizeof(exp_id_seed));
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 80f675d884b2..3e1afd10a9b6 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -303,7 +303,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
int ret = 0;
WARN_ON(!ct);
- if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+ if (unlikely(!refcount_inc_not_zero(&ct->ct_general.use)))
return 0;
if (nf_ct_should_gc(ct)) {
@@ -370,7 +370,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR);
ct_show_delta_time(s, ct);
- seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use));
+ seq_printf(s, "use=%u\n", refcount_read(&ct->ct_general.use));
if (seq_has_overflowed(s))
goto release;
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index ed37bb9b4e58..b90eca7a2f22 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -48,7 +48,7 @@ struct flow_offload *flow_offload_alloc(struct nf_conn *ct)
struct flow_offload *flow;
if (unlikely(nf_ct_is_dying(ct) ||
- !atomic_inc_not_zero(&ct->ct_general.use)))
+ !refcount_inc_not_zero(&ct->ct_general.use)))
return NULL;
flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index bc4126d8ef65..5c57ade6bd05 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -54,8 +54,30 @@ static struct nf_flowtable_type flowtable_inet = {
.owner = THIS_MODULE,
};
+static struct nf_flowtable_type flowtable_ipv4 = {
+ .family = NFPROTO_IPV4,
+ .init = nf_flow_table_init,
+ .setup = nf_flow_table_offload_setup,
+ .action = nf_flow_rule_route_ipv4,
+ .free = nf_flow_table_free,
+ .hook = nf_flow_offload_ip_hook,
+ .owner = THIS_MODULE,
+};
+
+static struct nf_flowtable_type flowtable_ipv6 = {
+ .family = NFPROTO_IPV6,
+ .init = nf_flow_table_init,
+ .setup = nf_flow_table_offload_setup,
+ .action = nf_flow_rule_route_ipv6,
+ .free = nf_flow_table_free,
+ .hook = nf_flow_offload_ipv6_hook,
+ .owner = THIS_MODULE,
+};
+
static int __init nf_flow_inet_module_init(void)
{
+ nft_register_flowtable_type(&flowtable_ipv4);
+ nft_register_flowtable_type(&flowtable_ipv6);
nft_register_flowtable_type(&flowtable_inet);
return 0;
@@ -64,6 +86,8 @@ static int __init nf_flow_inet_module_init(void)
static void __exit nf_flow_inet_module_exit(void)
{
nft_unregister_flowtable_type(&flowtable_inet);
+ nft_unregister_flowtable_type(&flowtable_ipv6);
+ nft_unregister_flowtable_type(&flowtable_ipv4);
}
module_init(nf_flow_inet_module_init);
@@ -71,5 +95,7 @@ module_exit(nf_flow_inet_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
+MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */
MODULE_DESCRIPTION("Netfilter flow table mixed IPv4/IPv6 module");
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 4d50d51db796..2d06a66899b2 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -34,7 +34,7 @@ static unsigned int nat_net_id __read_mostly;
static struct hlist_head *nf_nat_bysource __read_mostly;
static unsigned int nf_nat_htable_size __read_mostly;
-static siphash_key_t nf_nat_hash_rnd __read_mostly;
+static siphash_aligned_key_t nf_nat_hash_rnd;
struct nf_nat_lookup_hook_priv {
struct nf_hook_entries __rcu *entries;
@@ -494,6 +494,38 @@ another_round:
goto another_round;
}
+static bool tuple_force_port_remap(const struct nf_conntrack_tuple *tuple)
+{
+ u16 sp, dp;
+
+ switch (tuple->dst.protonum) {
+ case IPPROTO_TCP:
+ sp = ntohs(tuple->src.u.tcp.port);
+ dp = ntohs(tuple->dst.u.tcp.port);
+ break;
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ sp = ntohs(tuple->src.u.udp.port);
+ dp = ntohs(tuple->dst.u.udp.port);
+ break;
+ default:
+ return false;
+ }
+
+ /* IANA: System port range: 1-1023,
+ * user port range: 1024-49151,
+ * private port range: 49152-65535.
+ *
+ * Linux default ephemeral port range is 32768-60999.
+ *
+ * Enforce port remapping if sport is significantly lower
+ * than dport to prevent NAT port shadowing, i.e.
+ * accidental match of 'new' inbound connection vs.
+ * existing outbound one.
+ */
+ return sp < 16384 && dp >= 32768;
+}
+
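To make the heuristic above concrete, a standalone sketch of the same check with a couple of example tuples (plain userspace C, thresholds copied from the function):

    #include <stdbool.h>
    #include <stdio.h>

    static bool force_port_remap(unsigned short sport, unsigned short dport)
    {
    	return sport < 16384 && dport >= 32768;
    }

    int main(void)
    {
    	/* low source port talking to an ephemeral destination port: remap */
    	printf("%d\n", force_port_remap(1024, 33000));	/* prints 1 */
    	/* typical client: ephemeral source port, well-known destination */
    	printf("%d\n", force_port_remap(45000, 443));	/* prints 0 */
    	return 0;
    }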
/* Manipulate the tuple into the range given. For NF_INET_POST_ROUTING,
* we change the source to map into the range. For NF_INET_PRE_ROUTING
* and NF_INET_LOCAL_OUT, we change the destination to map into the
@@ -507,11 +539,17 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
struct nf_conn *ct,
enum nf_nat_manip_type maniptype)
{
+ bool random_port = range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL;
const struct nf_conntrack_zone *zone;
struct net *net = nf_ct_net(ct);
zone = nf_ct_zone(ct);
+ if (maniptype == NF_NAT_MANIP_SRC &&
+ !random_port &&
+ !ct->local_origin)
+ random_port = tuple_force_port_remap(orig_tuple);
+
/* 1) If this srcip/proto/src-proto-part is currently mapped,
* and that same mapping gives a unique tuple within the given
* range, use that.
@@ -520,8 +558,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
* So far, we don't do local source mappings, so multiple
* manips not an issue.
*/
- if (maniptype == NF_NAT_MANIP_SRC &&
- !(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
+ if (maniptype == NF_NAT_MANIP_SRC && !random_port) {
/* try the original tuple first */
if (in_range(orig_tuple, range)) {
if (!nf_nat_used_tuple(orig_tuple, ct)) {
@@ -545,7 +582,7 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
*/
/* Only bother mapping if it's not already in range and unique */
- if (!(range->flags & NF_NAT_RANGE_PROTO_RANDOM_ALL)) {
+ if (!random_port) {
if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
if (!(range->flags & NF_NAT_RANGE_PROTO_OFFSET) &&
l4proto_in_range(tuple, maniptype,
@@ -1130,7 +1167,7 @@ static struct pernet_operations nat_net_ops = {
.size = sizeof(struct nat_net),
};
-static struct nf_nat_hook nat_hook = {
+static const struct nf_nat_hook nat_hook = {
.parse_nat_setup = nfnetlink_parse_nat_setup,
#ifdef CONFIG_XFRM
.decode_session = __nf_nat_decode_session,
diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c
index acd73f717a08..e32fac374608 100644
--- a/net/netfilter/nf_nat_masquerade.c
+++ b/net/netfilter/nf_nat_masquerade.c
@@ -12,6 +12,7 @@
struct masq_dev_work {
struct work_struct work;
struct net *net;
+ netns_tracker ns_tracker;
union nf_inet_addr addr;
int ifindex;
int (*iter)(struct nf_conn *i, void *data);
@@ -82,7 +83,7 @@ static void iterate_cleanup_work(struct work_struct *work)
nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);
- put_net(w->net);
+ put_net_track(w->net, &w->ns_tracker);
kfree(w);
atomic_dec(&masq_worker_count);
module_put(THIS_MODULE);
@@ -119,6 +120,7 @@ static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
INIT_WORK(&w->work, iterate_cleanup_work);
w->ifindex = ifindex;
w->net = net;
+ netns_tracker_alloc(net, &w->ns_tracker, gfp_flags);
w->iter = iter;
if (addr)
w->addr = *addr;
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 3d6d49420db8..2dfc5dae0656 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -349,7 +349,6 @@ static int __net_init synproxy_net_init(struct net *net)
goto err2;
__set_bit(IPS_CONFIRMED_BIT, &ct->status);
- nf_conntrack_get(&ct->ct_general);
snet->tmpl = ct;
snet->stats = alloc_percpu(struct synproxy_stats);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index c20772822637..77938b1042f3 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1747,16 +1747,16 @@ static void nft_chain_stats_replace(struct nft_trans *trans)
static void nf_tables_chain_free_chain_rules(struct nft_chain *chain)
{
- struct nft_rule **g0 = rcu_dereference_raw(chain->rules_gen_0);
- struct nft_rule **g1 = rcu_dereference_raw(chain->rules_gen_1);
+ struct nft_rule_blob *g0 = rcu_dereference_raw(chain->blob_gen_0);
+ struct nft_rule_blob *g1 = rcu_dereference_raw(chain->blob_gen_1);
if (g0 != g1)
kvfree(g1);
kvfree(g0);
/* should be NULL either via abort or via successful commit */
- WARN_ON_ONCE(chain->rules_next);
- kvfree(chain->rules_next);
+ WARN_ON_ONCE(chain->blob_next);
+ kvfree(chain->blob_next);
}
void nf_tables_chain_destroy(struct nft_ctx *ctx)
@@ -2002,23 +2002,39 @@ static void nft_chain_release_hook(struct nft_chain_hook *hook)
struct nft_rules_old {
struct rcu_head h;
- struct nft_rule **start;
+ struct nft_rule_blob *blob;
};
-static struct nft_rule **nf_tables_chain_alloc_rules(const struct nft_chain *chain,
- unsigned int alloc)
+static void nft_last_rule(struct nft_rule_blob *blob, const void *ptr)
{
- if (alloc > INT_MAX)
+ struct nft_rule_dp *prule;
+
+ prule = (struct nft_rule_dp *)ptr;
+ prule->is_last = 1;
+ ptr += offsetof(struct nft_rule_dp, data);
+ /* blob size does not include the trailer rule */
+}
+
+static struct nft_rule_blob *nf_tables_chain_alloc_rules(unsigned int size)
+{
+ struct nft_rule_blob *blob;
+
+ /* size must include room for the last rule */
+ if (size < offsetof(struct nft_rule_dp, data))
+ return NULL;
+
+ size += sizeof(struct nft_rule_blob) + sizeof(struct nft_rules_old);
+ if (size > INT_MAX)
return NULL;
- alloc += 1; /* NULL, ends rules */
- if (sizeof(struct nft_rule *) > INT_MAX / alloc)
+ blob = kvmalloc(size, GFP_KERNEL);
+ if (!blob)
return NULL;
- alloc *= sizeof(struct nft_rule *);
- alloc += sizeof(struct nft_rules_old);
+ blob->size = 0;
+ nft_last_rule(blob, blob->data);
- return kvmalloc(alloc, GFP_KERNEL);
+ return blob;
}
static void nft_basechain_hook_init(struct nf_hook_ops *ops, u8 family,
@@ -2091,9 +2107,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
struct nft_stats __percpu *stats;
struct net *net = ctx->net;
char name[NFT_NAME_MAXLEN];
+ struct nft_rule_blob *blob;
struct nft_trans *trans;
struct nft_chain *chain;
- struct nft_rule **rules;
+ unsigned int data_size;
int err;
if (table->use == UINT_MAX)
@@ -2178,15 +2195,15 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
chain->udlen = nla_len(nla[NFTA_CHAIN_USERDATA]);
}
- rules = nf_tables_chain_alloc_rules(chain, 0);
- if (!rules) {
+ data_size = offsetof(struct nft_rule_dp, data); /* last rule */
+ blob = nf_tables_chain_alloc_rules(data_size);
+ if (!blob) {
err = -ENOMEM;
goto err_destroy_chain;
}
- *rules = NULL;
- rcu_assign_pointer(chain->rules_gen_0, rules);
- rcu_assign_pointer(chain->rules_gen_1, rules);
+ RCU_INIT_POINTER(chain->blob_gen_0, blob);
+ RCU_INIT_POINTER(chain->blob_gen_1, blob);
err = nf_tables_register_hook(net, table, chain);
if (err < 0)
@@ -8241,32 +8258,84 @@ EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work);
static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *chain)
{
+ const struct nft_expr *expr, *last;
+ struct nft_regs_track track = {};
+ unsigned int size, data_size;
+ void *data, *data_boundary;
+ struct nft_rule_dp *prule;
struct nft_rule *rule;
- unsigned int alloc = 0;
int i;
/* already handled or inactive chain? */
- if (chain->rules_next || !nft_is_active_next(net, chain))
+ if (chain->blob_next || !nft_is_active_next(net, chain))
return 0;
rule = list_entry(&chain->rules, struct nft_rule, list);
i = 0;
+ data_size = 0;
list_for_each_entry_continue(rule, &chain->rules, list) {
- if (nft_is_active_next(net, rule))
- alloc++;
+ if (nft_is_active_next(net, rule)) {
+ data_size += sizeof(*prule) + rule->dlen;
+ if (data_size > INT_MAX)
+ return -ENOMEM;
+ }
}
+ data_size += offsetof(struct nft_rule_dp, data); /* last rule */
- chain->rules_next = nf_tables_chain_alloc_rules(chain, alloc);
- if (!chain->rules_next)
+ chain->blob_next = nf_tables_chain_alloc_rules(data_size);
+ if (!chain->blob_next)
return -ENOMEM;
+ data = (void *)chain->blob_next->data;
+ data_boundary = data + data_size;
+ size = 0;
+
list_for_each_entry_continue(rule, &chain->rules, list) {
- if (nft_is_active_next(net, rule))
- chain->rules_next[i++] = rule;
+ if (!nft_is_active_next(net, rule))
+ continue;
+
+ prule = (struct nft_rule_dp *)data;
+ data += offsetof(struct nft_rule_dp, data);
+ if (WARN_ON_ONCE(data > data_boundary))
+ return -ENOMEM;
+
+ size = 0;
+ track.last = last;
+ nft_rule_for_each_expr(expr, last, rule) {
+ track.cur = expr;
+
+ if (expr->ops->reduce &&
+ expr->ops->reduce(&track, expr)) {
+ expr = track.cur;
+ continue;
+ }
+
+ if (WARN_ON_ONCE(data + expr->ops->size > data_boundary))
+ return -ENOMEM;
+
+ memcpy(data + size, expr, expr->ops->size);
+ size += expr->ops->size;
+ }
+ if (WARN_ON_ONCE(size >= 1 << 12))
+ return -ENOMEM;
+
+ prule->handle = rule->handle;
+ prule->dlen = size;
+ prule->is_last = 0;
+
+ data += size;
+ size = 0;
+ chain->blob_next->size += (unsigned long)(data - (void *)prule);
}
- chain->rules_next[i] = NULL;
+ prule = (struct nft_rule_dp *)data;
+ data += offsetof(struct nft_rule_dp, data);
+ if (WARN_ON_ONCE(data > data_boundary))
+ return -ENOMEM;
+
+ nft_last_rule(chain->blob_next, prule);
+
return 0;
}
@@ -8280,8 +8349,8 @@ static void nf_tables_commit_chain_prepare_cancel(struct net *net)
if (trans->msg_type == NFT_MSG_NEWRULE ||
trans->msg_type == NFT_MSG_DELRULE) {
- kvfree(chain->rules_next);
- chain->rules_next = NULL;
+ kvfree(chain->blob_next);
+ chain->blob_next = NULL;
}
}
}
@@ -8290,38 +8359,34 @@ static void __nf_tables_commit_chain_free_rules_old(struct rcu_head *h)
{
struct nft_rules_old *o = container_of(h, struct nft_rules_old, h);
- kvfree(o->start);
+ kvfree(o->blob);
}
-static void nf_tables_commit_chain_free_rules_old(struct nft_rule **rules)
+static void nf_tables_commit_chain_free_rules_old(struct nft_rule_blob *blob)
{
- struct nft_rule **r = rules;
struct nft_rules_old *old;
- while (*r)
- r++;
-
- r++; /* rcu_head is after end marker */
- old = (void *) r;
- old->start = rules;
+ /* rcu_head is after end marker */
+ old = (void *)blob + sizeof(*blob) + blob->size;
+ old->blob = blob;
call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old);
}
static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
{
- struct nft_rule **g0, **g1;
+ struct nft_rule_blob *g0, *g1;
bool next_genbit;
next_genbit = nft_gencursor_next(net);
- g0 = rcu_dereference_protected(chain->rules_gen_0,
+ g0 = rcu_dereference_protected(chain->blob_gen_0,
lockdep_commit_lock_is_held(net));
- g1 = rcu_dereference_protected(chain->rules_gen_1,
+ g1 = rcu_dereference_protected(chain->blob_gen_1,
lockdep_commit_lock_is_held(net));
/* No changes to this chain? */
- if (chain->rules_next == NULL) {
+ if (chain->blob_next == NULL) {
/* chain had no change in last or next generation */
if (g0 == g1)
return;
@@ -8330,10 +8395,10 @@ static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
* one uses same rules as current generation.
*/
if (next_genbit) {
- rcu_assign_pointer(chain->rules_gen_1, g0);
+ rcu_assign_pointer(chain->blob_gen_1, g0);
nf_tables_commit_chain_free_rules_old(g1);
} else {
- rcu_assign_pointer(chain->rules_gen_0, g1);
+ rcu_assign_pointer(chain->blob_gen_0, g1);
nf_tables_commit_chain_free_rules_old(g0);
}
@@ -8341,11 +8406,11 @@ static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
}
if (next_genbit)
- rcu_assign_pointer(chain->rules_gen_1, chain->rules_next);
+ rcu_assign_pointer(chain->blob_gen_1, chain->blob_next);
else
- rcu_assign_pointer(chain->rules_gen_0, chain->rules_next);
+ rcu_assign_pointer(chain->blob_gen_0, chain->blob_next);
- chain->rules_next = NULL;
+ chain->blob_next = NULL;
if (g0 == g1)
return;
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index adc348056076..36e73f9828c5 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -38,7 +38,7 @@ static noinline void __nft_trace_packet(struct nft_traceinfo *info,
static inline void nft_trace_packet(struct nft_traceinfo *info,
const struct nft_chain *chain,
- const struct nft_rule *rule,
+ const struct nft_rule_dp *rule,
enum nft_trace_types type)
{
if (static_branch_unlikely(&nft_trace_enabled)) {
@@ -67,6 +67,36 @@ static void nft_cmp_fast_eval(const struct nft_expr *expr,
regs->verdict.code = NFT_BREAK;
}
+static noinline void __nft_trace_verdict(struct nft_traceinfo *info,
+ const struct nft_chain *chain,
+ const struct nft_regs *regs)
+{
+ enum nft_trace_types type;
+
+ switch (regs->verdict.code) {
+ case NFT_CONTINUE:
+ case NFT_RETURN:
+ type = NFT_TRACETYPE_RETURN;
+ break;
+ default:
+ type = NFT_TRACETYPE_RULE;
+ break;
+ }
+
+ __nft_trace_packet(info, chain, type);
+}
+
+static inline void nft_trace_verdict(struct nft_traceinfo *info,
+ const struct nft_chain *chain,
+ const struct nft_rule_dp *rule,
+ const struct nft_regs *regs)
+{
+ if (static_branch_unlikely(&nft_trace_enabled)) {
+ info->rule = rule;
+ __nft_trace_verdict(info, chain, regs);
+ }
+}
+
static bool nft_payload_fast_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -110,7 +140,6 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
base_chain = nft_base_chain(chain);
- rcu_read_lock();
pstats = READ_ONCE(base_chain->stats);
if (pstats) {
local_bh_disable();
@@ -121,12 +150,12 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
u64_stats_update_end(&stats->syncp);
local_bh_enable();
}
- rcu_read_unlock();
}
struct nft_jumpstack {
- const struct nft_chain *chain;
- struct nft_rule *const *rules;
+ const struct nft_chain *chain;
+ const struct nft_rule_dp *rule;
+ const struct nft_rule_dp *last_rule;
};
static void expr_call_ops_eval(const struct nft_expr *expr,
@@ -141,6 +170,7 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
X(e, nft_payload_eval);
X(e, nft_cmp_eval);
+ X(e, nft_counter_eval);
X(e, nft_meta_get_eval);
X(e, nft_lookup_eval);
X(e, nft_range_eval);
@@ -154,18 +184,28 @@ static void expr_call_ops_eval(const struct nft_expr *expr,
expr->ops->eval(expr, regs, pkt);
}
+#define nft_rule_expr_first(rule) (struct nft_expr *)&rule->data[0]
+#define nft_rule_expr_next(expr) ((void *)expr) + expr->ops->size
+#define nft_rule_expr_last(rule) (struct nft_expr *)&rule->data[rule->dlen]
+#define nft_rule_next(rule) (void *)rule + sizeof(*rule) + rule->dlen
+
+#define nft_rule_dp_for_each_expr(expr, last, rule) \
+ for ((expr) = nft_rule_expr_first(rule), (last) = nft_rule_expr_last(rule); \
+ (expr) != (last); \
+ (expr) = nft_rule_expr_next(expr))
+
unsigned int
nft_do_chain(struct nft_pktinfo *pkt, void *priv)
{
const struct nft_chain *chain = priv, *basechain = chain;
+ const struct nft_rule_dp *rule, *last_rule;
const struct net *net = nft_net(pkt);
- struct nft_rule *const *rules;
- const struct nft_rule *rule;
const struct nft_expr *expr, *last;
struct nft_regs regs;
unsigned int stackptr = 0;
struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
bool genbit = READ_ONCE(net->nft.gencursor);
+ struct nft_rule_blob *blob;
struct nft_traceinfo info;
info.trace = false;
@@ -173,16 +213,16 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv)
nft_trace_init(&info, pkt, &regs.verdict, basechain);
do_chain:
if (genbit)
- rules = rcu_dereference(chain->rules_gen_1);
+ blob = rcu_dereference(chain->blob_gen_1);
else
- rules = rcu_dereference(chain->rules_gen_0);
+ blob = rcu_dereference(chain->blob_gen_0);
+ rule = (struct nft_rule_dp *)blob->data;
+ last_rule = (void *)blob->data + blob->size;
next_rule:
- rule = *rules;
regs.verdict.code = NFT_CONTINUE;
- for (; *rules ; rules++) {
- rule = *rules;
- nft_rule_for_each_expr(expr, last, rule) {
+ for (; rule < last_rule; rule = nft_rule_next(rule)) {
+ nft_rule_dp_for_each_expr(expr, last, rule) {
if (expr->ops == &nft_cmp_fast_ops)
nft_cmp_fast_eval(expr, &regs);
else if (expr->ops == &nft_bitwise_fast_ops)
@@ -207,13 +247,13 @@ next_rule:
break;
}
+ nft_trace_verdict(&info, chain, rule, &regs);
+
switch (regs.verdict.code & NF_VERDICT_MASK) {
case NF_ACCEPT:
case NF_DROP:
case NF_QUEUE:
case NF_STOLEN:
- nft_trace_packet(&info, chain, rule,
- NFT_TRACETYPE_RULE);
return regs.verdict.code;
}
@@ -222,28 +262,25 @@ next_rule:
if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
return NF_DROP;
jumpstack[stackptr].chain = chain;
- jumpstack[stackptr].rules = rules + 1;
+ jumpstack[stackptr].rule = nft_rule_next(rule);
+ jumpstack[stackptr].last_rule = last_rule;
stackptr++;
fallthrough;
case NFT_GOTO:
- nft_trace_packet(&info, chain, rule,
- NFT_TRACETYPE_RULE);
-
chain = regs.verdict.chain;
goto do_chain;
case NFT_CONTINUE:
case NFT_RETURN:
- nft_trace_packet(&info, chain, rule,
- NFT_TRACETYPE_RETURN);
break;
default:
- WARN_ON(1);
+ WARN_ON_ONCE(1);
}
if (stackptr > 0) {
stackptr--;
chain = jumpstack[stackptr].chain;
- rules = jumpstack[stackptr].rules;
+ rule = jumpstack[stackptr].rule;
+ last_rule = jumpstack[stackptr].last_rule;
goto next_rule;
}
@@ -269,18 +306,22 @@ static struct nft_expr_type *nft_basic_types[] = {
&nft_rt_type,
&nft_exthdr_type,
&nft_last_type,
+ &nft_counter_type,
};
static struct nft_object_type *nft_basic_objects[] = {
#ifdef CONFIG_NETWORK_SECMARK
&nft_secmark_obj_type,
#endif
+ &nft_counter_obj_type,
};
int __init nf_tables_core_module_init(void)
{
int err, i, j = 0;
+ nft_counter_init_seqcount();
+
for (i = 0; i < ARRAY_SIZE(nft_basic_objects); i++) {
err = nft_register_obj(nft_basic_objects[i]);
if (err)
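
The new dataplane walks rules laid out back to back in one blob, each record carrying its own payload length, which is exactly what the nft_rule_next()/nft_rule_dp_for_each_expr() helpers above encode. A standalone sketch of the same variable-stride traversal, with an illustrative record type instead of the kernel's nft_rule_dp:

#include <stdio.h>
#include <string.h>

/* Illustrative record: header plus dlen bytes of payload, mirroring the
 * sizeof(*rule) + rule->dlen stride used by nft_rule_next() above. */
struct rec {
	unsigned int dlen;
	unsigned char data[];
};

#define rec_next(r) ((struct rec *)((void *)(r) + sizeof(*(r)) + (r)->dlen))

int main(void)
{
	unsigned int storage[16] = { 0 };	/* 64 aligned bytes of blob space */
	unsigned int lens[] = { 4, 8, 12 };
	struct rec *r, *last;
	void *p = storage;

	/* Pack three records back to back. */
	for (int i = 0; i < 3; i++) {
		r = p;
		r->dlen = lens[i];
		memset(r->data, i, r->dlen);
		p += sizeof(*r) + r->dlen;
	}
	last = p;	/* one past the final record, like last_rule above */

	/* Walk them the same way nft_do_chain() iterates the blob. */
	for (r = (struct rec *)storage; r < last; r = rec_next(r))
		printf("record with %u payload bytes\n", r->dlen);

	return 0;
}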
diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c
index 84a7dea46efa..5041725423c2 100644
--- a/net/netfilter/nf_tables_trace.c
+++ b/net/netfilter/nf_tables_trace.c
@@ -142,7 +142,7 @@ static int nf_trace_fill_pkt_info(struct sk_buff *nlskb,
static int nf_trace_fill_rule_info(struct sk_buff *nlskb,
const struct nft_traceinfo *info)
{
- if (!info->rule)
+ if (!info->rule || info->rule->is_last)
return 0;
/* a continue verdict with ->type == RETURN means that this is
diff --git a/net/netfilter/nfnetlink_hook.c b/net/netfilter/nfnetlink_hook.c
index d5c719c9e36c..71e29adac48b 100644
--- a/net/netfilter/nfnetlink_hook.c
+++ b/net/netfilter/nfnetlink_hook.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
+#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 7f83f9697fc1..ae9c0756bba5 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -66,6 +66,7 @@ struct nfulnl_instance {
struct sk_buff *skb; /* pre-allocated skb */
struct timer_list timer;
struct net *net;
+ netns_tracker ns_tracker;
struct user_namespace *peer_user_ns; /* User namespace of the peer process */
u32 peer_portid; /* PORTID of the peer process */
@@ -140,7 +141,7 @@ static void nfulnl_instance_free_rcu(struct rcu_head *head)
struct nfulnl_instance *inst =
container_of(head, struct nfulnl_instance, rcu);
- put_net(inst->net);
+ put_net_track(inst->net, &inst->ns_tracker);
kfree(inst);
module_put(THIS_MODULE);
}
@@ -187,7 +188,7 @@ instance_create(struct net *net, u_int16_t group_num,
timer_setup(&inst->timer, nfulnl_timer, 0);
- inst->net = get_net(net);
+ inst->net = get_net_track(net, &inst->ns_tracker, GFP_ATOMIC);
inst->peer_user_ns = user_ns;
inst->peer_portid = portid;
inst->group_num = group_num;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index f0b9e21a2452..ea2d9c2a44cf 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -225,7 +225,7 @@ find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
- struct nf_ct_hook *ct_hook;
+ const struct nf_ct_hook *ct_hook;
int err;
if (verdict == NF_ACCEPT ||
@@ -388,7 +388,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
struct net_device *outdev;
struct nf_conn *ct = NULL;
enum ip_conntrack_info ctinfo = 0;
- struct nfnl_ct_hook *nfnl_ct;
+ const struct nfnl_ct_hook *nfnl_ct;
bool csum_verify;
char *secdata = NULL;
u32 seclen = 0;
@@ -1104,7 +1104,7 @@ static int nfqnl_recv_verdict_batch(struct sk_buff *skb,
return 0;
}
-static struct nf_conn *nfqnl_ct_parse(struct nfnl_ct_hook *nfnl_ct,
+static struct nf_conn *nfqnl_ct_parse(const struct nfnl_ct_hook *nfnl_ct,
const struct nlmsghdr *nlh,
const struct nlattr * const nfqa[],
struct nf_queue_entry *entry,
@@ -1171,11 +1171,11 @@ static int nfqnl_recv_verdict(struct sk_buff *skb, const struct nfnl_info *info,
{
struct nfnl_queue_net *q = nfnl_queue_pernet(info->net);
u_int16_t queue_num = ntohs(info->nfmsg->res_id);
+ const struct nfnl_ct_hook *nfnl_ct;
struct nfqnl_msg_verdict_hdr *vhdr;
enum ip_conntrack_info ctinfo;
struct nfqnl_instance *queue;
struct nf_queue_entry *entry;
- struct nfnl_ct_hook *nfnl_ct;
struct nf_conn *ct = NULL;
unsigned int verdict;
int err;
@@ -1528,15 +1528,9 @@ static void __net_exit nfnl_queue_net_exit(struct net *net)
WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
}
-static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list)
-{
- synchronize_rcu();
-}
-
static struct pernet_operations nfnl_queue_net_ops = {
.init = nfnl_queue_net_init,
.exit = nfnl_queue_net_exit,
- .exit_batch = nfnl_queue_net_exit_batch,
.id = &nfnl_queue_net_id,
.size = sizeof(struct nfnl_queue_net),
};
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 47b0dba95054..7b727d3ebf9d 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -278,12 +278,52 @@ static int nft_bitwise_offload(struct nft_offload_ctx *ctx,
return 0;
}
+static bool nft_bitwise_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr)
+{
+ const struct nft_bitwise *priv = nft_expr_priv(expr);
+ const struct nft_bitwise *bitwise;
+
+ if (!track->regs[priv->sreg].selector)
+ return false;
+
+ bitwise = nft_expr_priv(expr);
+ if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
+ track->regs[priv->dreg].bitwise &&
+ track->regs[priv->dreg].bitwise->ops == expr->ops &&
+ priv->sreg == bitwise->sreg &&
+ priv->dreg == bitwise->dreg &&
+ priv->op == bitwise->op &&
+ priv->len == bitwise->len &&
+ !memcmp(&priv->mask, &bitwise->mask, sizeof(priv->mask)) &&
+ !memcmp(&priv->xor, &bitwise->xor, sizeof(priv->xor)) &&
+ !memcmp(&priv->data, &bitwise->data, sizeof(priv->data))) {
+ track->cur = expr;
+ return true;
+ }
+
+ if (track->regs[priv->sreg].bitwise) {
+ track->regs[priv->dreg].selector = NULL;
+ track->regs[priv->dreg].bitwise = NULL;
+ return false;
+ }
+
+ if (priv->sreg != priv->dreg) {
+ track->regs[priv->dreg].selector =
+ track->regs[priv->sreg].selector;
+ }
+ track->regs[priv->dreg].bitwise = expr;
+
+ return false;
+}
+
static const struct nft_expr_ops nft_bitwise_ops = {
.type = &nft_bitwise_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_bitwise)),
.eval = nft_bitwise_eval,
.init = nft_bitwise_init,
.dump = nft_bitwise_dump,
+ .reduce = nft_bitwise_reduce,
.offload = nft_bitwise_offload,
};
@@ -385,12 +425,49 @@ static int nft_bitwise_fast_offload(struct nft_offload_ctx *ctx,
return 0;
}
+static bool nft_bitwise_fast_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr)
+{
+ const struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
+ const struct nft_bitwise_fast_expr *bitwise;
+
+ if (!track->regs[priv->sreg].selector)
+ return false;
+
+ bitwise = nft_expr_priv(expr);
+ if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
+ track->regs[priv->dreg].bitwise &&
+ track->regs[priv->dreg].bitwise->ops == expr->ops &&
+ priv->sreg == bitwise->sreg &&
+ priv->dreg == bitwise->dreg &&
+ priv->mask == bitwise->mask &&
+ priv->xor == bitwise->xor) {
+ track->cur = expr;
+ return true;
+ }
+
+ if (track->regs[priv->sreg].bitwise) {
+ track->regs[priv->dreg].selector = NULL;
+ track->regs[priv->dreg].bitwise = NULL;
+ return false;
+ }
+
+ if (priv->sreg != priv->dreg) {
+ track->regs[priv->dreg].selector =
+ track->regs[priv->sreg].selector;
+ }
+ track->regs[priv->dreg].bitwise = expr;
+
+ return false;
+}
+
const struct nft_expr_ops nft_bitwise_fast_ops = {
.type = &nft_bitwise_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_bitwise_fast_expr)),
.eval = NULL, /* inlined */
.init = nft_bitwise_fast_init,
.dump = nft_bitwise_fast_dump,
+ .reduce = nft_bitwise_fast_reduce,
.offload = nft_bitwise_fast_offload,
};
@@ -427,3 +504,21 @@ struct nft_expr_type nft_bitwise_type __read_mostly = {
.maxattr = NFTA_BITWISE_MAX,
.owner = THIS_MODULE,
};
+
+bool nft_expr_reduce_bitwise(struct nft_regs_track *track,
+ const struct nft_expr *expr)
+{
+ const struct nft_expr *last = track->last;
+ const struct nft_expr *next;
+
+ if (expr == last)
+ return false;
+
+ next = nft_expr_next(expr);
+ if (next->ops == &nft_bitwise_ops)
+ return nft_bitwise_reduce(track, next);
+ else if (next->ops == &nft_bitwise_fast_ops)
+ return nft_bitwise_fast_reduce(track, next);
+
+ return false;
+}
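
The reduce callbacks implement a simple form of common-subexpression elimination: per destination register they remember which selector expression (and which bitwise transform) last wrote it, and a later expression that would produce the same value can be skipped. A toy model of that bookkeeping, deliberately outside the kernel data structures (struct expr and reduce() below are illustrative names only):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy expression: "load selector KEY into register DREG, masked with MASK". */
struct expr { int key; int dreg; unsigned int mask; };

struct reg_track { const struct expr *writer; };

/* Returns true if the expression is redundant and can be elided. */
static bool reduce(struct reg_track *regs, const struct expr *e)
{
	const struct expr *prev = regs[e->dreg].writer;

	if (prev && prev->key == e->key && prev->mask == e->mask)
		return true;		/* same value already in the register */

	regs[e->dreg].writer = e;	/* new canonical writer for this reg */
	return false;
}

int main(void)
{
	struct reg_track regs[16];
	struct expr a = { .key = 1, .dreg = 2, .mask = 0xff };
	struct expr b = { .key = 1, .dreg = 2, .mask = 0xff };	/* duplicate */

	memset(regs, 0, sizeof(regs));
	printf("first:  %s\n", reduce(regs, &a) ? "elide" : "keep");
	printf("second: %s\n", reduce(regs, &b) ? "elide" : "keep");
	return 0;
}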
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index 7d0761fad37e..58dcafe8bf79 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -14,7 +14,7 @@
#include <net/netfilter/nf_conntrack_zones.h>
struct nft_connlimit {
- struct nf_conncount_list list;
+ struct nf_conncount_list *list;
u32 limit;
bool invert;
};
@@ -43,12 +43,12 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
return;
}
- if (nf_conncount_add(nft_net(pkt), &priv->list, tuple_ptr, zone)) {
+ if (nf_conncount_add(nft_net(pkt), priv->list, tuple_ptr, zone)) {
regs->verdict.code = NF_DROP;
return;
}
- count = priv->list.count;
+ count = priv->list->count;
if ((count > priv->limit) ^ priv->invert) {
regs->verdict.code = NFT_BREAK;
@@ -76,7 +76,11 @@ static int nft_connlimit_do_init(const struct nft_ctx *ctx,
invert = true;
}
- nf_conncount_list_init(&priv->list);
+ priv->list = kmalloc(sizeof(*priv->list), GFP_KERNEL);
+ if (!priv->list)
+ return -ENOMEM;
+
+ nf_conncount_list_init(priv->list);
priv->limit = limit;
priv->invert = invert;
@@ -87,7 +91,8 @@ static void nft_connlimit_do_destroy(const struct nft_ctx *ctx,
struct nft_connlimit *priv)
{
nf_ct_netns_put(ctx->net, ctx->family);
- nf_conncount_cache_free(&priv->list);
+ nf_conncount_cache_free(priv->list);
+ kfree(priv->list);
}
static int nft_connlimit_do_dump(struct sk_buff *skb,
@@ -200,7 +205,11 @@ static int nft_connlimit_clone(struct nft_expr *dst, const struct nft_expr *src)
struct nft_connlimit *priv_dst = nft_expr_priv(dst);
struct nft_connlimit *priv_src = nft_expr_priv(src);
- nf_conncount_list_init(&priv_dst->list);
+ priv_dst->list = kmalloc(sizeof(*priv_dst->list), GFP_ATOMIC);
+ if (!priv_dst->list)
+ return -ENOMEM;
+
+ nf_conncount_list_init(priv_dst->list);
priv_dst->limit = priv_src->limit;
priv_dst->invert = priv_src->invert;
@@ -212,7 +221,8 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
{
struct nft_connlimit *priv = nft_expr_priv(expr);
- nf_conncount_cache_free(&priv->list);
+ nf_conncount_cache_free(priv->list);
+ kfree(priv->list);
}
static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
@@ -221,7 +231,7 @@ static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
bool ret;
local_bh_disable();
- ret = nf_conncount_gc_list(net, &priv->list);
+ ret = nf_conncount_gc_list(net, priv->list);
local_bh_enable();
return ret;
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 8edd3b3c173d..f179e8c3b0ca 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -13,6 +13,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
struct nft_counter {
@@ -174,7 +175,7 @@ static const struct nla_policy nft_counter_policy[NFTA_COUNTER_MAX + 1] = {
[NFTA_COUNTER_BYTES] = { .type = NLA_U64 },
};
-static struct nft_object_type nft_counter_obj_type;
+struct nft_object_type nft_counter_obj_type;
static const struct nft_object_ops nft_counter_obj_ops = {
.type = &nft_counter_obj_type,
.size = sizeof(struct nft_counter_percpu_priv),
@@ -184,7 +185,7 @@ static const struct nft_object_ops nft_counter_obj_ops = {
.dump = nft_counter_obj_dump,
};
-static struct nft_object_type nft_counter_obj_type __read_mostly = {
+struct nft_object_type nft_counter_obj_type __read_mostly = {
.type = NFT_OBJECT_COUNTER,
.ops = &nft_counter_obj_ops,
.maxattr = NFTA_COUNTER_MAX,
@@ -192,9 +193,8 @@ static struct nft_object_type nft_counter_obj_type __read_mostly = {
.owner = THIS_MODULE,
};
-static void nft_counter_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+void nft_counter_eval(const struct nft_expr *expr, struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
@@ -275,7 +275,15 @@ static void nft_counter_offload_stats(struct nft_expr *expr,
preempt_enable();
}
-static struct nft_expr_type nft_counter_type;
+void nft_counter_init_seqcount(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ seqcount_init(per_cpu_ptr(&nft_counter_seq, cpu));
+}
+
+struct nft_expr_type nft_counter_type;
static const struct nft_expr_ops nft_counter_ops = {
.type = &nft_counter_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_counter_percpu_priv)),
@@ -289,7 +297,7 @@ static const struct nft_expr_ops nft_counter_ops = {
.offload_stats = nft_counter_offload_stats,
};
-static struct nft_expr_type nft_counter_type __read_mostly = {
+struct nft_expr_type nft_counter_type __read_mostly = {
.name = "counter",
.ops = &nft_counter_ops,
.policy = nft_counter_policy,
@@ -297,39 +305,3 @@ static struct nft_expr_type nft_counter_type __read_mostly = {
.flags = NFT_EXPR_STATEFUL,
.owner = THIS_MODULE,
};
-
-static int __init nft_counter_module_init(void)
-{
- int cpu, err;
-
- for_each_possible_cpu(cpu)
- seqcount_init(per_cpu_ptr(&nft_counter_seq, cpu));
-
- err = nft_register_obj(&nft_counter_obj_type);
- if (err < 0)
- return err;
-
- err = nft_register_expr(&nft_counter_type);
- if (err < 0)
- goto err1;
-
- return 0;
-err1:
- nft_unregister_obj(&nft_counter_obj_type);
- return err;
-}
-
-static void __exit nft_counter_module_exit(void)
-{
- nft_unregister_expr(&nft_counter_type);
- nft_unregister_obj(&nft_counter_obj_type);
-}
-
-module_init(nft_counter_module_init);
-module_exit(nft_counter_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_EXPR("counter");
-MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_COUNTER);
-MODULE_DESCRIPTION("nftables counter rule support");
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 99b1de14ff7e..518d96c8c247 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -259,7 +259,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
ct = this_cpu_read(nft_ct_pcpu_template);
- if (likely(atomic_read(&ct->ct_general.use) == 1)) {
+ if (likely(refcount_read(&ct->ct_general.use) == 1)) {
nf_ct_zone_add(ct, &zone);
} else {
/* previous skb got queued to userspace */
@@ -270,7 +270,6 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr,
}
}
- atomic_inc(&ct->ct_general.use);
nf_ct_set(skb, ct, IP_CT_NEW);
}
#endif
@@ -375,7 +374,6 @@ static bool nft_ct_tmpl_alloc_pcpu(void)
return false;
}
- atomic_set(&tmp->ct_general.use, 1);
per_cpu(nft_ct_pcpu_template, cpu) = tmp;
}
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
index cd59afde5b2f..fa9301ca6033 100644
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -27,9 +27,11 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr,
{
struct nft_fwd_netdev *priv = nft_expr_priv(expr);
int oif = regs->data[priv->sreg_dev];
+ struct sk_buff *skb = pkt->skb;
/* This is used by ifb only. */
- skb_set_redirected(pkt->skb, true);
+ skb->skb_iif = skb->dev->ifindex;
+ skb_set_redirected(skb, nft_hook(pkt) == NF_NETDEV_INGRESS);
nf_fwd_netdev_egress(pkt, oif);
regs->verdict.code = NF_STOLEN;
@@ -198,7 +200,8 @@ static int nft_fwd_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
{
- return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
+ return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS) |
+ (1 << NF_NETDEV_EGRESS));
}
static struct nft_expr_type nft_fwd_netdev_type;
diff --git a/net/netfilter/nft_last.c b/net/netfilter/nft_last.c
index 304e33cbed9b..5ee33d0ccd4e 100644
--- a/net/netfilter/nft_last.c
+++ b/net/netfilter/nft_last.c
@@ -8,9 +8,13 @@
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
+struct nft_last {
+ unsigned long jiffies;
+ unsigned int set;
+};
+
struct nft_last_priv {
- unsigned long last_jiffies;
- unsigned int last_set;
+ struct nft_last *last;
};
static const struct nla_policy nft_last_policy[NFTA_LAST_MAX + 1] = {
@@ -22,47 +26,55 @@ static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[])
{
struct nft_last_priv *priv = nft_expr_priv(expr);
+ struct nft_last *last;
u64 last_jiffies;
- u32 last_set = 0;
int err;
- if (tb[NFTA_LAST_SET]) {
- last_set = ntohl(nla_get_be32(tb[NFTA_LAST_SET]));
- if (last_set == 1)
- priv->last_set = 1;
- }
+ last = kzalloc(sizeof(*last), GFP_KERNEL);
+ if (!last)
+ return -ENOMEM;
+
+ if (tb[NFTA_LAST_SET])
+ last->set = ntohl(nla_get_be32(tb[NFTA_LAST_SET]));
- if (last_set && tb[NFTA_LAST_MSECS]) {
+ if (last->set && tb[NFTA_LAST_MSECS]) {
err = nf_msecs_to_jiffies64(tb[NFTA_LAST_MSECS], &last_jiffies);
if (err < 0)
- return err;
+ goto err;
- priv->last_jiffies = jiffies - (unsigned long)last_jiffies;
+ last->jiffies = jiffies - (unsigned long)last_jiffies;
}
+ priv->last = last;
return 0;
+err:
+ kfree(last);
+
+ return err;
}
static void nft_last_eval(const struct nft_expr *expr,
struct nft_regs *regs, const struct nft_pktinfo *pkt)
{
struct nft_last_priv *priv = nft_expr_priv(expr);
+ struct nft_last *last = priv->last;
- if (READ_ONCE(priv->last_jiffies) != jiffies)
- WRITE_ONCE(priv->last_jiffies, jiffies);
- if (READ_ONCE(priv->last_set) == 0)
- WRITE_ONCE(priv->last_set, 1);
+ if (READ_ONCE(last->jiffies) != jiffies)
+ WRITE_ONCE(last->jiffies, jiffies);
+ if (READ_ONCE(last->set) == 0)
+ WRITE_ONCE(last->set, 1);
}
static int nft_last_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
struct nft_last_priv *priv = nft_expr_priv(expr);
- unsigned long last_jiffies = READ_ONCE(priv->last_jiffies);
- u32 last_set = READ_ONCE(priv->last_set);
+ struct nft_last *last = priv->last;
+ unsigned long last_jiffies = READ_ONCE(last->jiffies);
+ u32 last_set = READ_ONCE(last->set);
__be64 msecs;
if (time_before(jiffies, last_jiffies)) {
- WRITE_ONCE(priv->last_set, 0);
+ WRITE_ONCE(last->set, 0);
last_set = 0;
}
@@ -81,11 +93,32 @@ nla_put_failure:
return -1;
}
+static void nft_last_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_last_priv *priv = nft_expr_priv(expr);
+
+ kfree(priv->last);
+}
+
+static int nft_last_clone(struct nft_expr *dst, const struct nft_expr *src)
+{
+ struct nft_last_priv *priv_dst = nft_expr_priv(dst);
+
+ priv_dst->last = kzalloc(sizeof(*priv_dst->last), GFP_ATOMIC);
+ if (!priv_dst->last)
+ return -ENOMEM;
+
+ return 0;
+}
+
static const struct nft_expr_ops nft_last_ops = {
.type = &nft_last_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_last_priv)),
.eval = nft_last_eval,
.init = nft_last_init,
+ .destroy = nft_last_destroy,
+ .clone = nft_last_clone,
.dump = nft_last_dump,
};
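
Several expressions touched by this series (connlimit, last, limit, quota, numgen) follow the same refactor: the mutable state moves out of the expression area into a separately allocated block, so init and clone each allocate their own copy and destroy frees it. A minimal sketch of that ownership pattern, with placeholder names rather than the kernel ops:

#include <stdlib.h>

/* Placeholder state block; the kernel versions hold jiffies, counters, ... */
struct state { unsigned long value; };

struct priv { struct state *st; };

static int priv_init(struct priv *p)
{
	p->st = calloc(1, sizeof(*p->st));
	return p->st ? 0 : -1;		/* kernel: -ENOMEM */
}

/* A clone gets its own fresh state block, never a shared pointer. */
static int priv_clone(struct priv *dst, const struct priv *src)
{
	(void)src;
	dst->st = calloc(1, sizeof(*dst->st));
	return dst->st ? 0 : -1;
}

static void priv_destroy(struct priv *p)
{
	free(p->st);
	p->st = NULL;
}

int main(void)
{
	struct priv a, b;

	if (priv_init(&a))
		return 1;
	if (priv_clone(&b, &a)) {
		priv_destroy(&a);
		return 1;
	}
	priv_destroy(&b);
	priv_destroy(&a);
	return 0;
}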
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 82ec27bdf941..f04be5be73a0 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -18,6 +18,10 @@ struct nft_limit {
spinlock_t lock;
u64 last;
u64 tokens;
+};
+
+struct nft_limit_priv {
+ struct nft_limit *limit;
u64 tokens_max;
u64 rate;
u64 nsecs;
@@ -25,33 +29,33 @@ struct nft_limit {
bool invert;
};
-static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
+static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
{
u64 now, tokens;
s64 delta;
- spin_lock_bh(&limit->lock);
+ spin_lock_bh(&priv->limit->lock);
now = ktime_get_ns();
- tokens = limit->tokens + now - limit->last;
- if (tokens > limit->tokens_max)
- tokens = limit->tokens_max;
+ tokens = priv->limit->tokens + now - priv->limit->last;
+ if (tokens > priv->tokens_max)
+ tokens = priv->tokens_max;
- limit->last = now;
+ priv->limit->last = now;
delta = tokens - cost;
if (delta >= 0) {
- limit->tokens = delta;
- spin_unlock_bh(&limit->lock);
- return limit->invert;
+ priv->limit->tokens = delta;
+ spin_unlock_bh(&priv->limit->lock);
+ return priv->invert;
}
- limit->tokens = tokens;
- spin_unlock_bh(&limit->lock);
- return !limit->invert;
+ priv->limit->tokens = tokens;
+ spin_unlock_bh(&priv->limit->lock);
+ return !priv->invert;
}
/* Use same default as in iptables. */
#define NFT_LIMIT_PKT_BURST_DEFAULT 5
-static int nft_limit_init(struct nft_limit *limit,
+static int nft_limit_init(struct nft_limit_priv *priv,
const struct nlattr * const tb[], bool pkts)
{
u64 unit, tokens;
@@ -60,58 +64,62 @@ static int nft_limit_init(struct nft_limit *limit,
tb[NFTA_LIMIT_UNIT] == NULL)
return -EINVAL;
- limit->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+ priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
- limit->nsecs = unit * NSEC_PER_SEC;
- if (limit->rate == 0 || limit->nsecs < unit)
+ priv->nsecs = unit * NSEC_PER_SEC;
+ if (priv->rate == 0 || priv->nsecs < unit)
return -EOVERFLOW;
if (tb[NFTA_LIMIT_BURST])
- limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+ priv->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
- if (pkts && limit->burst == 0)
- limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
+ if (pkts && priv->burst == 0)
+ priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
- if (limit->rate + limit->burst < limit->rate)
+ if (priv->rate + priv->burst < priv->rate)
return -EOVERFLOW;
if (pkts) {
- tokens = div64_u64(limit->nsecs, limit->rate) * limit->burst;
+ tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst;
} else {
* The token bucket size limits the number of tokens that can be
* accumulated. tokens_max specifies the bucket size.
* tokens_max = unit * (rate + burst) / rate.
*/
- tokens = div64_u64(limit->nsecs * (limit->rate + limit->burst),
- limit->rate);
+ tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst),
+ priv->rate);
}
- limit->tokens = tokens;
- limit->tokens_max = limit->tokens;
+ priv->limit = kmalloc(sizeof(*priv->limit), GFP_KERNEL);
+ if (!priv->limit)
+ return -ENOMEM;
+
+ priv->limit->tokens = tokens;
+ priv->tokens_max = priv->limit->tokens;
if (tb[NFTA_LIMIT_FLAGS]) {
u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
if (flags & NFT_LIMIT_F_INV)
- limit->invert = true;
+ priv->invert = true;
}
- limit->last = ktime_get_ns();
- spin_lock_init(&limit->lock);
+ priv->limit->last = ktime_get_ns();
+ spin_lock_init(&priv->limit->lock);
return 0;
}
-static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
+static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit_priv *priv,
enum nft_limit_type type)
{
- u32 flags = limit->invert ? NFT_LIMIT_F_INV : 0;
- u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
+ u32 flags = priv->invert ? NFT_LIMIT_F_INV : 0;
+ u64 secs = div_u64(priv->nsecs, NSEC_PER_SEC);
- if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(limit->rate),
+ if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(priv->rate),
NFTA_LIMIT_PAD) ||
nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs),
NFTA_LIMIT_PAD) ||
- nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(limit->burst)) ||
+ nla_put_be32(skb, NFTA_LIMIT_BURST, htonl(priv->burst)) ||
nla_put_be32(skb, NFTA_LIMIT_TYPE, htonl(type)) ||
nla_put_be32(skb, NFTA_LIMIT_FLAGS, htonl(flags)))
goto nla_put_failure;
@@ -121,8 +129,34 @@ nla_put_failure:
return -1;
}
-struct nft_limit_pkts {
- struct nft_limit limit;
+static void nft_limit_destroy(const struct nft_ctx *ctx,
+ const struct nft_limit_priv *priv)
+{
+ kfree(priv->limit);
+}
+
+static int nft_limit_clone(struct nft_limit_priv *priv_dst,
+ const struct nft_limit_priv *priv_src)
+{
+ priv_dst->tokens_max = priv_src->tokens_max;
+ priv_dst->rate = priv_src->rate;
+ priv_dst->nsecs = priv_src->nsecs;
+ priv_dst->burst = priv_src->burst;
+ priv_dst->invert = priv_src->invert;
+
+ priv_dst->limit = kmalloc(sizeof(*priv_dst->limit), GFP_ATOMIC);
+ if (!priv_dst->limit)
+ return -ENOMEM;
+
+ spin_lock_init(&priv_dst->limit->lock);
+ priv_dst->limit->tokens = priv_src->tokens_max;
+ priv_dst->limit->last = ktime_get_ns();
+
+ return 0;
+}
+
+struct nft_limit_priv_pkts {
+ struct nft_limit_priv limit;
u64 cost;
};
@@ -130,7 +164,7 @@ static void nft_limit_pkts_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
- struct nft_limit_pkts *priv = nft_expr_priv(expr);
+ struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
if (nft_limit_eval(&priv->limit, priv->cost))
regs->verdict.code = NFT_BREAK;
@@ -148,7 +182,7 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
- struct nft_limit_pkts *priv = nft_expr_priv(expr);
+ struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
int err;
err = nft_limit_init(&priv->limit, tb, true);
@@ -161,17 +195,35 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx,
static int nft_limit_pkts_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
- const struct nft_limit_pkts *priv = nft_expr_priv(expr);
+ const struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
}
+static void nft_limit_pkts_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ const struct nft_limit_priv_pkts *priv = nft_expr_priv(expr);
+
+ nft_limit_destroy(ctx, &priv->limit);
+}
+
+static int nft_limit_pkts_clone(struct nft_expr *dst, const struct nft_expr *src)
+{
+ struct nft_limit_priv_pkts *priv_dst = nft_expr_priv(dst);
+ struct nft_limit_priv_pkts *priv_src = nft_expr_priv(src);
+
+ return nft_limit_clone(&priv_dst->limit, &priv_src->limit);
+}
+
static struct nft_expr_type nft_limit_type;
static const struct nft_expr_ops nft_limit_pkts_ops = {
.type = &nft_limit_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_pkts)),
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv_pkts)),
.eval = nft_limit_pkts_eval,
.init = nft_limit_pkts_init,
+ .destroy = nft_limit_pkts_destroy,
+ .clone = nft_limit_pkts_clone,
.dump = nft_limit_pkts_dump,
};
@@ -179,7 +231,7 @@ static void nft_limit_bytes_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
- struct nft_limit *priv = nft_expr_priv(expr);
+ struct nft_limit_priv *priv = nft_expr_priv(expr);
u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);
if (nft_limit_eval(priv, cost))
@@ -190,7 +242,7 @@ static int nft_limit_bytes_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
{
- struct nft_limit *priv = nft_expr_priv(expr);
+ struct nft_limit_priv *priv = nft_expr_priv(expr);
return nft_limit_init(priv, tb, false);
}
@@ -198,17 +250,35 @@ static int nft_limit_bytes_init(const struct nft_ctx *ctx,
static int nft_limit_bytes_dump(struct sk_buff *skb,
const struct nft_expr *expr)
{
- const struct nft_limit *priv = nft_expr_priv(expr);
+ const struct nft_limit_priv *priv = nft_expr_priv(expr);
return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
}
+static void nft_limit_bytes_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ const struct nft_limit_priv *priv = nft_expr_priv(expr);
+
+ nft_limit_destroy(ctx, priv);
+}
+
+static int nft_limit_bytes_clone(struct nft_expr *dst, const struct nft_expr *src)
+{
+ struct nft_limit_priv *priv_dst = nft_expr_priv(dst);
+ struct nft_limit_priv *priv_src = nft_expr_priv(src);
+
+ return nft_limit_clone(priv_dst, priv_src);
+}
+
static const struct nft_expr_ops nft_limit_bytes_ops = {
.type = &nft_limit_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_limit)),
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv)),
.eval = nft_limit_bytes_eval,
.init = nft_limit_bytes_init,
.dump = nft_limit_bytes_dump,
+ .clone = nft_limit_bytes_clone,
+ .destroy = nft_limit_bytes_destroy,
};
static const struct nft_expr_ops *
@@ -240,7 +310,7 @@ static void nft_limit_obj_pkts_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
- struct nft_limit_pkts *priv = nft_obj_data(obj);
+ struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
if (nft_limit_eval(&priv->limit, priv->cost))
regs->verdict.code = NFT_BREAK;
@@ -250,7 +320,7 @@ static int nft_limit_obj_pkts_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
- struct nft_limit_pkts *priv = nft_obj_data(obj);
+ struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
int err;
err = nft_limit_init(&priv->limit, tb, true);
@@ -265,7 +335,7 @@ static int nft_limit_obj_pkts_dump(struct sk_buff *skb,
struct nft_object *obj,
bool reset)
{
- const struct nft_limit_pkts *priv = nft_obj_data(obj);
+ const struct nft_limit_priv_pkts *priv = nft_obj_data(obj);
return nft_limit_dump(skb, &priv->limit, NFT_LIMIT_PKTS);
}
@@ -273,7 +343,7 @@ static int nft_limit_obj_pkts_dump(struct sk_buff *skb,
static struct nft_object_type nft_limit_obj_type;
static const struct nft_object_ops nft_limit_obj_pkts_ops = {
.type = &nft_limit_obj_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_pkts)),
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_limit_priv_pkts)),
.init = nft_limit_obj_pkts_init,
.eval = nft_limit_obj_pkts_eval,
.dump = nft_limit_obj_pkts_dump,
@@ -283,7 +353,7 @@ static void nft_limit_obj_bytes_eval(struct nft_object *obj,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
{
- struct nft_limit *priv = nft_obj_data(obj);
+ struct nft_limit_priv *priv = nft_obj_data(obj);
u64 cost = div64_u64(priv->nsecs * pkt->skb->len, priv->rate);
if (nft_limit_eval(priv, cost))
@@ -294,7 +364,7 @@ static int nft_limit_obj_bytes_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
- struct nft_limit *priv = nft_obj_data(obj);
+ struct nft_limit_priv *priv = nft_obj_data(obj);
return nft_limit_init(priv, tb, false);
}
@@ -303,7 +373,7 @@ static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
struct nft_object *obj,
bool reset)
{
- const struct nft_limit *priv = nft_obj_data(obj);
+ const struct nft_limit_priv *priv = nft_obj_data(obj);
return nft_limit_dump(skb, priv, NFT_LIMIT_PKT_BYTES);
}
@@ -311,7 +381,7 @@ static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
static struct nft_object_type nft_limit_obj_type;
static const struct nft_object_ops nft_limit_obj_bytes_ops = {
.type = &nft_limit_obj_type,
- .size = sizeof(struct nft_limit),
+ .size = sizeof(struct nft_limit_priv),
.init = nft_limit_obj_bytes_init,
.eval = nft_limit_obj_bytes_eval,
.dump = nft_limit_obj_bytes_dump,
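
nft_limit is a token bucket kept in nanoseconds: the time elapsed since the last check is added to the token count (capped at tokens_max), and each packet costs nsecs/rate tokens, scaled by the packet length in the bytes variant. A self-contained sketch of the same arithmetic, with illustrative rate and burst values and without the invert handling:

#include <stdint.h>
#include <stdio.h>

struct limit {
	uint64_t tokens;	/* remaining budget, in nanoseconds */
	uint64_t tokens_max;
	uint64_t last;		/* timestamp of the previous check, ns */
	uint64_t rate;		/* allowed units per "nsecs" period */
	uint64_t nsecs;		/* length of the period, in ns */
};

/* Returns 1 if the packet exceeds the limit (kernel: NFT_BREAK path). */
static int limit_check(struct limit *l, uint64_t now, uint64_t cost)
{
	uint64_t tokens = l->tokens + (now - l->last);
	int64_t delta;

	if (tokens > l->tokens_max)
		tokens = l->tokens_max;
	l->last = now;

	delta = (int64_t)(tokens - cost);
	if (delta >= 0) {
		l->tokens = (uint64_t)delta;
		return 0;		/* within the limit */
	}
	l->tokens = tokens;
	return 1;			/* over the limit */
}

int main(void)
{
	/* 10 packets/second with a burst of 5 (illustrative numbers). */
	struct limit l = { .rate = 10, .nsecs = 1000000000ULL };
	uint64_t cost = l.nsecs / l.rate;	/* 100 ms of budget per packet */

	l.tokens = l.tokens_max = cost * 5;	/* burst allowance */
	l.last = 0;

	for (int i = 0; i < 8; i++)		/* 8 packets arriving at t=0 */
		printf("pkt %d: %s\n", i,
		       limit_check(&l, 0, cost) ? "over limit" : "ok");
	return 0;
}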
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index fe91ff5f8fbe..5ab4df56c945 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -750,16 +750,63 @@ static int nft_meta_get_offload(struct nft_offload_ctx *ctx,
return 0;
}
+static bool nft_meta_get_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr)
+{
+ const struct nft_meta *priv = nft_expr_priv(expr);
+ const struct nft_meta *meta;
+
+ if (!track->regs[priv->dreg].selector ||
+ track->regs[priv->dreg].selector->ops != expr->ops) {
+ track->regs[priv->dreg].selector = expr;
+ track->regs[priv->dreg].bitwise = NULL;
+ return false;
+ }
+
+ meta = nft_expr_priv(track->regs[priv->dreg].selector);
+ if (priv->key != meta->key ||
+ priv->dreg != meta->dreg) {
+ track->regs[priv->dreg].selector = expr;
+ track->regs[priv->dreg].bitwise = NULL;
+ return false;
+ }
+
+ if (!track->regs[priv->dreg].bitwise)
+ return true;
+
+ return nft_expr_reduce_bitwise(track, expr);
+}
+
static const struct nft_expr_ops nft_meta_get_ops = {
.type = &nft_meta_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
.eval = nft_meta_get_eval,
.init = nft_meta_get_init,
.dump = nft_meta_get_dump,
+ .reduce = nft_meta_get_reduce,
.validate = nft_meta_get_validate,
.offload = nft_meta_get_offload,
};
+static bool nft_meta_set_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr)
+{
+ int i;
+
+ for (i = 0; i < NFT_REG32_NUM; i++) {
+ if (!track->regs[i].selector)
+ continue;
+
+ if (track->regs[i].selector->ops != &nft_meta_get_ops)
+ continue;
+
+ track->regs[i].selector = NULL;
+ track->regs[i].bitwise = NULL;
+ }
+
+ return false;
+}
+
static const struct nft_expr_ops nft_meta_set_ops = {
.type = &nft_meta_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_meta)),
@@ -767,6 +814,7 @@ static const struct nft_expr_ops nft_meta_set_ops = {
.init = nft_meta_set_init,
.destroy = nft_meta_set_destroy,
.dump = nft_meta_set_dump,
+ .reduce = nft_meta_set_reduce,
.validate = nft_meta_set_validate,
};
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
index 722cac1e90e0..1d378efd8823 100644
--- a/net/netfilter/nft_numgen.c
+++ b/net/netfilter/nft_numgen.c
@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct rnd_state, nft_numgen_prandom_state);
struct nft_ng_inc {
u8 dreg;
u32 modulus;
- atomic_t counter;
+ atomic_t *counter;
u32 offset;
};
@@ -27,9 +27,9 @@ static u32 nft_ng_inc_gen(struct nft_ng_inc *priv)
u32 nval, oval;
do {
- oval = atomic_read(&priv->counter);
+ oval = atomic_read(priv->counter);
nval = (oval + 1 < priv->modulus) ? oval + 1 : 0;
- } while (atomic_cmpxchg(&priv->counter, oval, nval) != oval);
+ } while (atomic_cmpxchg(priv->counter, oval, nval) != oval);
return nval + priv->offset;
}
@@ -55,6 +55,7 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[])
{
struct nft_ng_inc *priv = nft_expr_priv(expr);
+ int err;
if (tb[NFTA_NG_OFFSET])
priv->offset = ntohl(nla_get_be32(tb[NFTA_NG_OFFSET]));
@@ -66,10 +67,22 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
if (priv->offset + priv->modulus - 1 < priv->offset)
return -EOVERFLOW;
- atomic_set(&priv->counter, priv->modulus - 1);
+ priv->counter = kmalloc(sizeof(*priv->counter), GFP_KERNEL);
+ if (!priv->counter)
+ return -ENOMEM;
- return nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
- NULL, NFT_DATA_VALUE, sizeof(u32));
+ atomic_set(priv->counter, priv->modulus - 1);
+
+ err = nft_parse_register_store(ctx, tb[NFTA_NG_DREG], &priv->dreg,
+ NULL, NFT_DATA_VALUE, sizeof(u32));
+ if (err < 0)
+ goto err;
+
+ return 0;
+err:
+ kfree(priv->counter);
+
+ return err;
}
static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
@@ -98,6 +111,14 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
priv->offset);
}
+static void nft_ng_inc_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ const struct nft_ng_inc *priv = nft_expr_priv(expr);
+
+ kfree(priv->counter);
+}
+
struct nft_ng_random {
u8 dreg;
u32 modulus;
@@ -157,6 +178,7 @@ static const struct nft_expr_ops nft_ng_inc_ops = {
.size = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
.eval = nft_ng_inc_eval,
.init = nft_ng_inc_init,
+ .destroy = nft_ng_inc_destroy,
.dump = nft_ng_inc_dump,
};
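
The ng_inc generator emits 0, 1, ..., modulus-1 in a loop, using a compare-and-swap retry so concurrent callers never skip or repeat a slot. A userspace sketch of the same loop with C11 atomics (the kernel uses atomic_cmpxchg(); modulus and offset values below are illustrative):

#include <stdatomic.h>
#include <stdio.h>

/* Returns the next value in [offset, offset + modulus), wrapping around,
 * mirroring the cmpxchg loop in nft_ng_inc_gen() above. */
static unsigned int ng_inc_gen(atomic_uint *counter,
			       unsigned int modulus, unsigned int offset)
{
	unsigned int oval, nval;

	do {
		oval = atomic_load(counter);
		nval = (oval + 1 < modulus) ? oval + 1 : 0;
	} while (!atomic_compare_exchange_weak(counter, &oval, nval));

	return nval + offset;
}

int main(void)
{
	atomic_uint counter;

	/* Kernel init: counter starts at modulus - 1 so the first value is 0. */
	atomic_init(&counter, 3 - 1);

	for (int i = 0; i < 7; i++)
		printf("%u ", ng_inc_gen(&counter, 3, 100));
	printf("\n");	/* expected: 100 101 102 100 101 102 100 */
	return 0;
}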
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index bd689938a2e0..940fed9a760b 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -157,7 +157,8 @@ void nft_payload_eval(const struct nft_expr *expr,
goto err;
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
+ goto err;
}
offset += priv->offset;
@@ -209,6 +210,34 @@ nla_put_failure:
return -1;
}
+static bool nft_payload_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr)
+{
+ const struct nft_payload *priv = nft_expr_priv(expr);
+ const struct nft_payload *payload;
+
+ if (!track->regs[priv->dreg].selector ||
+ track->regs[priv->dreg].selector->ops != expr->ops) {
+ track->regs[priv->dreg].selector = expr;
+ track->regs[priv->dreg].bitwise = NULL;
+ return false;
+ }
+
+ payload = nft_expr_priv(track->regs[priv->dreg].selector);
+ if (priv->base != payload->base ||
+ priv->offset != payload->offset ||
+ priv->len != payload->len) {
+ track->regs[priv->dreg].selector = expr;
+ track->regs[priv->dreg].bitwise = NULL;
+ return false;
+ }
+
+ if (!track->regs[priv->dreg].bitwise)
+ return true;
+
+ return nft_expr_reduce_bitwise(track, expr);
+}
+
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
u32 priv_len, u32 field_len)
{
@@ -512,6 +541,7 @@ static const struct nft_expr_ops nft_payload_ops = {
.eval = nft_payload_eval,
.init = nft_payload_init,
.dump = nft_payload_dump,
+ .reduce = nft_payload_reduce,
.offload = nft_payload_offload,
};
@@ -521,6 +551,7 @@ const struct nft_expr_ops nft_payload_fast_ops = {
.eval = nft_payload_eval,
.init = nft_payload_init,
.dump = nft_payload_dump,
+ .reduce = nft_payload_reduce,
.offload = nft_payload_offload,
};
@@ -546,6 +577,9 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
struct sk_buff *skb,
unsigned int *l4csum_offset)
{
+ if (pkt->fragoff)
+ return -1;
+
switch (pkt->tprot) {
case IPPROTO_TCP:
*l4csum_offset = offsetof(struct tcphdr, check);
@@ -664,7 +698,8 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
goto err;
break;
default:
- BUG();
+ WARN_ON_ONCE(1);
+ goto err;
}
csum_offset = offset + priv->csum_offset;
@@ -766,12 +801,33 @@ nla_put_failure:
return -1;
}
+static bool nft_payload_set_reduce(struct nft_regs_track *track,
+ const struct nft_expr *expr)
+{
+ int i;
+
+ for (i = 0; i < NFT_REG32_NUM; i++) {
+ if (!track->regs[i].selector)
+ continue;
+
+ if (track->regs[i].selector->ops != &nft_payload_ops &&
+ track->regs[i].selector->ops != &nft_payload_fast_ops)
+ continue;
+
+ track->regs[i].selector = NULL;
+ track->regs[i].bitwise = NULL;
+ }
+
+ return false;
+}
+
static const struct nft_expr_ops nft_payload_set_ops = {
.type = &nft_payload_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
.eval = nft_payload_set_eval,
.init = nft_payload_set_init,
.dump = nft_payload_set_dump,
+ .reduce = nft_payload_set_reduce,
};
static const struct nft_expr_ops *
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
index c4d1389f7185..0484aef74273 100644
--- a/net/netfilter/nft_quota.c
+++ b/net/netfilter/nft_quota.c
@@ -15,13 +15,13 @@
struct nft_quota {
atomic64_t quota;
unsigned long flags;
- atomic64_t consumed;
+ atomic64_t *consumed;
};
static inline bool nft_overquota(struct nft_quota *priv,
const struct sk_buff *skb)
{
- return atomic64_add_return(skb->len, &priv->consumed) >=
+ return atomic64_add_return(skb->len, priv->consumed) >=
atomic64_read(&priv->quota);
}
@@ -90,13 +90,23 @@ static int nft_quota_do_init(const struct nlattr * const tb[],
return -EOPNOTSUPP;
}
+ priv->consumed = kmalloc(sizeof(*priv->consumed), GFP_KERNEL);
+ if (!priv->consumed)
+ return -ENOMEM;
+
atomic64_set(&priv->quota, quota);
priv->flags = flags;
- atomic64_set(&priv->consumed, consumed);
+ atomic64_set(priv->consumed, consumed);
return 0;
}
+static void nft_quota_do_destroy(const struct nft_ctx *ctx,
+ struct nft_quota *priv)
+{
+ kfree(priv->consumed);
+}
+
static int nft_quota_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
@@ -128,7 +138,7 @@ static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
* that we see, don't go over the quota boundary in what we send to
* userspace.
*/
- consumed = atomic64_read(&priv->consumed);
+ consumed = atomic64_read(priv->consumed);
quota = atomic64_read(&priv->quota);
if (consumed >= quota) {
consumed_cap = quota;
@@ -145,7 +155,7 @@ static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
goto nla_put_failure;
if (reset) {
- atomic64_sub(consumed, &priv->consumed);
+ atomic64_sub(consumed, priv->consumed);
clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
}
return 0;
@@ -162,11 +172,20 @@ static int nft_quota_obj_dump(struct sk_buff *skb, struct nft_object *obj,
return nft_quota_do_dump(skb, priv, reset);
}
+static void nft_quota_obj_destroy(const struct nft_ctx *ctx,
+ struct nft_object *obj)
+{
+ struct nft_quota *priv = nft_obj_data(obj);
+
+ return nft_quota_do_destroy(ctx, priv);
+}
+
static struct nft_object_type nft_quota_obj_type;
static const struct nft_object_ops nft_quota_obj_ops = {
.type = &nft_quota_obj_type,
.size = sizeof(struct nft_quota),
.init = nft_quota_obj_init,
+ .destroy = nft_quota_obj_destroy,
.eval = nft_quota_obj_eval,
.dump = nft_quota_obj_dump,
.update = nft_quota_obj_update,
@@ -205,12 +224,35 @@ static int nft_quota_dump(struct sk_buff *skb, const struct nft_expr *expr)
return nft_quota_do_dump(skb, priv, false);
}
+static void nft_quota_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ struct nft_quota *priv = nft_expr_priv(expr);
+
+ return nft_quota_do_destroy(ctx, priv);
+}
+
+static int nft_quota_clone(struct nft_expr *dst, const struct nft_expr *src)
+{
+ struct nft_quota *priv_dst = nft_expr_priv(dst);
+
+ priv_dst->consumed = kmalloc(sizeof(*priv_dst->consumed), GFP_ATOMIC);
+ if (!priv_dst->consumed)
+ return -ENOMEM;
+
+ atomic64_set(priv_dst->consumed, 0);
+
+ return 0;
+}
+
static struct nft_expr_type nft_quota_type;
static const struct nft_expr_ops nft_quota_ops = {
.type = &nft_quota_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_quota)),
.eval = nft_quota_eval,
.init = nft_quota_init,
+ .destroy = nft_quota_destroy,
+ .clone = nft_quota_clone,
.dump = nft_quota_dump,
};
diff --git a/net/netfilter/nft_reject_netdev.c b/net/netfilter/nft_reject_netdev.c
index d89f68754f42..61cd8c4ac385 100644
--- a/net/netfilter/nft_reject_netdev.c
+++ b/net/netfilter/nft_reject_netdev.c
@@ -4,6 +4,7 @@
* Copyright (c) 2020 Jose M. Guisado <guigom@riseup.net>
*/
+#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index dce866d93fee..2c8051d8cca6 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -1290,6 +1290,11 @@ static struct nft_pipapo_match *pipapo_clone(struct nft_pipapo_match *old)
if (!new->scratch_aligned)
goto out_scratch;
#endif
+ for_each_possible_cpu(i)
+ *per_cpu_ptr(new->scratch, i) = NULL;
+
+ if (pipapo_realloc_scratch(new, old->bsize_max))
+ goto out_scratch_realloc;
rcu_head_init(&new->rcu);
@@ -1334,6 +1339,9 @@ out_lt:
kvfree(dst->lt);
dst--;
}
+out_scratch_realloc:
+ for_each_possible_cpu(i)
+ kfree(*per_cpu_ptr(new->scratch, i));
#ifdef NFT_PIPAPO_ALIGN
free_percpu(new->scratch_aligned);
#endif
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index 6f4116e72958..52e0d026d30a 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -1048,11 +1048,9 @@ static int nft_pipapo_avx2_lookup_slow(unsigned long *map, unsigned long *fill,
struct nft_pipapo_field *f, int offset,
const u8 *pkt, bool first, bool last)
{
- unsigned long *lt = f->lt, bsize = f->bsize;
+ unsigned long bsize = f->bsize;
int i, ret = -1, b;
- lt += offset * NFT_PIPAPO_LONGS_PER_M256;
-
if (first)
memset(map, 0xff, bsize * sizeof(*map));
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0a913ce07425..267757b0392a 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -24,7 +24,7 @@ static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)
return XT_CONTINUE;
if (ct) {
- atomic_inc(&ct->ct_general.use);
+ refcount_inc(&ct->ct_general.use);
nf_ct_set(skb, ct, IP_CT_NEW);
} else {
nf_ct_set(skb, ct, IP_CT_UNTRACKED);
@@ -201,7 +201,6 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
goto err4;
}
__set_bit(IPS_CONFIRMED_BIT, &ct->status);
- nf_conntrack_get(&ct->ct_general);
out:
info->ct = ct;
return 0;
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 566ba4397ee4..8490e46359ae 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1537,7 +1537,7 @@ int __init netlbl_unlabel_defconf(void)
/* Only the kernel is allowed to call this function and the only time
* it is called is at bootup before the audit subsystem is reporting
* messages so don't worry too much about these values. */
- security_task_getsecid_subj(current, &audit_info.secid);
+ security_current_getsecid_subj(&audit_info.secid);
audit_info.loginuid = GLOBAL_ROOT_UID;
audit_info.sessionid = 0;
diff --git a/net/netlabel/netlabel_user.h b/net/netlabel/netlabel_user.h
index 6190cbf94bf0..d6c5b31eb4eb 100644
--- a/net/netlabel/netlabel_user.h
+++ b/net/netlabel/netlabel_user.h
@@ -32,7 +32,7 @@
*/
static inline void netlbl_netlink_auditinfo(struct netlbl_audit *audit_info)
{
- security_task_getsecid_subj(current, &audit_info->secid);
+ security_current_getsecid_subj(&audit_info->secid);
audit_info->loginuid = audit_get_loginuid(current);
audit_info->sessionid = audit_get_sessionid(current);
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 9eba2e648385..7b344035bfe3 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -20,8 +20,10 @@
#include <linux/module.h>
+#include <linux/bpf.h>
#include <linux/capability.h>
#include <linux/kernel.h>
+#include <linux/filter.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
@@ -707,9 +709,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
if (err < 0)
goto out_module;
- local_bh_disable();
sock_prot_inuse_add(net, &netlink_proto, 1);
- local_bh_enable();
nlk = nlk_sk(sock->sk);
nlk->module = module;
@@ -809,9 +809,7 @@ static int netlink_release(struct socket *sock)
netlink_table_ungrab();
}
- local_bh_disable();
sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1);
- local_bh_enable();
call_rcu(&nlk->rcu, deferred_put_nlk_sk);
return 0;
}
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 775064cdd0ee..fa9dc2ba3941 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -298,7 +298,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct nr_sock *nr = nr_sk(sk);
- unsigned long opt;
+ unsigned int opt;
if (level != SOL_NETROM)
return -ENOPROTOOPT;
@@ -306,18 +306,18 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
if (optlen < sizeof(unsigned int))
return -EINVAL;
- if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
+ if (copy_from_sockptr(&opt, optval, sizeof(opt)))
return -EFAULT;
switch (optname) {
case NETROM_T1:
- if (opt < 1 || opt > ULONG_MAX / HZ)
+ if (opt < 1 || opt > UINT_MAX / HZ)
return -EINVAL;
nr->t1 = opt * HZ;
return 0;
case NETROM_T2:
- if (opt < 1 || opt > ULONG_MAX / HZ)
+ if (opt < 1 || opt > UINT_MAX / HZ)
return -EINVAL;
nr->t2 = opt * HZ;
return 0;
@@ -329,13 +329,13 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
return 0;
case NETROM_T4:
- if (opt < 1 || opt > ULONG_MAX / HZ)
+ if (opt < 1 || opt > UINT_MAX / HZ)
return -EINVAL;
nr->t4 = opt * HZ;
return 0;
case NETROM_IDLE:
- if (opt > ULONG_MAX / (60 * HZ))
+ if (opt > UINT_MAX / (60 * HZ))
return -EINVAL;
nr->idle = opt * 60 * HZ;
return 0;
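
The new bounds checks guard the multiplications that follow them: once opt is validated as no larger than UINT_MAX / HZ, opt * HZ cannot wrap an unsigned int. A small demonstration of the guard, assuming HZ = 1000 for illustration (set_timer() is a hypothetical helper, not a kernel function):

#include <limits.h>
#include <stdio.h>

#define HZ 1000U	/* illustrative; the kernel value is config dependent */

/* Returns 0 and stores opt * HZ if the product fits in unsigned int,
 * otherwise rejects the value (kernel: -EINVAL). */
static int set_timer(unsigned int opt, unsigned int *jiffies_out)
{
	if (opt < 1 || opt > UINT_MAX / HZ)
		return -1;
	*jiffies_out = opt * HZ;	/* cannot overflow after the check */
	return 0;
}

int main(void)
{
	unsigned int t;

	printf("opt=30:      %s\n", set_timer(30, &t) ? "rejected" : "ok");
	printf("opt=5000000: %s\n", set_timer(5000000, &t) ? "rejected" : "ok");
	return 0;
}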
diff --git a/net/nfc/nci/uart.c b/net/nfc/nci/uart.c
index c027c76d493c..cc8fa9e36159 100644
--- a/net/nfc/nci/uart.c
+++ b/net/nfc/nci/uart.c
@@ -317,14 +317,13 @@ static void nci_uart_tty_receive(struct tty_struct *tty, const u8 *data,
* Arguments:
*
* tty pointer to tty instance data
- * file pointer to open file object for device
* cmd IOCTL command code
* arg argument for IOCTL call (cmd dependent)
*
* Return Value: Command dependent
*/
-static int nci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int nci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct nci_uart *nu = (void *)tty->disc_data;
int err = 0;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 1b5eae57bc90..c07afff57dd3 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -25,6 +25,8 @@
#include <net/netfilter/nf_nat.h>
#endif
+#include <net/netfilter/nf_conntrack_act_ct.h>
+
#include "datapath.h"
#include "conntrack.h"
#include "flow.h"
@@ -574,7 +576,7 @@ ovs_ct_expect_find(struct net *net, const struct nf_conntrack_zone *zone,
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
nf_ct_delete(ct, 0, 0);
- nf_conntrack_put(&ct->ct_general);
+ nf_ct_put(ct);
}
}
@@ -723,7 +725,7 @@ static bool skb_nfct_cached(struct net *net,
if (nf_ct_is_confirmed(ct))
nf_ct_delete(ct, 0, 0);
- nf_conntrack_put(&ct->ct_general);
+ nf_ct_put(ct);
nf_ct_set(skb, NULL, 0);
return false;
}
@@ -967,7 +969,8 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
/* Associate skb with specified zone. */
if (tmpl) {
- nf_conntrack_put(skb_nfct(skb));
+ ct = nf_ct_get(skb, &ctinfo);
+ nf_ct_put(ct);
nf_conntrack_get(&tmpl->ct_general);
nf_ct_set(skb, tmpl, IP_CT_NEW);
}
@@ -1045,6 +1048,8 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
*/
nf_ct_set_tcp_be_liberal(ct);
}
+
+ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
}
return 0;
@@ -1245,6 +1250,8 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
&info->labels.mask);
if (err)
return err;
+
+ nf_conn_act_ct_ext_add(ct);
} else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
labels_nonzero(&info->labels.mask)) {
err = ovs_ct_set_labels(ct, key, &info->labels.value,
@@ -1328,7 +1335,12 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
{
- nf_conntrack_put(skb_nfct(skb));
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &ctinfo);
+
+ nf_ct_put(ct);
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
ovs_ct_fill_key(skb, key, false);
@@ -1716,7 +1728,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
goto err_free_ct;
__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
- nf_conntrack_get(&ct_info.ct->ct_general);
return 0;
err_free_ct:
__ovs_ct_free_action(&ct_info);
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 6d262d9aa10e..02096f2ec678 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -859,7 +859,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
struct tc_skb_ext *tc_ext;
#endif
- bool post_ct = false;
+ bool post_ct = false, post_ct_snat = false, post_ct_dnat = false;
int res, err;
u16 zone = 0;
@@ -900,6 +900,8 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
key->recirc_id = tc_ext ? tc_ext->chain : 0;
OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
post_ct = tc_ext ? tc_ext->post_ct : false;
+ post_ct_snat = post_ct ? tc_ext->post_ct_snat : false;
+ post_ct_dnat = post_ct ? tc_ext->post_ct_dnat : false;
zone = post_ct ? tc_ext->zone : 0;
} else {
key->recirc_id = 0;
@@ -911,8 +913,16 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
err = key_extract(skb, key);
if (!err) {
ovs_ct_fill_key(skb, key, post_ct); /* Must be after key_extract(). */
- if (post_ct && !skb_get_nfct(skb))
- key->ct_zone = zone;
+ if (post_ct) {
+ if (!skb_get_nfct(skb)) {
+ key->ct_zone = zone;
+ } else {
+ if (!post_ct_dnat)
+ key->ct_state &= ~OVS_CS_F_DST_NAT;
+ if (!post_ct_snat)
+ key->ct_state &= ~OVS_CS_F_SRC_NAT;
+ }
+ }
}
return err;
}
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 8e1a88f13622..b498dac4e1e0 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -82,7 +82,7 @@ struct vport *ovs_netdev_link(struct vport *vport, const char *name)
err = -ENODEV;
goto error_free_vport;
}
-
+ netdev_tracker_alloc(vport->dev, &vport->dev_tracker, GFP_KERNEL);
if (vport->dev->flags & IFF_LOOPBACK ||
(vport->dev->type != ARPHRD_ETHER &&
vport->dev->type != ARPHRD_NONE) ||
@@ -115,7 +115,7 @@ error_master_upper_dev_unlink:
error_unlock:
rtnl_unlock();
error_put:
- dev_put(vport->dev);
+ dev_put_track(vport->dev, &vport->dev_tracker);
error_free_vport:
ovs_vport_free(vport);
return ERR_PTR(err);
@@ -137,8 +137,7 @@ static void vport_netdev_free(struct rcu_head *rcu)
{
struct vport *vport = container_of(rcu, struct vport, rcu);
- if (vport->dev)
- dev_put(vport->dev);
+ dev_put_track(vport->dev, &vport->dev_tracker);
ovs_vport_free(vport);
}
@@ -174,7 +173,7 @@ void ovs_netdev_tunnel_destroy(struct vport *vport)
*/
if (vport->dev->reg_state == NETREG_REGISTERED)
rtnl_delete_link(vport->dev);
- dev_put(vport->dev);
+ dev_put_track(vport->dev, &vport->dev_tracker);
vport->dev = NULL;
rtnl_unlock();
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 8a930ca6d6b1..9de5030d9801 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -58,6 +58,7 @@ struct vport_portids {
/**
* struct vport - one port within a datapath
* @dev: Pointer to net_device.
+ * @dev_tracker: refcount tracker for @dev reference
* @dp: Datapath to which this port belongs.
* @upcall_portids: RCU protected 'struct vport_portids'.
* @port_no: Index into @dp's @ports array.
@@ -69,6 +70,7 @@ struct vport_portids {
*/
struct vport {
struct net_device *dev;
+ netdevice_tracker dev_tracker;
struct datapath *dp;
struct vport_portids __rcu *upcall_portids;
u16 port_no;
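
The vport-netdev.c and vport.h hunks convert the vport's net_device reference to the tracked dev_hold_track()/dev_put_track() helpers, pairing each reference with a netdevice_tracker. A toy userspace sketch of the idea — not the kernel API — in which every hold records an owner so an unbalanced put can be reported:

#include <stdio.h>

struct ref_tracker { const char *owner; };

struct demo_dev {
	int refcnt;
	struct ref_tracker *last;	/* kernel keeps a list; one slot here */
};

static void hold_tracked(struct demo_dev *d, struct ref_tracker *t,
			 const char *owner)
{
	t->owner = owner;
	d->refcnt++;
	d->last = t;
}

static void put_tracked(struct demo_dev *d, struct ref_tracker *t)
{
	if (!t->owner) {
		fprintf(stderr, "put without a matching hold\n");
		return;
	}
	t->owner = NULL;
	d->refcnt--;
}

int main(void)
{
	struct demo_dev dev = { .refcnt = 1, .last = NULL };
	struct ref_tracker vport_tracker = { NULL };

	hold_tracked(&dev, &vport_tracker, "ovs-vport");
	put_tracked(&dev, &vport_tracker);
	put_tracked(&dev, &vport_tracker);	/* reported as unbalanced */
	printf("refcnt=%d\n", dev.refcnt);
	return 0;
}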
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 76c2dca7f0a5..5bd409ab4cc2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -49,6 +49,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/ethtool.h>
+#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
@@ -3102,16 +3103,14 @@ static int packet_release(struct socket *sock)
sk_del_node_init_rcu(sk);
mutex_unlock(&net->packet.sklist_lock);
- preempt_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
- preempt_enable();
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, false);
packet_cached_dev_reset(po);
if (po->prot_hook.dev) {
- dev_put(po->prot_hook.dev);
+ dev_put_track(po->prot_hook.dev, &po->prot_hook.dev_tracker);
po->prot_hook.dev = NULL;
}
spin_unlock(&po->bind_lock);
@@ -3163,12 +3162,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
__be16 proto)
{
struct packet_sock *po = pkt_sk(sk);
- struct net_device *dev_curr;
- __be16 proto_curr;
- bool need_rehook;
struct net_device *dev = NULL;
- int ret = 0;
bool unlisted = false;
+ bool need_rehook;
+ int ret = 0;
lock_sock(sk);
spin_lock(&po->bind_lock);
@@ -3193,14 +3190,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
}
}
- dev_hold(dev);
-
- proto_curr = po->prot_hook.type;
- dev_curr = po->prot_hook.dev;
-
- need_rehook = proto_curr != proto || dev_curr != dev;
+ need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev;
if (need_rehook) {
+ dev_hold(dev);
if (po->running) {
rcu_read_unlock();
/* prevents packet_notifier() from calling
@@ -3209,7 +3202,6 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
WRITE_ONCE(po->num, 0);
__unregister_prot_hook(sk, true);
rcu_read_lock();
- dev_curr = po->prot_hook.dev;
if (dev)
unlisted = !dev_get_by_index_rcu(sock_net(sk),
dev->ifindex);
@@ -3219,18 +3211,21 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
WRITE_ONCE(po->num, proto);
po->prot_hook.type = proto;
+ dev_put_track(po->prot_hook.dev, &po->prot_hook.dev_tracker);
+
if (unlikely(unlisted)) {
- dev_put(dev);
po->prot_hook.dev = NULL;
WRITE_ONCE(po->ifindex, -1);
packet_cached_dev_reset(po);
} else {
+ dev_hold_track(dev, &po->prot_hook.dev_tracker,
+ GFP_ATOMIC);
po->prot_hook.dev = dev;
WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
packet_cached_dev_assign(po, dev);
}
+ dev_put(dev);
}
- dev_put(dev_curr);
if (proto == 0 || !need_rehook)
goto out_unlock;
@@ -3368,9 +3363,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
sk_add_node_tail_rcu(sk, &net->packet.sklist);
mutex_unlock(&net->packet.sklist_lock);
- preempt_disable();
sock_prot_inuse_add(net, &packet_proto, 1);
- preempt_enable();
return 0;
out2:
@@ -4142,7 +4135,8 @@ static int packet_notifier(struct notifier_block *this,
if (msg == NETDEV_UNREGISTER) {
packet_cached_dev_reset(po);
WRITE_ONCE(po->ifindex, -1);
- dev_put(po->prot_hook.dev);
+ dev_put_track(po->prot_hook.dev,
+ &po->prot_hook.dev_tracker);
po->prot_hook.dev = NULL;
}
spin_unlock(&po->bind_lock);
diff --git a/net/rds/send.c b/net/rds/send.c
index 53444397de66..0c5504068e3c 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -272,7 +272,7 @@ restart:
/* Unfortunately, the way Infiniband deals with
* RDMA to a bad MR key is by moving the entire
- * queue pair to error state. We cold possibly
+ * queue pair to error state. We could possibly
* recover from that, but right now we drop the
* connection.
* Therefore, we never retransmit messages with RDMA ops.
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index ac15a944573f..5b1927d66f0d 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -946,6 +946,18 @@ bool rfkill_blocked(struct rfkill *rfkill)
}
EXPORT_SYMBOL(rfkill_blocked);
+bool rfkill_soft_blocked(struct rfkill *rfkill)
+{
+ unsigned long flags;
+ u32 state;
+
+ spin_lock_irqsave(&rfkill->lock, flags);
+ state = rfkill->state;
+ spin_unlock_irqrestore(&rfkill->lock, flags);
+
+ return !!(state & RFKILL_BLOCK_SW);
+}
+EXPORT_SYMBOL(rfkill_soft_blocked);
struct rfkill * __must_check rfkill_alloc(const char *name,
struct device *parent,
diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index 6af786d66b03..4d67f36dce1b 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -9,6 +9,7 @@
* diagrams as the code is not obvious and probably very easy to break.
*/
#include <linux/errno.h>
+#include <linux/filter.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 3258da3d5bed..32563cef85bf 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -19,8 +19,10 @@
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/act_api.h>
#include <net/netlink.h>
+#include <net/flow_offload.h>
#ifdef CONFIG_INET
DEFINE_STATIC_KEY_FALSE(tcf_frag_xmit_count);
@@ -129,8 +131,244 @@ static void free_tcf(struct tc_action *p)
kfree(p);
}
+static void offload_action_hw_count_set(struct tc_action *act,
+ u32 hw_count)
+{
+ act->in_hw_count = hw_count;
+}
+
+static void offload_action_hw_count_inc(struct tc_action *act,
+ u32 hw_count)
+{
+ act->in_hw_count += hw_count;
+}
+
+static void offload_action_hw_count_dec(struct tc_action *act,
+ u32 hw_count)
+{
+ act->in_hw_count = act->in_hw_count > hw_count ?
+ act->in_hw_count - hw_count : 0;
+}
+
+static unsigned int tcf_offload_act_num_actions_single(struct tc_action *act)
+{
+ if (is_tcf_pedit(act))
+ return tcf_pedit_nkeys(act);
+ else
+ return 1;
+}
+
+static bool tc_act_skip_hw(u32 flags)
+{
+ return (flags & TCA_ACT_FLAGS_SKIP_HW) ? true : false;
+}
+
+static bool tc_act_skip_sw(u32 flags)
+{
+ return (flags & TCA_ACT_FLAGS_SKIP_SW) ? true : false;
+}
+
+static bool tc_act_in_hw(struct tc_action *act)
+{
+ return !!act->in_hw_count;
+}
+
+/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
+static bool tc_act_flags_valid(u32 flags)
+{
+ flags &= TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW;
+
+ return flags ^ (TCA_ACT_FLAGS_SKIP_HW | TCA_ACT_FLAGS_SKIP_SW);
+}
+
+static int offload_action_init(struct flow_offload_action *fl_action,
+ struct tc_action *act,
+ enum offload_act_command cmd,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ fl_action->extack = extack;
+ fl_action->command = cmd;
+ fl_action->index = act->tcfa_index;
+
+ if (act->ops->offload_act_setup) {
+ spin_lock_bh(&act->tcfa_lock);
+ err = act->ops->offload_act_setup(act, fl_action, NULL,
+ false);
+ spin_unlock_bh(&act->tcfa_lock);
+ return err;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int tcf_action_offload_cmd_ex(struct flow_offload_action *fl_act,
+ u32 *hw_count)
+{
+ int err;
+
+ err = flow_indr_dev_setup_offload(NULL, NULL, TC_SETUP_ACT,
+ fl_act, NULL, NULL);
+ if (err < 0)
+ return err;
+
+ if (hw_count)
+ *hw_count = err;
+
+ return 0;
+}
+
+static int tcf_action_offload_cmd_cb_ex(struct flow_offload_action *fl_act,
+ u32 *hw_count,
+ flow_indr_block_bind_cb_t *cb,
+ void *cb_priv)
+{
+ int err;
+
+ err = cb(NULL, NULL, cb_priv, TC_SETUP_ACT, NULL, fl_act, NULL);
+ if (err < 0)
+ return err;
+
+ if (hw_count)
+ *hw_count = 1;
+
+ return 0;
+}
+
+static int tcf_action_offload_cmd(struct flow_offload_action *fl_act,
+ u32 *hw_count,
+ flow_indr_block_bind_cb_t *cb,
+ void *cb_priv)
+{
+ return cb ? tcf_action_offload_cmd_cb_ex(fl_act, hw_count,
+ cb, cb_priv) :
+ tcf_action_offload_cmd_ex(fl_act, hw_count);
+}
+
+static int tcf_action_offload_add_ex(struct tc_action *action,
+ struct netlink_ext_ack *extack,
+ flow_indr_block_bind_cb_t *cb,
+ void *cb_priv)
+{
+ bool skip_sw = tc_act_skip_sw(action->tcfa_flags);
+ struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
+ [0] = action,
+ };
+ struct flow_offload_action *fl_action;
+ u32 in_hw_count = 0;
+ int num, err = 0;
+
+ if (tc_act_skip_hw(action->tcfa_flags))
+ return 0;
+
+ num = tcf_offload_act_num_actions_single(action);
+ fl_action = offload_action_alloc(num);
+ if (!fl_action)
+ return -ENOMEM;
+
+ err = offload_action_init(fl_action, action, FLOW_ACT_REPLACE, extack);
+ if (err)
+ goto fl_err;
+
+ err = tc_setup_action(&fl_action->action, actions);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to setup tc actions for offload\n");
+ goto fl_err;
+ }
+
+ err = tcf_action_offload_cmd(fl_action, &in_hw_count, cb, cb_priv);
+ if (!err)
+ cb ? offload_action_hw_count_inc(action, in_hw_count) :
+ offload_action_hw_count_set(action, in_hw_count);
+
+ if (skip_sw && !tc_act_in_hw(action))
+ err = -EINVAL;
+
+ tc_cleanup_offload_action(&fl_action->action);
+
+fl_err:
+ kfree(fl_action);
+
+ return err;
+}
+
+/* offload the tc action after it is inserted */
+static int tcf_action_offload_add(struct tc_action *action,
+ struct netlink_ext_ack *extack)
+{
+ return tcf_action_offload_add_ex(action, extack, NULL, NULL);
+}
+
+int tcf_action_update_hw_stats(struct tc_action *action)
+{
+ struct flow_offload_action fl_act = {};
+ int err;
+
+ if (!tc_act_in_hw(action))
+ return -EOPNOTSUPP;
+
+ err = offload_action_init(&fl_act, action, FLOW_ACT_STATS, NULL);
+ if (err)
+ return err;
+
+ err = tcf_action_offload_cmd(&fl_act, NULL, NULL, NULL);
+ if (!err) {
+ preempt_disable();
+ tcf_action_stats_update(action, fl_act.stats.bytes,
+ fl_act.stats.pkts,
+ fl_act.stats.drops,
+ fl_act.stats.lastused,
+ true);
+ preempt_enable();
+ action->used_hw_stats = fl_act.stats.used_hw_stats;
+ action->used_hw_stats_valid = true;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(tcf_action_update_hw_stats);
+
+static int tcf_action_offload_del_ex(struct tc_action *action,
+ flow_indr_block_bind_cb_t *cb,
+ void *cb_priv)
+{
+ struct flow_offload_action fl_act = {};
+ u32 in_hw_count = 0;
+ int err = 0;
+
+ if (!tc_act_in_hw(action))
+ return 0;
+
+ err = offload_action_init(&fl_act, action, FLOW_ACT_DESTROY, NULL);
+ if (err)
+ return err;
+
+ err = tcf_action_offload_cmd(&fl_act, &in_hw_count, cb, cb_priv);
+ if (err < 0)
+ return err;
+
+ if (!cb && action->in_hw_count != in_hw_count)
+ return -EINVAL;
+
+ /* do not need to update hw state when deleting action */
+ if (cb && in_hw_count)
+ offload_action_hw_count_dec(action, in_hw_count);
+
+ return 0;
+}
+
+static int tcf_action_offload_del(struct tc_action *action)
+{
+ return tcf_action_offload_del_ex(action, NULL, NULL);
+}
+
static void tcf_action_cleanup(struct tc_action *p)
{
+ tcf_action_offload_del(p);
if (p->ops->cleanup)
p->ops->cleanup(p);
@@ -497,7 +735,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
p->tcfa_tm.install = jiffies;
p->tcfa_tm.lastuse = jiffies;
p->tcfa_tm.firstuse = 0;
- p->tcfa_flags = flags & TCA_ACT_FLAGS_USER_MASK;
+ p->tcfa_flags = flags;
if (est) {
err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
&p->tcfa_rate_est,
@@ -622,6 +860,59 @@ EXPORT_SYMBOL(tcf_idrinfo_destroy);
static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
+/* Since the act ops id is stored in the pernet subsystem list,
+ * there is no way to walk only the tc action pernet subsystems.
+ * Keep the tc action pernet ops ids in a separate list so that
+ * reoffload can walk through them.
+ */
+static LIST_HEAD(act_pernet_id_list);
+static DEFINE_MUTEX(act_id_mutex);
+struct tc_act_pernet_id {
+ struct list_head list;
+ unsigned int id;
+};
+
+static int tcf_pernet_add_id_list(unsigned int id)
+{
+ struct tc_act_pernet_id *id_ptr;
+ int ret = 0;
+
+ mutex_lock(&act_id_mutex);
+ list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
+ if (id_ptr->id == id) {
+ ret = -EEXIST;
+ goto err_out;
+ }
+ }
+
+ id_ptr = kzalloc(sizeof(*id_ptr), GFP_KERNEL);
+ if (!id_ptr) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+ id_ptr->id = id;
+
+ list_add_tail(&id_ptr->list, &act_pernet_id_list);
+
+err_out:
+ mutex_unlock(&act_id_mutex);
+ return ret;
+}
+
+static void tcf_pernet_del_id_list(unsigned int id)
+{
+ struct tc_act_pernet_id *id_ptr;
+
+ mutex_lock(&act_id_mutex);
+ list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
+ if (id_ptr->id == id) {
+ list_del(&id_ptr->list);
+ kfree(id_ptr);
+ break;
+ }
+ }
+ mutex_unlock(&act_id_mutex);
+}
int tcf_register_action(struct tc_action_ops *act,
struct pernet_operations *ops)
@@ -640,18 +931,31 @@ int tcf_register_action(struct tc_action_ops *act,
if (ret)
return ret;
+ if (ops->id) {
+ ret = tcf_pernet_add_id_list(*ops->id);
+ if (ret)
+ goto err_id;
+ }
+
write_lock(&act_mod_lock);
list_for_each_entry(a, &act_base, head) {
if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
- write_unlock(&act_mod_lock);
- unregister_pernet_subsys(ops);
- return -EEXIST;
+ ret = -EEXIST;
+ goto err_out;
}
}
list_add_tail(&act->head, &act_base);
write_unlock(&act_mod_lock);
return 0;
+
+err_out:
+ write_unlock(&act_mod_lock);
+ if (ops->id)
+ tcf_pernet_del_id_list(*ops->id);
+err_id:
+ unregister_pernet_subsys(ops);
+ return ret;
}
EXPORT_SYMBOL(tcf_register_action);
@@ -670,8 +974,11 @@ int tcf_unregister_action(struct tc_action_ops *act,
}
}
write_unlock(&act_mod_lock);
- if (!err)
+ if (!err) {
unregister_pernet_subsys(ops);
+ if (ops->id)
+ tcf_pernet_del_id_list(*ops->id);
+ }
return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
@@ -735,6 +1042,9 @@ restart_act_graph:
jmp_prgcnt -= 1;
continue;
}
+
+ if (tc_act_skip_sw(a->tcfa_flags))
+ continue;
repeat:
ret = a->ops->act(skb, a, res);
if (ret == TC_ACT_REPEAT)
@@ -821,6 +1131,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
int err = -EINVAL;
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
+ u32 flags;
if (tcf_action_dump_terse(skb, a, false))
goto nla_put_failure;
@@ -835,9 +1146,13 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
goto nla_put_failure;
- if (a->tcfa_flags &&
+ flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
+ if (flags &&
nla_put_bitfield32(skb, TCA_ACT_FLAGS,
- a->tcfa_flags, a->tcfa_flags))
+ flags, flags))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
@@ -919,7 +1234,9 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
[TCA_ACT_COOKIE] = { .type = NLA_BINARY,
.len = TC_COOKIE_MAX_SIZE },
[TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
- [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
+ [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS |
+ TCA_ACT_FLAGS_SKIP_HW |
+ TCA_ACT_FLAGS_SKIP_SW),
[TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
};
@@ -1032,8 +1349,13 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
}
}
hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
- if (tb[TCA_ACT_FLAGS])
+ if (tb[TCA_ACT_FLAGS]) {
userflags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
+ if (!tc_act_flags_valid(userflags.value)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ }
err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, tp,
userflags.value | flags, extack);
@@ -1061,11 +1383,17 @@ err_out:
return ERR_PTR(err);
}
+static bool tc_act_bind(u32 flags)
+{
+ return !!(flags & TCA_ACT_FLAGS_BIND);
+}
+
/* Returns numbers of initialized actions or negative error. */
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct nlattr *est, struct tc_action *actions[],
- int init_res[], size_t *attr_size, u32 flags,
+ int init_res[], size_t *attr_size,
+ u32 flags, u32 fl_flags,
struct netlink_ext_ack *extack)
{
struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
@@ -1103,6 +1431,22 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
sz += tcf_action_fill_size(act);
/* Start from index 0 */
actions[i - 1] = act;
+ if (tc_act_bind(flags)) {
+ bool skip_sw = tc_skip_sw(fl_flags);
+ bool skip_hw = tc_skip_hw(fl_flags);
+
+ if (tc_act_bind(act->tcfa_flags))
+ continue;
+ if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
+ skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
+ err = -EINVAL;
+ goto err;
+ }
+ } else {
+ err = tcf_action_offload_add(act, extack);
+ if (tc_act_skip_sw(act->tcfa_flags) && err)
+ goto err;
+ }
}
/* We have to commit them all together, because if any error happened in
@@ -1154,6 +1498,9 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
if (p == NULL)
goto errout;
+ /* update hw stats for this action */
+ tcf_action_update_hw_stats(p);
+
/* compat_mode being true specifies a call that is supposed
* to add additional backward compatibility statistic TLVs.
*/
@@ -1396,6 +1743,96 @@ static int tcf_action_delete(struct net *net, struct tc_action *actions[])
}
static int
+tcf_reoffload_del_notify(struct net *net, struct tc_action *action)
+{
+ size_t attr_size = tcf_action_fill_size(action);
+ struct tc_action *actions[TCA_ACT_MAX_PRIO] = {
+ [0] = action,
+ };
+ const struct tc_action_ops *ops = action->ops;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
+ GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ if (tca_get_fill(skb, actions, 0, 0, 0, RTM_DELACTION, 0, 1) <= 0) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = tcf_idr_release_unsafe(action);
+ if (ret == ACT_P_DELETED) {
+ module_put(ops->owner);
+ ret = rtnetlink_send(skb, net, 0, RTNLGRP_TC, 0);
+ } else {
+ kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int tcf_action_reoffload_cb(flow_indr_block_bind_cb_t *cb,
+ void *cb_priv, bool add)
+{
+ struct tc_act_pernet_id *id_ptr;
+ struct tcf_idrinfo *idrinfo;
+ struct tc_action_net *tn;
+ struct tc_action *p;
+ unsigned int act_id;
+ unsigned long tmp;
+ unsigned long id;
+ struct idr *idr;
+ struct net *net;
+ int ret;
+
+ if (!cb)
+ return -EINVAL;
+
+ down_read(&net_rwsem);
+ mutex_lock(&act_id_mutex);
+
+ for_each_net(net) {
+ list_for_each_entry(id_ptr, &act_pernet_id_list, list) {
+ act_id = id_ptr->id;
+ tn = net_generic(net, act_id);
+ if (!tn)
+ continue;
+ idrinfo = tn->idrinfo;
+ if (!idrinfo)
+ continue;
+
+ mutex_lock(&idrinfo->lock);
+ idr = &idrinfo->action_idr;
+ idr_for_each_entry_ul(idr, p, tmp, id) {
+ if (IS_ERR(p) || tc_act_bind(p->tcfa_flags))
+ continue;
+ if (add) {
+ tcf_action_offload_add_ex(p, NULL, cb,
+ cb_priv);
+ continue;
+ }
+
+ /* cb unregister to update hw count */
+ ret = tcf_action_offload_del_ex(p, cb, cb_priv);
+ if (ret < 0)
+ continue;
+ if (tc_act_skip_sw(p->tcfa_flags) &&
+ !tc_act_in_hw(p))
+ tcf_reoffload_del_notify(net, p);
+ }
+ mutex_unlock(&idrinfo->lock);
+ }
+ }
+ mutex_unlock(&act_id_mutex);
+ up_read(&net_rwsem);
+
+ return 0;
+}
+
+static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
{
@@ -1508,7 +1945,7 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
for (loop = 0; loop < 10; loop++) {
ret = tcf_action_init(net, NULL, nla, NULL, actions, init_res,
- &attr_size, flags, extack);
+ &attr_size, flags, 0, extack);
if (ret != -EAGAIN)
break;
}
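
Among the act_api.c changes above are a validity check that rejects actions carrying both TCA_ACT_FLAGS_SKIP_HW and TCA_ACT_FLAGS_SKIP_SW, and an in_hw_count decrement that never wraps below zero. A standalone sketch of both helpers, with stand-in flag values:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_SKIP_HW (1U << 0)	/* illustrative, not TCA_ACT_FLAGS_* */
#define FLAG_SKIP_SW (1U << 1)

static bool act_flags_valid(unsigned int flags)
{
	flags &= FLAG_SKIP_HW | FLAG_SKIP_SW;
	/* invalid only when both bits are set; neither set is fine */
	return flags != (FLAG_SKIP_HW | FLAG_SKIP_SW);
}

static unsigned int hw_count_dec(unsigned int in_hw_count, unsigned int dec)
{
	/* never wrap below zero even if a driver reports too many */
	return in_hw_count > dec ? in_hw_count - dec : 0;
}

int main(void)
{
	printf("skip_hw only : %d\n", act_flags_valid(FLAG_SKIP_HW));	/* 1 */
	printf("both flags   : %d\n", act_flags_valid(FLAG_SKIP_HW |
						      FLAG_SKIP_SW));	/* 0 */
	printf("3 - 5 clamps : %u\n", hw_count_dec(3, 5));		/* 0 */
	return 0;
}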
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index f2bf896331a5..a77d8908e737 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -305,7 +305,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
ret = tcf_idr_check_alloc(tn, &index, act, bind);
if (!ret) {
ret = tcf_idr_create(tn, index, est, act,
- &act_bpf_ops, bind, true, 0);
+ &act_bpf_ops, bind, true, flags);
if (ret < 0) {
tcf_idr_cleanup(tn, index);
return ret;
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 94e78ac7a748..09e2aafc8943 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -124,7 +124,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
ret = tcf_idr_check_alloc(tn, &index, a, bind);
if (!ret) {
ret = tcf_idr_create(tn, index, est, a,
- &act_connmark_ops, bind, false, 0);
+ &act_connmark_ops, bind, false, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index a15ec95e69c3..e0f515b774ca 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -695,6 +695,24 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
return nla_total_size(sizeof(struct tc_csum));
}
+static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ entry->id = FLOW_ACTION_CSUM;
+ entry->csum_flags = tcf_csum_update_flags(act);
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ fl_action->id = FLOW_ACTION_CSUM;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_csum_ops = {
.kind = "csum",
.id = TCA_ID_CSUM,
@@ -706,6 +724,7 @@ static struct tc_action_ops act_csum_ops = {
.walk = tcf_csum_walker,
.lookup = tcf_csum_search,
.get_fill_size = tcf_csum_get_fill_size,
+ .offload_act_setup = tcf_csum_offload_act_setup,
.size = sizeof(struct tcf_csum),
};
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index ab3591408419..f99247fc6468 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -32,6 +32,7 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#include <net/netfilter/nf_conntrack_act_ct.h>
#include <uapi/linux/netfilter/nf_nat.h>
static struct workqueue_struct *act_ct_wq;
@@ -56,6 +57,12 @@ static const struct rhashtable_params zones_params = {
.automatic_shrinking = true,
};
+static struct nf_ct_ext_type act_ct_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_act_ct_ext),
+ .align = __alignof__(struct nf_conn_act_ct_ext),
+ .id = NF_CT_EXT_ACT_CT,
+};
+
static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
@@ -358,6 +365,7 @@ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
struct nf_conn *ct,
bool tcp)
{
+ struct nf_conn_act_ct_ext *act_ct_ext;
struct flow_offload *entry;
int err;
@@ -375,6 +383,14 @@ static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
}
+ act_ct_ext = nf_conn_act_ct_ext_find(ct);
+ if (act_ct_ext) {
+ entry->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx =
+ act_ct_ext->ifindex[IP_CT_DIR_ORIGINAL];
+ entry->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.iifidx =
+ act_ct_ext->ifindex[IP_CT_DIR_REPLY];
+ }
+
err = flow_offload_add(&ct_ft->nf_ft, entry);
if (err)
goto err_add;
@@ -393,7 +409,8 @@ static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
{
bool tcp = false;
- if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
+ if ((ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY) ||
+ !test_bit(IPS_ASSURED_BIT, &ct->status))
return;
switch (nf_ct_protonum(ct)) {
@@ -597,7 +614,7 @@ static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
if (nf_ct_is_confirmed(ct))
nf_ct_kill(ct);
- nf_conntrack_put(&ct->ct_general);
+ nf_ct_put(ct);
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
return false;
@@ -762,7 +779,7 @@ static void tcf_ct_params_free(struct rcu_head *head)
tcf_ct_flow_table_put(params);
if (params->tmpl)
- nf_conntrack_put(&params->tmpl->ct_general);
+ nf_ct_put(params->tmpl);
kfree(params);
}
@@ -839,6 +856,12 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
}
err = nf_nat_packet(ct, ctinfo, hooknum, skb);
+ if (err == NF_ACCEPT) {
+ if (maniptype == NF_NAT_MANIP_SRC)
+ tc_skb_cb(skb)->post_ct_snat = 1;
+ if (maniptype == NF_NAT_MANIP_DST)
+ tc_skb_cb(skb)->post_ct_dnat = 1;
+ }
out:
return err;
}
@@ -966,7 +989,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
tc_skb_cb(skb)->post_ct = false;
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
- nf_conntrack_put(&ct->ct_general);
+ nf_ct_put(ct);
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
}
@@ -1026,6 +1049,7 @@ do_nat:
if (!ct)
goto out_push;
nf_ct_deliver_cached_events(ct);
+ nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
if (err != NF_ACCEPT)
@@ -1035,6 +1059,9 @@ do_nat:
tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
+ if (!nf_ct_is_confirmed(ct))
+ nf_conn_act_ct_ext_add(ct);
+
/* This will take care of sending queued events
* even if the connection is already confirmed.
*/
@@ -1228,7 +1255,6 @@ static int tcf_ct_fill_params(struct net *net,
return -ENOMEM;
}
__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
- nf_conntrack_get(&tmpl->ct_general);
p->tmpl = tmpl;
return 0;
@@ -1493,6 +1519,26 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}
+static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ entry->id = FLOW_ACTION_CT;
+ entry->ct.action = tcf_ct_action(act);
+ entry->ct.zone = tcf_ct_zone(act);
+ entry->ct.flow_table = tcf_ct_ft(act);
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ fl_action->id = FLOW_ACTION_CT;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_ct_ops = {
.kind = "ct",
.id = TCA_ID_CT,
@@ -1504,6 +1550,7 @@ static struct tc_action_ops act_ct_ops = {
.walk = tcf_ct_walker,
.lookup = tcf_ct_search,
.stats_update = tcf_stats_update,
+ .offload_act_setup = tcf_ct_offload_act_setup,
.size = sizeof(struct tcf_ct),
};
@@ -1561,10 +1608,16 @@ static int __init ct_init_module(void)
if (err)
goto err_register;
+ err = nf_ct_extend_register(&act_ct_extend);
+ if (err)
+ goto err_register_extend;
+
static_branch_inc(&tcf_frag_xmit_count);
return 0;
+err_register_extend:
+ tcf_unregister_action(&act_ct_ops, &ct_net_ops);
err_register:
tcf_ct_flow_tables_uninit();
err_tbl_init:
@@ -1575,6 +1628,7 @@ err_tbl_init:
static void __exit ct_cleanup_module(void)
{
static_branch_dec(&tcf_frag_xmit_count);
+ nf_ct_extend_unregister(&act_ct_extend);
tcf_unregister_action(&act_ct_ops, &ct_net_ops);
tcf_ct_flow_tables_uninit();
destroy_workqueue(act_ct_wq);
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 549374a2d008..0281e45987a4 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -212,7 +212,7 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
ret = tcf_idr_create(tn, index, est, a,
- &act_ctinfo_ops, bind, false, 0);
+ &act_ctinfo_ops, bind, false, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index d8dce173df37..bde6a6c01e64 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -252,6 +252,43 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
return sz;
}
+static int tcf_gact_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ if (is_tcf_gact_ok(act)) {
+ entry->id = FLOW_ACTION_ACCEPT;
+ } else if (is_tcf_gact_shot(act)) {
+ entry->id = FLOW_ACTION_DROP;
+ } else if (is_tcf_gact_trap(act)) {
+ entry->id = FLOW_ACTION_TRAP;
+ } else if (is_tcf_gact_goto_chain(act)) {
+ entry->id = FLOW_ACTION_GOTO;
+ entry->chain_index = tcf_gact_goto_chain_index(act);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ if (is_tcf_gact_ok(act))
+ fl_action->id = FLOW_ACTION_ACCEPT;
+ else if (is_tcf_gact_shot(act))
+ fl_action->id = FLOW_ACTION_DROP;
+ else if (is_tcf_gact_trap(act))
+ fl_action->id = FLOW_ACTION_TRAP;
+ else if (is_tcf_gact_goto_chain(act))
+ fl_action->id = FLOW_ACTION_GOTO;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_gact_ops = {
.kind = "gact",
.id = TCA_ID_GACT,
@@ -263,6 +300,7 @@ static struct tc_action_ops act_gact_ops = {
.walk = tcf_gact_walker,
.lookup = tcf_gact_search,
.get_fill_size = tcf_gact_get_fill_size,
+ .offload_act_setup = tcf_gact_offload_act_setup,
.size = sizeof(struct tcf_gact),
};
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index 7df72a4197a3..d56e73843a4b 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -357,7 +357,7 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
if (!err) {
ret = tcf_idr_create(tn, index, est, a,
- &act_gate_ops, bind, false, 0);
+ &act_gate_ops, bind, false, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -597,6 +597,54 @@ static size_t tcf_gate_get_fill_size(const struct tc_action *act)
return nla_total_size(sizeof(struct tc_gate));
}
+static void tcf_gate_entry_destructor(void *priv)
+{
+ struct action_gate_entry *oe = priv;
+
+ kfree(oe);
+}
+
+static int tcf_gate_get_entries(struct flow_action_entry *entry,
+ const struct tc_action *act)
+{
+ entry->gate.entries = tcf_gate_get_list(act);
+
+ if (!entry->gate.entries)
+ return -EINVAL;
+
+ entry->destructor = tcf_gate_entry_destructor;
+ entry->destructor_priv = entry->gate.entries;
+
+ return 0;
+}
+
+static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ int err;
+
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ entry->id = FLOW_ACTION_GATE;
+ entry->gate.prio = tcf_gate_prio(act);
+ entry->gate.basetime = tcf_gate_basetime(act);
+ entry->gate.cycletime = tcf_gate_cycletime(act);
+ entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
+ entry->gate.num_entries = tcf_gate_num_entries(act);
+ err = tcf_gate_get_entries(entry, act);
+ if (err)
+ return err;
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ fl_action->id = FLOW_ACTION_GATE;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_gate_ops = {
.kind = "gate",
.id = TCA_ID_GATE,
@@ -609,6 +657,7 @@ static struct tc_action_ops act_gate_ops = {
.stats_update = tcf_gate_stats_update,
.get_fill_size = tcf_gate_get_fill_size,
.lookup = tcf_gate_search,
+ .offload_act_setup = tcf_gate_offload_act_setup,
.size = sizeof(struct tcf_gate),
};
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index b757f90a2d58..41ba55e60b1b 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -553,7 +553,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
- bind, true, 0);
+ bind, true, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
kfree(p);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 265b1443e252..2f3d507c24a1 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -145,7 +145,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a, ops, bind,
- false, 0);
+ false, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index efc963ab995a..39acd1d18609 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -79,7 +79,7 @@ static void tcf_mirred_release(struct tc_action *a)
/* last reference to action, no need to lock */
dev = rcu_dereference_protected(m->tcfm_dev, 1);
- dev_put(dev);
+ dev_put_track(dev, &m->tcfm_dev_tracker);
}
static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -101,7 +101,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
bool mac_header_xmit = false;
struct tc_mirred *parm;
struct tcf_mirred *m;
- struct net_device *dev;
bool exists = false;
int ret, err;
u32 index;
@@ -171,16 +170,19 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
spin_lock_bh(&m->tcf_lock);
if (parm->ifindex) {
- dev = dev_get_by_index(net, parm->ifindex);
- if (!dev) {
+ struct net_device *odev, *ndev;
+
+ ndev = dev_get_by_index(net, parm->ifindex);
+ if (!ndev) {
spin_unlock_bh(&m->tcf_lock);
err = -ENODEV;
goto put_chain;
}
- mac_header_xmit = dev_is_mac_header_xmit(dev);
- dev = rcu_replace_pointer(m->tcfm_dev, dev,
+ mac_header_xmit = dev_is_mac_header_xmit(ndev);
+ odev = rcu_replace_pointer(m->tcfm_dev, ndev,
lockdep_is_held(&m->tcf_lock));
- dev_put(dev);
+ dev_put_track(odev, &m->tcfm_dev_tracker);
+ netdev_tracker_alloc(ndev, &m->tcfm_dev_tracker, GFP_ATOMIC);
m->tcfm_mac_header_xmit = mac_header_xmit;
}
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
@@ -400,7 +402,7 @@ static int mirred_device_event(struct notifier_block *unused,
list_for_each_entry(m, &mirred_list, tcfm_list) {
spin_lock_bh(&m->tcf_lock);
if (tcf_mirred_dev_dereference(m) == dev) {
- dev_put(dev);
+ dev_put_track(dev, &m->tcfm_dev_tracker);
/* Note : no rcu grace period necessary, as
* net_device are already rcu protected.
*/
@@ -448,6 +450,55 @@ static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
return nla_total_size(sizeof(struct tc_mirred));
}
+static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
+ const struct tc_action *act)
+{
+ entry->dev = act->ops->get_dev(act, &entry->destructor);
+ if (!entry->dev)
+ return;
+ entry->destructor_priv = entry->dev;
+}
+
+static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ if (is_tcf_mirred_egress_redirect(act)) {
+ entry->id = FLOW_ACTION_REDIRECT;
+ tcf_offload_mirred_get_dev(entry, act);
+ } else if (is_tcf_mirred_egress_mirror(act)) {
+ entry->id = FLOW_ACTION_MIRRED;
+ tcf_offload_mirred_get_dev(entry, act);
+ } else if (is_tcf_mirred_ingress_redirect(act)) {
+ entry->id = FLOW_ACTION_REDIRECT_INGRESS;
+ tcf_offload_mirred_get_dev(entry, act);
+ } else if (is_tcf_mirred_ingress_mirror(act)) {
+ entry->id = FLOW_ACTION_MIRRED_INGRESS;
+ tcf_offload_mirred_get_dev(entry, act);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ if (is_tcf_mirred_egress_redirect(act))
+ fl_action->id = FLOW_ACTION_REDIRECT;
+ else if (is_tcf_mirred_egress_mirror(act))
+ fl_action->id = FLOW_ACTION_MIRRED;
+ else if (is_tcf_mirred_ingress_redirect(act))
+ fl_action->id = FLOW_ACTION_REDIRECT_INGRESS;
+ else if (is_tcf_mirred_ingress_mirror(act))
+ fl_action->id = FLOW_ACTION_MIRRED_INGRESS;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
.id = TCA_ID_MIRRED,
@@ -460,6 +511,7 @@ static struct tc_action_ops act_mirred_ops = {
.walk = tcf_mirred_walker,
.lookup = tcf_mirred_search,
.get_fill_size = tcf_mirred_get_fill_size,
+ .offload_act_setup = tcf_mirred_offload_act_setup,
.size = sizeof(struct tcf_mirred),
.get_dev = tcf_mirred_get_dev,
};
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 8faa4c58305e..b9ff3459fdab 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -248,7 +248,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a,
- &act_mpls_ops, bind, true, 0);
+ &act_mpls_ops, bind, true, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -384,6 +384,57 @@ static int tcf_mpls_search(struct net *net, struct tc_action **a, u32 index)
return tcf_idr_search(tn, a, index);
}
+static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ switch (tcf_mpls_action(act)) {
+ case TCA_MPLS_ACT_PUSH:
+ entry->id = FLOW_ACTION_MPLS_PUSH;
+ entry->mpls_push.proto = tcf_mpls_proto(act);
+ entry->mpls_push.label = tcf_mpls_label(act);
+ entry->mpls_push.tc = tcf_mpls_tc(act);
+ entry->mpls_push.bos = tcf_mpls_bos(act);
+ entry->mpls_push.ttl = tcf_mpls_ttl(act);
+ break;
+ case TCA_MPLS_ACT_POP:
+ entry->id = FLOW_ACTION_MPLS_POP;
+ entry->mpls_pop.proto = tcf_mpls_proto(act);
+ break;
+ case TCA_MPLS_ACT_MODIFY:
+ entry->id = FLOW_ACTION_MPLS_MANGLE;
+ entry->mpls_mangle.label = tcf_mpls_label(act);
+ entry->mpls_mangle.tc = tcf_mpls_tc(act);
+ entry->mpls_mangle.bos = tcf_mpls_bos(act);
+ entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ switch (tcf_mpls_action(act)) {
+ case TCA_MPLS_ACT_PUSH:
+ fl_action->id = FLOW_ACTION_MPLS_PUSH;
+ break;
+ case TCA_MPLS_ACT_POP:
+ fl_action->id = FLOW_ACTION_MPLS_POP;
+ break;
+ case TCA_MPLS_ACT_MODIFY:
+ fl_action->id = FLOW_ACTION_MPLS_MANGLE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_mpls_ops = {
.kind = "mpls",
.id = TCA_ID_MPLS,
@@ -394,6 +445,7 @@ static struct tc_action_ops act_mpls_ops = {
.cleanup = tcf_mpls_cleanup,
.walk = tcf_mpls_walker,
.lookup = tcf_mpls_search,
+ .offload_act_setup = tcf_mpls_offload_act_setup,
.size = sizeof(struct tcf_mpls),
};
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 7dd6b586ba7f..2a39b3729e84 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -61,7 +61,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
ret = tcf_idr_create(tn, index, est, a,
- &act_nat_ops, bind, false, 0);
+ &act_nat_ops, bind, false, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index c6c862c459cc..31fcd279c177 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -189,7 +189,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
ret = tcf_idr_create(tn, index, est, a,
- &act_pedit_ops, bind, false, 0);
+ &act_pedit_ops, bind, false, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
goto out_free;
@@ -487,6 +487,39 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index)
return tcf_idr_search(tn, a, index);
}
+static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+ int k;
+
+ for (k = 0; k < tcf_pedit_nkeys(act); k++) {
+ switch (tcf_pedit_cmd(act, k)) {
+ case TCA_PEDIT_KEY_EX_CMD_SET:
+ entry->id = FLOW_ACTION_MANGLE;
+ break;
+ case TCA_PEDIT_KEY_EX_CMD_ADD:
+ entry->id = FLOW_ACTION_ADD;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ entry->mangle.htype = tcf_pedit_htype(act, k);
+ entry->mangle.mask = tcf_pedit_mask(act, k);
+ entry->mangle.val = tcf_pedit_val(act, k);
+ entry->mangle.offset = tcf_pedit_offset(act, k);
+ entry->hw_stats = tc_act_hw_stats(act->hw_stats);
+ entry++;
+ }
+ *index_inc = k;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_pedit_ops = {
.kind = "pedit",
.id = TCA_ID_PEDIT,
@@ -498,6 +531,7 @@ static struct tc_action_ops act_pedit_ops = {
.init = tcf_pedit_init,
.walk = tcf_pedit_walker,
.lookup = tcf_pedit_search,
+ .offload_act_setup = tcf_pedit_offload_act_setup,
.size = sizeof(struct tcf_pedit),
};
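
tcf_pedit_offload_act_setup() is the one callback that consumes more than one flow_action_entry per action: it emits one mangle/add entry per pedit key and returns the key count through *index_inc, which the caller uses to advance its entry index (and tcf_offload_act_num_actions_single() uses the same count when sizing the flow_action array). A small sketch of that expansion, with stand-in types:

#include <stdio.h>

struct demo_entry { int id; int offset; };

#define DEMO_MANGLE 1	/* stand-in for FLOW_ACTION_MANGLE */

static int pedit_like_setup(const int *offsets, int nkeys,
			    struct demo_entry *entry, unsigned int *index_inc)
{
	int k;

	for (k = 0; k < nkeys; k++) {
		entry->id = DEMO_MANGLE;
		entry->offset = offsets[k];
		entry++;			/* next flow entry */
	}
	*index_inc = k;				/* caller does j += index */
	return 0;
}

int main(void)
{
	struct demo_entry entries[4];
	const int offsets[3] = { 0, 4, 8 };
	unsigned int inc = 0;

	pedit_like_setup(offsets, 3, entries, &inc);
	printf("consumed %u entries, last offset %d\n",
	       inc, entries[inc - 1].offset);
	return 0;
}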
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 9e77ba8401e5..0923aa2b8f8a 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -90,7 +90,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, NULL, a,
- &act_police_ops, bind, true, 0);
+ &act_police_ops, bind, true, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -405,6 +405,30 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
return tcf_idr_search(tn, a, index);
}
+static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ entry->id = FLOW_ACTION_POLICE;
+ entry->police.burst = tcf_police_burst(act);
+ entry->police.rate_bytes_ps =
+ tcf_police_rate_bytes_ps(act);
+ entry->police.burst_pkt = tcf_police_burst_pkt(act);
+ entry->police.rate_pkt_ps =
+ tcf_police_rate_pkt_ps(act);
+ entry->police.mtu = tcf_police_tcfp_mtu(act);
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ fl_action->id = FLOW_ACTION_POLICE;
+ }
+
+ return 0;
+}
+
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
@@ -420,6 +444,7 @@ static struct tc_action_ops act_police_ops = {
.walk = tcf_police_walker,
.lookup = tcf_police_search,
.cleanup = tcf_police_cleanup,
+ .offload_act_setup = tcf_police_offload_act_setup,
.size = sizeof(struct tcf_police),
};
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index ce859b0e0deb..9a22cdda6bbd 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -70,7 +70,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a,
- &act_sample_ops, bind, true, 0);
+ &act_sample_ops, bind, true, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -282,6 +282,35 @@ tcf_sample_get_group(const struct tc_action *a,
return group;
}
+static void tcf_offload_sample_get_group(struct flow_action_entry *entry,
+ const struct tc_action *act)
+{
+ entry->sample.psample_group =
+ act->ops->get_psample_group(act, &entry->destructor);
+ entry->destructor_priv = entry->sample.psample_group;
+}
+
+static int tcf_sample_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ entry->id = FLOW_ACTION_SAMPLE;
+ entry->sample.trunc_size = tcf_sample_trunc_size(act);
+ entry->sample.truncate = tcf_sample_truncate(act);
+ entry->sample.rate = tcf_sample_rate(act);
+ tcf_offload_sample_get_group(entry, act);
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ fl_action->id = FLOW_ACTION_SAMPLE;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_sample_ops = {
.kind = "sample",
.id = TCA_ID_SAMPLE,
@@ -294,6 +323,7 @@ static struct tc_action_ops act_sample_ops = {
.walk = tcf_sample_walker,
.lookup = tcf_sample_search,
.get_psample_group = tcf_sample_get_group,
+ .offload_act_setup = tcf_sample_offload_act_setup,
.size = sizeof(struct tcf_sample),
};
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index e617ab4505ca..8c1d60bde93e 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -129,7 +129,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a,
- &act_simp_ops, bind, false, 0);
+ &act_simp_ops, bind, false, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index d30ecbfc8f84..ceba11b198bb 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -176,7 +176,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a,
- &act_skbedit_ops, bind, true, 0);
+ &act_skbedit_ops, bind, true, act_flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
@@ -327,6 +327,41 @@ static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
+ nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
}
+static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ if (is_tcf_skbedit_mark(act)) {
+ entry->id = FLOW_ACTION_MARK;
+ entry->mark = tcf_skbedit_mark(act);
+ } else if (is_tcf_skbedit_ptype(act)) {
+ entry->id = FLOW_ACTION_PTYPE;
+ entry->ptype = tcf_skbedit_ptype(act);
+ } else if (is_tcf_skbedit_priority(act)) {
+ entry->id = FLOW_ACTION_PRIORITY;
+ entry->priority = tcf_skbedit_priority(act);
+ } else {
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ if (is_tcf_skbedit_mark(act))
+ fl_action->id = FLOW_ACTION_MARK;
+ else if (is_tcf_skbedit_ptype(act))
+ fl_action->id = FLOW_ACTION_PTYPE;
+ else if (is_tcf_skbedit_priority(act))
+ fl_action->id = FLOW_ACTION_PRIORITY;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_skbedit_ops = {
.kind = "skbedit",
.id = TCA_ID_SKBEDIT,
@@ -339,6 +374,7 @@ static struct tc_action_ops act_skbedit_ops = {
.walk = tcf_skbedit_walker,
.get_fill_size = tcf_skbedit_get_fill_size,
.lookup = tcf_skbedit_search,
+ .offload_act_setup = tcf_skbedit_offload_act_setup,
.size = sizeof(struct tcf_skbedit),
};
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 9b6b52c5e24e..2083612d8780 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -168,7 +168,7 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, index, est, a,
- &act_skbmod_ops, bind, true, 0);
+ &act_skbmod_ops, bind, true, flags);
if (ret) {
tcf_idr_cleanup(tn, index);
return ret;
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index d9cd174eecb7..23aba03d26a8 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -787,6 +787,59 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index)
return tcf_idr_search(tn, a, index);
}
+static void tcf_tunnel_encap_put_tunnel(void *priv)
+{
+ struct ip_tunnel_info *tunnel = priv;
+
+ kfree(tunnel);
+}
+
+static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
+ const struct tc_action *act)
+{
+ entry->tunnel = tcf_tunnel_info_copy(act);
+ if (!entry->tunnel)
+ return -ENOMEM;
+ entry->destructor = tcf_tunnel_encap_put_tunnel;
+ entry->destructor_priv = entry->tunnel;
+ return 0;
+}
+
+static int tcf_tunnel_key_offload_act_setup(struct tc_action *act,
+ void *entry_data,
+ u32 *index_inc,
+ bool bind)
+{
+ int err;
+
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ if (is_tcf_tunnel_set(act)) {
+ entry->id = FLOW_ACTION_TUNNEL_ENCAP;
+ err = tcf_tunnel_encap_get_tunnel(entry, act);
+ if (err)
+ return err;
+ } else if (is_tcf_tunnel_release(act)) {
+ entry->id = FLOW_ACTION_TUNNEL_DECAP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ if (is_tcf_tunnel_set(act))
+ fl_action->id = FLOW_ACTION_TUNNEL_ENCAP;
+ else if (is_tcf_tunnel_release(act))
+ fl_action->id = FLOW_ACTION_TUNNEL_DECAP;
+ else
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_tunnel_key_ops = {
.kind = "tunnel_key",
.id = TCA_ID_TUNNEL_KEY,
@@ -797,6 +850,7 @@ static struct tc_action_ops act_tunnel_key_ops = {
.cleanup = tunnel_key_release,
.walk = tunnel_key_walker,
.lookup = tunnel_key_search,
+ .offload_act_setup = tcf_tunnel_key_offload_act_setup,
.size = sizeof(struct tcf_tunnel_key),
};
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index e4dc5a555bd8..756e2dcde1cd 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -368,6 +368,53 @@ static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
+ nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
}
+static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data,
+ u32 *index_inc, bool bind)
+{
+ if (bind) {
+ struct flow_action_entry *entry = entry_data;
+
+ switch (tcf_vlan_action(act)) {
+ case TCA_VLAN_ACT_PUSH:
+ entry->id = FLOW_ACTION_VLAN_PUSH;
+ entry->vlan.vid = tcf_vlan_push_vid(act);
+ entry->vlan.proto = tcf_vlan_push_proto(act);
+ entry->vlan.prio = tcf_vlan_push_prio(act);
+ break;
+ case TCA_VLAN_ACT_POP:
+ entry->id = FLOW_ACTION_VLAN_POP;
+ break;
+ case TCA_VLAN_ACT_MODIFY:
+ entry->id = FLOW_ACTION_VLAN_MANGLE;
+ entry->vlan.vid = tcf_vlan_push_vid(act);
+ entry->vlan.proto = tcf_vlan_push_proto(act);
+ entry->vlan.prio = tcf_vlan_push_prio(act);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ *index_inc = 1;
+ } else {
+ struct flow_offload_action *fl_action = entry_data;
+
+ switch (tcf_vlan_action(act)) {
+ case TCA_VLAN_ACT_PUSH:
+ fl_action->id = FLOW_ACTION_VLAN_PUSH;
+ break;
+ case TCA_VLAN_ACT_POP:
+ fl_action->id = FLOW_ACTION_VLAN_POP;
+ break;
+ case TCA_VLAN_ACT_MODIFY:
+ fl_action->id = FLOW_ACTION_VLAN_MANGLE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
+}
+
static struct tc_action_ops act_vlan_ops = {
.kind = "vlan",
.id = TCA_ID_VLAN,
@@ -380,6 +427,7 @@ static struct tc_action_ops act_vlan_ops = {
.stats_update = tcf_vlan_stats_update,
.get_fill_size = tcf_vlan_get_fill_size,
.lookup = tcf_vlan_search,
+ .offload_act_setup = tcf_vlan_offload_act_setup,
.size = sizeof(struct tcf_vlan),
};
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 35c74bdde848..d4e27c679123 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1625,6 +1625,8 @@ int tcf_classify(struct sk_buff *skb,
ext->chain = last_executed_chain;
ext->mru = cb->mru;
ext->post_ct = cb->post_ct;
+ ext->post_ct_snat = cb->post_ct_snat;
+ ext->post_ct_dnat = cb->post_ct_dnat;
ext->zone = cb->zone;
}
@@ -3028,9 +3030,9 @@ void tcf_exts_destroy(struct tcf_exts *exts)
}
EXPORT_SYMBOL(tcf_exts_destroy);
-int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
- struct nlattr *rate_tlv, struct tcf_exts *exts,
- u32 flags, struct netlink_ext_ack *extack)
+int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
+ struct nlattr *rate_tlv, struct tcf_exts *exts,
+ u32 flags, u32 fl_flags, struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
{
@@ -3064,7 +3066,8 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
flags |= TCA_ACT_FLAGS_BIND;
err = tcf_action_init(net, tp, tb[exts->action],
rate_tlv, exts->actions, init_res,
- &attr_size, flags, extack);
+ &attr_size, flags, fl_flags,
+ extack);
if (err < 0)
return err;
exts->nr_actions = err;
@@ -3080,6 +3083,15 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
return 0;
}
+EXPORT_SYMBOL(tcf_exts_validate_ex);
+
+int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
+ struct nlattr *rate_tlv, struct tcf_exts *exts,
+ u32 flags, struct netlink_ext_ack *extack)
+{
+ return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts,
+ flags, 0, extack);
+}
EXPORT_SYMBOL(tcf_exts_validate);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
@@ -3323,7 +3335,7 @@ err_unlock:
up_read(&block->cb_lock);
if (take_rtnl)
rtnl_unlock();
- return ok_count < 0 ? ok_count : 0;
+ return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_add);
@@ -3379,7 +3391,7 @@ err_unlock:
up_read(&block->cb_lock);
if (take_rtnl)
rtnl_unlock();
- return ok_count < 0 ? ok_count : 0;
+ return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_replace);
@@ -3417,7 +3429,7 @@ retry:
up_read(&block->cb_lock);
if (take_rtnl)
rtnl_unlock();
- return ok_count < 0 ? ok_count : 0;
+ return min(ok_count, 0);
}
EXPORT_SYMBOL(tc_setup_cb_destroy);
@@ -3464,7 +3476,7 @@ static void tcf_act_put_cookie(struct flow_action_entry *entry)
flow_action_cookie_destroy(entry->cookie);
}
-void tc_cleanup_flow_action(struct flow_action *flow_action)
+void tc_cleanup_offload_action(struct flow_action *flow_action)
{
struct flow_action_entry *entry;
int i;
@@ -3475,93 +3487,37 @@ void tc_cleanup_flow_action(struct flow_action *flow_action)
entry->destructor(entry->destructor_priv);
}
}
-EXPORT_SYMBOL(tc_cleanup_flow_action);
+EXPORT_SYMBOL(tc_cleanup_offload_action);
-static void tcf_mirred_get_dev(struct flow_action_entry *entry,
- const struct tc_action *act)
+static int tc_setup_offload_act(struct tc_action *act,
+ struct flow_action_entry *entry,
+ u32 *index_inc)
{
#ifdef CONFIG_NET_CLS_ACT
- entry->dev = act->ops->get_dev(act, &entry->destructor);
- if (!entry->dev)
- return;
- entry->destructor_priv = entry->dev;
-#endif
-}
-
-static void tcf_tunnel_encap_put_tunnel(void *priv)
-{
- struct ip_tunnel_info *tunnel = priv;
-
- kfree(tunnel);
-}
-
-static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
- const struct tc_action *act)
-{
- entry->tunnel = tcf_tunnel_info_copy(act);
- if (!entry->tunnel)
- return -ENOMEM;
- entry->destructor = tcf_tunnel_encap_put_tunnel;
- entry->destructor_priv = entry->tunnel;
+ if (act->ops->offload_act_setup)
+ return act->ops->offload_act_setup(act, entry, index_inc, true);
+ else
+ return -EOPNOTSUPP;
+#else
return 0;
-}
-
-static void tcf_sample_get_group(struct flow_action_entry *entry,
- const struct tc_action *act)
-{
-#ifdef CONFIG_NET_CLS_ACT
- entry->sample.psample_group =
- act->ops->get_psample_group(act, &entry->destructor);
- entry->destructor_priv = entry->sample.psample_group;
#endif
}
-static void tcf_gate_entry_destructor(void *priv)
-{
- struct action_gate_entry *oe = priv;
-
- kfree(oe);
-}
-
-static int tcf_gate_get_entries(struct flow_action_entry *entry,
- const struct tc_action *act)
-{
- entry->gate.entries = tcf_gate_get_list(act);
-
- if (!entry->gate.entries)
- return -EINVAL;
-
- entry->destructor = tcf_gate_entry_destructor;
- entry->destructor_priv = entry->gate.entries;
-
- return 0;
-}
-
-static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
-{
- if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
- return FLOW_ACTION_HW_STATS_DONT_CARE;
- else if (!hw_stats)
- return FLOW_ACTION_HW_STATS_DISABLED;
-
- return hw_stats;
-}
-
-int tc_setup_flow_action(struct flow_action *flow_action,
- const struct tcf_exts *exts)
+int tc_setup_action(struct flow_action *flow_action,
+ struct tc_action *actions[])
{
+ int i, j, index, err = 0;
struct tc_action *act;
- int i, j, k, err = 0;
BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
- if (!exts)
+ if (!actions)
return 0;
j = 0;
- tcf_exts_for_each_action(i, act, exts) {
+ tcf_act_for_each_action(i, act, actions) {
struct flow_action_entry *entry;
entry = &flow_action->entries[j];
@@ -3571,165 +3527,39 @@ int tc_setup_flow_action(struct flow_action *flow_action,
goto err_out_locked;
entry->hw_stats = tc_act_hw_stats(act->hw_stats);
-
- if (is_tcf_gact_ok(act)) {
- entry->id = FLOW_ACTION_ACCEPT;
- } else if (is_tcf_gact_shot(act)) {
- entry->id = FLOW_ACTION_DROP;
- } else if (is_tcf_gact_trap(act)) {
- entry->id = FLOW_ACTION_TRAP;
- } else if (is_tcf_gact_goto_chain(act)) {
- entry->id = FLOW_ACTION_GOTO;
- entry->chain_index = tcf_gact_goto_chain_index(act);
- } else if (is_tcf_mirred_egress_redirect(act)) {
- entry->id = FLOW_ACTION_REDIRECT;
- tcf_mirred_get_dev(entry, act);
- } else if (is_tcf_mirred_egress_mirror(act)) {
- entry->id = FLOW_ACTION_MIRRED;
- tcf_mirred_get_dev(entry, act);
- } else if (is_tcf_mirred_ingress_redirect(act)) {
- entry->id = FLOW_ACTION_REDIRECT_INGRESS;
- tcf_mirred_get_dev(entry, act);
- } else if (is_tcf_mirred_ingress_mirror(act)) {
- entry->id = FLOW_ACTION_MIRRED_INGRESS;
- tcf_mirred_get_dev(entry, act);
- } else if (is_tcf_vlan(act)) {
- switch (tcf_vlan_action(act)) {
- case TCA_VLAN_ACT_PUSH:
- entry->id = FLOW_ACTION_VLAN_PUSH;
- entry->vlan.vid = tcf_vlan_push_vid(act);
- entry->vlan.proto = tcf_vlan_push_proto(act);
- entry->vlan.prio = tcf_vlan_push_prio(act);
- break;
- case TCA_VLAN_ACT_POP:
- entry->id = FLOW_ACTION_VLAN_POP;
- break;
- case TCA_VLAN_ACT_MODIFY:
- entry->id = FLOW_ACTION_VLAN_MANGLE;
- entry->vlan.vid = tcf_vlan_push_vid(act);
- entry->vlan.proto = tcf_vlan_push_proto(act);
- entry->vlan.prio = tcf_vlan_push_prio(act);
- break;
- default:
- err = -EOPNOTSUPP;
- goto err_out_locked;
- }
- } else if (is_tcf_tunnel_set(act)) {
- entry->id = FLOW_ACTION_TUNNEL_ENCAP;
- err = tcf_tunnel_encap_get_tunnel(entry, act);
- if (err)
- goto err_out_locked;
- } else if (is_tcf_tunnel_release(act)) {
- entry->id = FLOW_ACTION_TUNNEL_DECAP;
- } else if (is_tcf_pedit(act)) {
- for (k = 0; k < tcf_pedit_nkeys(act); k++) {
- switch (tcf_pedit_cmd(act, k)) {
- case TCA_PEDIT_KEY_EX_CMD_SET:
- entry->id = FLOW_ACTION_MANGLE;
- break;
- case TCA_PEDIT_KEY_EX_CMD_ADD:
- entry->id = FLOW_ACTION_ADD;
- break;
- default:
- err = -EOPNOTSUPP;
- goto err_out_locked;
- }
- entry->mangle.htype = tcf_pedit_htype(act, k);
- entry->mangle.mask = tcf_pedit_mask(act, k);
- entry->mangle.val = tcf_pedit_val(act, k);
- entry->mangle.offset = tcf_pedit_offset(act, k);
- entry->hw_stats = tc_act_hw_stats(act->hw_stats);
- entry = &flow_action->entries[++j];
- }
- } else if (is_tcf_csum(act)) {
- entry->id = FLOW_ACTION_CSUM;
- entry->csum_flags = tcf_csum_update_flags(act);
- } else if (is_tcf_skbedit_mark(act)) {
- entry->id = FLOW_ACTION_MARK;
- entry->mark = tcf_skbedit_mark(act);
- } else if (is_tcf_sample(act)) {
- entry->id = FLOW_ACTION_SAMPLE;
- entry->sample.trunc_size = tcf_sample_trunc_size(act);
- entry->sample.truncate = tcf_sample_truncate(act);
- entry->sample.rate = tcf_sample_rate(act);
- tcf_sample_get_group(entry, act);
- } else if (is_tcf_police(act)) {
- entry->id = FLOW_ACTION_POLICE;
- entry->police.burst = tcf_police_burst(act);
- entry->police.rate_bytes_ps =
- tcf_police_rate_bytes_ps(act);
- entry->police.burst_pkt = tcf_police_burst_pkt(act);
- entry->police.rate_pkt_ps =
- tcf_police_rate_pkt_ps(act);
- entry->police.mtu = tcf_police_tcfp_mtu(act);
- entry->police.index = act->tcfa_index;
- } else if (is_tcf_ct(act)) {
- entry->id = FLOW_ACTION_CT;
- entry->ct.action = tcf_ct_action(act);
- entry->ct.zone = tcf_ct_zone(act);
- entry->ct.flow_table = tcf_ct_ft(act);
- } else if (is_tcf_mpls(act)) {
- switch (tcf_mpls_action(act)) {
- case TCA_MPLS_ACT_PUSH:
- entry->id = FLOW_ACTION_MPLS_PUSH;
- entry->mpls_push.proto = tcf_mpls_proto(act);
- entry->mpls_push.label = tcf_mpls_label(act);
- entry->mpls_push.tc = tcf_mpls_tc(act);
- entry->mpls_push.bos = tcf_mpls_bos(act);
- entry->mpls_push.ttl = tcf_mpls_ttl(act);
- break;
- case TCA_MPLS_ACT_POP:
- entry->id = FLOW_ACTION_MPLS_POP;
- entry->mpls_pop.proto = tcf_mpls_proto(act);
- break;
- case TCA_MPLS_ACT_MODIFY:
- entry->id = FLOW_ACTION_MPLS_MANGLE;
- entry->mpls_mangle.label = tcf_mpls_label(act);
- entry->mpls_mangle.tc = tcf_mpls_tc(act);
- entry->mpls_mangle.bos = tcf_mpls_bos(act);
- entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
- break;
- default:
- err = -EOPNOTSUPP;
- goto err_out_locked;
- }
- } else if (is_tcf_skbedit_ptype(act)) {
- entry->id = FLOW_ACTION_PTYPE;
- entry->ptype = tcf_skbedit_ptype(act);
- } else if (is_tcf_skbedit_priority(act)) {
- entry->id = FLOW_ACTION_PRIORITY;
- entry->priority = tcf_skbedit_priority(act);
- } else if (is_tcf_gate(act)) {
- entry->id = FLOW_ACTION_GATE;
- entry->gate.index = tcf_gate_index(act);
- entry->gate.prio = tcf_gate_prio(act);
- entry->gate.basetime = tcf_gate_basetime(act);
- entry->gate.cycletime = tcf_gate_cycletime(act);
- entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
- entry->gate.num_entries = tcf_gate_num_entries(act);
- err = tcf_gate_get_entries(entry, act);
- if (err)
- goto err_out_locked;
- } else {
- err = -EOPNOTSUPP;
+ entry->hw_index = act->tcfa_index;
+ index = 0;
+ err = tc_setup_offload_act(act, entry, &index);
+ if (!err)
+ j += index;
+ else
goto err_out_locked;
- }
spin_unlock_bh(&act->tcfa_lock);
-
- if (!is_tcf_pedit(act))
- j++;
}
err_out:
if (err)
- tc_cleanup_flow_action(flow_action);
+ tc_cleanup_offload_action(flow_action);
return err;
err_out_locked:
spin_unlock_bh(&act->tcfa_lock);
goto err_out;
}
-EXPORT_SYMBOL(tc_setup_flow_action);
+
+int tc_setup_offload_action(struct flow_action *flow_action,
+ const struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (!exts)
+ return 0;
+
+ return tc_setup_action(flow_action, exts->actions);
+#else
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(tc_setup_offload_action);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
{
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index ef54ed395874..1a9b1f140f9e 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -463,7 +463,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
cls_flower.rule->match.key = &f->mkey;
cls_flower.classid = f->res.classid;
- err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+ err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
if (err) {
kfree(cls_flower.rule);
if (skip_sw) {
@@ -475,7 +475,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
- tc_cleanup_flow_action(&cls_flower.rule->action);
+ tc_cleanup_offload_action(&cls_flower.rule->action);
kfree(cls_flower.rule);
if (err) {
@@ -503,12 +503,12 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
rtnl_held);
- tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
- cls_flower.stats.pkts,
- cls_flower.stats.drops,
- cls_flower.stats.lastused,
- cls_flower.stats.used_hw_stats,
- cls_flower.stats.used_hw_stats_valid);
+ tcf_exts_hw_stats_update(&f->exts, cls_flower.stats.bytes,
+ cls_flower.stats.pkts,
+ cls_flower.stats.drops,
+ cls_flower.stats.lastused,
+ cls_flower.stats.used_hw_stats,
+ cls_flower.stats.used_hw_stats_valid);
}
static void __fl_put(struct cls_fl_filter *f)
@@ -1919,12 +1919,14 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp,
struct cls_fl_filter *f, struct fl_flow_mask *mask,
unsigned long base, struct nlattr **tb,
struct nlattr *est,
- struct fl_flow_tmplt *tmplt, u32 flags,
+ struct fl_flow_tmplt *tmplt,
+ u32 flags, u32 fl_flags,
struct netlink_ext_ack *extack)
{
int err;
- err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
+ err = tcf_exts_validate_ex(net, tp, tb, est, &f->exts, flags,
+ fl_flags, extack);
if (err < 0)
return err;
@@ -2038,7 +2040,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
}
err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
- tp->chain->tmplt_priv, flags, extack);
+ tp->chain->tmplt_priv, flags, fnew->flags,
+ extack);
if (err)
goto errout;
@@ -2268,7 +2271,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
cls_flower.rule->match.mask = &f->mask->key;
cls_flower.rule->match.key = &f->mkey;
- err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+ err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
if (err) {
kfree(cls_flower.rule);
if (tc_skip_sw(f->flags)) {
@@ -2285,7 +2288,7 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
TC_SETUP_CLSFLOWER, &cls_flower,
cb_priv, &f->flags,
&f->in_hw_count);
- tc_cleanup_flow_action(&cls_flower.rule->action);
+ tc_cleanup_offload_action(&cls_flower.rule->action);
kfree(cls_flower.rule);
if (err) {
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 24f0046ce0b3..ca5670fd5228 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -97,7 +97,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
cls_mall.command = TC_CLSMATCHALL_REPLACE;
cls_mall.cookie = cookie;
- err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
+ err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts);
if (err) {
kfree(cls_mall.rule);
mall_destroy_hw_filter(tp, head, cookie, NULL);
@@ -111,7 +111,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
skip_sw, &head->flags, &head->in_hw_count, true);
- tc_cleanup_flow_action(&cls_mall.rule->action);
+ tc_cleanup_offload_action(&cls_mall.rule->action);
kfree(cls_mall.rule);
if (err) {
@@ -163,12 +163,13 @@ static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
struct cls_mall_head *head,
unsigned long base, struct nlattr **tb,
- struct nlattr *est, u32 flags,
+ struct nlattr *est, u32 flags, u32 fl_flags,
struct netlink_ext_ack *extack)
{
int err;
- err = tcf_exts_validate(net, tp, tb, est, &head->exts, flags, extack);
+ err = tcf_exts_validate_ex(net, tp, tb, est, &head->exts, flags,
+ fl_flags, extack);
if (err < 0)
return err;
@@ -226,8 +227,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
goto err_alloc_percpu;
}
- err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], flags,
- extack);
+ err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE],
+ flags, new->flags, extack);
if (err)
goto err_set_parms;
@@ -301,7 +302,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
cls_mall.cookie = (unsigned long)head;
- err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
+ err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts);
if (err) {
kfree(cls_mall.rule);
if (add && tc_skip_sw(head->flags)) {
@@ -314,7 +315,7 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
&cls_mall, cb_priv, &head->flags,
&head->in_hw_count);
- tc_cleanup_flow_action(&cls_mall.rule->action);
+ tc_cleanup_offload_action(&cls_mall.rule->action);
kfree(cls_mall.rule);
if (err)
@@ -336,11 +337,11 @@ static void mall_stats_hw_filter(struct tcf_proto *tp,
tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);
- tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
- cls_mall.stats.pkts, cls_mall.stats.drops,
- cls_mall.stats.lastused,
- cls_mall.stats.used_hw_stats,
- cls_mall.stats.used_hw_stats_valid);
+ tcf_exts_hw_stats_update(&head->exts, cls_mall.stats.bytes,
+ cls_mall.stats.pkts, cls_mall.stats.drops,
+ cls_mall.stats.lastused,
+ cls_mall.stats.used_hw_stats,
+ cls_mall.stats.used_hw_stats_valid);
}
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 4272814487f0..cf5649292ee0 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -709,12 +709,13 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
unsigned long base,
struct tc_u_knode *n, struct nlattr **tb,
- struct nlattr *est, u32 flags,
+ struct nlattr *est, u32 flags, u32 fl_flags,
struct netlink_ext_ack *extack)
{
int err;
- err = tcf_exts_validate(net, tp, tb, est, &n->exts, flags, extack);
+ err = tcf_exts_validate_ex(net, tp, tb, est, &n->exts, flags,
+ fl_flags, extack);
if (err < 0)
return err;
@@ -895,7 +896,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
return -ENOMEM;
err = u32_set_parms(net, tp, base, new, tb,
- tca[TCA_RATE], flags, extack);
+ tca[TCA_RATE], flags, new->flags,
+ extack);
if (err) {
u32_destroy_key(new, false);
@@ -1060,8 +1062,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
}
#endif
- err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], flags,
- extack);
+ err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE],
+ flags, n->flags, extack);
if (err == 0) {
struct tc_u_knode __rcu **ins;
struct tc_u_knode *pins;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index efcd0b5e9a32..c9c6f49f9c28 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1292,7 +1292,7 @@ err_out5:
if (ops->destroy)
ops->destroy(sch);
err_out3:
- dev_put(dev);
+ dev_put_track(dev, &sch->dev_tracker);
qdisc_free(sch);
err_out2:
module_put(ops->owner);
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 857aaebd49f4..a43a58a73d09 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -2342,9 +2342,7 @@ static int cake_config_precedence(struct Qdisc *sch)
/* List of known Diffserv codepoints:
*
- * Least Effort (CS1, LE)
- * Best Effort (CS0)
- * Max Reliability & LLT "Lo" (TOS1)
+ * Default Forwarding (DF/CS0) - Best Effort
* Max Throughput (TOS2)
* Min Delay (TOS4)
* LLT "La" (TOS5)
@@ -2352,6 +2350,7 @@ static int cake_config_precedence(struct Qdisc *sch)
* Assured Forwarding 2 (AF2x) - x3
* Assured Forwarding 3 (AF3x) - x3
* Assured Forwarding 4 (AF4x) - x3
+ * Precedence Class 1 (CS1)
* Precedence Class 2 (CS2)
* Precedence Class 3 (CS3)
* Precedence Class 4 (CS4)
@@ -2360,8 +2359,9 @@ static int cake_config_precedence(struct Qdisc *sch)
* Precedence Class 7 (CS7)
* Voice Admit (VA)
* Expedited Forwarding (EF)
-
- * Total 25 codepoints.
+ * Lower Effort (LE)
+ *
+ * Total 26 codepoints.
*/
/* List of traffic classes in RFC 4594, updated by RFC 8622:
@@ -2375,12 +2375,12 @@ static int cake_config_precedence(struct Qdisc *sch)
* Realtime Interactive (CS4) - eg. games
* Multimedia Streaming (AF3x) - eg. YouTube, NetFlix, Twitch
* Broadcast Video (CS3)
- * Low Latency Data (AF2x,TOS4) - eg. database
- * Ops, Admin, Management (CS2,TOS1) - eg. ssh
- * Standard Service (CS0 & unrecognised codepoints)
- * High Throughput Data (AF1x,TOS2) - eg. web traffic
- * Low Priority Data (CS1,LE) - eg. BitTorrent
-
+ * Low-Latency Data (AF2x,TOS4) - eg. database
+ * Ops, Admin, Management (CS2) - eg. ssh
+ * Standard Service (DF & unrecognised codepoints)
+ * High-Throughput Data (AF1x,TOS2) - eg. web traffic
+ * Low-Priority Data (LE,CS1) - eg. BitTorrent
+ *
* Total 12 traffic classes.
*/
@@ -2390,12 +2390,12 @@ static int cake_config_diffserv8(struct Qdisc *sch)
*
* Network Control (CS6, CS7)
* Minimum Latency (EF, VA, CS5, CS4)
- * Interactive Shell (CS2, TOS1)
+ * Interactive Shell (CS2)
* Low Latency Transactions (AF2x, TOS4)
* Video Streaming (AF4x, AF3x, CS3)
- * Bog Standard (CS0 etc.)
- * High Throughput (AF1x, TOS2)
- * Background Traffic (CS1, LE)
+ * Bog Standard (DF etc.)
+ * High Throughput (AF1x, TOS2, CS1)
+ * Background Traffic (LE)
*
* Total 8 traffic classes.
*/
@@ -2437,9 +2437,9 @@ static int cake_config_diffserv4(struct Qdisc *sch)
/* Further pruned list of traffic classes for four-class system:
*
* Latency Sensitive (CS7, CS6, EF, VA, CS5, CS4)
- * Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2, TOS1)
- * Best Effort (CS0, AF1x, TOS2, and those not specified)
- * Background Traffic (CS1, LE)
+ * Streaming Media (AF4x, AF3x, CS3, AF2x, TOS4, CS2)
+ * Best Effort (DF, AF1x, TOS2, and those not specified)
+ * Background Traffic (LE, CS1)
*
* Total 4 traffic classes.
*/
@@ -2477,9 +2477,9 @@ static int cake_config_diffserv4(struct Qdisc *sch)
static int cake_config_diffserv3(struct Qdisc *sch)
{
/* Simplified Diffserv structure with 3 tins.
- * Low Priority (CS1, LE)
+ * Latency Sensitive (CS7, CS6, EF, VA, TOS4)
* Best Effort
- * Latency Sensitive (TOS4, VA, EF, CS6, CS7)
+ * Low Priority (LE, CS1)
*/
struct cake_sched_data *q = qdisc_priv(sch);
u32 mtu = psched_mtu(qdisc_dev(sch));
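
The three-tin comment above maps Diffserv codepoints to priorities. As a rough illustration only (not cake's actual lookup table; DSCP values taken from the cited RFCs, tin names hypothetical), a classifier along the lines of that comment would look like:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: DSCP values per RFC 8622 (LE), RFC 2474 (CSx),
 * RFC 3246 (EF), RFC 5865 (VA); tin numbering is made up here. */
enum tin { TIN_LOW_PRIORITY, TIN_BEST_EFFORT, TIN_LATENCY_SENSITIVE };

static enum tin diffserv3_tin(uint8_t dscp)
{
	switch (dscp) {
	case 1:  /* LE  */
	case 8:  /* CS1 */
		return TIN_LOW_PRIORITY;
	case 4:  /* TOS4 (old Minimize Delay bit) */
	case 44: /* VA  */
	case 46: /* EF  */
	case 48: /* CS6 */
	case 56: /* CS7 */
		return TIN_LATENCY_SENSITIVE;
	default: /* DF and everything unrecognised */
		return TIN_BEST_EFFORT;
	}
}

int main(void)
{
	printf("CS1 -> %d, EF -> %d, DF -> %d\n",
	       diffserv3_tin(8), diffserv3_tin(46), diffserv3_tin(0));
	return 0;
}
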
diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
index 5ded4c8672a6..a9bd0a235890 100644
--- a/net/sched/sch_frag.c
+++ b/net/sched/sch_frag.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+#include <linux/if_vlan.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3b0f62095803..b07bd1c7330f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -434,9 +434,9 @@ unsigned long dev_trans_start(struct net_device *dev)
dev = vlan_dev_real_dev(dev);
else if (netif_is_macvlan(dev))
dev = macvlan_dev_real_dev(dev);
- res = netdev_get_tx_queue(dev, 0)->trans_start;
+ res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
for (i = 1; i < dev->num_tx_queues; i++) {
- val = netdev_get_tx_queue(dev, i)->trans_start;
+ val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
if (val && time_after(val, res))
res = val;
}
@@ -445,11 +445,63 @@ unsigned long dev_trans_start(struct net_device *dev)
}
EXPORT_SYMBOL(dev_trans_start);
+static void netif_freeze_queues(struct net_device *dev)
+{
+ unsigned int i;
+ int cpu;
+
+ cpu = smp_processor_id();
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ /* We are the only thread of execution doing a
+ * freeze, but we have to grab the _xmit_lock in
+ * order to synchronize with threads which are in
+ * the ->hard_start_xmit() handler and already
+ * checked the frozen bit.
+ */
+ __netif_tx_lock(txq, cpu);
+ set_bit(__QUEUE_STATE_FROZEN, &txq->state);
+ __netif_tx_unlock(txq);
+ }
+}
+
+void netif_tx_lock(struct net_device *dev)
+{
+ spin_lock(&dev->tx_global_lock);
+ netif_freeze_queues(dev);
+}
+EXPORT_SYMBOL(netif_tx_lock);
+
+static void netif_unfreeze_queues(struct net_device *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+ /* No need to grab the _xmit_lock here. If the
+ * queue is not stopped for another reason, we
+ * force a schedule.
+ */
+ clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
+ netif_schedule_queue(txq);
+ }
+}
+
+void netif_tx_unlock(struct net_device *dev)
+{
+ netif_unfreeze_queues(dev);
+ spin_unlock(&dev->tx_global_lock);
+}
+EXPORT_SYMBOL(netif_tx_unlock);
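
A userspace analogue of the freeze/unfreeze pattern above may help to see why the xmit lock is taken only on the freeze side (hypothetical sketch, not the kernel API): setting the frozen flag under each queue's lock guarantees that senders which already tested the flag have left their critical section, while clearing it needs no lock.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical sketch of the netif_freeze_queues() idea. */
struct queue {
	pthread_mutex_t xmit_lock;
	atomic_bool frozen;
};

static void queue_xmit(struct queue *q, int pkt)
{
	pthread_mutex_lock(&q->xmit_lock);
	if (!atomic_load(&q->frozen))
		printf("sent packet %d\n", pkt); /* driver xmit would go here */
	pthread_mutex_unlock(&q->xmit_lock);
}

static void freeze(struct queue *q, int n)
{
	for (int i = 0; i < n; i++) {
		/* lock once so in-flight senders drain before we return */
		pthread_mutex_lock(&q[i].xmit_lock);
		atomic_store(&q[i].frozen, true);
		pthread_mutex_unlock(&q[i].xmit_lock);
	}
}

static void unfreeze(struct queue *q, int n)
{
	for (int i = 0; i < n; i++)
		atomic_store(&q[i].frozen, false); /* no lock needed here */
}

int main(void)
{
	static struct queue q = { PTHREAD_MUTEX_INITIALIZER, false };

	queue_xmit(&q, 1);
	freeze(&q, 1);
	queue_xmit(&q, 2); /* dropped: queue is frozen */
	unfreeze(&q, 1);
	queue_xmit(&q, 3);
	return 0;
}
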
+
static void dev_watchdog(struct timer_list *t)
{
struct net_device *dev = from_timer(dev, t, watchdog_timer);
+ bool release = true;
- netif_tx_lock(dev);
+ spin_lock(&dev->tx_global_lock);
if (!qdisc_tx_is_noop(dev)) {
if (netif_device_present(dev) &&
netif_running(dev) &&
@@ -462,31 +514,34 @@ static void dev_watchdog(struct timer_list *t)
struct netdev_queue *txq;
txq = netdev_get_tx_queue(dev, i);
- trans_start = txq->trans_start;
+ trans_start = READ_ONCE(txq->trans_start);
if (netif_xmit_stopped(txq) &&
time_after(jiffies, (trans_start +
dev->watchdog_timeo))) {
some_queue_timedout = 1;
- txq->trans_timeout++;
+ atomic_long_inc(&txq->trans_timeout);
break;
}
}
- if (some_queue_timedout) {
+ if (unlikely(some_queue_timedout)) {
trace_net_dev_xmit_timeout(dev, i);
WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
dev->name, netdev_drivername(dev), i);
+ netif_freeze_queues(dev);
dev->netdev_ops->ndo_tx_timeout(dev, i);
+ netif_unfreeze_queues(dev);
}
if (!mod_timer(&dev->watchdog_timer,
round_jiffies(jiffies +
dev->watchdog_timeo)))
- dev_hold(dev);
+ release = false;
}
}
- netif_tx_unlock(dev);
+ spin_unlock(&dev->tx_global_lock);
- dev_put(dev);
+ if (release)
+ dev_put_track(dev, &dev->watchdog_dev_tracker);
}
void __netdev_watchdog_up(struct net_device *dev)
@@ -496,7 +551,7 @@ void __netdev_watchdog_up(struct net_device *dev)
dev->watchdog_timeo = 5*HZ;
if (!mod_timer(&dev->watchdog_timer,
round_jiffies(jiffies + dev->watchdog_timeo)))
- dev_hold(dev);
+ dev_hold_track(dev, &dev->watchdog_dev_tracker, GFP_ATOMIC);
}
}
EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
@@ -510,7 +565,7 @@ static void dev_watchdog_down(struct net_device *dev)
{
netif_tx_lock_bh(dev);
if (del_timer(&dev->watchdog_timer))
- dev_put(dev);
+ dev_put_track(dev, &dev->watchdog_dev_tracker);
netif_tx_unlock_bh(dev);
}
@@ -920,7 +975,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
sch->enqueue = ops->enqueue;
sch->dequeue = ops->dequeue;
sch->dev_queue = dev_queue;
- dev_hold(dev);
+ dev_hold_track(dev, &sch->dev_tracker, GFP_KERNEL);
refcount_set(&sch->refcnt, 1);
return sch;
@@ -1020,7 +1075,7 @@ static void qdisc_destroy(struct Qdisc *qdisc)
ops->destroy(qdisc);
module_put(ops->owner);
- dev_put(qdisc_dev(qdisc));
+ dev_put_track(qdisc_dev(qdisc), &qdisc->dev_tracker);
trace_qdisc_destroy(qdisc);
@@ -1148,7 +1203,7 @@ static void transition_one_qdisc(struct net_device *dev,
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
if (need_watchdog_p) {
- dev_queue->trans_start = 0;
+ WRITE_ONCE(dev_queue->trans_start, 0);
*need_watchdog_p = 1;
}
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index ecbb10db1111..ed4ccef5d6a8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -208,17 +208,17 @@ static bool loss_4state(struct netem_sched_data *q)
* next state and if the next packet has to be transmitted or lost.
* The four states correspond to:
* TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
- * LOST_IN_BURST_PERIOD => isolated losses within a gap period
- * LOST_IN_GAP_PERIOD => lost packets within a burst period
- * TX_IN_GAP_PERIOD => successfully transmitted packets within a burst period
+ * LOST_IN_GAP_PERIOD => isolated losses within a gap period
+ * LOST_IN_BURST_PERIOD => lost packets within a burst period
+ * TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
*/
switch (clg->state) {
case TX_IN_GAP_PERIOD:
if (rnd < clg->a4) {
- clg->state = LOST_IN_BURST_PERIOD;
+ clg->state = LOST_IN_GAP_PERIOD;
return true;
} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
- clg->state = LOST_IN_GAP_PERIOD;
+ clg->state = LOST_IN_BURST_PERIOD;
return true;
} else if (clg->a1 + clg->a4 < rnd) {
clg->state = TX_IN_GAP_PERIOD;
@@ -227,24 +227,24 @@ static bool loss_4state(struct netem_sched_data *q)
break;
case TX_IN_BURST_PERIOD:
if (rnd < clg->a5) {
- clg->state = LOST_IN_GAP_PERIOD;
+ clg->state = LOST_IN_BURST_PERIOD;
return true;
} else {
clg->state = TX_IN_BURST_PERIOD;
}
break;
- case LOST_IN_GAP_PERIOD:
+ case LOST_IN_BURST_PERIOD:
if (rnd < clg->a3)
clg->state = TX_IN_BURST_PERIOD;
else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
clg->state = TX_IN_GAP_PERIOD;
} else if (clg->a2 + clg->a3 < rnd) {
- clg->state = LOST_IN_GAP_PERIOD;
+ clg->state = LOST_IN_BURST_PERIOD;
return true;
}
break;
- case LOST_IN_BURST_PERIOD:
+ case LOST_IN_GAP_PERIOD:
clg->state = TX_IN_GAP_PERIOD;
break;
}
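
The hunk above swaps the LOST_IN_GAP_PERIOD/LOST_IN_BURST_PERIOD labels so the transitions match the comment. A minimal userspace simulation of the corrected four-state model (state names and a1..a5 as in the comment; the numeric probabilities below are made up for illustration):

#define _XOPEN_SOURCE 600
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

enum state { TX_IN_GAP, TX_IN_BURST, LOST_IN_BURST, LOST_IN_GAP };

/* Transition probabilities; values are illustrative only. */
static const double a1 = 0.02, a2 = 0.50, a3 = 0.30, a4 = 0.001, a5 = 0.05;

/* Returns true if the packet is lost, mirroring loss_4state() after the fix. */
static bool loss_4state(enum state *s)
{
	double rnd = drand48();

	switch (*s) {
	case TX_IN_GAP:
		if (rnd < a4) {
			*s = LOST_IN_GAP;   /* isolated loss within a gap period */
			return true;
		} else if (rnd < a1 + a4) {
			*s = LOST_IN_BURST; /* loss that starts a burst period */
			return true;
		}
		return false;           /* stay in the gap period */
	case TX_IN_BURST:
		if (rnd < a5) {
			*s = LOST_IN_BURST;
			return true;
		}
		return false;
	case LOST_IN_BURST:
		if (rnd < a3)
			*s = TX_IN_BURST;
		else if (rnd < a2 + a3)
			*s = TX_IN_GAP;
		else
			return true;    /* remain in the lossy burst */
		return false;
	case LOST_IN_GAP:
		*s = TX_IN_GAP;         /* always recover after one packet */
		return false;
	}
	return false;
}

int main(void)
{
	enum state s = TX_IN_GAP;
	long lost = 0, total = 1000000;

	srand48(1);
	for (long i = 0; i < total; i++)
		lost += loss_4state(&s);
	printf("loss rate: %.4f\n", (double)lost / total);
	return 0;
}
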
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 0b7f9ba28deb..d4ce58c90f9f 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1421,10 +1421,8 @@ static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
if (err < 0)
return err;
- if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
- max_classes = QFQ_MAX_AGG_CLASSES;
- else
- max_classes = qdisc_dev(sch)->tx_queue_len + 1;
+ max_classes = min_t(u64, (u64)qdisc_dev(sch)->tx_queue_len + 1,
+ QFQ_MAX_AGG_CLASSES);
/* max_cl_shift = floor(log_2(max_classes)) */
max_cl_shift = __fls(max_classes);
q->max_agg_classes = 1<<max_cl_shift;
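
The min_t(u64, ...) form above widens the arithmetic so tx_queue_len + 1 cannot wrap before the comparison. A tiny userspace check of the difference (the cap value is assumed here for illustration):

#include <stdint.h>
#include <stdio.h>

#define QFQ_MAX_AGG_CLASSES 8 /* assumed value, for illustration */

int main(void)
{
	uint32_t tx_queue_len = UINT32_MAX;

	/* 32-bit arithmetic: tx_queue_len + 1 wraps to 0, so the old
	 * comparison would end up with 0 classes instead of the cap. */
	uint32_t wrapped = tx_queue_len + 1;

	/* 64-bit arithmetic, as in the patched code. */
	uint64_t widened = (uint64_t)tx_queue_len + 1;
	uint64_t max_classes = widened < QFQ_MAX_AGG_CLASSES ?
			       widened : QFQ_MAX_AGG_CLASSES;

	printf("wrapped=%u capped=%llu\n",
	       (unsigned)wrapped, (unsigned long long)max_classes);
	return 0;
}
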
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index a7d623171501..034e2c74497d 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -245,48 +245,44 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
+ 64;
}
-static int sctp_tsp_dump_one(struct sctp_transport *tsp, void *p)
+static int sctp_sock_dump_one(struct sctp_endpoint *ep, struct sctp_transport *tsp, void *p)
{
struct sctp_association *assoc = tsp->asoc;
- struct sock *sk = tsp->asoc->base.sk;
struct sctp_comm_param *commp = p;
- struct sk_buff *in_skb = commp->skb;
+ struct sock *sk = ep->base.sk;
const struct inet_diag_req_v2 *req = commp->r;
- const struct nlmsghdr *nlh = commp->nlh;
- struct net *net = sock_net(in_skb->sk);
+ struct sk_buff *skb = commp->skb;
struct sk_buff *rep;
int err;
err = sock_diag_check_cookie(sk, req->id.idiag_cookie);
if (err)
- goto out;
+ return err;
- err = -ENOMEM;
rep = nlmsg_new(inet_assoc_attr_size(assoc), GFP_KERNEL);
if (!rep)
- goto out;
+ return -ENOMEM;
lock_sock(sk);
- if (sk != assoc->base.sk) {
- release_sock(sk);
- sk = assoc->base.sk;
- lock_sock(sk);
+ if (ep != assoc->ep) {
+ err = -EAGAIN;
+ goto out;
}
- err = inet_sctp_diag_fill(sk, assoc, rep, req,
- sk_user_ns(NETLINK_CB(in_skb).sk),
- NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, nlh,
- commp->net_admin);
- release_sock(sk);
+
+ err = inet_sctp_diag_fill(sk, assoc, rep, req, sk_user_ns(NETLINK_CB(skb).sk),
+ NETLINK_CB(skb).portid, commp->nlh->nlmsg_seq, 0,
+ commp->nlh, commp->net_admin);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
- kfree_skb(rep);
goto out;
}
+ release_sock(sk);
- err = nlmsg_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid);
+ return nlmsg_unicast(sock_net(skb->sk)->diag_nlsk, rep, NETLINK_CB(skb).portid);
out:
+ release_sock(sk);
+ kfree_skb(rep);
return err;
}
@@ -429,15 +425,15 @@ static void sctp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
static int sctp_diag_dump_one(struct netlink_callback *cb,
const struct inet_diag_req_v2 *req)
{
- struct sk_buff *in_skb = cb->skb;
- struct net *net = sock_net(in_skb->sk);
+ struct sk_buff *skb = cb->skb;
+ struct net *net = sock_net(skb->sk);
const struct nlmsghdr *nlh = cb->nlh;
union sctp_addr laddr, paddr;
struct sctp_comm_param commp = {
- .skb = in_skb,
+ .skb = skb,
.r = req,
.nlh = nlh,
- .net_admin = netlink_net_capable(in_skb, CAP_NET_ADMIN),
+ .net_admin = netlink_net_capable(skb, CAP_NET_ADMIN),
};
if (req->sdiag_family == AF_INET) {
@@ -460,7 +456,7 @@ static int sctp_diag_dump_one(struct netlink_callback *cb,
paddr.v6.sin6_family = AF_INET6;
}
- return sctp_transport_lookup_process(sctp_tsp_dump_one,
+ return sctp_transport_lookup_process(sctp_sock_dump_one,
net, &laddr, &paddr, &commp);
}
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 1f1786021d9c..90e12bafdd48 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -746,23 +746,21 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
struct sock *sk = ep->base.sk;
struct net *net = sock_net(sk);
struct sctp_hashbucket *head;
- struct sctp_ep_common *epb;
- epb = &ep->base;
- epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
- head = &sctp_ep_hashtable[epb->hashent];
+ ep->hashent = sctp_ep_hashfn(net, ep->base.bind_addr.port);
+ head = &sctp_ep_hashtable[ep->hashent];
if (sk->sk_reuseport) {
bool any = sctp_is_ep_boundall(sk);
- struct sctp_ep_common *epb2;
+ struct sctp_endpoint *ep2;
struct list_head *list;
int cnt = 0, err = 1;
list_for_each(list, &ep->base.bind_addr.address_list)
cnt++;
- sctp_for_each_hentry(epb2, &head->chain) {
- struct sock *sk2 = epb2->sk;
+ sctp_for_each_hentry(ep2, &head->chain) {
+ struct sock *sk2 = ep2->base.sk;
if (!net_eq(sock_net(sk2), net) || sk2 == sk ||
!uid_eq(sock_i_uid(sk2), sock_i_uid(sk)) ||
@@ -789,7 +787,7 @@ static int __sctp_hash_endpoint(struct sctp_endpoint *ep)
}
write_lock(&head->lock);
- hlist_add_head(&epb->node, &head->chain);
+ hlist_add_head(&ep->node, &head->chain);
write_unlock(&head->lock);
return 0;
}
@@ -811,19 +809,16 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
{
struct sock *sk = ep->base.sk;
struct sctp_hashbucket *head;
- struct sctp_ep_common *epb;
- epb = &ep->base;
+ ep->hashent = sctp_ep_hashfn(sock_net(sk), ep->base.bind_addr.port);
- epb->hashent = sctp_ep_hashfn(sock_net(sk), epb->bind_addr.port);
-
- head = &sctp_ep_hashtable[epb->hashent];
+ head = &sctp_ep_hashtable[ep->hashent];
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
write_lock(&head->lock);
- hlist_del_init(&epb->node);
+ hlist_del_init(&ep->node);
write_unlock(&head->lock);
}
@@ -856,7 +851,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
const union sctp_addr *paddr)
{
struct sctp_hashbucket *head;
- struct sctp_ep_common *epb;
struct sctp_endpoint *ep;
struct sock *sk;
__be16 lport;
@@ -866,8 +860,7 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(
hash = sctp_ep_hashfn(net, ntohs(lport));
head = &sctp_ep_hashtable[hash];
read_lock(&head->lock);
- sctp_for_each_hentry(epb, &head->chain) {
- ep = sctp_ep(epb);
+ sctp_for_each_hentry(ep, &head->chain) {
if (sctp_endpoint_is_match(ep, net, laddr))
goto hit;
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index cdfdbd353c67..72fe6669c50d 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -134,7 +134,7 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
dst_hold(tp->dst);
sk_setup_caps(sk, tp->dst);
}
- packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
+ packet->max_size = sk_can_gso(sk) ? READ_ONCE(tp->dst->dev->gso_max_size)
: asoc->pathmtu;
rcu_read_unlock();
}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index ff47091c385e..a18609f608fb 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -547,6 +547,9 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
sctp_assoc_update_retran_path(transport->asoc);
transport->asoc->rtx_data_chunks +=
transport->asoc->unack_data;
+ if (transport->pl.state == SCTP_PL_COMPLETE &&
+ transport->asoc->unack_data)
+ sctp_transport_reset_probe_timer(transport);
break;
case SCTP_RTXR_FAST_RTX:
SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 982a87b3e11f..f13d6a34f32f 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -161,7 +161,6 @@ static void *sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static int sctp_eps_seq_show(struct seq_file *seq, void *v)
{
struct sctp_hashbucket *head;
- struct sctp_ep_common *epb;
struct sctp_endpoint *ep;
struct sock *sk;
int hash = *(loff_t *)v;
@@ -171,18 +170,17 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
head = &sctp_ep_hashtable[hash];
read_lock_bh(&head->lock);
- sctp_for_each_hentry(epb, &head->chain) {
- ep = sctp_ep(epb);
- sk = epb->sk;
+ sctp_for_each_hentry(ep, &head->chain) {
+ sk = ep->base.sk;
if (!net_eq(sock_net(sk), seq_file_net(seq)))
continue;
seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk,
sctp_sk(sk)->type, sk->sk_state, hash,
- epb->bind_addr.port,
+ ep->base.bind_addr.port,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
sock_i_ino(sk));
- sctp_seq_dump_local_addrs(seq, epb);
+ sctp_seq_dump_local_addrs(seq, &ep->base);
seq_printf(seq, "\n");
}
read_unlock_bh(&head->lock);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 354c1c4de19b..cc544a97c4af 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1124,12 +1124,11 @@ enum sctp_disposition sctp_sf_send_probe(struct net *net,
if (!sctp_transport_pl_enabled(transport))
return SCTP_DISPOSITION_CONSUME;
- if (sctp_transport_pl_send(transport)) {
- reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
- if (!reply)
- return SCTP_DISPOSITION_NOMEM;
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
- }
+ sctp_transport_pl_send(transport);
+ reply = sctp_make_heartbeat(asoc, transport, transport->pl.probe_size);
+ if (!reply)
+ return SCTP_DISPOSITION_NOMEM;
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
sctp_add_cmd_sf(commands, SCTP_CMD_PROBE_TIMER_UPDATE,
SCTP_TRANSPORT(transport));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ad5028a07b18..3e1a9600be5e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5068,12 +5068,9 @@ static int sctp_init_sock(struct sock *sk)
SCTP_DBG_OBJCNT_INC(sock);
- local_bh_disable();
sk_sockets_allocated_inc(sk);
sock_prot_inuse_add(net, sk->sk_prot, 1);
- local_bh_enable();
-
return 0;
}
@@ -5099,10 +5096,8 @@ static void sctp_destroy_sock(struct sock *sk)
list_del(&sp->auto_asconf_list);
}
sctp_endpoint_free(sp->ep);
- local_bh_disable();
sk_sockets_allocated_dec(sk);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- local_bh_enable();
}
/* Triggered when there are no references on the socket anymore */
@@ -5299,14 +5294,14 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
void *p) {
int err = 0;
int hash = 0;
- struct sctp_ep_common *epb;
+ struct sctp_endpoint *ep;
struct sctp_hashbucket *head;
for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
hash++, head++) {
read_lock_bh(&head->lock);
- sctp_for_each_hentry(epb, &head->chain) {
- err = cb(sctp_ep(epb), p);
+ sctp_for_each_hentry(ep, &head->chain) {
+ err = cb(ep, p);
if (err)
break;
}
@@ -5317,23 +5312,31 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
}
EXPORT_SYMBOL_GPL(sctp_for_each_endpoint);
-int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *),
- struct net *net,
+int sctp_transport_lookup_process(sctp_callback_t cb, struct net *net,
const union sctp_addr *laddr,
const union sctp_addr *paddr, void *p)
{
struct sctp_transport *transport;
- int err;
+ struct sctp_endpoint *ep;
+ int err = -ENOENT;
rcu_read_lock();
transport = sctp_addrs_lookup_transport(net, laddr, paddr);
+ if (!transport) {
+ rcu_read_unlock();
+ return err;
+ }
+ ep = transport->asoc->ep;
+ if (!sctp_endpoint_hold(ep)) { /* asoc can be peeled off */
+ sctp_transport_put(transport);
+ rcu_read_unlock();
+ return err;
+ }
rcu_read_unlock();
- if (!transport)
- return -ENOENT;
- err = cb(transport, p);
+ err = cb(ep, transport, p);
+ sctp_endpoint_put(ep);
sctp_transport_put(transport);
-
return err;
}
EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 133f1719bf1b..f8fd98784977 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -213,13 +213,18 @@ void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
void sctp_transport_reset_probe_timer(struct sctp_transport *transport)
{
- if (timer_pending(&transport->probe_timer))
- return;
if (!mod_timer(&transport->probe_timer,
jiffies + transport->probe_interval))
sctp_transport_hold(transport);
}
+void sctp_transport_reset_raise_timer(struct sctp_transport *transport)
+{
+ if (!mod_timer(&transport->probe_timer,
+ jiffies + transport->probe_interval * 30))
+ sctp_transport_hold(transport);
+}
+
/* This transport has been assigned to an association.
* Initialize fields from the association or from the sock itself.
* Register the reference count in the association.
@@ -258,12 +263,11 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
sctp_transport_pl_update(transport);
}
-bool sctp_transport_pl_send(struct sctp_transport *t)
+void sctp_transport_pl_send(struct sctp_transport *t)
{
if (t->pl.probe_count < SCTP_MAX_PROBES)
goto out;
- t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
t->pl.probe_count = 0;
if (t->pl.state == SCTP_PL_BASE) {
if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
@@ -298,17 +302,9 @@ bool sctp_transport_pl_send(struct sctp_transport *t)
}
out:
- if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 &&
- !t->pl.probe_count && t->pl.last_rtx_chunks == t->asoc->rtx_data_chunks) {
- t->pl.raise_count++;
- return false;
- }
-
pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
__func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
-
t->pl.probe_count++;
- return true;
}
bool sctp_transport_pl_recv(struct sctp_transport *t)
@@ -316,7 +312,6 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
__func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
- t->pl.last_rtx_chunks = t->asoc->rtx_data_chunks;
t->pl.pmtu = t->pl.probe_size;
t->pl.probe_count = 0;
if (t->pl.state == SCTP_PL_BASE) {
@@ -338,14 +333,14 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
t->pl.probe_size += SCTP_PL_MIN_STEP;
if (t->pl.probe_size >= t->pl.probe_high) {
t->pl.probe_high = 0;
- t->pl.raise_count = 0;
t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */
t->pl.probe_size = t->pl.pmtu;
t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
sctp_assoc_sync_pmtu(t->asoc);
+ sctp_transport_reset_raise_timer(t);
}
- } else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) {
+ } else if (t->pl.state == SCTP_PL_COMPLETE) {
/* Raise probe_size again after 30 * interval in Search Complete */
t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
t->pl.probe_size += SCTP_PL_MIN_STEP;
@@ -393,6 +388,7 @@ static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
t->pl.probe_high = 0;
t->pl.pmtu = SCTP_BASE_PLPMTU;
t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
+ sctp_transport_reset_probe_timer(t);
return true;
}
}
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 1c9289f56dc4..aa3bcaaeabf7 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -89,8 +89,8 @@ int smc_hash_sk(struct sock *sk)
write_lock_bh(&h->lock);
sk_add_node(sk, head);
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
write_unlock_bh(&h->lock);
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
return 0;
}
@@ -632,10 +632,12 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
static void smc_conn_abort(struct smc_sock *smc, int local_first)
{
+ struct smc_connection *conn = &smc->conn;
+ struct smc_link_group *lgr = conn->lgr;
+
+ smc_conn_free(conn);
if (local_first)
- smc_lgr_cleanup_early(&smc->conn);
- else
- smc_conn_free(&smc->conn);
+ smc_lgr_cleanup_early(lgr);
}
/* check if there is a rdma device available for this connection. */
@@ -2700,8 +2702,8 @@ static const struct proto_ops smc_sock_ops = {
.splice_read = smc_splice_read,
};
-static int smc_create(struct net *net, struct socket *sock, int protocol,
- int kern)
+static int __smc_create(struct net *net, struct socket *sock, int protocol,
+ int kern, struct socket *clcsock)
{
int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
struct smc_sock *smc;
@@ -2726,12 +2728,19 @@ static int smc_create(struct net *net, struct socket *sock, int protocol,
smc = smc_sk(sk);
smc->use_fallback = false; /* assume rdma capability first */
smc->fallback_rsn = 0;
- rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
- &smc->clcsock);
- if (rc) {
- sk_common_release(sk);
- goto out;
+
+ rc = 0;
+ if (!clcsock) {
+ rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
+ &smc->clcsock);
+ if (rc) {
+ sk_common_release(sk);
+ goto out;
+ }
+ } else {
+ smc->clcsock = clcsock;
}
+
smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
@@ -2739,12 +2748,76 @@ out:
return rc;
}
+static int smc_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ return __smc_create(net, sock, protocol, kern, NULL);
+}
+
static const struct net_proto_family smc_sock_family_ops = {
.family = PF_SMC,
.owner = THIS_MODULE,
.create = smc_create,
};
+static int smc_ulp_init(struct sock *sk)
+{
+ struct socket *tcp = sk->sk_socket;
+ struct net *net = sock_net(sk);
+ struct socket *smcsock;
+ int protocol, ret;
+
+ /* only TCP can be replaced */
+ if (tcp->type != SOCK_STREAM || sk->sk_protocol != IPPROTO_TCP ||
+ (sk->sk_family != AF_INET && sk->sk_family != AF_INET6))
+ return -ESOCKTNOSUPPORT;
+ /* don't handle wq now */
+ if (tcp->state != SS_UNCONNECTED || !tcp->file || tcp->wq.fasync_list)
+ return -ENOTCONN;
+
+ if (sk->sk_family == AF_INET)
+ protocol = SMCPROTO_SMC;
+ else
+ protocol = SMCPROTO_SMC6;
+
+ smcsock = sock_alloc();
+ if (!smcsock)
+ return -ENFILE;
+
+ smcsock->type = SOCK_STREAM;
+ __module_get(THIS_MODULE); /* tried in __tcp_ulp_find_autoload */
+ ret = __smc_create(net, smcsock, protocol, 1, tcp);
+ if (ret) {
+ sock_release(smcsock); /* does module_put(); ops won't be NULL here */
+ return ret;
+ }
+
+ /* replace tcp socket to smc */
+ smcsock->file = tcp->file;
+ smcsock->file->private_data = smcsock;
+ smcsock->file->f_inode = SOCK_INODE(smcsock); /* replace inode when sock_close */
+ smcsock->file->f_path.dentry->d_inode = SOCK_INODE(smcsock); /* dput() in __fput */
+ tcp->file = NULL;
+
+ return ret;
+}
+
+static void smc_ulp_clone(const struct request_sock *req, struct sock *newsk,
+ const gfp_t priority)
+{
+ struct inet_connection_sock *icsk = inet_csk(newsk);
+
+ /* don't inherit ULP ops to the child socket created by listen */
+ icsk->icsk_ulp_ops = NULL;
+}
+
+static struct tcp_ulp_ops smc_ulp_ops __read_mostly = {
+ .name = "smc",
+ .owner = THIS_MODULE,
+ .init = smc_ulp_init,
+ .clone = smc_ulp_clone,
+};
+
unsigned int smc_net_id;
static __net_init int smc_net_init(struct net *net)
@@ -2855,6 +2928,12 @@ static int __init smc_init(void)
goto out_sock;
}
+ rc = tcp_register_ulp(&smc_ulp_ops);
+ if (rc) {
+ pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
+ goto out_sock;
+ }
+
static_branch_enable(&tcp_have_smc);
return 0;
@@ -2883,6 +2962,7 @@ out_pernet_subsys:
static void __exit smc_exit(void)
{
static_branch_disable(&tcp_have_smc);
+ tcp_unregister_ulp(&smc_ulp_ops);
sock_unregister(PF_SMC);
smc_core_exit();
smc_ib_unregister_client();
@@ -2905,3 +2985,4 @@ MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);
+MODULE_ALIAS_TCP_ULP("smc");
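
With the "smc" ULP registered, an existing TCP socket can be upgraded from userspace before connect()/listen(). A minimal sketch (requires a kernel with this patch and SMC support, so treat it as illustrative; TCP_ULP is defined locally in case the libc headers predate it):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_ULP
#define TCP_ULP 31
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Replace the plain TCP socket with an SMC socket via the ULP hook;
	 * fails on kernels without the "smc" ULP. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_ULP, "smc", sizeof("smc")) < 0)
		perror("setsockopt(TCP_ULP, \"smc\")");
	close(fd);
	return 0;
}
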
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 8409ab71a5e4..6be95a2a7b25 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -1021,7 +1021,6 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
struct smc_link *link = conn->lnk;
/* SMC-R specific settings */
- link = conn->lnk;
memcpy(clc->hdr.eyecatcher, SMC_EYECATCHER,
sizeof(SMC_EYECATCHER));
clc->hdr.typev1 = SMC_TYPE_R;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index a6849362f4dd..8935ef4811b0 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -171,8 +171,10 @@ static int smc_lgr_register_conn(struct smc_connection *conn, bool first)
if (!conn->lgr->is_smcd) {
rc = smcr_lgr_conn_assign_link(conn, first);
- if (rc)
+ if (rc) {
+ conn->lgr = NULL;
return rc;
+ }
}
/* find a new alert_token_local value not yet used by some connection
* in this link group
@@ -348,6 +350,9 @@ static int smc_nl_fill_lgr(struct smc_link_group *lgr,
goto errattr;
if (nla_put_u8(skb, SMC_NLA_LGR_R_VLAN_ID, lgr->vlan_id))
goto errattr;
+ if (nla_put_u64_64bit(skb, SMC_NLA_LGR_R_NET_COOKIE,
+ lgr->net->net_cookie, SMC_NLA_LGR_R_PAD))
+ goto errattr;
memcpy(smc_target, lgr->pnet_id, SMC_MAX_PNETID_LEN);
smc_target[SMC_MAX_PNETID_LEN] = 0;
if (nla_put_string(skb, SMC_NLA_LGR_R_PNETID, smc_target))
@@ -622,15 +627,13 @@ int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
-void smc_lgr_cleanup_early(struct smc_connection *conn)
+void smc_lgr_cleanup_early(struct smc_link_group *lgr)
{
- struct smc_link_group *lgr = conn->lgr;
spinlock_t *lgr_lock;
if (!lgr)
return;
- smc_conn_free(conn);
smc_lgr_list_head(lgr, &lgr_lock);
spin_lock_bh(lgr_lock);
/* do not use this link group for new connections */
@@ -897,6 +900,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
smc_wr_free_lgr_mem(lgr);
goto free_wq;
}
+ lgr->net = smc_ib_net(lnk->smcibdev);
lgr_list = &smc_lgr_list.list;
lgr_lock = &smc_lgr_list.lock;
atomic_inc(&lgr_cnt);
@@ -1101,18 +1105,24 @@ static void smcr_buf_unuse(struct smc_buf_desc *rmb_desc,
smc_buf_free(lgr, true, rmb_desc);
} else {
rmb_desc->used = 0;
+ memset(rmb_desc->cpu_addr, 0, rmb_desc->len);
}
}
static void smc_buf_unuse(struct smc_connection *conn,
struct smc_link_group *lgr)
{
- if (conn->sndbuf_desc)
+ if (conn->sndbuf_desc) {
conn->sndbuf_desc->used = 0;
- if (conn->rmb_desc && lgr->is_smcd)
+ memset(conn->sndbuf_desc->cpu_addr, 0, conn->sndbuf_desc->len);
+ }
+ if (conn->rmb_desc && lgr->is_smcd) {
conn->rmb_desc->used = 0;
- else if (conn->rmb_desc)
+ memset(conn->rmb_desc->cpu_addr, 0, conn->rmb_desc->len +
+ sizeof(struct smcd_cdc_msg));
+ } else if (conn->rmb_desc) {
smcr_buf_unuse(conn->rmb_desc, lgr);
+ }
}
/* remove a finished connection from its link group */
@@ -1542,9 +1552,9 @@ void smcr_lgr_set_type(struct smc_link_group *lgr, enum smc_lgr_type new_type)
lgr_type = "ASYMMETRIC_LOCAL";
break;
}
- pr_warn_ratelimited("smc: SMC-R lg %*phN state changed: "
+ pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu state changed: "
"%s, pnetid %.16s\n", SMC_LGR_ID_SIZE, &lgr->id,
- lgr_type, lgr->pnet_id);
+ lgr->net->net_cookie, lgr_type, lgr->pnet_id);
}
/* set new lgr type and tag a link as asymmetric */
@@ -1579,7 +1589,8 @@ void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
SMC_MAX_PNETID_LEN) ||
lgr->type == SMC_LGR_SYMMETRIC ||
- lgr->type == SMC_LGR_ASYMMETRIC_PEER)
+ lgr->type == SMC_LGR_ASYMMETRIC_PEER ||
+ !rdma_dev_access_netns(smcibdev->ibdev, lgr->net))
continue;
/* trigger local add link processing */
@@ -1737,8 +1748,10 @@ static bool smcr_lgr_match(struct smc_link_group *lgr, u8 smcr_version,
u8 peer_systemid[],
u8 peer_gid[],
u8 peer_mac_v1[],
- enum smc_lgr_role role, u32 clcqpn)
+ enum smc_lgr_role role, u32 clcqpn,
+ struct net *net)
{
+ struct smc_link *lnk;
int i;
if (memcmp(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN) ||
@@ -1746,12 +1759,17 @@ static bool smcr_lgr_match(struct smc_link_group *lgr, u8 smcr_version,
return false;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (!smc_link_active(&lgr->lnk[i]))
+ lnk = &lgr->lnk[i];
+
+ if (!smc_link_active(lnk))
continue;
- if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
- !memcmp(lgr->lnk[i].peer_gid, peer_gid, SMC_GID_SIZE) &&
+ /* use verbs API to check netns, instead of lgr->net */
+ if (!rdma_dev_access_netns(lnk->smcibdev->ibdev, net))
+ return false;
+ if ((lgr->role == SMC_SERV || lnk->peer_qpn == clcqpn) &&
+ !memcmp(lnk->peer_gid, peer_gid, SMC_GID_SIZE) &&
(smcr_version == SMC_V2 ||
- !memcmp(lgr->lnk[i].peer_mac, peer_mac_v1, ETH_ALEN)))
+ !memcmp(lnk->peer_mac, peer_mac_v1, ETH_ALEN)))
return true;
}
return false;
@@ -1767,6 +1785,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
{
struct smc_connection *conn = &smc->conn;
+ struct net *net = sock_net(&smc->sk);
struct list_head *lgr_list;
struct smc_link_group *lgr;
enum smc_lgr_role role;
@@ -1793,7 +1812,7 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
smcr_lgr_match(lgr, ini->smcr_version,
ini->peer_systemid,
ini->peer_gid, ini->peer_mac, role,
- ini->ib_clcqpn)) &&
+ ini->ib_clcqpn, net)) &&
!lgr->sync_err &&
(ini->smcd_version == SMC_V2 ||
lgr->vlan_id == ini->vlan_id) &&
@@ -1832,8 +1851,10 @@ create:
write_lock_bh(&lgr->conns_lock);
rc = smc_lgr_register_conn(conn, true);
write_unlock_bh(&lgr->conns_lock);
- if (rc)
+ if (rc) {
+ smc_lgr_cleanup_early(lgr);
goto out;
+ }
}
conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
@@ -2163,7 +2184,6 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
if (buf_desc) {
SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
SMC_STAT_BUF_REUSE(smc, is_smcd, is_rmb);
- memset(buf_desc->cpu_addr, 0, bufsize);
break; /* found reusable slot */
}
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index d63b08274197..521c64a3d8d3 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -306,6 +306,8 @@ struct smc_link_group {
u8 nexthop_mac[ETH_ALEN];
u8 uses_gateway;
__be32 saddr;
+ /* net namespace */
+ struct net *net;
};
struct { /* SMC-D */
u64 peer_gid;
@@ -407,7 +409,13 @@ static inline struct smc_connection *smc_lgr_find_conn(
return res;
}
-/* returns true if the specified link is usable */
+/*
+ * Returns true if the specified link is usable.
+ *
+ * Usable means the link is ready to receive RDMA messages, map memory
+ * on the link, etc. It does not guarantee that we are able to send RDMA
+ * messages on the link; if sending is required, use smc_link_sendable().
+ */
static inline bool smc_link_usable(struct smc_link *lnk)
{
if (lnk->state == SMC_LNK_UNUSED || lnk->state == SMC_LNK_INACTIVE)
@@ -415,6 +423,15 @@ static inline bool smc_link_usable(struct smc_link *lnk)
return true;
}
+/*
+ * Returns true if the specified link is ready to receive AND send RDMA
+ * messages.
+ *
+ * For the client side in first contact, the underlying QP may still be in
+ * RESET or RTR while the link state is ACTIVATING, so the check in
+ * smc_link_usable() is not strong enough. Use smc_link_sendable() wherever
+ * CDC or LLC messages must be sent; otherwise smc_link_usable() suffices.
+ */
static inline bool smc_link_sendable(struct smc_link *lnk)
{
return smc_link_usable(lnk) &&
@@ -468,7 +485,7 @@ static inline void smc_set_pci_values(struct pci_dev *pci_dev,
struct smc_sock;
struct smc_clc_msg_accept_confirm;
-void smc_lgr_cleanup_early(struct smc_connection *conn);
+void smc_lgr_cleanup_early(struct smc_link_group *lgr);
void smc_lgr_terminate_sched(struct smc_link_group *lgr);
void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport);
void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport);
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index c952986a6aca..7c8dad28c18d 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -145,19 +145,21 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
(req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
!list_empty(&smc->conn.lgr->list)) {
+ struct smc_link *link = smc->conn.lnk;
+ struct net *net = read_pnet(&link->smcibdev->ibdev->coredev.rdma_net);
+
struct smc_diag_lgrinfo linfo = {
.role = smc->conn.lgr->role,
- .lnk[0].ibport = smc->conn.lnk->ibport,
- .lnk[0].link_id = smc->conn.lnk->link_id,
+ .lnk[0].ibport = link->ibport,
+ .lnk[0].link_id = link->link_id,
+ .lnk[0].net_cookie = net->net_cookie,
};
memcpy(linfo.lnk[0].ibname,
smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
- sizeof(smc->conn.lnk->smcibdev->ibdev->name));
- smc_gid_be16_convert(linfo.lnk[0].gid,
- smc->conn.lnk->gid);
- smc_gid_be16_convert(linfo.lnk[0].peer_gid,
- smc->conn.lnk->peer_gid);
+ sizeof(link->smcibdev->ibdev->name));
+ smc_gid_be16_convert(linfo.lnk[0].gid, link->gid);
+ smc_gid_be16_convert(linfo.lnk[0].peer_gid, link->peer_gid);
if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
goto errout;
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index fe5d5399c4e8..a3e2d3b89568 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -12,6 +12,8 @@
* Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
*/
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index bfa1c6bf6313..5d8b49c57f50 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -69,6 +69,13 @@ static inline __be32 smc_ib_gid_to_ipv4(u8 gid[SMC_GID_SIZE])
return cpu_to_be32(INADDR_NONE);
}
+static inline struct net *smc_ib_net(struct smc_ib_device *smcibdev)
+{
+ if (smcibdev && smcibdev->ibdev)
+ return read_pnet(&smcibdev->ibdev->coredev.rdma_net);
+ return NULL;
+}
+
struct smc_init_info_smcrv2;
struct smc_buf_desc;
struct smc_link;
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index fd28cc498b98..a2084ecdb97e 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -6,6 +6,7 @@
* Copyright IBM Corp. 2018
*/
+#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 3e9fd8a3124c..c4d057b2941d 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -242,9 +242,10 @@ static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
}
/* drop parallel or already-in-progress llc requests */
if (flow_type != msg_type)
- pr_warn_once("smc: SMC-R lg %*phN dropped parallel "
+ pr_warn_once("smc: SMC-R lg %*phN net %llu dropped parallel "
"LLC msg: msg %d flow %d role %d\n",
SMC_LGR_ID_SIZE, &lgr->id,
+ lgr->net->net_cookie,
qentry->msg.raw.hdr.common.type,
flow_type, lgr->role);
kfree(qentry);
@@ -359,9 +360,10 @@ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
smc_llc_flow_qentry_clr(flow));
return NULL;
}
- pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: "
+ pr_warn_once("smc: SMC-R lg %*phN net %llu dropped unexpected LLC msg: "
"msg %d exp %d flow %d role %d flags %x\n",
- SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg,
+ SMC_LGR_ID_SIZE, &lgr->id, lgr->net->net_cookie,
+ rcv_msg, exp_msg,
flow->type, lgr->role,
flow->qentry->msg.raw.hdr.flags);
smc_llc_flow_qentry_del(flow);
@@ -1816,8 +1818,9 @@ finish:
static void smc_llc_protocol_violation(struct smc_link_group *lgr, u8 type)
{
- pr_warn_ratelimited("smc: SMC-R lg %*phN LLC protocol violation: "
- "llc_type %d\n", SMC_LGR_ID_SIZE, &lgr->id, type);
+ pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu LLC protocol violation: "
+ "llc_type %d\n", SMC_LGR_ID_SIZE, &lgr->id,
+ lgr->net->net_cookie, type);
smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_PROT_VIOL);
smc_lgr_terminate_sched(lgr);
}
@@ -2146,9 +2149,10 @@ int smc_llc_link_init(struct smc_link *link)
void smc_llc_link_active(struct smc_link *link)
{
- pr_warn_ratelimited("smc: SMC-R lg %*phN link added: id %*phN, "
+ pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link added: id %*phN, "
"peerid %*phN, ibdev %s, ibport %d\n",
SMC_LGR_ID_SIZE, &link->lgr->id,
+ link->lgr->net->net_cookie,
SMC_LGR_ID_SIZE, &link->link_uid,
SMC_LGR_ID_SIZE, &link->peer_link_uid,
link->smcibdev->ibdev->name, link->ibport);
@@ -2164,9 +2168,10 @@ void smc_llc_link_active(struct smc_link *link)
void smc_llc_link_clear(struct smc_link *link, bool log)
{
if (log)
- pr_warn_ratelimited("smc: SMC-R lg %*phN link removed: id %*phN"
+ pr_warn_ratelimited("smc: SMC-R lg %*phN net %llu link removed: id %*phN"
", peerid %*phN, ibdev %s, ibport %d\n",
SMC_LGR_ID_SIZE, &link->lgr->id,
+ link->lgr->net->net_cookie,
SMC_LGR_ID_SIZE, &link->link_uid,
SMC_LGR_ID_SIZE, &link->peer_link_uid,
link->smcibdev->ibdev->name, link->ibport);
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 67e9d9fde085..db9825c01e0a 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -64,6 +64,7 @@ struct smc_pnetentry {
struct {
char eth_name[IFNAMSIZ + 1];
struct net_device *ndev;
+ netdevice_tracker dev_tracker;
};
struct {
char ib_name[IB_DEVICE_NAME_MAX + 1];
@@ -119,7 +120,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
smc_pnet_match(pnetelem->pnet_name, pnet_name)) {
list_del(&pnetelem->list);
if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev) {
- dev_put(pnetelem->ndev);
+ dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker);
pr_warn_ratelimited("smc: net device %s "
"erased user defined "
"pnetid %.16s\n",
@@ -195,7 +196,7 @@ static int smc_pnet_add_by_ndev(struct net_device *ndev)
list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
if (pnetelem->type == SMC_PNET_ETH && !pnetelem->ndev &&
!strncmp(pnetelem->eth_name, ndev->name, IFNAMSIZ)) {
- dev_hold(ndev);
+ dev_hold_track(ndev, &pnetelem->dev_tracker, GFP_ATOMIC);
pnetelem->ndev = ndev;
rc = 0;
pr_warn_ratelimited("smc: adding net device %s with "
@@ -226,7 +227,7 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
write_lock(&pnettable->lock);
list_for_each_entry_safe(pnetelem, tmp_pe, &pnettable->pnetlist, list) {
if (pnetelem->type == SMC_PNET_ETH && pnetelem->ndev == ndev) {
- dev_put(pnetelem->ndev);
+ dev_put_track(pnetelem->ndev, &pnetelem->dev_tracker);
pnetelem->ndev = NULL;
rc = 0;
pr_warn_ratelimited("smc: removing net device %s with "
@@ -368,7 +369,7 @@ static int smc_pnet_add_eth(struct smc_pnettable *pnettable, struct net *net,
memcpy(new_pe->pnet_name, pnet_name, SMC_MAX_PNETID_LEN);
strncpy(new_pe->eth_name, eth_name, IFNAMSIZ);
new_pe->ndev = ndev;
-
+ netdev_tracker_alloc(ndev, &new_pe->dev_tracker, GFP_KERNEL);
rc = -EEXIST;
new_netdev = true;
write_lock(&pnettable->lock);
@@ -976,14 +977,16 @@ static int smc_pnet_determine_gid(struct smc_ib_device *ibdev, int i,
/* find a roce device for the given pnetid */
static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
struct smc_init_info *ini,
- struct smc_ib_device *known_dev)
+ struct smc_ib_device *known_dev,
+ struct net *net)
{
struct smc_ib_device *ibdev;
int i;
mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
- if (ibdev == known_dev)
+ if (ibdev == known_dev ||
+ !rdma_dev_access_netns(ibdev->ibdev, net))
continue;
for (i = 1; i <= SMC_MAX_PORTS; i++) {
if (!rdma_is_port_valid(ibdev->ibdev, i))
@@ -1000,12 +1003,14 @@ out:
mutex_unlock(&smc_ib_devices.mutex);
}
-/* find alternate roce device with same pnet_id and vlan_id */
+/* find alternate roce device with same pnet_id, vlan_id and net namespace */
void smc_pnet_find_alt_roce(struct smc_link_group *lgr,
struct smc_init_info *ini,
struct smc_ib_device *known_dev)
{
- _smc_pnet_find_roce_by_pnetid(lgr->pnet_id, ini, known_dev);
+ struct net *net = lgr->net;
+
+ _smc_pnet_find_roce_by_pnetid(lgr->pnet_id, ini, known_dev, net);
}
/* if handshake network device belongs to a roce device, return its
@@ -1014,6 +1019,7 @@ void smc_pnet_find_alt_roce(struct smc_link_group *lgr,
static void smc_pnet_find_rdma_dev(struct net_device *netdev,
struct smc_init_info *ini)
{
+ struct net *net = dev_net(netdev);
struct smc_ib_device *ibdev;
mutex_lock(&smc_ib_devices.mutex);
@@ -1021,6 +1027,10 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
struct net_device *ndev;
int i;
+ /* check rdma net namespace */
+ if (!rdma_dev_access_netns(ibdev->ibdev, net))
+ continue;
+
for (i = 1; i <= SMC_MAX_PORTS; i++) {
if (!rdma_is_port_valid(ibdev->ibdev, i))
continue;
@@ -1051,15 +1061,17 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev,
struct smc_init_info *ini)
{
u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
+ struct net *net;
ndev = pnet_find_base_ndev(ndev);
+ net = dev_net(ndev);
if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port,
ndev_pnetid) &&
smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) {
smc_pnet_find_rdma_dev(ndev, ini);
return; /* pnetid could not be determined */
}
- _smc_pnet_find_roce_by_pnetid(ndev_pnetid, ini, NULL);
+ _smc_pnet_find_roce_by_pnetid(ndev_pnetid, ini, NULL, net);
}
static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
diff --git a/net/smc/smc_tracepoint.h b/net/smc/smc_tracepoint.h
index ec17f29646f5..9fc5e586d24a 100644
--- a/net/smc/smc_tracepoint.h
+++ b/net/smc/smc_tracepoint.h
@@ -22,6 +22,7 @@ TRACE_EVENT(smc_switch_to_fallback,
TP_STRUCT__entry(
__field(const void *, sk)
__field(const void *, clcsk)
+ __field(u64, net_cookie)
__field(int, fallback_rsn)
),
@@ -31,11 +32,13 @@ TRACE_EVENT(smc_switch_to_fallback,
__entry->sk = sk;
__entry->clcsk = clcsk;
+ __entry->net_cookie = sock_net(sk)->net_cookie;
__entry->fallback_rsn = fallback_rsn;
),
- TP_printk("sk=%p clcsk=%p fallback_rsn=%d",
- __entry->sk, __entry->clcsk, __entry->fallback_rsn)
+ TP_printk("sk=%p clcsk=%p net=%llu fallback_rsn=%d",
+ __entry->sk, __entry->clcsk,
+ __entry->net_cookie, __entry->fallback_rsn)
);
DECLARE_EVENT_CLASS(smc_msg_event,
@@ -46,19 +49,23 @@ DECLARE_EVENT_CLASS(smc_msg_event,
TP_STRUCT__entry(
__field(const void *, smc)
+ __field(u64, net_cookie)
__field(size_t, len)
__string(name, smc->conn.lnk->ibname)
),
TP_fast_assign(
+ const struct sock *sk = &smc->sk;
+
__entry->smc = smc;
+ __entry->net_cookie = sock_net(sk)->net_cookie;
__entry->len = len;
__assign_str(name, smc->conn.lnk->ibname);
),
- TP_printk("smc=%p len=%zu dev=%s",
- __entry->smc, __entry->len,
- __get_str(name))
+ TP_printk("smc=%p net=%llu len=%zu dev=%s",
+ __entry->smc, __entry->net_cookie,
+ __entry->len, __get_str(name))
);
DEFINE_EVENT(smc_msg_event, smc_tx_sendmsg,
@@ -84,6 +91,7 @@ TRACE_EVENT(smcr_link_down,
TP_STRUCT__entry(
__field(const void *, lnk)
__field(const void *, lgr)
+ __field(u64, net_cookie)
__field(int, state)
__string(name, lnk->ibname)
__field(void *, location)
@@ -94,13 +102,14 @@ TRACE_EVENT(smcr_link_down,
__entry->lnk = lnk;
__entry->lgr = lgr;
+ __entry->net_cookie = lgr->net->net_cookie;
__entry->state = lnk->state;
__assign_str(name, lnk->ibname);
__entry->location = location;
),
- TP_printk("lnk=%p lgr=%p state=%d dev=%s location=%pS",
- __entry->lnk, __entry->lgr,
+ TP_printk("lnk=%p lgr=%p net=%llu state=%d dev=%s location=%pS",
+ __entry->lnk, __entry->lgr, __entry->net_cookie,
__entry->state, __get_str(name),
__entry->location)
);
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index c6cfdea8b71b..24be1d03fef9 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -54,11 +54,7 @@ struct smc_wr_tx_pend { /* control data for a pending send request */
/* returns true if at least one tx work request is pending on the given link */
static inline bool smc_wr_is_tx_pend(struct smc_link *link)
{
- if (find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) !=
- link->wr_tx_cnt) {
- return true;
- }
- return false;
+ return !bitmap_empty(link->wr_tx_mask, link->wr_tx_cnt);
}
/* wait till all pending tx work requests on the given link are completed */
@@ -696,7 +692,7 @@ void smc_wr_free_link_mem(struct smc_link *lnk)
lnk->wr_tx_compl = NULL;
kfree(lnk->wr_tx_pends);
lnk->wr_tx_pends = NULL;
- kfree(lnk->wr_tx_mask);
+ bitmap_free(lnk->wr_tx_mask);
lnk->wr_tx_mask = NULL;
kfree(lnk->wr_tx_sges);
lnk->wr_tx_sges = NULL;
@@ -772,9 +768,7 @@ int smc_wr_alloc_link_mem(struct smc_link *link)
GFP_KERNEL);
if (!link->wr_rx_sges)
goto no_mem_wr_tx_sges;
- link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT),
- sizeof(*link->wr_tx_mask),
- GFP_KERNEL);
+ link->wr_tx_mask = bitmap_zalloc(SMC_WR_BUF_CNT, GFP_KERNEL);
if (!link->wr_tx_mask)
goto no_mem_wr_rx_sges;
link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
@@ -887,8 +881,7 @@ int smc_wr_create_link(struct smc_link *lnk)
goto dma_unmap;
}
smc_wr_init_sge(lnk);
- memset(lnk->wr_tx_mask, 0,
- BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
+ bitmap_zero(lnk->wr_tx_mask, SMC_WR_BUF_CNT);
init_waitqueue_head(&lnk->wr_tx_wait);
atomic_set(&lnk->wr_tx_refcnt, 0);
init_waitqueue_head(&lnk->wr_reg_wait);
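
The smc_wr.c hunks above swap an open-coded find_first_bit() comparison, a kcalloc() of longs and a memset() for bitmap_empty(), bitmap_zalloc() and bitmap_zero(). A small userspace sketch of why the two forms are equivalent; the helpers below are simplified stand-ins for the kernel bitmap API, not its implementation.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *bitmap_zalloc(unsigned int nbits)
{
	return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

static void bitmap_set_bit(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

/* find_first_bit() returns nbits when no bit is set ... */
static unsigned int find_first_bit(const unsigned long *map, unsigned int nbits)
{
	for (unsigned int i = 0; i < nbits; i++)
		if (map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return nbits;
}

/* ... so "find_first_bit() != nbits" is exactly "!bitmap_empty()" */
static bool bitmap_empty(const unsigned long *map, unsigned int nbits)
{
	return find_first_bit(map, nbits) == nbits;
}

int main(void)
{
	enum { SMC_WR_BUF_CNT = 16 };            /* illustrative size */
	unsigned long *mask = bitmap_zalloc(SMC_WR_BUF_CNT);

	printf("pend=%d\n", !bitmap_empty(mask, SMC_WR_BUF_CNT)); /* 0 */
	bitmap_set_bit(mask, 3);
	printf("pend=%d\n", !bitmap_empty(mask, SMC_WR_BUF_CNT)); /* 1 */
	free(mask);
	return 0;
}
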
diff --git a/net/socket.c b/net/socket.c
index 7f64a6eccf63..50cf75730fd7 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -52,6 +52,7 @@
* Based upon Swansea University Computer Society NET3.039
*/
+#include <linux/bpf-cgroup.h>
#include <linux/ethtool.h>
#include <linux/mm.h>
#include <linux/socket.h>
@@ -829,6 +830,7 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
int empty = 1, false_tstamp = 0;
struct skb_shared_hwtstamps *shhwtstamps =
skb_hwtstamps(skb);
+ ktime_t hwtstamp;
/* Race occurred between timestamp enabling and packet
receiving. Fill in the current time for now. */
@@ -877,10 +879,12 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
!skb_is_swtx_tstamp(skb, false_tstamp)) {
if (sk->sk_tsflags & SOF_TIMESTAMPING_BIND_PHC)
- ptp_convert_timestamp(shhwtstamps, sk->sk_bind_phc);
+ hwtstamp = ptp_convert_timestamp(shhwtstamps,
+ sk->sk_bind_phc);
+ else
+ hwtstamp = shhwtstamps->hwtstamp;
- if (ktime_to_timespec64_cond(shhwtstamps->hwtstamp,
- tss.ts + 2)) {
+ if (ktime_to_timespec64_cond(hwtstamp, tss.ts + 2)) {
empty = 0;
if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_PKTINFO) &&
@@ -1946,7 +1950,7 @@ int __sys_getsockname(int fd, struct sockaddr __user *usockaddr,
err = sock->ops->getname(sock, (struct sockaddr *)&address, 0);
if (err < 0)
goto out_put;
- /* "err" is actually length in this case */
+ /* "err" is actually length in this case */
err = move_addr_to_user(&address, err, usockaddr, usockaddr_len);
out_put:
@@ -3233,21 +3237,6 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
return dev_ioctl(net, cmd, &ifreq, data, NULL);
}
-/* Since old style bridge ioctl's endup using SIOCDEVPRIVATE
- * for some operations; this forces use of the newer bridge-utils that
- * use compatible ioctls
- */
-static int old_bridge_ioctl(compat_ulong_t __user *argp)
-{
- compat_ulong_t tmp;
-
- if (get_user(tmp, argp))
- return -EFAULT;
- if (tmp == BRCTL_GET_VERSION)
- return BRCTL_VERSION + 1;
- return -EINVAL;
-}
-
static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
unsigned int cmd, unsigned long arg)
{
@@ -3259,9 +3248,6 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
return sock_ioctl(file, cmd, (unsigned long)argp);
switch (cmd) {
- case SIOCSIFBR:
- case SIOCGIFBR:
- return old_bridge_ioctl(argp);
case SIOCWANDEV:
return compat_siocwandev(net, argp);
case SIOCGSTAMP_OLD:
@@ -3290,6 +3276,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCGSTAMP_NEW:
case SIOCGSTAMPNS_NEW:
case SIOCGIFCONF:
+ case SIOCSIFBR:
+ case SIOCGIFBR:
return sock_ioctl(file, cmd, arg);
case SIOCGIFFLAGS:
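
In the __sock_recv_timestamp() hunk the hardware timestamp is no longer converted in place: the code picks either the PHC-rebased value or the raw shhwtstamps->hwtstamp into a local variable and hands that to the cmsg path. A compact sketch of that selection; ptp_convert() is a placeholder for the conversion helper, not the kernel function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime_t;                         /* nanoseconds, as in the kernel */

/* placeholder for the PHC conversion; the offset below is purely illustrative */
static ktime_t ptp_convert(ktime_t raw, int bind_phc)
{
	return raw + (ktime_t)bind_phc * 1000;
}

static ktime_t pick_hwtstamp(ktime_t raw, bool bind_phc_set, int bind_phc)
{
	/* mirrors the hunk: convert only when SOF_TIMESTAMPING_BIND_PHC is set */
	return bind_phc_set ? ptp_convert(raw, bind_phc) : raw;
}

int main(void)
{
	ktime_t raw = 1000000;

	printf("%lld\n", (long long)pick_hwtstamp(raw, false, 0));
	printf("%lld\n", (long long)pick_hwtstamp(raw, true, 2));
	return 0;
}
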
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 83460470e883..b62565278fac 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -28,6 +28,7 @@ typedef void switchdev_deferred_func_t(struct net_device *dev,
struct switchdev_deferred_item {
struct list_head list;
struct net_device *dev;
+ netdevice_tracker dev_tracker;
switchdev_deferred_func_t *func;
unsigned long data[];
};
@@ -63,7 +64,7 @@ void switchdev_deferred_process(void)
while ((dfitem = switchdev_deferred_dequeue())) {
dfitem->func(dfitem->dev, dfitem->data);
- dev_put(dfitem->dev);
+ dev_put_track(dfitem->dev, &dfitem->dev_tracker);
kfree(dfitem);
}
}
@@ -90,7 +91,7 @@ static int switchdev_deferred_enqueue(struct net_device *dev,
dfitem->dev = dev;
dfitem->func = func;
memcpy(dfitem->data, data, data_len);
- dev_hold(dev);
+ dev_hold_track(dev, &dfitem->dev_tracker, GFP_ATOMIC);
spin_lock_bh(&deferred_lock);
list_add_tail(&dfitem->list, &deferred);
spin_unlock_bh(&deferred_lock);
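
switchdev, like the smc and tipc hunks in this series, moves from bare dev_hold()/dev_put() to the tracked variants and embeds a netdevice_tracker in the structure that owns the reference, so an unbalanced put can be attributed to its owner. A minimal userspace model of that idea, not the kernel implementation:

#include <assert.h>
#include <stdio.h>

struct tracker { int live; };                    /* stand-in for netdevice_tracker */
struct device { const char *name; int refcnt; };

static void dev_hold_track(struct device *dev, struct tracker *t)
{
	dev->refcnt++;
	t->live = 1;                             /* remember who took the ref */
}

static void dev_put_track(struct device *dev, struct tracker *t)
{
	assert(t->live);                         /* catches put-without-hold */
	t->live = 0;
	dev->refcnt--;
}

struct deferred_item {                           /* mirrors switchdev_deferred_item */
	struct device *dev;
	struct tracker dev_tracker;
};

int main(void)
{
	struct device eth0 = { "eth0", 1 };
	struct deferred_item item = { .dev = &eth0 };

	dev_hold_track(item.dev, &item.dev_tracker);  /* enqueue */
	dev_put_track(item.dev, &item.dev_tracker);   /* process */
	printf("%s refcnt=%d\n", eth0.name, eth0.refcnt);
	return 0;
}
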
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 60bc74b76adc..473a790f5894 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -787,7 +787,7 @@ int tipc_attach_loopback(struct net *net)
if (!dev)
return -ENODEV;
- dev_hold(dev);
+ dev_hold_track(dev, &tn->loopback_pt.dev_tracker, GFP_KERNEL);
tn->loopback_pt.dev = dev;
tn->loopback_pt.type = htons(ETH_P_TIPC);
tn->loopback_pt.func = tipc_loopback_rcv_pkt;
@@ -800,7 +800,7 @@ void tipc_detach_loopback(struct net *net)
struct tipc_net *tn = tipc_net(net);
dev_remove_pack(&tn->loopback_pt);
- dev_put(net->loopback_dev);
+ dev_put_track(net->loopback_dev, &tn->loopback_pt.dev_tracker);
}
/* Caller should hold rtnl_lock to protect the bearer */
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index d293614d5fc6..9325479295b8 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -761,21 +761,10 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
skb_tailroom(skb), tailen);
}
- if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) {
- nsg = 1;
- trailer = skb;
- } else {
- /* TODO: We could avoid skb_cow_data() if skb has no frag_list
- * e.g. by skb_fill_page_desc() to add another page to the skb
- * with the wanted tailen... However, page skbs look not often,
- * so take it easy now!
- * Cloned skbs e.g. from link_xmit() seems no choice though :(
- */
- nsg = skb_cow_data(skb, tailen, &trailer);
- if (unlikely(nsg < 0)) {
- pr_err("TX: skb_cow_data() returned %d\n", nsg);
- return nsg;
- }
+ nsg = skb_cow_data(skb, tailen, &trailer);
+ if (unlikely(nsg < 0)) {
+ pr_err("TX: skb_cow_data() returned %d\n", nsg);
+ return nsg;
}
pskb_put(skb, trailer, tailen);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 09ae8448f394..8d9e09f48f4c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1298,7 +1298,8 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
return false;
#ifdef CONFIG_TIPC_CRYPTO
case MSG_CRYPTO:
- if (TIPC_SKB_CB(skb)->decrypted) {
+ if (sysctl_tipc_key_exchange_enabled &&
+ TIPC_SKB_CB(skb)->decrypted) {
tipc_crypto_msg_rcv(l->net, skb);
return true;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ad570c2450be..3e63c83e641c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1461,6 +1461,8 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
msg_set_syn(hdr, 1);
}
+ memset(&skaddr, 0, sizeof(skaddr));
+
/* Determine destination */
if (atype == TIPC_SERVICE_RANGE) {
return tipc_sendmcast(sock, ua, m, dlen, timeout);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index dfe623a4e72f..95e774f1b91f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1990,6 +1990,7 @@ recv_end:
end:
release_sock(sk);
+ sk_defer_free_flush(sk);
if (psock)
sk_psock_put(sk, psock);
return copied ? : err;
@@ -2328,10 +2329,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_crypto_info *crypto_info;
- struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
- struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
- struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
- struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
struct tls_sw_context_tx *sw_ctx_tx = NULL;
struct tls_sw_context_rx *sw_ctx_rx = NULL;
struct cipher_context *cctx;
@@ -2394,15 +2391,15 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
+ struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
+
+ gcm_128_info = (void *)crypto_info;
nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
tag_size = TLS_CIPHER_AES_GCM_128_TAG_SIZE;
iv_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
- iv = ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->iv;
+ iv = gcm_128_info->iv;
rec_seq_size = TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE;
- rec_seq =
- ((struct tls12_crypto_info_aes_gcm_128 *)crypto_info)->rec_seq;
- gcm_128_info =
- (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+ rec_seq = gcm_128_info->rec_seq;
keysize = TLS_CIPHER_AES_GCM_128_KEY_SIZE;
key = gcm_128_info->key;
salt = gcm_128_info->salt;
@@ -2411,15 +2408,15 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
break;
}
case TLS_CIPHER_AES_GCM_256: {
+ struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
+
+ gcm_256_info = (void *)crypto_info;
nonce_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
tag_size = TLS_CIPHER_AES_GCM_256_TAG_SIZE;
iv_size = TLS_CIPHER_AES_GCM_256_IV_SIZE;
- iv = ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->iv;
+ iv = gcm_256_info->iv;
rec_seq_size = TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE;
- rec_seq =
- ((struct tls12_crypto_info_aes_gcm_256 *)crypto_info)->rec_seq;
- gcm_256_info =
- (struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
+ rec_seq = gcm_256_info->rec_seq;
keysize = TLS_CIPHER_AES_GCM_256_KEY_SIZE;
key = gcm_256_info->key;
salt = gcm_256_info->salt;
@@ -2428,15 +2425,15 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
break;
}
case TLS_CIPHER_AES_CCM_128: {
+ struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
+
+ ccm_128_info = (void *)crypto_info;
nonce_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
tag_size = TLS_CIPHER_AES_CCM_128_TAG_SIZE;
iv_size = TLS_CIPHER_AES_CCM_128_IV_SIZE;
- iv = ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->iv;
+ iv = ccm_128_info->iv;
rec_seq_size = TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE;
- rec_seq =
- ((struct tls12_crypto_info_aes_ccm_128 *)crypto_info)->rec_seq;
- ccm_128_info =
- (struct tls12_crypto_info_aes_ccm_128 *)crypto_info;
+ rec_seq = ccm_128_info->rec_seq;
keysize = TLS_CIPHER_AES_CCM_128_KEY_SIZE;
key = ccm_128_info->key;
salt = ccm_128_info->salt;
@@ -2445,6 +2442,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
break;
}
case TLS_CIPHER_CHACHA20_POLY1305: {
+ struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
+
chacha20_poly1305_info = (void *)crypto_info;
nonce_size = 0;
tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
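
tls_set_sw_offload() now declares each tls12_crypto_info_* pointer inside its own case block and does the cast from the generic crypto_info exactly once, instead of keeping four function-scope pointers and repeating the casts. The same shape in a standalone sketch; the structures are simplified illustrations rather than the uapi definitions.

#include <stdio.h>

struct crypto_info { unsigned short cipher_type; };   /* common header */

struct gcm_128_info {
	struct crypto_info info;
	unsigned char iv[8];
	unsigned char key[16];
};

struct chacha_info {
	struct crypto_info info;
	unsigned char iv[12];
	unsigned char key[32];
};

enum { CIPHER_GCM_128 = 1, CIPHER_CHACHA = 2 };

static size_t key_size(const struct crypto_info *ci)
{
	switch (ci->cipher_type) {
	case CIPHER_GCM_128: {
		/* pointer lives only in this case, cast done once */
		const struct gcm_128_info *gcm = (const void *)ci;

		return sizeof(gcm->key);
	}
	case CIPHER_CHACHA: {
		const struct chacha_info *cha = (const void *)ci;

		return sizeof(cha->key);
	}
	default:
		return 0;
	}
}

int main(void)
{
	struct gcm_128_info gcm = { .info.cipher_type = CIPHER_GCM_128 };

	printf("%zu\n", key_size(&gcm.info));    /* prints 16 */
	return 0;
}
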
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b0bfc78e421c..c19569819866 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -89,6 +89,7 @@
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
+#include <linux/filter.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
@@ -117,24 +118,64 @@
#include "scm.h"
+spinlock_t unix_table_locks[2 * UNIX_HASH_SIZE];
+EXPORT_SYMBOL_GPL(unix_table_locks);
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
-DEFINE_SPINLOCK(unix_table_lock);
-EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;
+/* SMP locking strategy:
+ * hash table is protected with spinlock unix_table_locks
+ * each socket state is protected by separate spin lock.
+ */
-static struct hlist_head *unix_sockets_unbound(void *addr)
+static unsigned int unix_unbound_hash(struct sock *sk)
{
- unsigned long hash = (unsigned long)addr;
+ unsigned long hash = (unsigned long)sk;
hash ^= hash >> 16;
hash ^= hash >> 8;
- hash %= UNIX_HASH_SIZE;
- return &unix_socket_table[UNIX_HASH_SIZE + hash];
+ hash ^= sk->sk_type;
+
+ return UNIX_HASH_SIZE + (hash & (UNIX_HASH_SIZE - 1));
+}
+
+static unsigned int unix_bsd_hash(struct inode *i)
+{
+ return i->i_ino & (UNIX_HASH_SIZE - 1);
+}
+
+static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
+ int addr_len, int type)
+{
+ __wsum csum = csum_partial(sunaddr, addr_len, 0);
+ unsigned int hash;
+
+ hash = (__force unsigned int)csum_fold(csum);
+ hash ^= hash >> 8;
+ hash ^= type;
+
+ return hash & (UNIX_HASH_SIZE - 1);
}
-#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
+static void unix_table_double_lock(unsigned int hash1, unsigned int hash2)
+{
+ /* hash1 and hash2 are never the same because
+ * one is between 0 and UNIX_HASH_SIZE - 1, and
+ * another is between UNIX_HASH_SIZE and UNIX_HASH_SIZE * 2.
+ */
+ if (hash1 > hash2)
+ swap(hash1, hash2);
+
+ spin_lock(&unix_table_locks[hash1]);
+ spin_lock_nested(&unix_table_locks[hash2], SINGLE_DEPTH_NESTING);
+}
+
+static void unix_table_double_unlock(unsigned int hash1, unsigned int hash2)
+{
+ spin_unlock(&unix_table_locks[hash1]);
+ spin_unlock(&unix_table_locks[hash2]);
+}
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
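
unix_table_double_lock() above always takes the lower-numbered bucket lock first (swapping if necessary) and acquires the second with spin_lock_nested(), so two paths that rehash sockets across different bucket pairs can never deadlock; it also relies on the two hashes never being equal. A userspace pthread sketch of the same ordering discipline; bucket count and lock type are illustrative.

#include <pthread.h>
#include <stdio.h>

#define NBUCKETS 8

static pthread_mutex_t table_locks[NBUCKETS];

static void swap_uint(unsigned int *a, unsigned int *b)
{
	unsigned int t = *a; *a = *b; *b = t;
}

/* always lock the lower-numbered bucket first, as the hunk does */
static void table_double_lock(unsigned int h1, unsigned int h2)
{
	if (h1 > h2)
		swap_uint(&h1, &h2);
	pthread_mutex_lock(&table_locks[h1]);
	pthread_mutex_lock(&table_locks[h2]);
}

static void table_double_unlock(unsigned int h1, unsigned int h2)
{
	pthread_mutex_unlock(&table_locks[h1]);
	pthread_mutex_unlock(&table_locks[h2]);
}

int main(void)
{
	for (int i = 0; i < NBUCKETS; i++)
		pthread_mutex_init(&table_locks[i], NULL);

	/* one path asks for (5,2), another for (2,5): both take 2 then 5 */
	table_double_lock(5, 2);
	table_double_unlock(5, 2);
	table_double_lock(2, 5);
	table_double_unlock(2, 5);
	puts("acquisition order is canonical, so no deadlock");
	return 0;
}
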
@@ -164,20 +205,6 @@ static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
}
#endif /* CONFIG_SECURITY_NETWORK */
-/*
- * SMP locking strategy:
- * hash table is protected with spinlock unix_table_lock
- * each socket state is protected by separate spin lock.
- */
-
-static inline unsigned int unix_hash_fold(__wsum n)
-{
- unsigned int hash = (__force unsigned int)csum_fold(n);
-
- hash ^= hash>>8;
- return hash&(UNIX_HASH_SIZE-1);
-}
-
#define unix_peer(sk) (unix_sk(sk)->peer)
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
@@ -214,6 +241,22 @@ struct sock *unix_peer_get(struct sock *s)
}
EXPORT_SYMBOL_GPL(unix_peer_get);
+static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
+ int addr_len)
+{
+ struct unix_address *addr;
+
+ addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
+ if (!addr)
+ return NULL;
+
+ refcount_set(&addr->refcnt, 1);
+ addr->len = addr_len;
+ memcpy(addr->name, sunaddr, addr_len);
+
+ return addr;
+}
+
static inline void unix_release_addr(struct unix_address *addr)
{
if (refcount_dec_and_test(&addr->refcnt))
@@ -227,29 +270,29 @@ static inline void unix_release_addr(struct unix_address *addr)
* - if started by zero, it is abstract name.
*/
-static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
+static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
{
- *hashp = 0;
-
- if (len <= sizeof(short) || len > sizeof(*sunaddr))
+ if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
+ addr_len > sizeof(*sunaddr))
return -EINVAL;
- if (!sunaddr || sunaddr->sun_family != AF_UNIX)
+
+ if (sunaddr->sun_family != AF_UNIX)
return -EINVAL;
- if (sunaddr->sun_path[0]) {
- /*
- * This may look like an off by one error but it is a bit more
- * subtle. 108 is the longest valid AF_UNIX path for a binding.
- * sun_path[108] doesn't as such exist. However in kernel space
- * we are guaranteed that it is a valid memory location in our
- * kernel address buffer.
- */
- ((char *)sunaddr)[len] = 0;
- len = strlen(sunaddr->sun_path)+1+sizeof(short);
- return len;
- }
- *hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
- return len;
+ return 0;
+}
+
+static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
+{
+ /* This may look like an off by one error but it is a bit more
+ * subtle. 108 is the longest valid AF_UNIX path for a binding.
+ * sun_path[108] doesn't as such exist. However in kernel space
+ * we are guaranteed that it is a valid memory location in our
+ * kernel address buffer because syscall functions always pass
+ * a pointer to struct sockaddr_storage, which has a bigger buffer
+ * than 108.
+ */
+ ((char *)sunaddr)[addr_len] = 0;
}
static void __unix_remove_socket(struct sock *sk)
@@ -257,32 +300,34 @@ static void __unix_remove_socket(struct sock *sk)
sk_del_node_init(sk);
}
-static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
+static void __unix_insert_socket(struct sock *sk)
{
WARN_ON(!sk_unhashed(sk));
- sk_add_node(sk, list);
+ sk_add_node(sk, &unix_socket_table[sk->sk_hash]);
}
-static void __unix_set_addr(struct sock *sk, struct unix_address *addr,
- unsigned hash)
+static void __unix_set_addr_hash(struct sock *sk, struct unix_address *addr,
+ unsigned int hash)
{
__unix_remove_socket(sk);
smp_store_release(&unix_sk(sk)->addr, addr);
- __unix_insert_socket(&unix_socket_table[hash], sk);
+
+ sk->sk_hash = hash;
+ __unix_insert_socket(sk);
}
-static inline void unix_remove_socket(struct sock *sk)
+static void unix_remove_socket(struct sock *sk)
{
- spin_lock(&unix_table_lock);
+ spin_lock(&unix_table_locks[sk->sk_hash]);
__unix_remove_socket(sk);
- spin_unlock(&unix_table_lock);
+ spin_unlock(&unix_table_locks[sk->sk_hash]);
}
-static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
+static void unix_insert_unbound_socket(struct sock *sk)
{
- spin_lock(&unix_table_lock);
- __unix_insert_socket(list, sk);
- spin_unlock(&unix_table_lock);
+ spin_lock(&unix_table_locks[sk->sk_hash]);
+ __unix_insert_socket(sk);
+ spin_unlock(&unix_table_locks[sk->sk_hash]);
}
static struct sock *__unix_find_socket_byname(struct net *net,
@@ -310,32 +355,31 @@ static inline struct sock *unix_find_socket_byname(struct net *net,
{
struct sock *s;
- spin_lock(&unix_table_lock);
+ spin_lock(&unix_table_locks[hash]);
s = __unix_find_socket_byname(net, sunname, len, hash);
if (s)
sock_hold(s);
- spin_unlock(&unix_table_lock);
+ spin_unlock(&unix_table_locks[hash]);
return s;
}
static struct sock *unix_find_socket_byinode(struct inode *i)
{
+ unsigned int hash = unix_bsd_hash(i);
struct sock *s;
- spin_lock(&unix_table_lock);
- sk_for_each(s,
- &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
+ spin_lock(&unix_table_locks[hash]);
+ sk_for_each(s, &unix_socket_table[hash]) {
struct dentry *dentry = unix_sk(s)->path.dentry;
if (dentry && d_backing_inode(dentry) == i) {
sock_hold(s);
- goto found;
+ spin_unlock(&unix_table_locks[hash]);
+ return s;
}
}
- s = NULL;
-found:
- spin_unlock(&unix_table_lock);
- return s;
+ spin_unlock(&unix_table_locks[hash]);
+ return NULL;
}
/* Support code for asymmetrically connected dgram sockets
@@ -522,9 +566,7 @@ static void unix_sock_destructor(struct sock *sk)
unix_release_addr(u->addr);
atomic_long_dec(&unix_nr_socks);
- local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
atomic_long_read(&unix_nr_socks));
@@ -872,6 +914,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sock_init_data(sock, sk);
+ sk->sk_hash = unix_unbound_hash(sk);
sk->sk_allocation = GFP_KERNEL_ACCOUNT;
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
@@ -887,11 +930,9 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
init_waitqueue_head(&u->peer_wait);
init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
memset(&u->scm_stat, 0, sizeof(struct scm_stat));
- unix_insert_socket(unix_sockets_unbound(sk), sk);
+ unix_insert_unbound_socket(sk);
- local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- local_bh_enable();
return sk;
@@ -952,15 +993,90 @@ static int unix_release(struct socket *sock)
return 0;
}
-static int unix_autobind(struct socket *sock)
+static struct sock *unix_find_bsd(struct net *net, struct sockaddr_un *sunaddr,
+ int addr_len, int type)
{
- struct sock *sk = sock->sk;
- struct net *net = sock_net(sk);
+ struct inode *inode;
+ struct path path;
+ struct sock *sk;
+ int err;
+
+ unix_mkname_bsd(sunaddr, addr_len);
+ err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
+ if (err)
+ goto fail;
+
+ err = path_permission(&path, MAY_WRITE);
+ if (err)
+ goto path_put;
+
+ err = -ECONNREFUSED;
+ inode = d_backing_inode(path.dentry);
+ if (!S_ISSOCK(inode->i_mode))
+ goto path_put;
+
+ sk = unix_find_socket_byinode(inode);
+ if (!sk)
+ goto path_put;
+
+ err = -EPROTOTYPE;
+ if (sk->sk_type == type)
+ touch_atime(&path);
+ else
+ goto sock_put;
+
+ path_put(&path);
+
+ return sk;
+
+sock_put:
+ sock_put(sk);
+path_put:
+ path_put(&path);
+fail:
+ return ERR_PTR(err);
+}
+
+static struct sock *unix_find_abstract(struct net *net,
+ struct sockaddr_un *sunaddr,
+ int addr_len, int type)
+{
+ unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
+ struct dentry *dentry;
+ struct sock *sk;
+
+ sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
+ if (!sk)
+ return ERR_PTR(-ECONNREFUSED);
+
+ dentry = unix_sk(sk)->path.dentry;
+ if (dentry)
+ touch_atime(&unix_sk(sk)->path);
+
+ return sk;
+}
+
+static struct sock *unix_find_other(struct net *net,
+ struct sockaddr_un *sunaddr,
+ int addr_len, int type)
+{
+ struct sock *sk;
+
+ if (sunaddr->sun_path[0])
+ sk = unix_find_bsd(net, sunaddr, addr_len, type);
+ else
+ sk = unix_find_abstract(net, sunaddr, addr_len, type);
+
+ return sk;
+}
+
+static int unix_autobind(struct sock *sk)
+{
+ unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
- static u32 ordernum = 1;
struct unix_address *addr;
+ u32 lastnum, ordernum;
int err;
- unsigned int retries = 0;
err = mutex_lock_interruptible(&u->bindlock);
if (err)
@@ -970,141 +1086,103 @@ static int unix_autobind(struct socket *sock)
goto out;
err = -ENOMEM;
- addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
+ addr = kzalloc(sizeof(*addr) +
+ offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
if (!addr)
goto out;
+ addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
addr->name->sun_family = AF_UNIX;
refcount_set(&addr->refcnt, 1);
+ ordernum = prandom_u32();
+ lastnum = ordernum & 0xFFFFF;
retry:
- addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
- addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
- addr->hash ^= sk->sk_type;
+ ordernum = (ordernum + 1) & 0xFFFFF;
+ sprintf(addr->name->sun_path + 1, "%05x", ordernum);
- spin_lock(&unix_table_lock);
- ordernum = (ordernum+1)&0xFFFFF;
+ new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
+ unix_table_double_lock(old_hash, new_hash);
- if (__unix_find_socket_byname(net, addr->name, addr->len, addr->hash)) {
- spin_unlock(&unix_table_lock);
- /*
- * __unix_find_socket_byname() may take long time if many names
+ if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
+ new_hash)) {
+ unix_table_double_unlock(old_hash, new_hash);
+
+ /* __unix_find_socket_byname() may take long time if many names
* are already in use.
*/
cond_resched();
- /* Give up if all names seems to be in use. */
- if (retries++ == 0xFFFFF) {
+
+ if (ordernum == lastnum) {
+ /* Give up if all names seem to be in use. */
err = -ENOSPC;
- kfree(addr);
+ unix_release_addr(addr);
goto out;
}
+
goto retry;
}
- __unix_set_addr(sk, addr, addr->hash);
- spin_unlock(&unix_table_lock);
+ __unix_set_addr_hash(sk, addr, new_hash);
+ unix_table_double_unlock(old_hash, new_hash);
err = 0;
out: mutex_unlock(&u->bindlock);
return err;
}
-static struct sock *unix_find_other(struct net *net,
- struct sockaddr_un *sunname, int len,
- int type, unsigned int hash, int *error)
-{
- struct sock *u;
- struct path path;
- int err = 0;
-
- if (sunname->sun_path[0]) {
- struct inode *inode;
- err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
- if (err)
- goto fail;
- inode = d_backing_inode(path.dentry);
- err = path_permission(&path, MAY_WRITE);
- if (err)
- goto put_fail;
-
- err = -ECONNREFUSED;
- if (!S_ISSOCK(inode->i_mode))
- goto put_fail;
- u = unix_find_socket_byinode(inode);
- if (!u)
- goto put_fail;
-
- if (u->sk_type == type)
- touch_atime(&path);
-
- path_put(&path);
-
- err = -EPROTOTYPE;
- if (u->sk_type != type) {
- sock_put(u);
- goto fail;
- }
- } else {
- err = -ECONNREFUSED;
- u = unix_find_socket_byname(net, sunname, len, type ^ hash);
- if (u) {
- struct dentry *dentry;
- dentry = unix_sk(u)->path.dentry;
- if (dentry)
- touch_atime(&unix_sk(u)->path);
- } else
- goto fail;
- }
- return u;
-
-put_fail:
- path_put(&path);
-fail:
- *error = err;
- return NULL;
-}
-
-static int unix_bind_bsd(struct sock *sk, struct unix_address *addr)
+static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ int addr_len)
{
- struct unix_sock *u = unix_sk(sk);
umode_t mode = S_IFSOCK |
(SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
+ unsigned int new_hash, old_hash = sk->sk_hash;
+ struct unix_sock *u = unix_sk(sk);
struct user_namespace *ns; // barf...
- struct path parent;
+ struct unix_address *addr;
struct dentry *dentry;
- unsigned int hash;
+ struct path parent;
int err;
+ unix_mkname_bsd(sunaddr, addr_len);
+ addr_len = strlen(sunaddr->sun_path) +
+ offsetof(struct sockaddr_un, sun_path) + 1;
+
+ addr = unix_create_addr(sunaddr, addr_len);
+ if (!addr)
+ return -ENOMEM;
+
/*
* Get the parent directory, calculate the hash for last
* component.
*/
dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- ns = mnt_user_ns(parent.mnt);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+ }
/*
* All right, let's create it.
*/
+ ns = mnt_user_ns(parent.mnt);
err = security_path_mknod(&parent, dentry, mode, 0);
if (!err)
err = vfs_mknod(ns, d_inode(parent.dentry), dentry, mode, 0);
if (err)
- goto out;
+ goto out_path;
err = mutex_lock_interruptible(&u->bindlock);
if (err)
goto out_unlink;
if (u->addr)
goto out_unlock;
- addr->hash = UNIX_HASH_SIZE;
- hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
- spin_lock(&unix_table_lock);
+ new_hash = unix_bsd_hash(d_backing_inode(dentry));
+ unix_table_double_lock(old_hash, new_hash);
u->path.mnt = mntget(parent.mnt);
u->path.dentry = dget(dentry);
- __unix_set_addr(sk, addr, hash);
- spin_unlock(&unix_table_lock);
+ __unix_set_addr_hash(sk, addr, new_hash);
+ unix_table_double_unlock(old_hash, new_hash);
mutex_unlock(&u->bindlock);
done_path_create(&parent, dentry);
return 0;
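
The reworked unix_autobind() seeds the 20-bit name counter from prandom_u32() and keeps generating "%05x" names until the counter wraps back to its starting value, at which point it gives up with -ENOSPC rather than counting retries separately. A standalone sketch of that wrap-around exhaustion check; the name space is shrunk to 16 entries so the failure path is reachable here.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NAME_SPACE 16                            /* kernel uses 0x100000 (20 bits) */

static bool name_in_use[NAME_SPACE];

static int autobind(char *name, size_t len)
{
	unsigned int ordernum = (unsigned int)rand() % NAME_SPACE;
	unsigned int lastnum = ordernum;

	for (;;) {
		ordernum = (ordernum + 1) % NAME_SPACE;
		snprintf(name, len, "@%05x", ordernum);
		if (!name_in_use[ordernum]) {
			name_in_use[ordernum] = true;
			return 0;
		}
		if (ordernum == lastnum)         /* walked the whole space */
			return -1;               /* kernel returns -ENOSPC */
	}
}

int main(void)
{
	char name[16];

	srand((unsigned int)time(NULL));
	for (int i = 0; i <= NAME_SPACE; i++) {
		if (autobind(name, sizeof(name)) < 0) {
			puts("ENOSPC: all names in use");
			return 0;
		}
		printf("bound %s\n", name);
	}
	return 0;
}
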
@@ -1115,74 +1193,76 @@ out_unlock:
out_unlink:
/* failed after successful mknod? unlink what we'd created... */
vfs_unlink(ns, d_inode(parent.dentry), dentry, NULL);
-out:
+out_path:
done_path_create(&parent, dentry);
- return err;
+out:
+ unix_release_addr(addr);
+ return err == -EEXIST ? -EADDRINUSE : err;
}
-static int unix_bind_abstract(struct sock *sk, struct unix_address *addr)
+static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
+ int addr_len)
{
+ unsigned int new_hash, old_hash = sk->sk_hash;
struct unix_sock *u = unix_sk(sk);
+ struct unix_address *addr;
int err;
+ addr = unix_create_addr(sunaddr, addr_len);
+ if (!addr)
+ return -ENOMEM;
+
err = mutex_lock_interruptible(&u->bindlock);
if (err)
- return err;
+ goto out;
if (u->addr) {
- mutex_unlock(&u->bindlock);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_mutex;
}
- spin_lock(&unix_table_lock);
+ new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
+ unix_table_double_lock(old_hash, new_hash);
+
if (__unix_find_socket_byname(sock_net(sk), addr->name, addr->len,
- addr->hash)) {
- spin_unlock(&unix_table_lock);
- mutex_unlock(&u->bindlock);
- return -EADDRINUSE;
- }
- __unix_set_addr(sk, addr, addr->hash);
- spin_unlock(&unix_table_lock);
+ new_hash))
+ goto out_spin;
+
+ __unix_set_addr_hash(sk, addr, new_hash);
+ unix_table_double_unlock(old_hash, new_hash);
mutex_unlock(&u->bindlock);
return 0;
+
+out_spin:
+ unix_table_double_unlock(old_hash, new_hash);
+ err = -EADDRINUSE;
+out_mutex:
+ mutex_unlock(&u->bindlock);
+out:
+ unix_release_addr(addr);
+ return err;
}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
- struct sock *sk = sock->sk;
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
- char *sun_path = sunaddr->sun_path;
+ struct sock *sk = sock->sk;
int err;
- unsigned int hash;
- struct unix_address *addr;
-
- if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
- sunaddr->sun_family != AF_UNIX)
- return -EINVAL;
- if (addr_len == sizeof(short))
- return unix_autobind(sock);
+ if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
+ sunaddr->sun_family == AF_UNIX)
+ return unix_autobind(sk);
- err = unix_mkname(sunaddr, addr_len, &hash);
- if (err < 0)
+ err = unix_validate_addr(sunaddr, addr_len);
+ if (err)
return err;
- addr_len = err;
- addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
- memcpy(addr->name, sunaddr, addr_len);
- addr->len = addr_len;
- addr->hash = hash ^ sk->sk_type;
- refcount_set(&addr->refcnt, 1);
-
- if (sun_path[0])
- err = unix_bind_bsd(sk, addr);
+ if (sunaddr->sun_path[0])
+ err = unix_bind_bsd(sk, sunaddr, addr_len);
else
- err = unix_bind_abstract(sk, addr);
- if (err)
- unix_release_addr(addr);
- return err == -EEXIST ? -EADDRINUSE : err;
+ err = unix_bind_abstract(sk, sunaddr, addr_len);
+
+ return err;
}
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
@@ -1217,7 +1297,6 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
struct net *net = sock_net(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
struct sock *other;
- unsigned int hash;
int err;
err = -EINVAL;
@@ -1225,19 +1304,23 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
goto out;
if (addr->sa_family != AF_UNSPEC) {
- err = unix_mkname(sunaddr, alen, &hash);
- if (err < 0)
+ err = unix_validate_addr(sunaddr, alen);
+ if (err)
goto out;
- alen = err;
if (test_bit(SOCK_PASSCRED, &sock->flags) &&
- !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
- goto out;
+ !unix_sk(sk)->addr) {
+ err = unix_autobind(sk);
+ if (err)
+ goto out;
+ }
restart:
- other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
- if (!other)
+ other = unix_find_other(net, sunaddr, alen, sock->type);
+ if (IS_ERR(other)) {
+ err = PTR_ERR(other);
goto out;
+ }
unix_state_double_lock(sk, other);
@@ -1327,19 +1410,19 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
struct sock *newsk = NULL;
struct sock *other = NULL;
struct sk_buff *skb = NULL;
- unsigned int hash;
int st;
int err;
long timeo;
- err = unix_mkname(sunaddr, addr_len, &hash);
- if (err < 0)
+ err = unix_validate_addr(sunaddr, addr_len);
+ if (err)
goto out;
- addr_len = err;
- if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
- (err = unix_autobind(sock)) != 0)
- goto out;
+ if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
+ err = unix_autobind(sk);
+ if (err)
+ goto out;
+ }
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
@@ -1365,9 +1448,12 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
restart:
/* Find listening sock. */
- other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
- if (!other)
+ other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
+ if (IS_ERR(other)) {
+ err = PTR_ERR(other);
+ other = NULL;
goto out;
+ }
/* Latch state of peer */
unix_state_lock(other);
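
unix_find_other() and its helpers now return ERR_PTR(-ECONNREFUSED), ERR_PTR(-EPROTOTYPE) and so on instead of NULL plus an out-parameter, and the connect/sendmsg hunks recover the errno with PTR_ERR(). A userspace re-implementation of that encoding for illustration; the real macros live in <linux/err.h> and are slightly more involved.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* encode a small negative errno in the top of the pointer range */
static inline void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct sock { const char *name; };

static struct sock *find_other(int have_match)
{
	static struct sock peer = { "peer" };

	if (!have_match)
		return ERR_PTR(-ECONNREFUSED);   /* no listening socket found */
	return &peer;
}

int main(void)
{
	struct sock *other = find_other(0);

	if (IS_ERR(other))
		printf("connect failed: %ld\n", PTR_ERR(other)); /* -ECONNREFUSED */

	other = find_other(1);
	if (!IS_ERR(other))
		printf("connected to %s\n", other->name);
	return 0;
}
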
@@ -1455,9 +1541,9 @@ restart:
*
* The contents of *(otheru->addr) and otheru->path
* are seen fully set up here, since we have found
- * otheru in hash under unix_table_lock. Insertion
+ * otheru in hash under unix_table_locks. Insertion
* into the hash chain we'd found it in had been done
- * in an earlier critical area protected by unix_table_lock,
+ * in an earlier critical area protected by unix_table_locks,
* the same one where we'd set *(otheru->addr) contents,
* as well as otheru->path and otheru->addr itself.
*
@@ -1604,7 +1690,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
if (!addr) {
sunaddr->sun_family = AF_UNIX;
sunaddr->sun_path[0] = 0;
- err = sizeof(short);
+ err = offsetof(struct sockaddr_un, sun_path);
} else {
err = addr->len;
memcpy(sunaddr, addr->name, addr->len);
@@ -1760,9 +1846,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
struct unix_sock *u = unix_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
struct sock *other = NULL;
- int namelen = 0; /* fake GCC */
int err;
- unsigned int hash;
struct sk_buff *skb;
long timeo;
struct scm_cookie scm;
@@ -1779,10 +1863,9 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
goto out;
if (msg->msg_namelen) {
- err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
- if (err < 0)
+ err = unix_validate_addr(sunaddr, msg->msg_namelen);
+ if (err)
goto out;
- namelen = err;
} else {
sunaddr = NULL;
err = -ENOTCONN;
@@ -1791,9 +1874,11 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
goto out;
}
- if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
- && (err = unix_autobind(sock)) != 0)
- goto out;
+ if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
+ err = unix_autobind(sk);
+ if (err)
+ goto out;
+ }
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
@@ -1833,10 +1918,13 @@ restart:
if (sunaddr == NULL)
goto out_free;
- other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
- hash, &err);
- if (other == NULL)
+ other = unix_find_other(net, sunaddr, msg->msg_namelen,
+ sk->sk_type);
+ if (IS_ERR(other)) {
+ err = PTR_ERR(other);
+ other = NULL;
goto out_free;
+ }
}
if (sk_filter(other, skb) < 0) {
@@ -3132,7 +3220,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
#define get_bucket(x) ((x) >> BUCKET_SPACE)
-#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
+#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
@@ -3156,7 +3244,7 @@ static struct sock *unix_next_socket(struct seq_file *seq,
struct sock *sk,
loff_t *pos)
{
- unsigned long bucket;
+ unsigned long bucket = get_bucket(*pos);
while (sk > (struct sock *)SEQ_START_TOKEN) {
sk = sk_next(sk);
@@ -3167,12 +3255,13 @@ static struct sock *unix_next_socket(struct seq_file *seq,
}
do {
+ spin_lock(&unix_table_locks[bucket]);
sk = unix_from_bucket(seq, pos);
if (sk)
return sk;
next_bucket:
- bucket = get_bucket(*pos) + 1;
+ spin_unlock(&unix_table_locks[bucket++]);
*pos = set_bucket_offset(bucket, 1);
} while (bucket < ARRAY_SIZE(unix_socket_table));
@@ -3180,10 +3269,7 @@ next_bucket:
}
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
- __acquires(unix_table_lock)
{
- spin_lock(&unix_table_lock);
-
if (!*pos)
return SEQ_START_TOKEN;
@@ -3200,9 +3286,11 @@ static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void unix_seq_stop(struct seq_file *seq, void *v)
- __releases(unix_table_lock)
{
- spin_unlock(&unix_table_lock);
+ struct sock *sk = v;
+
+ if (sk)
+ spin_unlock(&unix_table_locks[sk->sk_hash]);
}
static int unix_seq_show(struct seq_file *seq, void *v)
@@ -3227,15 +3315,16 @@ static int unix_seq_show(struct seq_file *seq, void *v)
(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
sock_i_ino(s));
- if (u->addr) { // under unix_table_lock here
+ if (u->addr) { // under unix_table_locks here
int i, len;
seq_putc(seq, ' ');
i = 0;
- len = u->addr->len - sizeof(short);
- if (!UNIX_ABSTRACT(s))
+ len = u->addr->len -
+ offsetof(struct sockaddr_un, sun_path);
+ if (u->addr->name->sun_path[0]) {
len--;
- else {
+ } else {
seq_putc(seq, '@');
i++;
}
@@ -3385,10 +3474,13 @@ static void __init bpf_iter_register(void)
static int __init af_unix_init(void)
{
- int rc = -1;
+ int i, rc = -1;
BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));
+ for (i = 0; i < 2 * UNIX_HASH_SIZE; i++)
+ spin_lock_init(&unix_table_locks[i]);
+
rc = proto_register(&unix_dgram_proto, 1);
if (rc != 0) {
pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 7e7d7f45685a..bb0b5ea1655f 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -13,13 +13,14 @@
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
- /* might or might not have unix_table_lock */
+ /* might or might not have unix_table_locks */
struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
if (!addr)
return 0;
- return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
+ return nla_put(nlskb, UNIX_DIAG_NAME,
+ addr->len - offsetof(struct sockaddr_un, sun_path),
addr->name->sun_path);
}
@@ -203,13 +204,13 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
s_slot = cb->args[0];
num = s_num = cb->args[1];
- spin_lock(&unix_table_lock);
for (slot = s_slot;
slot < ARRAY_SIZE(unix_socket_table);
s_num = 0, slot++) {
struct sock *sk;
num = 0;
+ spin_lock(&unix_table_locks[slot]);
sk_for_each(sk, &unix_socket_table[slot]) {
if (!net_eq(sock_net(sk), net))
continue;
@@ -220,14 +221,16 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (sk_diag_dump(sk, skb, req,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- NLM_F_MULTI) < 0)
+ NLM_F_MULTI) < 0) {
+ spin_unlock(&unix_table_locks[slot]);
goto done;
+ }
next:
num++;
}
+ spin_unlock(&unix_table_locks[slot]);
}
done:
- spin_unlock(&unix_table_lock);
cb->args[0] = slot;
cb->args[1] = num;
@@ -236,21 +239,19 @@ done:
static struct sock *unix_lookup_by_ino(unsigned int ino)
{
- int i;
struct sock *sk;
+ int i;
- spin_lock(&unix_table_lock);
for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
+ spin_lock(&unix_table_locks[i]);
sk_for_each(sk, &unix_socket_table[i])
if (ino == sock_i_ino(sk)) {
sock_hold(sk);
- spin_unlock(&unix_table_lock);
-
+ spin_unlock(&unix_table_locks[i]);
return sk;
}
+ spin_unlock(&unix_table_locks[i]);
}
-
- spin_unlock(&unix_table_lock);
return NULL;
}
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index c09bea89151b..01d44e2598e2 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -30,10 +30,6 @@ int __net_init unix_sysctl_register(struct net *net)
if (table == NULL)
goto err_alloc;
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- table[0].procname = NULL;
-
table[0].data = &net->unx.sysctl_max_dgram_qlen;
net->unx.ctl = register_net_sysctl(net, "net/unix", table);
if (net->unx.ctl == NULL)
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ed0df839c38c..3235261f138d 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -85,6 +85,7 @@
* TCP_LISTEN - listening
*/
+#include <linux/compat.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 19189cf30a72..e111e13b6660 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -225,14 +225,20 @@ static size_t hvs_channel_writable_bytes(struct vmbus_channel *chan)
return round_down(ret, 8);
}
+static int __hvs_send_data(struct vmbus_channel *chan,
+ struct vmpipe_proto_header *hdr,
+ size_t to_write)
+{
+ hdr->pkt_type = 1;
+ hdr->data_size = to_write;
+ return vmbus_sendpacket(chan, hdr, sizeof(*hdr) + to_write,
+ 0, VM_PKT_DATA_INBAND, 0);
+}
+
static int hvs_send_data(struct vmbus_channel *chan,
struct hvs_send_buf *send_buf, size_t to_write)
{
- send_buf->hdr.pkt_type = 1;
- send_buf->hdr.data_size = to_write;
- return vmbus_sendpacket(chan, &send_buf->hdr,
- sizeof(send_buf->hdr) + to_write,
- 0, VM_PKT_DATA_INBAND, 0);
+ return __hvs_send_data(chan, &send_buf->hdr, to_write);
}
static void hvs_channel_cb(void *ctx)
@@ -468,7 +474,7 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
return;
/* It can't fail: see hvs_channel_writable_bytes(). */
- (void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0);
+ (void)__hvs_send_data(hvs->chan, &hdr, 0);
hvs->fin_sent = true;
}
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 869c43d4414c..eb822052d344 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -245,19 +245,7 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
oper_freq - MHZ_TO_KHZ(oper_width) / 2)
return false;
break;
- case NL80211_CHAN_WIDTH_40:
- if (chandef->center_freq1 != control_freq + 10 &&
- chandef->center_freq1 != control_freq - 10)
- return false;
- if (chandef->center_freq2)
- return false;
- break;
case NL80211_CHAN_WIDTH_80P80:
- if (chandef->center_freq1 != control_freq + 30 &&
- chandef->center_freq1 != control_freq + 10 &&
- chandef->center_freq1 != control_freq - 10 &&
- chandef->center_freq1 != control_freq - 30)
- return false;
if (!chandef->center_freq2)
return false;
/* adjacent is not allowed -- that's a 160 MHz channel */
@@ -265,28 +253,42 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
chandef->center_freq2 - chandef->center_freq1 == 80)
return false;
break;
- case NL80211_CHAN_WIDTH_80:
- if (chandef->center_freq1 != control_freq + 30 &&
- chandef->center_freq1 != control_freq + 10 &&
- chandef->center_freq1 != control_freq - 10 &&
- chandef->center_freq1 != control_freq - 30)
- return false;
+ default:
if (chandef->center_freq2)
return false;
break;
- case NL80211_CHAN_WIDTH_160:
- if (chandef->center_freq1 != control_freq + 70 &&
- chandef->center_freq1 != control_freq + 50 &&
- chandef->center_freq1 != control_freq + 30 &&
- chandef->center_freq1 != control_freq + 10 &&
- chandef->center_freq1 != control_freq - 10 &&
- chandef->center_freq1 != control_freq - 30 &&
- chandef->center_freq1 != control_freq - 50 &&
- chandef->center_freq1 != control_freq - 70)
- return false;
- if (chandef->center_freq2)
- return false;
+ }
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_1:
+ case NL80211_CHAN_WIDTH_2:
+ case NL80211_CHAN_WIDTH_4:
+ case NL80211_CHAN_WIDTH_8:
+ case NL80211_CHAN_WIDTH_16:
+ /* all checked above */
break;
+ case NL80211_CHAN_WIDTH_160:
+ if (chandef->center_freq1 == control_freq + 70 ||
+ chandef->center_freq1 == control_freq + 50 ||
+ chandef->center_freq1 == control_freq - 50 ||
+ chandef->center_freq1 == control_freq - 70)
+ break;
+ fallthrough;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_80:
+ if (chandef->center_freq1 == control_freq + 30 ||
+ chandef->center_freq1 == control_freq - 30)
+ break;
+ fallthrough;
+ case NL80211_CHAN_WIDTH_40:
+ if (chandef->center_freq1 == control_freq + 10 ||
+ chandef->center_freq1 == control_freq - 10)
+ break;
+ fallthrough;
default:
return false;
}
@@ -712,6 +714,19 @@ static bool cfg80211_is_wiphy_oper_chan(struct wiphy *wiphy,
return false;
}
+static bool
+cfg80211_offchan_chain_is_active(struct cfg80211_registered_device *rdev,
+ struct ieee80211_channel *channel)
+{
+ if (!rdev->background_radar_wdev)
+ return false;
+
+ if (!cfg80211_chandef_valid(&rdev->background_radar_chandef))
+ return false;
+
+ return cfg80211_is_sub_chan(&rdev->background_radar_chandef, channel);
+}
+
bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy,
struct ieee80211_channel *chan)
{
@@ -728,6 +743,9 @@ bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy,
if (cfg80211_is_wiphy_oper_chan(&rdev->wiphy, chan))
return true;
+
+ if (cfg80211_offchan_chain_is_active(rdev, chan))
+ return true;
}
return false;
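
cfg80211_chandef_valid() above replaces per-width lists of allowed center_freq1 offsets with one switch that falls through from 160 MHz to 80 MHz to 40 MHz, because each narrower width accepts a subset of the wider width's offsets. A standalone sketch of that cascade, reduced to the offsets visible in the hunk; it is not the full cfg80211 validation.

#include <stdbool.h>
#include <stdio.h>

enum width { W40, W80, W160 };

/* valid |center_freq1 - control_freq| offsets, widest width first */
static bool offset_valid(enum width w, int off)
{
	if (off < 0)
		off = -off;

	switch (w) {
	case W160:
		if (off == 70 || off == 50)
			return true;
		/* fall through: 160 also allows every 80 MHz offset */
	case W80:
		if (off == 30)
			return true;
		/* fall through: 80 also allows every 40 MHz offset */
	case W40:
		return off == 10;
	}
	return false;
}

int main(void)
{
	printf("%d %d %d\n",
	       offset_valid(W40, 10),            /* 1 */
	       offset_valid(W80, 50),            /* 0: 50 only valid for 160 */
	       offset_valid(W160, 30));          /* 1 */
	return 0;
}
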
diff --git a/net/wireless/core.c b/net/wireless/core.c
index eb297e1015e0..3a54c8e6b6c6 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -545,6 +545,10 @@ use_default_name:
INIT_WORK(&rdev->rfkill_block, cfg80211_rfkill_block_work);
INIT_WORK(&rdev->conn_work, cfg80211_conn_work);
INIT_WORK(&rdev->event_work, cfg80211_event_work);
+ INIT_WORK(&rdev->background_cac_abort_wk,
+ cfg80211_background_cac_abort_wk);
+ INIT_DELAYED_WORK(&rdev->background_cac_done_wk,
+ cfg80211_background_cac_done_wk);
init_waitqueue_head(&rdev->dev_wait);
@@ -733,6 +737,7 @@ int wiphy_register(struct wiphy *wiphy)
if (wiphy->interface_modes & ~(BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_P2P_DEVICE) |
@@ -1054,11 +1059,13 @@ void wiphy_unregister(struct wiphy *wiphy)
cancel_work_sync(&rdev->conn_work);
flush_work(&rdev->event_work);
cancel_delayed_work_sync(&rdev->dfs_update_channels_wk);
+ cancel_delayed_work_sync(&rdev->background_cac_done_wk);
flush_work(&rdev->destroy_work);
flush_work(&rdev->sched_scan_stop_wk);
flush_work(&rdev->propagate_radar_detect_wk);
flush_work(&rdev->propagate_cac_done_wk);
flush_work(&rdev->mgmt_registrations_update_wk);
+ flush_work(&rdev->background_cac_abort_wk);
#ifdef CONFIG_PM
if (rdev->wiphy.wowlan_config && rdev->ops->set_wakeup)
@@ -1207,6 +1214,8 @@ void __cfg80211_leave(struct cfg80211_registered_device *rdev,
cfg80211_pmsr_wdev_down(wdev);
+ cfg80211_stop_background_radar_detection(wdev);
+
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
__cfg80211_leave_ibss(rdev, dev, true);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 1720abf36f92..3a7dbd63d8c6 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -84,6 +84,11 @@ struct cfg80211_registered_device {
struct delayed_work dfs_update_channels_wk;
+ struct wireless_dev *background_radar_wdev;
+ struct cfg80211_chan_def background_radar_chandef;
+ struct delayed_work background_cac_done_wk;
+ struct work_struct background_cac_abort_wk;
+
/* netlink port which started critical protocol (0 means not started) */
u32 crit_proto_nlportid;
@@ -491,6 +496,17 @@ cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
void cfg80211_sched_dfs_chan_update(struct cfg80211_registered_device *rdev);
+int
+cfg80211_start_background_radar_detection(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ struct cfg80211_chan_def *chandef);
+
+void cfg80211_stop_background_radar_detection(struct wireless_dev *wdev);
+
+void cfg80211_background_cac_done_wk(struct work_struct *work);
+
+void cfg80211_background_cac_abort_wk(struct work_struct *work);
+
bool cfg80211_any_wiphy_oper_chan(struct wiphy *wiphy,
struct ieee80211_channel *chan);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 783acd2c4211..c8155a483ec2 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -905,13 +905,13 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work)
}
-void cfg80211_radar_event(struct wiphy *wiphy,
- struct cfg80211_chan_def *chandef,
- gfp_t gfp)
+void __cfg80211_radar_event(struct wiphy *wiphy,
+ struct cfg80211_chan_def *chandef,
+ bool offchan, gfp_t gfp)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- trace_cfg80211_radar_event(wiphy, chandef);
+ trace_cfg80211_radar_event(wiphy, chandef, offchan);
/* only set the chandef supplied channel to unavailable, in
* case the radar is detected on only one of multiple channels
@@ -919,6 +919,9 @@ void cfg80211_radar_event(struct wiphy *wiphy,
*/
cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_UNAVAILABLE);
+ if (offchan)
+ queue_work(cfg80211_wq, &rdev->background_cac_abort_wk);
+
cfg80211_sched_dfs_chan_update(rdev);
nl80211_radar_notify(rdev, chandef, NL80211_RADAR_DETECTED, NULL, gfp);
@@ -926,7 +929,7 @@ void cfg80211_radar_event(struct wiphy *wiphy,
memcpy(&rdev->radar_chandef, chandef, sizeof(struct cfg80211_chan_def));
queue_work(cfg80211_wq, &rdev->propagate_radar_detect_wk);
}
-EXPORT_SYMBOL(cfg80211_radar_event);
+EXPORT_SYMBOL(__cfg80211_radar_event);
void cfg80211_cac_event(struct net_device *netdev,
const struct cfg80211_chan_def *chandef,
@@ -970,3 +973,143 @@ void cfg80211_cac_event(struct net_device *netdev,
nl80211_radar_notify(rdev, chandef, event, netdev, gfp);
}
EXPORT_SYMBOL(cfg80211_cac_event);
+
+static void
+__cfg80211_background_cac_event(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ const struct cfg80211_chan_def *chandef,
+ enum nl80211_radar_event event)
+{
+ struct wiphy *wiphy = &rdev->wiphy;
+ struct net_device *netdev;
+
+ lockdep_assert_wiphy(&rdev->wiphy);
+
+ if (!cfg80211_chandef_valid(chandef))
+ return;
+
+ if (!rdev->background_radar_wdev)
+ return;
+
+ switch (event) {
+ case NL80211_RADAR_CAC_FINISHED:
+ cfg80211_set_dfs_state(wiphy, chandef, NL80211_DFS_AVAILABLE);
+ memcpy(&rdev->cac_done_chandef, chandef, sizeof(*chandef));
+ queue_work(cfg80211_wq, &rdev->propagate_cac_done_wk);
+ cfg80211_sched_dfs_chan_update(rdev);
+ wdev = rdev->background_radar_wdev;
+ break;
+ case NL80211_RADAR_CAC_ABORTED:
+ if (!cancel_delayed_work(&rdev->background_cac_done_wk))
+ return;
+ wdev = rdev->background_radar_wdev;
+ break;
+ case NL80211_RADAR_CAC_STARTED:
+ break;
+ default:
+ return;
+ }
+
+ netdev = wdev ? wdev->netdev : NULL;
+ nl80211_radar_notify(rdev, chandef, event, netdev, GFP_KERNEL);
+}
+
+static void
+cfg80211_background_cac_event(struct cfg80211_registered_device *rdev,
+ const struct cfg80211_chan_def *chandef,
+ enum nl80211_radar_event event)
+{
+ wiphy_lock(&rdev->wiphy);
+ __cfg80211_background_cac_event(rdev, rdev->background_radar_wdev,
+ chandef, event);
+ wiphy_unlock(&rdev->wiphy);
+}
+
+void cfg80211_background_cac_done_wk(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct cfg80211_registered_device *rdev;
+
+ rdev = container_of(delayed_work, struct cfg80211_registered_device,
+ background_cac_done_wk);
+ cfg80211_background_cac_event(rdev, &rdev->background_radar_chandef,
+ NL80211_RADAR_CAC_FINISHED);
+}
+
+void cfg80211_background_cac_abort_wk(struct work_struct *work)
+{
+ struct cfg80211_registered_device *rdev;
+
+ rdev = container_of(work, struct cfg80211_registered_device,
+ background_cac_abort_wk);
+ cfg80211_background_cac_event(rdev, &rdev->background_radar_chandef,
+ NL80211_RADAR_CAC_ABORTED);
+}
+
+void cfg80211_background_cac_abort(struct wiphy *wiphy)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+ queue_work(cfg80211_wq, &rdev->background_cac_abort_wk);
+}
+EXPORT_SYMBOL(cfg80211_background_cac_abort);
+
+int
+cfg80211_start_background_radar_detection(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ struct cfg80211_chan_def *chandef)
+{
+ unsigned int cac_time_ms;
+ int err;
+
+ lockdep_assert_wiphy(&rdev->wiphy);
+
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_RADAR_BACKGROUND))
+ return -EOPNOTSUPP;
+
+ /* Offchannel chain already locked by another wdev */
+ if (rdev->background_radar_wdev && rdev->background_radar_wdev != wdev)
+ return -EBUSY;
+
+ /* CAC already in progress on the offchannel chain */
+ if (rdev->background_radar_wdev == wdev &&
+ delayed_work_pending(&rdev->background_cac_done_wk))
+ return -EBUSY;
+
+ err = rdev_set_radar_background(rdev, chandef);
+ if (err)
+ return err;
+
+ cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, chandef);
+ if (!cac_time_ms)
+ cac_time_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+
+ rdev->background_radar_chandef = *chandef;
+ rdev->background_radar_wdev = wdev; /* Get offchain ownership */
+
+ __cfg80211_background_cac_event(rdev, wdev, chandef,
+ NL80211_RADAR_CAC_STARTED);
+ queue_delayed_work(cfg80211_wq, &rdev->background_cac_done_wk,
+ msecs_to_jiffies(cac_time_ms));
+
+ return 0;
+}
+
+void cfg80211_stop_background_radar_detection(struct wireless_dev *wdev)
+{
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+ lockdep_assert_wiphy(wiphy);
+
+ if (wdev != rdev->background_radar_wdev)
+ return;
+
+ rdev_set_radar_background(rdev, NULL);
+ rdev->background_radar_wdev = NULL; /* Release offchain ownership */
+
+ __cfg80211_background_cac_event(rdev, wdev,
+ &rdev->background_radar_chandef,
+ NL80211_RADAR_CAC_ABORTED);
+}
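
The new background-CAC code gives the off-channel radar chain a single owner: cfg80211_start_background_radar_detection() returns -EBUSY when another wdev already holds the chain or when a CAC is still pending, and the stop path releases ownership and reports NL80211_RADAR_CAC_ABORTED. A minimal model of that ownership check; the structures are illustrative, not the cfg80211 types.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct wdev { const char *name; };

struct rdev {
	struct wdev *background_radar_wdev;      /* current owner, NULL if free */
	bool cac_pending;
};

static int start_background_cac(struct rdev *rdev, struct wdev *wdev)
{
	/* off-channel chain already owned by another interface */
	if (rdev->background_radar_wdev && rdev->background_radar_wdev != wdev)
		return -EBUSY;
	/* same owner but a CAC is still running */
	if (rdev->background_radar_wdev == wdev && rdev->cac_pending)
		return -EBUSY;

	rdev->background_radar_wdev = wdev;      /* take ownership */
	rdev->cac_pending = true;
	return 0;
}

static void stop_background_cac(struct rdev *rdev, struct wdev *wdev)
{
	if (rdev->background_radar_wdev != wdev)
		return;                          /* not the owner, nothing to do */
	rdev->cac_pending = false;
	rdev->background_radar_wdev = NULL;      /* release ownership */
}

int main(void)
{
	struct rdev rdev = { 0 };
	struct wdev ap0 = { "ap0" }, ap1 = { "ap1" };

	printf("ap0 start: %d\n", start_background_cac(&rdev, &ap0)); /*  0  */
	printf("ap1 start: %d\n", start_background_cac(&rdev, &ap1)); /* -16 */
	stop_background_cac(&rdev, &ap0);
	printf("ap1 start: %d\n", start_background_cac(&rdev, &ap1)); /*  0  */
	return 0;
}
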
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a27b3b5fa210..578bff9c378b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -776,6 +776,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MBSSID_CONFIG] =
NLA_POLICY_NESTED(nl80211_mbssid_config_policy),
[NL80211_ATTR_MBSSID_ELEMS] = { .type = NLA_NESTED },
+ [NL80211_ATTR_RADAR_BACKGROUND] = { .type = NLA_FLAG },
+ [NL80211_ATTR_AP_SETTINGS_FLAGS] = { .type = NLA_U32 },
};
/* policy for the key attributes */
@@ -3669,14 +3671,16 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_ADHOC: {
- const u8 *ssid_ie;
+ const struct element *ssid_elem;
+
if (!wdev->current_bss)
break;
rcu_read_lock();
- ssid_ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
- WLAN_EID_SSID);
- if (ssid_ie &&
- nla_put(msg, NL80211_ATTR_SSID, ssid_ie[1], ssid_ie + 2))
+ ssid_elem = ieee80211_bss_get_elem(&wdev->current_bss->pub,
+ WLAN_EID_SSID);
+ if (ssid_elem &&
+ nla_put(msg, NL80211_ATTR_SSID, ssid_elem->datalen,
+ ssid_elem->data))
goto nla_put_failure_rcu_locked;
rcu_read_unlock();
break;
@@ -5711,8 +5715,11 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
nl80211_calculate_ap_params(params);
- if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
- params->flags |= AP_SETTINGS_EXTERNAL_AUTH_SUPPORT;
+ if (info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS])
+ params->flags = nla_get_u32(
+ info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS]);
+ else if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
+ params->flags |= NL80211_AP_SETTINGS_EXTERNAL_AUTH_SUPPORT;
wdev_lock(wdev);
err = rdev_start_ap(rdev, dev, params);
@@ -9274,38 +9281,60 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
struct cfg80211_chan_def chandef;
enum nl80211_dfs_regions dfs_region;
unsigned int cac_time_ms;
- int err;
+ int err = -EINVAL;
+
+ flush_delayed_work(&rdev->dfs_update_channels_wk);
+
+ wiphy_lock(wiphy);
dfs_region = reg_get_dfs_region(wiphy);
if (dfs_region == NL80211_DFS_UNSET)
- return -EINVAL;
+ goto unlock;
err = nl80211_parse_chandef(rdev, info, &chandef);
if (err)
- return err;
-
- if (netif_carrier_ok(dev))
- return -EBUSY;
-
- if (wdev->cac_started)
- return -EBUSY;
+ goto unlock;
err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype);
if (err < 0)
- return err;
+ goto unlock;
- if (err == 0)
- return -EINVAL;
+ if (err == 0) {
+ err = -EINVAL;
+ goto unlock;
+ }
- if (!cfg80211_chandef_dfs_usable(wiphy, &chandef))
- return -EINVAL;
+ if (!cfg80211_chandef_dfs_usable(wiphy, &chandef)) {
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ if (nla_get_flag(info->attrs[NL80211_ATTR_RADAR_BACKGROUND])) {
+ err = cfg80211_start_background_radar_detection(rdev, wdev,
+ &chandef);
+ goto unlock;
+ }
+
+ if (netif_carrier_ok(dev)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ if (wdev->cac_started) {
+ err = -EBUSY;
+ goto unlock;
+ }
/* CAC start is offloaded to HW and can't be started manually */
- if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD))
- return -EOPNOTSUPP;
+ if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD)) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
- if (!rdev->ops->start_radar_detection)
- return -EOPNOTSUPP;
+ if (!rdev->ops->start_radar_detection) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef);
if (WARN_ON(!cac_time_ms))
@@ -9318,6 +9347,9 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
wdev->cac_start_time = jiffies;
wdev->cac_time_ms = cac_time_ms;
}
+unlock:
+ wiphy_unlock(wiphy);
+
return err;
}
@@ -15954,7 +15986,8 @@ static const struct genl_small_ops nl80211_small_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nl80211_start_radar_detection,
.flags = GENL_UNS_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NO_WIPHY_MTX,
},
{
.cmd = NL80211_CMD_GET_PROTOCOL_FEATURES,
@@ -17035,6 +17068,44 @@ static void nl80211_send_remain_on_chan_event(
nlmsg_free(msg);
}
+void cfg80211_assoc_comeback(struct net_device *netdev,
+ struct cfg80211_bss *bss, u32 timeout)
+{
+ struct wireless_dev *wdev = netdev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+
+ trace_cfg80211_assoc_comeback(wdev, bss->bssid, timeout);
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ASSOC_COMEBACK);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bss->bssid) ||
+ nla_put_u32(msg, NL80211_ATTR_TIMEOUT, timeout))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
+ NL80211_MCGRP_MLME, GFP_KERNEL);
+ return;
+
+ nla_put_failure:
+ nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_assoc_comeback);
+
void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
unsigned int duration, gfp_t gfp)
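
cfg80211_assoc_comeback() above is exported for the wireless stack to report an association comeback request to userspace. A hedged sketch of the expected call site (the real caller lands in mac80211's association handling, which is not part of this diff; netdev, cbss and timeout_tu are assumed locals):

	/* Association was rejected temporarily with a comeback time (in TUs):
	 * forward it so userspace can retry after the indicated interval.
	 */
	if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY)
		cfg80211_assoc_comeback(netdev, cbss, timeout_tu);
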
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index cc1efec4b27b..439bcf52369c 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -1395,4 +1395,21 @@ rdev_set_fils_aad(struct cfg80211_registered_device *rdev,
return ret;
}
+static inline int
+rdev_set_radar_background(struct cfg80211_registered_device *rdev,
+ struct cfg80211_chan_def *chandef)
+{
+ struct wiphy *wiphy = &rdev->wiphy;
+ int ret;
+
+ if (!rdev->ops->set_radar_background)
+ return -EOPNOTSUPP;
+
+ trace_rdev_set_radar_background(wiphy, chandef);
+ ret = rdev->ops->set_radar_background(wiphy, chandef);
+ trace_rdev_return_int(wiphy, ret);
+
+ return ret;
+}
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f8f01a3e020b..ec25924a1c26 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2371,6 +2371,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
switch (iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_MESH_POINT:
if (!wdev->beacon_interval)
goto wdev_inactive_unlock;
chandef = wdev->chandef;
@@ -2409,6 +2410,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
wiphy_lock(wiphy);
ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
wiphy_unlock(wiphy);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 22e92be61938..b888522f133b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -406,22 +406,20 @@ static int
cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss,
struct cfg80211_bss *nontrans_bss)
{
- const u8 *ssid;
- size_t ssid_len;
+ const struct element *ssid_elem;
struct cfg80211_bss *bss = NULL;
rcu_read_lock();
- ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
- if (!ssid) {
+ ssid_elem = ieee80211_bss_get_elem(nontrans_bss, WLAN_EID_SSID);
+ if (!ssid_elem) {
rcu_read_unlock();
return -EINVAL;
}
- ssid_len = ssid[1];
- ssid = ssid + 2;
/* check if nontrans_bss is in the list */
list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
- if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len)) {
+ if (is_bss(bss, nontrans_bss->bssid, ssid_elem->data,
+ ssid_elem->datalen)) {
rcu_read_unlock();
return 0;
}
@@ -1795,33 +1793,52 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
}
int cfg80211_get_ies_channel_number(const u8 *ie, size_t ielen,
- enum nl80211_band band)
+ enum nl80211_band band,
+ enum cfg80211_bss_frame_type ftype)
{
- const u8 *tmp;
- int channel_number = -1;
+ const struct element *tmp;
+
+ if (band == NL80211_BAND_6GHZ) {
+ struct ieee80211_he_operation *he_oper;
- if (band == NL80211_BAND_S1GHZ) {
- tmp = cfg80211_find_ie(WLAN_EID_S1G_OPERATION, ie, ielen);
- if (tmp && tmp[1] >= sizeof(struct ieee80211_s1g_oper_ie)) {
- struct ieee80211_s1g_oper_ie *s1gop = (void *)(tmp + 2);
+ tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie,
+ ielen);
+ if (tmp && tmp->datalen >= sizeof(*he_oper) &&
+ tmp->datalen >= ieee80211_he_oper_size(&tmp->data[1])) {
+ const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
+
+ he_oper = (void *)&tmp->data[1];
+
+ he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper);
+ if (!he_6ghz_oper)
+ return -1;
- channel_number = s1gop->primary_ch;
+ if (ftype != CFG80211_BSS_FTYPE_BEACON ||
+ he_6ghz_oper->control & IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON)
+ return he_6ghz_oper->primary;
+ }
+ } else if (band == NL80211_BAND_S1GHZ) {
+ tmp = cfg80211_find_elem(WLAN_EID_S1G_OPERATION, ie, ielen);
+ if (tmp && tmp->datalen >= sizeof(struct ieee80211_s1g_oper_ie)) {
+ struct ieee80211_s1g_oper_ie *s1gop = (void *)tmp->data;
+
+ return s1gop->primary_ch;
}
} else {
- tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
- if (tmp && tmp[1] == 1) {
- channel_number = tmp[2];
- } else {
- tmp = cfg80211_find_ie(WLAN_EID_HT_OPERATION, ie, ielen);
- if (tmp && tmp[1] >= sizeof(struct ieee80211_ht_operation)) {
- struct ieee80211_ht_operation *htop = (void *)(tmp + 2);
+ tmp = cfg80211_find_elem(WLAN_EID_DS_PARAMS, ie, ielen);
+ if (tmp && tmp->datalen == 1)
+ return tmp->data[0];
- channel_number = htop->primary_chan;
- }
+ tmp = cfg80211_find_elem(WLAN_EID_HT_OPERATION, ie, ielen);
+ if (tmp &&
+ tmp->datalen >= sizeof(struct ieee80211_ht_operation)) {
+ struct ieee80211_ht_operation *htop = (void *)tmp->data;
+
+ return htop->primary_chan;
}
}
- return channel_number;
+ return -1;
}
EXPORT_SYMBOL(cfg80211_get_ies_channel_number);
@@ -1831,18 +1848,20 @@ EXPORT_SYMBOL(cfg80211_get_ies_channel_number);
* from neighboring channels and the Beacon frames use the DSSS Parameter Set
* element to indicate the current (transmitting) channel, but this might also
* be needed on other bands if RX frequency does not match with the actual
- * operating channel of a BSS.
+ * operating channel of a BSS, or if the AP reports a different primary channel.
*/
static struct ieee80211_channel *
cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
struct ieee80211_channel *channel,
- enum nl80211_bss_scan_width scan_width)
+ enum nl80211_bss_scan_width scan_width,
+ enum cfg80211_bss_frame_type ftype)
{
u32 freq;
int channel_number;
struct ieee80211_channel *alt_channel;
- channel_number = cfg80211_get_ies_channel_number(ie, ielen, channel->band);
+ channel_number = cfg80211_get_ies_channel_number(ie, ielen,
+ channel->band, ftype);
if (channel_number < 0) {
/* No channel information in frame payload */
@@ -1850,6 +1869,16 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
}
freq = ieee80211_channel_to_freq_khz(channel_number, channel->band);
+
+ /*
+ * In 6GHz, duplicated beacon indication is relevant for
+ * beacons only.
+ */
+ if (channel->band == NL80211_BAND_6GHZ &&
+ (freq == channel->center_freq ||
+ abs(freq - channel->center_freq) > 80))
+ return channel;
+
alt_channel = ieee80211_get_channel_khz(wiphy, freq);
if (!alt_channel) {
if (channel->band == NL80211_BAND_2GHZ) {
@@ -1911,7 +1940,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
return NULL;
channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
- data->scan_width);
+ data->scan_width, ftype);
if (!channel)
return NULL;
@@ -2234,7 +2263,8 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
struct ieee80211_mgmt *mgmt, size_t len)
{
u8 *ie, *new_ie, *pos;
- const u8 *nontrans_ssid, *trans_ssid, *mbssid;
+ const struct element *nontrans_ssid;
+ const u8 *trans_ssid, *mbssid;
size_t ielen = len - offsetof(struct ieee80211_mgmt,
u.probe_resp.variable);
size_t new_ie_len;
@@ -2261,11 +2291,11 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
return;
new_ie_len -= mbssid[1];
- nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
+ nontrans_ssid = ieee80211_bss_get_elem(nontrans_bss, WLAN_EID_SSID);
if (!nontrans_ssid)
return;
- new_ie_len += nontrans_ssid[1];
+ new_ie_len += nontrans_ssid->datalen;
/* generate new ie for nontrans BSS
* 1. replace SSID with nontrans BSS' SSID
@@ -2282,7 +2312,7 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
pos = new_ie;
/* copy the nontransmitted SSID */
- cpy_len = nontrans_ssid[1] + 2;
+ cpy_len = nontrans_ssid->datalen + 2;
memcpy(pos, nontrans_ssid, cpy_len);
pos += cpy_len;
/* copy the IEs between SSID and MBSSID */
@@ -2333,6 +2363,7 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
size_t ielen, min_hdr_len = offsetof(struct ieee80211_mgmt,
u.probe_resp.variable);
int bss_type;
+ enum cfg80211_bss_frame_type ftype;
BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
offsetof(struct ieee80211_mgmt, u.beacon.variable));
@@ -2369,8 +2400,16 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
variable = ext->u.s1g_beacon.variable;
}
+ if (ieee80211_is_beacon(mgmt->frame_control))
+ ftype = CFG80211_BSS_FTYPE_BEACON;
+ else if (ieee80211_is_probe_resp(mgmt->frame_control))
+ ftype = CFG80211_BSS_FTYPE_PRESP;
+ else
+ ftype = CFG80211_BSS_FTYPE_UNKNOWN;
+
channel = cfg80211_get_bss_channel(wiphy, variable,
- ielen, data->chan, data->scan_width);
+ ielen, data->chan, data->scan_width,
+ ftype);
if (!channel)
return NULL;
@@ -2687,7 +2726,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
struct cfg80211_registered_device *rdev;
struct wiphy *wiphy;
struct iw_scan_req *wreq = NULL;
- struct cfg80211_scan_request *creq = NULL;
+ struct cfg80211_scan_request *creq;
int i, err, n_channels = 0;
enum nl80211_band band;
@@ -2702,10 +2741,8 @@ int cfg80211_wext_siwscan(struct net_device *dev,
if (IS_ERR(rdev))
return PTR_ERR(rdev);
- if (rdev->scan_req || rdev->scan_msg) {
- err = -EBUSY;
- goto out;
- }
+ if (rdev->scan_req || rdev->scan_msg)
+ return -EBUSY;
wiphy = &rdev->wiphy;
@@ -2718,10 +2755,8 @@ int cfg80211_wext_siwscan(struct net_device *dev,
creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
n_channels * sizeof(void *),
GFP_ATOMIC);
- if (!creq) {
- err = -ENOMEM;
- goto out;
- }
+ if (!creq)
+ return -ENOMEM;
creq->wiphy = wiphy;
creq->wdev = dev->ieee80211_ptr;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 08a70b4f090c..ff4d48fcbfb2 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -680,7 +680,9 @@ void __cfg80211_connect_result(struct net_device *dev,
bool wextev)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- const u8 *country_ie;
+ const struct element *country_elem;
+ const u8 *country_data;
+ u8 country_datalen;
#ifdef CONFIG_CFG80211_WEXT
union iwreq_data wrqu;
#endif
@@ -762,26 +764,22 @@ void __cfg80211_connect_result(struct net_device *dev,
cfg80211_upload_connect_keys(wdev);
rcu_read_lock();
- country_ie = ieee80211_bss_get_ie(cr->bss, WLAN_EID_COUNTRY);
- if (!country_ie) {
+ country_elem = ieee80211_bss_get_elem(cr->bss, WLAN_EID_COUNTRY);
+ if (!country_elem) {
rcu_read_unlock();
return;
}
- country_ie = kmemdup(country_ie, 2 + country_ie[1], GFP_ATOMIC);
+ country_datalen = country_elem->datalen;
+ country_data = kmemdup(country_elem->data, country_datalen, GFP_ATOMIC);
rcu_read_unlock();
- if (!country_ie)
+ if (!country_data)
return;
- /*
- * ieee80211_bss_get_ie() ensures we can access:
- * - country_ie + 2, the start of the country ie data, and
- * - and country_ie[1] which is the IE length
- */
regulatory_hint_country_ie(wdev->wiphy, cr->bss->channel->band,
- country_ie + 2, country_ie[1]);
- kfree(country_ie);
+ country_data, country_datalen);
+ kfree(country_data);
}
/* Consumes bss object one way or another */
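
The ieee80211_bss_get_ie() → ieee80211_bss_get_elem() conversions above (and the cfg80211_find_elem() ones in scan.c) all return struct element from include/linux/ieee80211.h, which makes the id/length/payload layout explicit, so elem->datalen and elem->data replace the old ie[1] / ie + 2 pointer arithmetic. For reference, the element structure is essentially:

struct element {
	u8 id;
	u8 datalen;
	u8 data[];
} __packed;
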
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index ad6c16a06bcb..228079d7690a 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -3053,18 +3053,21 @@ TRACE_EVENT(cfg80211_ch_switch_started_notify,
);
TRACE_EVENT(cfg80211_radar_event,
- TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
- TP_ARGS(wiphy, chandef),
+ TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
+ bool offchan),
+ TP_ARGS(wiphy, chandef, offchan),
TP_STRUCT__entry(
WIPHY_ENTRY
CHAN_DEF_ENTRY
+ __field(bool, offchan)
),
TP_fast_assign(
WIPHY_ASSIGN;
CHAN_DEF_ASSIGN(chandef);
+ __entry->offchan = offchan;
),
- TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
- WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+ TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", offchan %d",
+ WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->offchan)
);
TRACE_EVENT(cfg80211_cac_event,
@@ -3674,6 +3677,42 @@ TRACE_EVENT(cfg80211_bss_color_notify,
__entry->color_bitmap)
);
+TRACE_EVENT(rdev_set_radar_background,
+ TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef),
+
+ TP_ARGS(wiphy, chandef),
+
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ CHAN_DEF_ENTRY
+ ),
+
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ CHAN_DEF_ASSIGN(chandef)
+ ),
+
+ TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT,
+ WIPHY_PR_ARG, CHAN_DEF_PR_ARG)
+);
+
+TRACE_EVENT(cfg80211_assoc_comeback,
+ TP_PROTO(struct wireless_dev *wdev, const u8 *bssid, u32 timeout),
+ TP_ARGS(wdev, bssid, timeout),
+ TP_STRUCT__entry(
+ WDEV_ENTRY
+ MAC_ENTRY(bssid)
+ __field(u32, timeout)
+ ),
+ TP_fast_assign(
+ WDEV_ASSIGN;
+ MAC_ASSIGN(bssid, bssid);
+ __entry->timeout = timeout;
+ ),
+ TP_printk(WDEV_PR_FMT ", " MAC_PR_FMT ", timeout: %u TUs",
+ WDEV_PR_ARG, MAC_PR_ARG(bssid), __entry->timeout)
+);
+
#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 193a18a53142..cd09a9042261 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -212,18 +212,18 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
wdev_lock(wdev);
if (wdev->current_bss) {
- const u8 *ie;
+ const struct element *ssid_elem;
rcu_read_lock();
- ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
- WLAN_EID_SSID);
- if (ie) {
+ ssid_elem = ieee80211_bss_get_elem(&wdev->current_bss->pub,
+ WLAN_EID_SSID);
+ if (ssid_elem) {
data->flags = 1;
- data->length = ie[1];
+ data->length = ssid_elem->datalen;
if (data->length > IW_ESSID_MAX_SIZE)
ret = -EINVAL;
else
- memcpy(ssid, ie + 2, data->length);
+ memcpy(ssid, ssid_elem->data, data->length);
}
rcu_read_unlock();
} else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {

diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index e1c4197af468..b981a4828d08 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -41,7 +41,7 @@ static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
return 0;
}
- if (!more && x25->fraglen > 0) { /* End of fragment */
+ if (x25->fraglen > 0) { /* End of fragment */
int len = x25->fraglen + skb->len;
if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL){
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 7a466ea962c5..28ef3f4465ae 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -677,6 +677,8 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
struct xdp_sock *xs = xdp_sk(sk);
struct xsk_buff_pool *pool;
+ sock_poll_wait(file, sock, wait);
+
if (unlikely(!xsk_is_bound(xs)))
return mask;
@@ -688,8 +690,6 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
else
/* Poll needs to drive Tx also in copy mode */
__xsk_sendmsg(sk);
- } else {
- sock_poll_wait(file, sock, wait);
}
if (xs->rx && !xskq_prod_is_empty(xs->rx))
@@ -794,9 +794,7 @@ static int xsk_release(struct socket *sock)
sk_del_node_init_rcu(sk);
mutex_unlock(&net->xdp.lock);
- local_bh_disable();
sock_prot_inuse_add(net, sk->sk_prot, -1);
- local_bh_enable();
xsk_delete_from_maps(xs);
mutex_lock(&xs->mutex);
@@ -1396,9 +1394,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
sk_add_node_rcu(sk, &net->xdp.list);
mutex_unlock(&net->xdp.lock);
- local_bh_disable();
sock_prot_inuse_add(net, &xsk_proto, 1);
- local_bh_enable();
return 0;
}
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 2e48d0e094d9..65b53fb3de13 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -4,6 +4,7 @@
*/
#include <linux/bpf.h>
+#include <linux/filter.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 4dae3ab8d030..094734fbec96 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -341,6 +341,26 @@ static struct xfrm_algo_desc aalg_list[] = {
.pfkey_supported = 0,
},
+{
+ .name = "hmac(sm3)",
+ .compat = "sm3",
+
+ .uinfo = {
+ .auth = {
+ .icv_truncbits = 256,
+ .icv_fullbits = 256,
+ }
+ },
+
+ .pfkey_supported = 1,
+
+ .desc = {
+ .sadb_alg_id = SADB_X_AALG_SM3_256HMAC,
+ .sadb_alg_ivlen = 0,
+ .sadb_alg_minbits = 256,
+ .sadb_alg_maxbits = 256
+ }
+},
};
static struct xfrm_algo_desc ealg_list[] = {
@@ -552,6 +572,27 @@ static struct xfrm_algo_desc ealg_list[] = {
.sadb_alg_maxbits = 288
}
},
+{
+ .name = "cbc(sm4)",
+ .compat = "sm4",
+
+ .uinfo = {
+ .encr = {
+ .geniv = "echainiv",
+ .blockbits = 128,
+ .defkeybits = 128,
+ }
+ },
+
+ .pfkey_supported = 1,
+
+ .desc = {
+ .sadb_alg_id = SADB_X_EALG_SM4CBC,
+ .sadb_alg_ivlen = 16,
+ .sadb_alg_minbits = 128,
+ .sadb_alg_maxbits = 256
+ }
+},
};
static struct xfrm_algo_desc calg_list[] = {
diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
index 2bf269390163..a0f62fa02e06 100644
--- a/net/xfrm/xfrm_compat.c
+++ b/net/xfrm/xfrm_compat.c
@@ -127,6 +127,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = {
[XFRMA_SET_MARK] = { .type = NLA_U32 },
[XFRMA_SET_MARK_MASK] = { .type = NLA_U32 },
[XFRMA_IF_ID] = { .type = NLA_U32 },
+ [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 },
};
static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
@@ -274,9 +275,10 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
case XFRMA_SET_MARK:
case XFRMA_SET_MARK_MASK:
case XFRMA_IF_ID:
+ case XFRMA_MTIMER_THRESH:
return xfrm_nla_cpy(dst, src, nla_len(src));
default:
- BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID);
+ BUILD_BUG_ON(XFRMA_MAX != XFRMA_MTIMER_THRESH);
pr_warn_once("unsupported nla_type %d\n", src->nla_type);
return -EOPNOTSUPP;
}
@@ -431,7 +433,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
int err;
if (type > XFRMA_MAX) {
- BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID);
+ BUILD_BUG_ON(XFRMA_MAX != XFRMA_MTIMER_THRESH);
NL_SET_ERR_MSG(extack, "Bad attribute");
return -EOPNOTSUPP;
}
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index e843b0d9e2a6..3fa066419d37 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -259,6 +259,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
}
xso->dev = dev;
+ netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
xso->real_dev = dev;
xso->num_exthdrs = 1;
xso->flags = xuo->flags;
@@ -269,7 +270,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
xso->flags = 0;
xso->dev = NULL;
xso->real_dev = NULL;
- dev_put(dev);
+ dev_put_track(dev, &xso->dev_tracker);
if (err != -EOPNOTSUPP)
return err;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 70a8c36f0ba6..144238a50f3d 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -669,6 +669,7 @@ resume:
x->curlft.bytes += skb->len;
x->curlft.packets++;
+ x->curlft.use_time = ktime_get_real_seconds();
spin_unlock(&x->lock);
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 41de46b5ffa9..57448fc519fc 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -637,11 +637,16 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
struct netlink_ext_ack *extack)
{
struct net *net = dev_net(dev);
- struct xfrm_if_parms p;
+ struct xfrm_if_parms p = {};
struct xfrm_if *xi;
int err;
xfrmi_netlink_parms(data, &p);
+ if (!p.if_id) {
+ NL_SET_ERR_MSG(extack, "if_id must be non zero");
+ return -EINVAL;
+ }
+
xi = xfrmi_locate(net, &p);
if (xi)
return -EEXIST;
@@ -666,7 +671,12 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct xfrm_if *xi = netdev_priv(dev);
struct net *net = xi->net;
- struct xfrm_if_parms p;
+ struct xfrm_if_parms p = {};
+
xfrmi_netlink_parms(data, &p);
+ if (!p.if_id) {
+ NL_SET_ERR_MSG(extack, "if_id must be non zero");
+ return -EINVAL;
+ }
xi = xfrmi_locate(net, &p);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 229544bc70c2..d4935b3b9983 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -533,6 +533,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
x->curlft.bytes += skb->len;
x->curlft.packets++;
+ x->curlft.use_time = ktime_get_real_seconds();
spin_unlock_bh(&x->lock);
@@ -647,10 +648,12 @@ static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb
* This requires hardware to know the inner packet type to calculate
* the inner header checksum. Save inner ip protocol here to avoid
* traversing the packet in the vendor's xmit code.
- * If the encap type is IPIP, just save skb->inner_ipproto. Otherwise,
- * get the ip protocol from the IP header.
+ * For IPsec tunnel mode, save the IP protocol from the IP header of the
+ * plaintext packet. Otherwise, if the encap type is IPIP, just save
+ * skb->inner_ipproto; in any other case, get the IP protocol from the IP
+ * header.
*/
-static void xfrm_get_inner_ipproto(struct sk_buff *skb)
+static void xfrm_get_inner_ipproto(struct sk_buff *skb, struct xfrm_state *x)
{
struct xfrm_offload *xo = xfrm_offload(skb);
const struct ethhdr *eth;
@@ -658,6 +661,25 @@ static void xfrm_get_inner_ipproto(struct sk_buff *skb)
if (!xo)
return;
+ if (x->outer_mode.encap == XFRM_MODE_TUNNEL) {
+ switch (x->outer_mode.family) {
+ case AF_INET:
+ xo->inner_ipproto = ip_hdr(skb)->protocol;
+ break;
+ case AF_INET6:
+ xo->inner_ipproto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ break;
+ }
+
+ return;
+ }
+
+ /* non-Tunnel Mode */
+ if (!skb->encapsulation)
+ return;
+
if (skb->inner_protocol_type == ENCAP_TYPE_IPPROTO) {
xo->inner_ipproto = skb->inner_ipproto;
return;
@@ -712,8 +734,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
sp->xvec[sp->len++] = x;
xfrm_state_hold(x);
- if (skb->encapsulation)
- xfrm_get_inner_ipproto(skb);
+ xfrm_get_inner_ipproto(skb, x);
skb->encapsulation = 1;
if (skb_is_gso(skb)) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 1a06585022ab..dccb8f3318ef 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -33,6 +33,7 @@
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
+#include <net/gre.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
@@ -2680,7 +2681,7 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family,
*num_xfrms = pols[0]->xfrm_nr;
#ifdef CONFIG_XFRM_SUB_POLICY
- if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
+ if (pols[0]->action == XFRM_POLICY_ALLOW &&
pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
XFRM_POLICY_TYPE_MAIN,
@@ -3392,7 +3393,6 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
case NEXTHDR_DEST:
offset += ipv6_optlen(exthdr);
nexthdr = exthdr->nexthdr;
- exthdr = (struct ipv6_opt_hdr *)(nh + offset);
break;
case IPPROTO_UDP:
case IPPROTO_UDPLITE:
@@ -3422,6 +3422,26 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
}
fl6->flowi6_proto = nexthdr;
return;
+ case IPPROTO_GRE:
+ if (!onlyproto &&
+ (nh + offset + 12 < skb->data ||
+ pskb_may_pull(skb, nh + offset + 12 - skb->data))) {
+ struct gre_base_hdr *gre_hdr;
+ __be32 *gre_key;
+
+ nh = skb_network_header(skb);
+ gre_hdr = (struct gre_base_hdr *)(nh + offset);
+ gre_key = (__be32 *)(gre_hdr + 1);
+
+ if (gre_hdr->flags & GRE_KEY) {
+ if (gre_hdr->flags & GRE_CSUM)
+ gre_key++;
+ fl6->fl6_gre_key = *gre_key;
+ }
+ }
+ fl6->flowi6_proto = nexthdr;
+ return;
+
#if IS_ENABLED(CONFIG_IPV6_MIP6)
case IPPROTO_MH:
offset += ipv6_optlen(exthdr);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a2f4001221d1..ca6bee18346d 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -14,6 +14,7 @@
*
*/
+#include <linux/compat.h>
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
@@ -1593,6 +1594,9 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
x->km.seq = orig->km.seq;
x->replay = orig->replay;
x->preplay = orig->preplay;
+ x->mapping_maxage = orig->mapping_maxage;
+ x->new_mapping = 0;
+ x->new_mapping_sport = 0;
return x;
@@ -2242,7 +2246,7 @@ int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
}
EXPORT_SYMBOL(km_query);
-int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
+static int __km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
int err = -EINVAL;
struct xfrm_mgr *km;
@@ -2257,6 +2261,24 @@ int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
rcu_read_unlock();
return err;
}
+
+int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
+{
+ int ret = 0;
+
+ if (x->mapping_maxage) {
+ if ((jiffies / HZ - x->new_mapping) > x->mapping_maxage ||
+ x->new_mapping_sport != sport) {
+ x->new_mapping_sport = sport;
+ x->new_mapping = jiffies / HZ;
+ ret = __km_new_mapping(x, ipaddr, sport);
+ }
+ } else {
+ ret = __km_new_mapping(x, ipaddr, sport);
+ }
+
+ return ret;
+}
EXPORT_SYMBOL(km_new_mapping);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
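
To make the new throttling rule concrete, here is a small standalone sketch (an assumption-level illustration in plain C, not kernel code) of the decision km_new_mapping() now applies: with mapping_maxage set, a mapping-change notification is sent only when the NAT source port changed or more than mapping_maxage seconds have elapsed since the last notification; with mapping_maxage of 0, every change is reported as before.

#include <stdbool.h>
#include <stdint.h>

struct mapping_throttle {
	uint32_t maxage_sec;	/* 0 = notify on every mapping change */
	uint32_t last_sec;	/* time of last notification, in seconds */
	uint16_t last_sport;	/* last reported NAT source port */
};

static bool mapping_should_notify(struct mapping_throttle *t,
				  uint32_t now_sec, uint16_t sport)
{
	if (!t->maxage_sec)
		return true;

	if (now_sec - t->last_sec > t->maxage_sec || t->last_sport != sport) {
		t->last_sec = now_sec;
		t->last_sport = sport;
		return true;
	}

	return false;
}
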
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7c36cc1f3d79..8cd6c8129004 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/compat.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -282,6 +283,10 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
err = 0;
+ if (attrs[XFRMA_MTIMER_THRESH])
+ if (!attrs[XFRMA_ENCAP])
+ err = -EINVAL;
+
out:
return err;
}
@@ -521,6 +526,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
+ struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];
if (re) {
struct xfrm_replay_state_esn *replay_esn;
@@ -552,6 +558,9 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
if (rt)
x->replay_maxdiff = nla_get_u32(rt);
+
+ if (mt)
+ x->mapping_maxage = nla_get_u32(mt);
}
static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m)
@@ -621,8 +630,13 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
xfrm_smark_init(attrs, &x->props.smark);
- if (attrs[XFRMA_IF_ID])
+ if (attrs[XFRMA_IF_ID]) {
x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+ if (!x->if_id) {
+ err = -EINVAL;
+ goto error;
+ }
+ }
err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
if (err)
@@ -1024,8 +1038,13 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
if (ret)
goto out;
}
- if (x->security)
+ if (x->security) {
ret = copy_sec_ctx(x->security, skb);
+ if (ret)
+ goto out;
+ }
+ if (x->mapping_maxage)
+ ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage);
out:
return ret;
}
@@ -1413,8 +1432,13 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
mark = xfrm_mark_get(attrs, &m);
- if (attrs[XFRMA_IF_ID])
+ if (attrs[XFRMA_IF_ID]) {
if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+ if (!if_id) {
+ err = -EINVAL;
+ goto out_noput;
+ }
+ }
if (p->info.seq) {
x = xfrm_find_acq_byseq(net, mark, p->info.seq);
@@ -1727,8 +1751,13 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_us
xfrm_mark_get(attrs, &xp->mark);
- if (attrs[XFRMA_IF_ID])
+ if (attrs[XFRMA_IF_ID]) {
xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
+ if (!xp->if_id) {
+ err = -EINVAL;
+ goto error;
+ }
+ }
return xp;
error:
@@ -3058,7 +3087,7 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
if (x->props.extra_flags)
l += nla_total_size(sizeof(x->props.extra_flags));
if (x->xso.dev)
- l += nla_total_size(sizeof(x->xso));
+ l += nla_total_size(sizeof(struct xfrm_user_offload));
if (x->props.smark.v | x->props.smark.m) {
l += nla_total_size(sizeof(x->props.smark.v));
l += nla_total_size(sizeof(x->props.smark.m));
@@ -3069,6 +3098,9 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
/* Must count x->lastused as it may become non-zero behind our back. */
l += nla_total_size_64bit(sizeof(u64));
+ if (x->mapping_maxage)
+ l += nla_total_size(sizeof(x->mapping_maxage));
+
return l;
}